1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * $FreeBSD$ 22 */ 23 24 /* 25 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 26 * Copyright (c) 2016, Joyent, Inc. All rights reserved. 27 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 28 */ 29 30 /* 31 * DTrace - Dynamic Tracing for Solaris 32 * 33 * This is the implementation of the Solaris Dynamic Tracing framework 34 * (DTrace). The user-visible interface to DTrace is described at length in 35 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 36 * library, the in-kernel DTrace framework, and the DTrace providers are 37 * described in the block comments in the <sys/dtrace.h> header file. The 38 * internal architecture of DTrace is described in the block comments in the 39 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 40 * implementation very much assume mastery of all of these sources; if one has 41 * an unanswered question about the implementation, one should consult them 42 * first. 43 * 44 * The functions here are ordered roughly as follows: 45 * 46 * - Probe context functions 47 * - Probe hashing functions 48 * - Non-probe context utility functions 49 * - Matching functions 50 * - Provider-to-Framework API functions 51 * - Probe management functions 52 * - DIF object functions 53 * - Format functions 54 * - Predicate functions 55 * - ECB functions 56 * - Buffer functions 57 * - Enabling functions 58 * - DOF functions 59 * - Anonymous enabling functions 60 * - Consumer state functions 61 * - Helper functions 62 * - Hook functions 63 * - Driver cookbook functions 64 * 65 * Each group of functions begins with a block comment labelled the "DTrace 66 * [Group] Functions", allowing one to find each block by searching forward 67 * on capital-f functions. 
68 */ 69 #include <sys/errno.h> 70 #ifndef illumos 71 #include <sys/time.h> 72 #endif 73 #include <sys/stat.h> 74 #include <sys/modctl.h> 75 #include <sys/conf.h> 76 #include <sys/systm.h> 77 #ifdef illumos 78 #include <sys/ddi.h> 79 #include <sys/sunddi.h> 80 #endif 81 #include <sys/cpuvar.h> 82 #include <sys/kmem.h> 83 #ifdef illumos 84 #include <sys/strsubr.h> 85 #endif 86 #include <sys/sysmacros.h> 87 #include <sys/dtrace_impl.h> 88 #include <sys/atomic.h> 89 #include <sys/cmn_err.h> 90 #ifdef illumos 91 #include <sys/mutex_impl.h> 92 #include <sys/rwlock_impl.h> 93 #endif 94 #include <sys/ctf_api.h> 95 #ifdef illumos 96 #include <sys/panic.h> 97 #include <sys/priv_impl.h> 98 #endif 99 #include <sys/policy.h> 100 #ifdef illumos 101 #include <sys/cred_impl.h> 102 #include <sys/procfs_isa.h> 103 #endif 104 #include <sys/taskq.h> 105 #ifdef illumos 106 #include <sys/mkdev.h> 107 #include <sys/kdi.h> 108 #endif 109 #include <sys/zone.h> 110 #include <sys/socket.h> 111 #include <netinet/in.h> 112 #include "strtolctype.h" 113 114 /* FreeBSD includes: */ 115 #ifndef illumos 116 #include <sys/callout.h> 117 #include <sys/ctype.h> 118 #include <sys/eventhandler.h> 119 #include <sys/limits.h> 120 #include <sys/linker.h> 121 #include <sys/kdb.h> 122 #include <sys/kernel.h> 123 #include <sys/malloc.h> 124 #include <sys/lock.h> 125 #include <sys/mutex.h> 126 #include <sys/ptrace.h> 127 #include <sys/rwlock.h> 128 #include <sys/sx.h> 129 #include <sys/sysctl.h> 130 131 #include <sys/dtrace_bsd.h> 132 133 #include <netinet/in.h> 134 135 #include "dtrace_cddl.h" 136 #include "dtrace_debug.c" 137 #endif 138 139 /* 140 * DTrace Tunable Variables 141 * 142 * The following variables may be tuned by adding a line to /etc/system that 143 * includes both the name of the DTrace module ("dtrace") and the name of the 144 * variable. For example: 145 * 146 * set dtrace:dtrace_destructive_disallow = 1 147 * 148 * In general, the only variables that one should be tuning this way are those 149 * that affect system-wide DTrace behavior, and for which the default behavior 150 * is undesirable. Most of these variables are tunable on a per-consumer 151 * basis using DTrace options, and need not be tuned on a system-wide basis. 152 * When tuning these variables, avoid pathological values; while some attempt 153 * is made to verify the integrity of these variables, they are not considered 154 * part of the supported interface to DTrace, and they are therefore not 155 * checked comprehensively. Further, these variables should not be tuned 156 * dynamically via "mdb -kw" or other means; they should only be tuned via 157 * /etc/system. 
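 *
 * (In this FreeBSD port, note that dtrace_destructive_disallow also has a
 * positive-logic counterpart, dtrace_allow_destructive, defined just below
 * so that the setting can be expressed as a loader tunable.)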
158 */ 159 int dtrace_destructive_disallow = 0; 160 #ifndef illumos 161 /* Positive logic version of dtrace_destructive_disallow for loader tunable */ 162 int dtrace_allow_destructive = 1; 163 #endif 164 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 165 size_t dtrace_difo_maxsize = (256 * 1024); 166 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024); 167 size_t dtrace_statvar_maxsize = (16 * 1024); 168 size_t dtrace_actions_max = (16 * 1024); 169 size_t dtrace_retain_max = 1024; 170 dtrace_optval_t dtrace_helper_actions_max = 128; 171 dtrace_optval_t dtrace_helper_providers_max = 32; 172 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 173 size_t dtrace_strsize_default = 256; 174 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 175 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 176 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 177 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 178 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 179 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 180 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 181 dtrace_optval_t dtrace_nspec_default = 1; 182 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 183 dtrace_optval_t dtrace_stackframes_default = 20; 184 dtrace_optval_t dtrace_ustackframes_default = 20; 185 dtrace_optval_t dtrace_jstackframes_default = 50; 186 dtrace_optval_t dtrace_jstackstrsize_default = 512; 187 int dtrace_msgdsize_max = 128; 188 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */ 189 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 190 int dtrace_devdepth_max = 32; 191 int dtrace_err_verbose; 192 hrtime_t dtrace_deadman_interval = NANOSEC; 193 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 194 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 195 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC; 196 #ifndef illumos 197 int dtrace_memstr_max = 4096; 198 #endif 199 200 /* 201 * DTrace External Variables 202 * 203 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 204 * available to DTrace consumers via the backtick (`) syntax. One of these, 205 * dtrace_zero, is made deliberately so: it is provided as a source of 206 * well-known, zero-filled memory. While this variable is not documented, 207 * it is used by some translators as an implementation detail. 208 */ 209 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 210 211 /* 212 * DTrace Internal Variables 213 */ 214 #ifdef illumos 215 static dev_info_t *dtrace_devi; /* device info */ 216 #endif 217 #ifdef illumos 218 static vmem_t *dtrace_arena; /* probe ID arena */ 219 static vmem_t *dtrace_minor; /* minor number arena */ 220 #else 221 static taskq_t *dtrace_taskq; /* task queue */ 222 static struct unrhdr *dtrace_arena; /* Probe ID number. 
*/ 223 #endif 224 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 225 static int dtrace_nprobes; /* number of probes */ 226 static dtrace_provider_t *dtrace_provider; /* provider list */ 227 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 228 static int dtrace_opens; /* number of opens */ 229 static int dtrace_helpers; /* number of helpers */ 230 static int dtrace_getf; /* number of unpriv getf()s */ 231 #ifdef illumos 232 static void *dtrace_softstate; /* softstate pointer */ 233 #endif 234 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 235 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 236 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 237 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 238 static int dtrace_toxranges; /* number of toxic ranges */ 239 static int dtrace_toxranges_max; /* size of toxic range array */ 240 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 241 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 242 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 243 static kthread_t *dtrace_panicked; /* panicking thread */ 244 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 245 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 246 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 247 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 248 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ 249 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 250 static int dtrace_dynvar_failclean; /* dynvars failed to clean */ 251 #ifndef illumos 252 static struct mtx dtrace_unr_mtx; 253 MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF); 254 static eventhandler_tag dtrace_kld_load_tag; 255 static eventhandler_tag dtrace_kld_unload_try_tag; 256 #endif 257 258 /* 259 * DTrace Locking 260 * DTrace is protected by three (relatively coarse-grained) locks: 261 * 262 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 263 * including enabling state, probes, ECBs, consumer state, helper state, 264 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 265 * probe context is lock-free -- synchronization is handled via the 266 * dtrace_sync() cross call mechanism. 267 * 268 * (2) dtrace_provider_lock is required when manipulating provider state, or 269 * when provider state must be held constant. 270 * 271 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 272 * when meta provider state must be held constant. 273 * 274 * The lock ordering between these three locks is dtrace_meta_lock before 275 * dtrace_provider_lock before dtrace_lock. (In particular, there are 276 * several places where dtrace_provider_lock is held by the framework as it 277 * calls into the providers -- which then call back into the framework, 278 * grabbing dtrace_lock.) 279 * 280 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 281 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 282 * role as a coarse-grained lock; it is acquired before both of these locks. 283 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 284 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 
285 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 286 * acquired _between_ dtrace_provider_lock and dtrace_lock. 287 */ 288 static kmutex_t dtrace_lock; /* probe state lock */ 289 static kmutex_t dtrace_provider_lock; /* provider state lock */ 290 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ 291 292 #ifndef illumos 293 /* XXX FreeBSD hacks. */ 294 #define cr_suid cr_svuid 295 #define cr_sgid cr_svgid 296 #define ipaddr_t in_addr_t 297 #define mod_modname pathname 298 #define vuprintf vprintf 299 #define ttoproc(_a) ((_a)->td_proc) 300 #define crgetzoneid(_a) 0 301 #define NCPU MAXCPU 302 #define SNOCD 0 303 #define CPU_ON_INTR(_a) 0 304 305 #define PRIV_EFFECTIVE (1 << 0) 306 #define PRIV_DTRACE_KERNEL (1 << 1) 307 #define PRIV_DTRACE_PROC (1 << 2) 308 #define PRIV_DTRACE_USER (1 << 3) 309 #define PRIV_PROC_OWNER (1 << 4) 310 #define PRIV_PROC_ZONE (1 << 5) 311 #define PRIV_ALL ~0 312 313 SYSCTL_DECL(_debug_dtrace); 314 SYSCTL_DECL(_kern_dtrace); 315 #endif 316 317 #ifdef illumos 318 #define curcpu CPU->cpu_id 319 #endif 320 321 322 /* 323 * DTrace Provider Variables 324 * 325 * These are the variables relating to DTrace as a provider (that is, the 326 * provider of the BEGIN, END, and ERROR probes). 327 */ 328 static dtrace_pattr_t dtrace_provider_attr = { 329 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 330 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 331 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 332 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 333 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 334 }; 335 336 static void 337 dtrace_nullop(void) 338 {} 339 340 static dtrace_pops_t dtrace_provider_ops = { 341 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop, 342 (void (*)(void *, modctl_t *))dtrace_nullop, 343 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 344 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 345 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 346 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 347 NULL, 348 NULL, 349 NULL, 350 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop 351 }; 352 353 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ 354 static dtrace_id_t dtrace_probeid_end; /* special END probe */ 355 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ 356 357 /* 358 * DTrace Helper Tracing Variables 359 * 360 * These variables should be set dynamically to enable helper tracing. The 361 * only variables that should be set are dtrace_helptrace_enable (which should 362 * be set to a non-zero value to allocate helper tracing buffers on the next 363 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a 364 * non-zero value to deallocate helper tracing buffers on the next close of 365 * /dev/dtrace). When (and only when) helper tracing is disabled, the 366 * buffer size may also be set via dtrace_helptrace_bufsize. 367 */ 368 int dtrace_helptrace_enable = 0; 369 int dtrace_helptrace_disable = 0; 370 int dtrace_helptrace_bufsize = 16 * 1024 * 1024; 371 uint32_t dtrace_helptrace_nlocals; 372 static dtrace_helptrace_t *dtrace_helptrace_buffer; 373 static uint32_t dtrace_helptrace_next = 0; 374 static int dtrace_helptrace_wrapped = 0; 375 376 /* 377 * DTrace Error Hashing 378 * 379 * On DEBUG kernels, DTrace will track the errors that has seen in a hash 380 * table. 
This is very useful for checking coverage of tests that are 381 * expected to induce DIF or DOF processing errors, and may be useful for 382 * debugging problems in the DIF code generator or in DOF generation . The 383 * error hash may be examined with the ::dtrace_errhash MDB dcmd. 384 */ 385 #ifdef DEBUG 386 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 387 static const char *dtrace_errlast; 388 static kthread_t *dtrace_errthread; 389 static kmutex_t dtrace_errlock; 390 #endif 391 392 /* 393 * DTrace Macros and Constants 394 * 395 * These are various macros that are useful in various spots in the 396 * implementation, along with a few random constants that have no meaning 397 * outside of the implementation. There is no real structure to this cpp 398 * mishmash -- but is there ever? 399 */ 400 #define DTRACE_HASHSTR(hash, probe) \ 401 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 402 403 #define DTRACE_HASHNEXT(hash, probe) \ 404 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 405 406 #define DTRACE_HASHPREV(hash, probe) \ 407 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 408 409 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 410 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 411 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 412 413 #define DTRACE_AGGHASHSIZE_SLEW 17 414 415 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 416 417 /* 418 * The key for a thread-local variable consists of the lower 61 bits of the 419 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 420 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 421 * equal to a variable identifier. This is necessary (but not sufficient) to 422 * assure that global associative arrays never collide with thread-local 423 * variables. To guarantee that they cannot collide, we must also define the 424 * order for keying dynamic variables. That order is: 425 * 426 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 427 * 428 * Because the variable-key and the tls-key are in orthogonal spaces, there is 429 * no way for a global variable key signature to match a thread-local key 430 * signature. 
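 *
 * As an illustration (this simply restates the macros below, it is not a
 * separate interface), the thread key built by DTRACE_TLS_THRKEY() is a
 * single 64-bit word laid out as:
 *
 *	 63      61 60                                                0
 *	+----------+--------------------------------------------------+
 *	| intr lvl |   (thread id + DIF_VARIABLE_MAX) & (2^61 - 1)    |
 *	+----------+--------------------------------------------------+
 *
 * where the thread id is t_did on illumos and td_tid on FreeBSD.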
431 */ 432 #ifdef illumos 433 #define DTRACE_TLS_THRKEY(where) { \ 434 uint_t intr = 0; \ 435 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 436 for (; actv; actv >>= 1) \ 437 intr++; \ 438 ASSERT(intr < (1 << 3)); \ 439 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 440 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 441 } 442 #else 443 #define DTRACE_TLS_THRKEY(where) { \ 444 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \ 445 uint_t intr = 0; \ 446 uint_t actv = _c->cpu_intr_actv; \ 447 for (; actv; actv >>= 1) \ 448 intr++; \ 449 ASSERT(intr < (1 << 3)); \ 450 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \ 451 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 452 } 453 #endif 454 455 #define DT_BSWAP_8(x) ((x) & 0xff) 456 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 457 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 458 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 459 460 #define DT_MASK_LO 0x00000000FFFFFFFFULL 461 462 #define DTRACE_STORE(type, tomax, offset, what) \ 463 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 464 465 #ifndef __x86 466 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 467 if (addr & (size - 1)) { \ 468 *flags |= CPU_DTRACE_BADALIGN; \ 469 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 470 return (0); \ 471 } 472 #else 473 #define DTRACE_ALIGNCHECK(addr, size, flags) 474 #endif 475 476 /* 477 * Test whether a range of memory starting at testaddr of size testsz falls 478 * within the range of memory described by addr, sz. We take care to avoid 479 * problems with overflow and underflow of the unsigned quantities, and 480 * disallow all negative sizes. Ranges of size 0 are allowed. 481 */ 482 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 483 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \ 484 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \ 485 (testaddr) + (testsz) >= (testaddr)) 486 487 #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \ 488 do { \ 489 if ((remp) != NULL) { \ 490 *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \ 491 } \ 492 _NOTE(CONSTCOND) } while (0) 493 494 495 /* 496 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 497 * alloc_sz on the righthand side of the comparison in order to avoid overflow 498 * or underflow in the comparison with it. This is simpler than the INRANGE 499 * check above, because we know that the dtms_scratch_ptr is valid in the 500 * range. Allocations of size zero are allowed. 501 */ 502 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 503 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 504 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 505 506 #define DTRACE_LOADFUNC(bits) \ 507 /*CSTYLED*/ \ 508 uint##bits##_t \ 509 dtrace_load##bits(uintptr_t addr) \ 510 { \ 511 size_t size = bits / NBBY; \ 512 /*CSTYLED*/ \ 513 uint##bits##_t rval; \ 514 int i; \ 515 volatile uint16_t *flags = (volatile uint16_t *) \ 516 &cpu_core[curcpu].cpuc_dtrace_flags; \ 517 \ 518 DTRACE_ALIGNCHECK(addr, size, flags); \ 519 \ 520 for (i = 0; i < dtrace_toxranges; i++) { \ 521 if (addr >= dtrace_toxrange[i].dtt_limit) \ 522 continue; \ 523 \ 524 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 525 continue; \ 526 \ 527 /* \ 528 * This address falls within a toxic region; return 0. 
\ 529 */ \ 530 *flags |= CPU_DTRACE_BADADDR; \ 531 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 532 return (0); \ 533 } \ 534 \ 535 *flags |= CPU_DTRACE_NOFAULT; \ 536 /*CSTYLED*/ \ 537 rval = *((volatile uint##bits##_t *)addr); \ 538 *flags &= ~CPU_DTRACE_NOFAULT; \ 539 \ 540 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 541 } 542 543 #ifdef _LP64 544 #define dtrace_loadptr dtrace_load64 545 #else 546 #define dtrace_loadptr dtrace_load32 547 #endif 548 549 #define DTRACE_DYNHASH_FREE 0 550 #define DTRACE_DYNHASH_SINK 1 551 #define DTRACE_DYNHASH_VALID 2 552 553 #define DTRACE_MATCH_NEXT 0 554 #define DTRACE_MATCH_DONE 1 555 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 556 #define DTRACE_STATE_ALIGN 64 557 558 #define DTRACE_FLAGS2FLT(flags) \ 559 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 560 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 561 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 562 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 563 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 564 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 565 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 566 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 567 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 568 DTRACEFLT_UNKNOWN) 569 570 #define DTRACEACT_ISSTRING(act) \ 571 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 572 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 573 574 /* Function prototype definitions: */ 575 static size_t dtrace_strlen(const char *, size_t); 576 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 577 static void dtrace_enabling_provide(dtrace_provider_t *); 578 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 579 static void dtrace_enabling_matchall(void); 580 static void dtrace_enabling_reap(void); 581 static dtrace_state_t *dtrace_anon_grab(void); 582 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 583 dtrace_state_t *, uint64_t, uint64_t); 584 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 585 static void dtrace_buffer_drop(dtrace_buffer_t *); 586 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when); 587 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 588 dtrace_state_t *, dtrace_mstate_t *); 589 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 590 dtrace_optval_t); 591 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 592 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 593 uint16_t dtrace_load16(uintptr_t); 594 uint32_t dtrace_load32(uintptr_t); 595 uint64_t dtrace_load64(uintptr_t); 596 uint8_t dtrace_load8(uintptr_t); 597 void dtrace_dynvar_clean(dtrace_dstate_t *); 598 dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *, 599 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *); 600 uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *); 601 static int dtrace_priv_proc(dtrace_state_t *); 602 static void dtrace_getf_barrier(void); 603 static int dtrace_canload_remains(uint64_t, size_t, size_t *, 604 dtrace_mstate_t *, dtrace_vstate_t *); 605 static int dtrace_canstore_remains(uint64_t, size_t, size_t *, 606 dtrace_mstate_t *, dtrace_vstate_t *); 607 608 /* 609 * DTrace Probe Context Functions 610 * 611 * These functions are called from probe context. 
Because probe context is 612 * any context in which C may be called, arbitrary locks may be held, 613 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 614 * As a result, functions called from probe context may only call other DTrace 615 * support functions -- they may not interact at all with the system at large. 616 * (Note that the ASSERT macro is made probe-context safe by redefining it in 617 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 618 * loads are to be performed from probe context, they _must_ be in terms of 619 * the safe dtrace_load*() variants. 620 * 621 * Some functions in this block are not actually called from probe context; 622 * for these functions, there will be a comment above the function reading 623 * "Note: not called from probe context." 624 */ 625 void 626 dtrace_panic(const char *format, ...) 627 { 628 va_list alist; 629 630 va_start(alist, format); 631 #ifdef __FreeBSD__ 632 vpanic(format, alist); 633 #else 634 dtrace_vpanic(format, alist); 635 #endif 636 va_end(alist); 637 } 638 639 int 640 dtrace_assfail(const char *a, const char *f, int l) 641 { 642 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 643 644 /* 645 * We just need something here that even the most clever compiler 646 * cannot optimize away. 647 */ 648 return (a[(uintptr_t)f]); 649 } 650 651 /* 652 * Atomically increment a specified error counter from probe context. 653 */ 654 static void 655 dtrace_error(uint32_t *counter) 656 { 657 /* 658 * Most counters stored to in probe context are per-CPU counters. 659 * However, there are some error conditions that are sufficiently 660 * arcane that they don't merit per-CPU storage. If these counters 661 * are incremented concurrently on different CPUs, scalability will be 662 * adversely affected -- but we don't expect them to be white-hot in a 663 * correctly constructed enabling... 664 */ 665 uint32_t oval, nval; 666 667 do { 668 oval = *counter; 669 670 if ((nval = oval + 1) == 0) { 671 /* 672 * If the counter would wrap, set it to 1 -- assuring 673 * that the counter is never zero when we have seen 674 * errors. (The counter must be 32-bits because we 675 * aren't guaranteed a 64-bit compare&swap operation.) 676 * To save this code both the infamy of being fingered 677 * by a priggish news story and the indignity of being 678 * the target of a neo-puritan witch trial, we're 679 * carefully avoiding any colorful description of the 680 * likelihood of this condition -- but suffice it to 681 * say that it is only slightly more likely than the 682 * overflow of predicate cache IDs, as discussed in 683 * dtrace_predicate_create(). 684 */ 685 nval = 1; 686 } 687 } while (dtrace_cas32(counter, oval, nval) != oval); 688 } 689 690 /* 691 * Use the DTRACE_LOADFUNC macro to define functions for loading each of a 692 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
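 * Each expansion below accordingly emits one of dtrace_load8(),
 * dtrace_load16(), dtrace_load32() or dtrace_load64(), matching the
 * prototypes declared earlier in this file.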
693 */ 694 /* BEGIN CSTYLED */ 695 DTRACE_LOADFUNC(8) 696 DTRACE_LOADFUNC(16) 697 DTRACE_LOADFUNC(32) 698 DTRACE_LOADFUNC(64) 699 /* END CSTYLED */ 700 701 static int 702 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 703 { 704 if (dest < mstate->dtms_scratch_base) 705 return (0); 706 707 if (dest + size < dest) 708 return (0); 709 710 if (dest + size > mstate->dtms_scratch_ptr) 711 return (0); 712 713 return (1); 714 } 715 716 static int 717 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain, 718 dtrace_statvar_t **svars, int nsvars) 719 { 720 int i; 721 size_t maxglobalsize, maxlocalsize; 722 723 if (nsvars == 0) 724 return (0); 725 726 maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t); 727 maxlocalsize = maxglobalsize * NCPU; 728 729 for (i = 0; i < nsvars; i++) { 730 dtrace_statvar_t *svar = svars[i]; 731 uint8_t scope; 732 size_t size; 733 734 if (svar == NULL || (size = svar->dtsv_size) == 0) 735 continue; 736 737 scope = svar->dtsv_var.dtdv_scope; 738 739 /* 740 * We verify that our size is valid in the spirit of providing 741 * defense in depth: we want to prevent attackers from using 742 * DTrace to escalate an orthogonal kernel heap corruption bug 743 * into the ability to store to arbitrary locations in memory. 744 */ 745 VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) || 746 (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize)); 747 748 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, 749 svar->dtsv_size)) { 750 DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data, 751 svar->dtsv_size); 752 return (1); 753 } 754 } 755 756 return (0); 757 } 758 759 /* 760 * Check to see if the address is within a memory region to which a store may 761 * be issued. This includes the DTrace scratch areas, and any DTrace variable 762 * region. The caller of dtrace_canstore() is responsible for performing any 763 * alignment checks that are needed before stores are actually executed. 764 */ 765 static int 766 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 767 dtrace_vstate_t *vstate) 768 { 769 return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate)); 770 } 771 772 /* 773 * Implementation of dtrace_canstore which communicates the upper bound of the 774 * allowed memory region. 775 */ 776 static int 777 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain, 778 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 779 { 780 /* 781 * First, check to see if the address is in scratch space... 782 */ 783 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 784 mstate->dtms_scratch_size)) { 785 DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base, 786 mstate->dtms_scratch_size); 787 return (1); 788 } 789 790 /* 791 * Now check to see if it's a dynamic variable. This check will pick 792 * up both thread-local variables and any global dynamically-allocated 793 * variables. 794 */ 795 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base, 796 vstate->dtvs_dynvars.dtds_size)) { 797 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 798 uintptr_t base = (uintptr_t)dstate->dtds_base + 799 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 800 uintptr_t chunkoffs; 801 dtrace_dynvar_t *dvar; 802 803 /* 804 * Before we assume that we can store here, we need to make 805 * sure that it isn't in our metadata -- storing to our 806 * dynamic variable metadata would corrupt our state. 
For 807 * the range to not include any dynamic variable metadata, 808 * it must: 809 * 810 * (1) Start above the hash table that is at the base of 811 * the dynamic variable space 812 * 813 * (2) Have a starting chunk offset that is beyond the 814 * dtrace_dynvar_t that is at the base of every chunk 815 * 816 * (3) Not span a chunk boundary 817 * 818 * (4) Not be in the tuple space of a dynamic variable 819 * 820 */ 821 if (addr < base) 822 return (0); 823 824 chunkoffs = (addr - base) % dstate->dtds_chunksize; 825 826 if (chunkoffs < sizeof (dtrace_dynvar_t)) 827 return (0); 828 829 if (chunkoffs + sz > dstate->dtds_chunksize) 830 return (0); 831 832 dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs); 833 834 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) 835 return (0); 836 837 if (chunkoffs < sizeof (dtrace_dynvar_t) + 838 ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t))) 839 return (0); 840 841 DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize); 842 return (1); 843 } 844 845 /* 846 * Finally, check the static local and global variables. These checks 847 * take the longest, so we perform them last. 848 */ 849 if (dtrace_canstore_statvar(addr, sz, remain, 850 vstate->dtvs_locals, vstate->dtvs_nlocals)) 851 return (1); 852 853 if (dtrace_canstore_statvar(addr, sz, remain, 854 vstate->dtvs_globals, vstate->dtvs_nglobals)) 855 return (1); 856 857 return (0); 858 } 859 860 861 /* 862 * Convenience routine to check to see if the address is within a memory 863 * region in which a load may be issued given the user's privilege level; 864 * if not, it sets the appropriate error flags and loads 'addr' into the 865 * illegal value slot. 866 * 867 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 868 * appropriate memory access protection. 869 */ 870 static int 871 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 872 dtrace_vstate_t *vstate) 873 { 874 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate)); 875 } 876 877 /* 878 * Implementation of dtrace_canload which communicates the upper bound of the 879 * allowed memory region. 880 */ 881 static int 882 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain, 883 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 884 { 885 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 886 file_t *fp; 887 888 /* 889 * If we hold the privilege to read from kernel memory, then 890 * everything is readable. 891 */ 892 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { 893 DTRACE_RANGE_REMAIN(remain, addr, addr, sz); 894 return (1); 895 } 896 897 /* 898 * You can obviously read that which you can store. 899 */ 900 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate)) 901 return (1); 902 903 /* 904 * We're allowed to read from our own string table. 905 */ 906 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab, 907 mstate->dtms_difo->dtdo_strlen)) { 908 DTRACE_RANGE_REMAIN(remain, addr, 909 mstate->dtms_difo->dtdo_strtab, 910 mstate->dtms_difo->dtdo_strlen); 911 return (1); 912 } 913 914 if (vstate->dtvs_state != NULL && 915 dtrace_priv_proc(vstate->dtvs_state)) { 916 proc_t *p; 917 918 /* 919 * When we have privileges to the current process, there are 920 * several context-related kernel structures that are safe to 921 * read, even absent the privilege to read from kernel memory.
922 * These reads are safe because these structures contain only 923 * state that (1) we're permitted to read, (2) is harmless or 924 * (3) contains pointers to additional kernel state that we're 925 * not permitted to read (and as such, do not present an 926 * opportunity for privilege escalation). Finally (and 927 * critically), because of the nature of their relation with 928 * the current thread context, the memory associated with these 929 * structures cannot change over the duration of probe context, 930 * and it is therefore impossible for this memory to be 931 * deallocated and reallocated as something else while it's 932 * being operated upon. 933 */ 934 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) { 935 DTRACE_RANGE_REMAIN(remain, addr, curthread, 936 sizeof (kthread_t)); 937 return (1); 938 } 939 940 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr, 941 sz, curthread->t_procp, sizeof (proc_t))) { 942 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp, 943 sizeof (proc_t)); 944 return (1); 945 } 946 947 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz, 948 curthread->t_cred, sizeof (cred_t))) { 949 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred, 950 sizeof (cred_t)); 951 return (1); 952 } 953 954 #ifdef illumos 955 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz, 956 &(p->p_pidp->pid_id), sizeof (pid_t))) { 957 DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id), 958 sizeof (pid_t)); 959 return (1); 960 } 961 962 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz, 963 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) { 964 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu, 965 offsetof(cpu_t, cpu_pause_thread)); 966 return (1); 967 } 968 #endif 969 } 970 971 if ((fp = mstate->dtms_getf) != NULL) { 972 uintptr_t psz = sizeof (void *); 973 vnode_t *vp; 974 vnodeops_t *op; 975 976 /* 977 * When getf() returns a file_t, the enabling is implicitly 978 * granted the (transient) right to read the returned file_t 979 * as well as the v_path and v_op->vnop_name of the underlying 980 * vnode. These accesses are allowed after a successful 981 * getf() because the members that they refer to cannot change 982 * once set -- and the barrier logic in the kernel's closef() 983 * path assures that the file_t and its referenced vnode_t 984 * cannot themselves be stale (that is, it is impossible for 985 * either dtms_getf itself or its f_vnode member to reference 986 * freed memory).
987 */ 988 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) { 989 DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t)); 990 return (1); 991 } 992 993 if ((vp = fp->f_vnode) != NULL) { 994 size_t slen; 995 #ifdef illumos 996 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) { 997 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path, 998 psz); 999 return (1); 1000 } 1001 slen = strlen(vp->v_path) + 1; 1002 if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) { 1003 DTRACE_RANGE_REMAIN(remain, addr, vp->v_path, 1004 slen); 1005 return (1); 1006 } 1007 #endif 1008 1009 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) { 1010 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op, 1011 psz); 1012 return (1); 1013 } 1014 1015 #ifdef illumos 1016 if ((op = vp->v_op) != NULL && 1017 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) { 1018 DTRACE_RANGE_REMAIN(remain, addr, 1019 &op->vnop_name, psz); 1020 return (1); 1021 } 1022 1023 if (op != NULL && op->vnop_name != NULL && 1024 DTRACE_INRANGE(addr, sz, op->vnop_name, 1025 (slen = strlen(op->vnop_name) + 1))) { 1026 DTRACE_RANGE_REMAIN(remain, addr, 1027 op->vnop_name, slen); 1028 return (1); 1029 } 1030 #endif 1031 } 1032 } 1033 1034 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 1035 *illval = addr; 1036 return (0); 1037 } 1038 1039 /* 1040 * Convenience routine to check to see if a given string is within a memory 1041 * region in which a load may be issued given the user's privilege level; 1042 * this exists so that we don't need to issue unnecessary dtrace_strlen() 1043 * calls in the event that the user has all privileges. 1044 */ 1045 static int 1046 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain, 1047 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1048 { 1049 size_t rsize; 1050 1051 /* 1052 * If we hold the privilege to read from kernel memory, then 1053 * everything is readable. 1054 */ 1055 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { 1056 DTRACE_RANGE_REMAIN(remain, addr, addr, sz); 1057 return (1); 1058 } 1059 1060 /* 1061 * Even if the caller is uninterested in querying the remaining valid 1062 * range, it is required to ensure that the access is allowed. 1063 */ 1064 if (remain == NULL) { 1065 remain = &rsize; 1066 } 1067 if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) { 1068 size_t strsz; 1069 /* 1070 * Perform the strlen after determining the length of the 1071 * memory region which is accessible. This prevents timing 1072 * information from being used to find NULs in memory which is 1073 * not accessible to the caller. 1074 */ 1075 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, 1076 MIN(sz, *remain)); 1077 if (strsz <= *remain) { 1078 return (1); 1079 } 1080 } 1081 1082 return (0); 1083 } 1084 1085 /* 1086 * Convenience routine to check to see if a given variable is within a memory 1087 * region in which a load may be issued given the user's privilege level. 1088 */ 1089 static int 1090 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain, 1091 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1092 { 1093 size_t sz; 1094 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1095 1096 /* 1097 * Calculate the max size before performing any checks since even 1098 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function 1099 * return the max length via 'remain'. 
1100 */ 1101 if (type->dtdt_kind == DIF_TYPE_STRING) { 1102 dtrace_state_t *state = vstate->dtvs_state; 1103 1104 if (state != NULL) { 1105 sz = state->dts_options[DTRACEOPT_STRSIZE]; 1106 } else { 1107 /* 1108 * In helper context, we have a NULL state; fall back 1109 * to using the system-wide default for the string size 1110 * in this case. 1111 */ 1112 sz = dtrace_strsize_default; 1113 } 1114 } else { 1115 sz = type->dtdt_size; 1116 } 1117 1118 /* 1119 * If we hold the privilege to read from kernel memory, then 1120 * everything is readable. 1121 */ 1122 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { 1123 DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz); 1124 return (1); 1125 } 1126 1127 if (type->dtdt_kind == DIF_TYPE_STRING) { 1128 return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate, 1129 vstate)); 1130 } 1131 return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate, 1132 vstate)); 1133 } 1134 1135 /* 1136 * Convert a string to a signed integer using safe loads. 1137 * 1138 * NOTE: This function uses various macros from strtolctype.h to manipulate 1139 * digit values, etc -- these have all been checked to ensure they make 1140 * no additional function calls. 1141 */ 1142 static int64_t 1143 dtrace_strtoll(char *input, int base, size_t limit) 1144 { 1145 uintptr_t pos = (uintptr_t)input; 1146 int64_t val = 0; 1147 int x; 1148 boolean_t neg = B_FALSE; 1149 char c, cc, ccc; 1150 uintptr_t end = pos + limit; 1151 1152 /* 1153 * Consume any whitespace preceding digits. 1154 */ 1155 while ((c = dtrace_load8(pos)) == ' ' || c == '\t') 1156 pos++; 1157 1158 /* 1159 * Handle an explicit sign if one is present. 1160 */ 1161 if (c == '-' || c == '+') { 1162 if (c == '-') 1163 neg = B_TRUE; 1164 c = dtrace_load8(++pos); 1165 } 1166 1167 /* 1168 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it 1169 * if present. 1170 */ 1171 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' || 1172 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) { 1173 pos += 2; 1174 c = ccc; 1175 } 1176 1177 /* 1178 * Read in contiguous digits until the first non-digit character. 1179 */ 1180 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base; 1181 c = dtrace_load8(++pos)) 1182 val = val * base + x; 1183 1184 return (neg ? -val : val); 1185 } 1186 1187 /* 1188 * Compare two strings using safe loads. 1189 */ 1190 static int 1191 dtrace_strncmp(char *s1, char *s2, size_t limit) 1192 { 1193 uint8_t c1, c2; 1194 volatile uint16_t *flags; 1195 1196 if (s1 == s2 || limit == 0) 1197 return (0); 1198 1199 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1200 1201 do { 1202 if (s1 == NULL) { 1203 c1 = '\0'; 1204 } else { 1205 c1 = dtrace_load8((uintptr_t)s1++); 1206 } 1207 1208 if (s2 == NULL) { 1209 c2 = '\0'; 1210 } else { 1211 c2 = dtrace_load8((uintptr_t)s2++); 1212 } 1213 1214 if (c1 != c2) 1215 return (c1 - c2); 1216 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 1217 1218 return (0); 1219 } 1220 1221 /* 1222 * Compute strlen(s) for a string using safe memory accesses. The additional 1223 * len parameter is used to specify a maximum length to ensure completion. 1224 */ 1225 static size_t 1226 dtrace_strlen(const char *s, size_t lim) 1227 { 1228 uint_t len; 1229 1230 for (len = 0; len != lim; len++) { 1231 if (dtrace_load8((uintptr_t)s++) == '\0') 1232 break; 1233 } 1234 1235 return (len); 1236 } 1237 1238 /* 1239 * Check if an address falls within a toxic region. 
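 *
 * A note on the comparisons below (a reading of the existing checks, not a
 * change in behavior): for each toxic range [taddr, taddr + tsize), the
 * unsigned test (kaddr - taddr) < tsize catches "kaddr begins inside the
 * toxic range," while (taddr - kaddr) < size catches "the toxic range
 * begins inside [kaddr, kaddr + size)"; together they detect any overlap
 * without signed arithmetic.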
1240 */ 1241 static int 1242 dtrace_istoxic(uintptr_t kaddr, size_t size) 1243 { 1244 uintptr_t taddr, tsize; 1245 int i; 1246 1247 for (i = 0; i < dtrace_toxranges; i++) { 1248 taddr = dtrace_toxrange[i].dtt_base; 1249 tsize = dtrace_toxrange[i].dtt_limit - taddr; 1250 1251 if (kaddr - taddr < tsize) { 1252 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1253 cpu_core[curcpu].cpuc_dtrace_illval = kaddr; 1254 return (1); 1255 } 1256 1257 if (taddr - kaddr < size) { 1258 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1259 cpu_core[curcpu].cpuc_dtrace_illval = taddr; 1260 return (1); 1261 } 1262 } 1263 1264 return (0); 1265 } 1266 1267 /* 1268 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 1269 * memory specified by the DIF program. The dst is assumed to be safe memory 1270 * that we can store to directly because it is managed by DTrace. As with 1271 * standard bcopy, overlapping copies are handled properly. 1272 */ 1273 static void 1274 dtrace_bcopy(const void *src, void *dst, size_t len) 1275 { 1276 if (len != 0) { 1277 uint8_t *s1 = dst; 1278 const uint8_t *s2 = src; 1279 1280 if (s1 <= s2) { 1281 do { 1282 *s1++ = dtrace_load8((uintptr_t)s2++); 1283 } while (--len != 0); 1284 } else { 1285 s2 += len; 1286 s1 += len; 1287 1288 do { 1289 *--s1 = dtrace_load8((uintptr_t)--s2); 1290 } while (--len != 0); 1291 } 1292 } 1293 } 1294 1295 /* 1296 * Copy src to dst using safe memory accesses, up to either the specified 1297 * length, or the point that a nul byte is encountered. The src is assumed to 1298 * be unsafe memory specified by the DIF program. The dst is assumed to be 1299 * safe memory that we can store to directly because it is managed by DTrace. 1300 * Unlike dtrace_bcopy(), overlapping regions are not handled. 1301 */ 1302 static void 1303 dtrace_strcpy(const void *src, void *dst, size_t len) 1304 { 1305 if (len != 0) { 1306 uint8_t *s1 = dst, c; 1307 const uint8_t *s2 = src; 1308 1309 do { 1310 *s1++ = c = dtrace_load8((uintptr_t)s2++); 1311 } while (--len != 0 && c != '\0'); 1312 } 1313 } 1314 1315 /* 1316 * Copy src to dst, deriving the size and type from the specified (BYREF) 1317 * variable type. The src is assumed to be unsafe memory specified by the DIF 1318 * program. The dst is assumed to be DTrace variable memory that is of the 1319 * specified type; we assume that we can store to directly. 1320 */ 1321 static void 1322 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit) 1323 { 1324 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1325 1326 if (type->dtdt_kind == DIF_TYPE_STRING) { 1327 dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit)); 1328 } else { 1329 dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit)); 1330 } 1331 } 1332 1333 /* 1334 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1335 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1336 * safe memory that we can access directly because it is managed by DTrace. 
1337 */ 1338 static int 1339 dtrace_bcmp(const void *s1, const void *s2, size_t len) 1340 { 1341 volatile uint16_t *flags; 1342 1343 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1344 1345 if (s1 == s2) 1346 return (0); 1347 1348 if (s1 == NULL || s2 == NULL) 1349 return (1); 1350 1351 if (s1 != s2 && len != 0) { 1352 const uint8_t *ps1 = s1; 1353 const uint8_t *ps2 = s2; 1354 1355 do { 1356 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1357 return (1); 1358 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1359 } 1360 return (0); 1361 } 1362 1363 /* 1364 * Zero the specified region using a simple byte-by-byte loop. Note that this 1365 * is for safe DTrace-managed memory only. 1366 */ 1367 static void 1368 dtrace_bzero(void *dst, size_t len) 1369 { 1370 uchar_t *cp; 1371 1372 for (cp = dst; len != 0; len--) 1373 *cp++ = 0; 1374 } 1375 1376 static void 1377 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1378 { 1379 uint64_t result[2]; 1380 1381 result[0] = addend1[0] + addend2[0]; 1382 result[1] = addend1[1] + addend2[1] + 1383 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1384 1385 sum[0] = result[0]; 1386 sum[1] = result[1]; 1387 } 1388 1389 /* 1390 * Shift the 128-bit value in a by b. If b is positive, shift left. 1391 * If b is negative, shift right. 1392 */ 1393 static void 1394 dtrace_shift_128(uint64_t *a, int b) 1395 { 1396 uint64_t mask; 1397 1398 if (b == 0) 1399 return; 1400 1401 if (b < 0) { 1402 b = -b; 1403 if (b >= 64) { 1404 a[0] = a[1] >> (b - 64); 1405 a[1] = 0; 1406 } else { 1407 a[0] >>= b; 1408 mask = 1LL << (64 - b); 1409 mask -= 1; 1410 a[0] |= ((a[1] & mask) << (64 - b)); 1411 a[1] >>= b; 1412 } 1413 } else { 1414 if (b >= 64) { 1415 a[1] = a[0] << (b - 64); 1416 a[0] = 0; 1417 } else { 1418 a[1] <<= b; 1419 mask = a[0] >> (64 - b); 1420 a[1] |= mask; 1421 a[0] <<= b; 1422 } 1423 } 1424 } 1425 1426 /* 1427 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1428 * use native multiplication on those, and then re-combine into the 1429 * resulting 128-bit value. 1430 * 1431 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1432 * hi1 * hi2 << 64 + 1433 * hi1 * lo2 << 32 + 1434 * hi2 * lo1 << 32 + 1435 * lo1 * lo2 1436 */ 1437 static void 1438 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1439 { 1440 uint64_t hi1, hi2, lo1, lo2; 1441 uint64_t tmp[2]; 1442 1443 hi1 = factor1 >> 32; 1444 hi2 = factor2 >> 32; 1445 1446 lo1 = factor1 & DT_MASK_LO; 1447 lo2 = factor2 & DT_MASK_LO; 1448 1449 product[0] = lo1 * lo2; 1450 product[1] = hi1 * hi2; 1451 1452 tmp[0] = hi1 * lo2; 1453 tmp[1] = 0; 1454 dtrace_shift_128(tmp, 32); 1455 dtrace_add_128(product, tmp, product); 1456 1457 tmp[0] = hi2 * lo1; 1458 tmp[1] = 0; 1459 dtrace_shift_128(tmp, 32); 1460 dtrace_add_128(product, tmp, product); 1461 } 1462 1463 /* 1464 * This privilege check should be used by actions and subroutines to 1465 * verify that the user credentials of the process that enabled the 1466 * invoking ECB match the target credentials 1467 */ 1468 static int 1469 dtrace_priv_proc_common_user(dtrace_state_t *state) 1470 { 1471 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1472 1473 /* 1474 * We should always have a non-NULL state cred here, since if cred 1475 * is null (anonymous tracing), we fast-path bypass this routine. 
1476 */ 1477 ASSERT(s_cr != NULL); 1478 1479 if ((cr = CRED()) != NULL && 1480 s_cr->cr_uid == cr->cr_uid && 1481 s_cr->cr_uid == cr->cr_ruid && 1482 s_cr->cr_uid == cr->cr_suid && 1483 s_cr->cr_gid == cr->cr_gid && 1484 s_cr->cr_gid == cr->cr_rgid && 1485 s_cr->cr_gid == cr->cr_sgid) 1486 return (1); 1487 1488 return (0); 1489 } 1490 1491 /* 1492 * This privilege check should be used by actions and subroutines to 1493 * verify that the zone of the process that enabled the invoking ECB 1494 * matches the target credentials 1495 */ 1496 static int 1497 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1498 { 1499 #ifdef illumos 1500 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1501 1502 /* 1503 * We should always have a non-NULL state cred here, since if cred 1504 * is null (anonymous tracing), we fast-path bypass this routine. 1505 */ 1506 ASSERT(s_cr != NULL); 1507 1508 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone) 1509 return (1); 1510 1511 return (0); 1512 #else 1513 return (1); 1514 #endif 1515 } 1516 1517 /* 1518 * This privilege check should be used by actions and subroutines to 1519 * verify that the process has not setuid or changed credentials. 1520 */ 1521 static int 1522 dtrace_priv_proc_common_nocd(void) 1523 { 1524 proc_t *proc; 1525 1526 if ((proc = ttoproc(curthread)) != NULL && 1527 !(proc->p_flag & SNOCD)) 1528 return (1); 1529 1530 return (0); 1531 } 1532 1533 static int 1534 dtrace_priv_proc_destructive(dtrace_state_t *state) 1535 { 1536 int action = state->dts_cred.dcr_action; 1537 1538 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1539 dtrace_priv_proc_common_zone(state) == 0) 1540 goto bad; 1541 1542 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1543 dtrace_priv_proc_common_user(state) == 0) 1544 goto bad; 1545 1546 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1547 dtrace_priv_proc_common_nocd() == 0) 1548 goto bad; 1549 1550 return (1); 1551 1552 bad: 1553 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1554 1555 return (0); 1556 } 1557 1558 static int 1559 dtrace_priv_proc_control(dtrace_state_t *state) 1560 { 1561 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1562 return (1); 1563 1564 if (dtrace_priv_proc_common_zone(state) && 1565 dtrace_priv_proc_common_user(state) && 1566 dtrace_priv_proc_common_nocd()) 1567 return (1); 1568 1569 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1570 1571 return (0); 1572 } 1573 1574 static int 1575 dtrace_priv_proc(dtrace_state_t *state) 1576 { 1577 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1578 return (1); 1579 1580 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1581 1582 return (0); 1583 } 1584 1585 static int 1586 dtrace_priv_kernel(dtrace_state_t *state) 1587 { 1588 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1589 return (1); 1590 1591 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1592 1593 return (0); 1594 } 1595 1596 static int 1597 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1598 { 1599 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1600 return (1); 1601 1602 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1603 1604 return (0); 1605 } 1606 1607 /* 1608 * Determine if the dte_cond of the specified ECB allows for processing of 1609 * the current probe to continue. Note that this routine may allow continued 1610 * processing, but with access(es) stripped from the mstate's dtms_access 1611 * field. 
1612 */ 1613 static int 1614 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate, 1615 dtrace_ecb_t *ecb) 1616 { 1617 dtrace_probe_t *probe = ecb->dte_probe; 1618 dtrace_provider_t *prov = probe->dtpr_provider; 1619 dtrace_pops_t *pops = &prov->dtpv_pops; 1620 int mode = DTRACE_MODE_NOPRIV_DROP; 1621 1622 ASSERT(ecb->dte_cond); 1623 1624 #ifdef illumos 1625 if (pops->dtps_mode != NULL) { 1626 mode = pops->dtps_mode(prov->dtpv_arg, 1627 probe->dtpr_id, probe->dtpr_arg); 1628 1629 ASSERT((mode & DTRACE_MODE_USER) || 1630 (mode & DTRACE_MODE_KERNEL)); 1631 ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) || 1632 (mode & DTRACE_MODE_NOPRIV_DROP)); 1633 } 1634 1635 /* 1636 * If the dte_cond bits indicate that this consumer is only allowed to 1637 * see user-mode firings of this probe, call the provider's dtps_mode() 1638 * entry point to check that the probe was fired while in a user 1639 * context. If that's not the case, use the policy specified by the 1640 * provider to determine if we drop the probe or merely restrict 1641 * operation. 1642 */ 1643 if (ecb->dte_cond & DTRACE_COND_USERMODE) { 1644 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP); 1645 1646 if (!(mode & DTRACE_MODE_USER)) { 1647 if (mode & DTRACE_MODE_NOPRIV_DROP) 1648 return (0); 1649 1650 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS; 1651 } 1652 } 1653 #endif 1654 1655 /* 1656 * This is more subtle than it looks. We have to be absolutely certain 1657 * that CRED() isn't going to change out from under us so it's only 1658 * legit to examine that structure if we're in constrained situations. 1659 * Currently, the only time we'll do this check is if a non-super-user 1660 * has enabled the profile or syscall providers -- providers that 1661 * allow visibility of all processes. For the profile case, the check 1662 * above will ensure that we're examining a user context. 1663 */ 1664 if (ecb->dte_cond & DTRACE_COND_OWNER) { 1665 cred_t *cr; 1666 cred_t *s_cr = state->dts_cred.dcr_cred; 1667 proc_t *proc; 1668 1669 ASSERT(s_cr != NULL); 1670 1671 if ((cr = CRED()) == NULL || 1672 s_cr->cr_uid != cr->cr_uid || 1673 s_cr->cr_uid != cr->cr_ruid || 1674 s_cr->cr_uid != cr->cr_suid || 1675 s_cr->cr_gid != cr->cr_gid || 1676 s_cr->cr_gid != cr->cr_rgid || 1677 s_cr->cr_gid != cr->cr_sgid || 1678 (proc = ttoproc(curthread)) == NULL || 1679 (proc->p_flag & SNOCD)) { 1680 if (mode & DTRACE_MODE_NOPRIV_DROP) 1681 return (0); 1682 1683 #ifdef illumos 1684 mstate->dtms_access &= ~DTRACE_ACCESS_PROC; 1685 #endif 1686 } 1687 } 1688 1689 #ifdef illumos 1690 /* 1691 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not 1692 * in our zone, check to see if our mode policy is to restrict rather 1693 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC 1694 * and DTRACE_ACCESS_ARGS. 1695 */ 1696 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 1697 cred_t *cr; 1698 cred_t *s_cr = state->dts_cred.dcr_cred; 1699 1700 ASSERT(s_cr != NULL); 1701 1702 if ((cr = CRED()) == NULL || 1703 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) { 1704 if (mode & DTRACE_MODE_NOPRIV_DROP) 1705 return (0); 1706 1707 mstate->dtms_access &= 1708 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS); 1709 } 1710 } 1711 #endif 1712 1713 return (1); 1714 } 1715 1716 /* 1717 * Note: not called from probe context. This function is called 1718 * asynchronously (and at a regular interval) from outside of probe context to 1719 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1720 * cleaning is explained in detail in <sys/dtrace_impl.h>.
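 *
 * In outline (see the function body and <sys/dtrace_impl.h> for the full
 * story): each CPU's dirty list is detached onto a rinsing list, a
 * dtrace_sync() guarantees that no hash chain still points into those
 * lists, and each rinsed list then becomes the clean list of the CPU that
 * held it.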
1721 */ 1722 void 1723 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1724 { 1725 dtrace_dynvar_t *dirty; 1726 dtrace_dstate_percpu_t *dcpu; 1727 dtrace_dynvar_t **rinsep; 1728 int i, j, work = 0; 1729 1730 for (i = 0; i < NCPU; i++) { 1731 dcpu = &dstate->dtds_percpu[i]; 1732 rinsep = &dcpu->dtdsc_rinsing; 1733 1734 /* 1735 * If the dirty list is NULL, there is no dirty work to do. 1736 */ 1737 if (dcpu->dtdsc_dirty == NULL) 1738 continue; 1739 1740 if (dcpu->dtdsc_rinsing != NULL) { 1741 /* 1742 * If the rinsing list is non-NULL, then it is because 1743 * this CPU was selected to accept another CPU's 1744 * dirty list -- and since that time, dirty buffers 1745 * have accumulated. This is a highly unlikely 1746 * condition, but we choose to ignore the dirty 1747 * buffers -- they'll be picked up by a future cleanse. 1748 */ 1749 continue; 1750 } 1751 1752 if (dcpu->dtdsc_clean != NULL) { 1753 /* 1754 * If the clean list is non-NULL, then we're in a 1755 * situation where a CPU has done deallocations (we 1756 * have a non-NULL dirty list) but no allocations (we 1757 * also have a non-NULL clean list). We can't simply 1758 * move the dirty list into the clean list on this 1759 * CPU, yet we also don't want to allow this condition 1760 * to persist, lest a short clean list prevent a 1761 * massive dirty list from being cleaned (which in 1762 * turn could lead to otherwise avoidable dynamic 1763 * drops). To deal with this, we look for some CPU 1764 * with a NULL clean list, NULL dirty list, and NULL 1765 * rinsing list -- and then we borrow this CPU to 1766 * rinse our dirty list. 1767 */ 1768 for (j = 0; j < NCPU; j++) { 1769 dtrace_dstate_percpu_t *rinser; 1770 1771 rinser = &dstate->dtds_percpu[j]; 1772 1773 if (rinser->dtdsc_rinsing != NULL) 1774 continue; 1775 1776 if (rinser->dtdsc_dirty != NULL) 1777 continue; 1778 1779 if (rinser->dtdsc_clean != NULL) 1780 continue; 1781 1782 rinsep = &rinser->dtdsc_rinsing; 1783 break; 1784 } 1785 1786 if (j == NCPU) { 1787 /* 1788 * We were unable to find another CPU that 1789 * could accept this dirty list -- we are 1790 * therefore unable to clean it now. 1791 */ 1792 dtrace_dynvar_failclean++; 1793 continue; 1794 } 1795 } 1796 1797 work = 1; 1798 1799 /* 1800 * Atomically move the dirty list aside. 1801 */ 1802 do { 1803 dirty = dcpu->dtdsc_dirty; 1804 1805 /* 1806 * Before we zap the dirty list, set the rinsing list. 1807 * (This allows for a potential assertion in 1808 * dtrace_dynvar(): if a free dynamic variable appears 1809 * on a hash chain, either the dirty list or the 1810 * rinsing list for some CPU must be non-NULL.) 1811 */ 1812 *rinsep = dirty; 1813 dtrace_membar_producer(); 1814 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1815 dirty, NULL) != dirty); 1816 } 1817 1818 if (!work) { 1819 /* 1820 * We have no work to do; we can simply return. 1821 */ 1822 return; 1823 } 1824 1825 dtrace_sync(); 1826 1827 for (i = 0; i < NCPU; i++) { 1828 dcpu = &dstate->dtds_percpu[i]; 1829 1830 if (dcpu->dtdsc_rinsing == NULL) 1831 continue; 1832 1833 /* 1834 * We are now guaranteed that no hash chain contains a pointer 1835 * into this dirty list; we can make it clean. 1836 */ 1837 ASSERT(dcpu->dtdsc_clean == NULL); 1838 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1839 dcpu->dtdsc_rinsing = NULL; 1840 } 1841 1842 /* 1843 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1844 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1845 * This prevents a race whereby a CPU incorrectly decides that 1846 * the state should be something other than DTRACE_DSTATE_CLEAN 1847 * after dtrace_dynvar_clean() has completed. 1848 */ 1849 dtrace_sync(); 1850 1851 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1852 } 1853 1854 /* 1855 * Depending on the value of the op parameter, this function looks-up, 1856 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1857 * allocation is requested, this function will return a pointer to a 1858 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1859 * variable can be allocated. If NULL is returned, the appropriate counter 1860 * will be incremented. 1861 */ 1862 dtrace_dynvar_t * 1863 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1864 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1865 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1866 { 1867 uint64_t hashval = DTRACE_DYNHASH_VALID; 1868 dtrace_dynhash_t *hash = dstate->dtds_hash; 1869 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1870 processorid_t me = curcpu, cpu = me; 1871 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1872 size_t bucket, ksize; 1873 size_t chunksize = dstate->dtds_chunksize; 1874 uintptr_t kdata, lock, nstate; 1875 uint_t i; 1876 1877 ASSERT(nkeys != 0); 1878 1879 /* 1880 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1881 * algorithm. For the by-value portions, we perform the algorithm in 1882 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1883 * bit, and seems to have only a minute effect on distribution. For 1884 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1885 * over each referenced byte. It's painful to do this, but it's much 1886 * better than pathological hash distribution. The efficacy of the 1887 * hashing algorithm (and a comparison with other algorithms) may be 1888 * found by running the ::dtrace_dynstat MDB dcmd. 1889 */ 1890 for (i = 0; i < nkeys; i++) { 1891 if (key[i].dttk_size == 0) { 1892 uint64_t val = key[i].dttk_value; 1893 1894 hashval += (val >> 48) & 0xffff; 1895 hashval += (hashval << 10); 1896 hashval ^= (hashval >> 6); 1897 1898 hashval += (val >> 32) & 0xffff; 1899 hashval += (hashval << 10); 1900 hashval ^= (hashval >> 6); 1901 1902 hashval += (val >> 16) & 0xffff; 1903 hashval += (hashval << 10); 1904 hashval ^= (hashval >> 6); 1905 1906 hashval += val & 0xffff; 1907 hashval += (hashval << 10); 1908 hashval ^= (hashval >> 6); 1909 } else { 1910 /* 1911 * This is incredibly painful, but it beats the hell 1912 * out of the alternative. 1913 */ 1914 uint64_t j, size = key[i].dttk_size; 1915 uintptr_t base = (uintptr_t)key[i].dttk_value; 1916 1917 if (!dtrace_canload(base, size, mstate, vstate)) 1918 break; 1919 1920 for (j = 0; j < size; j++) { 1921 hashval += dtrace_load8(base + j); 1922 hashval += (hashval << 10); 1923 hashval ^= (hashval >> 6); 1924 } 1925 } 1926 } 1927 1928 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1929 return (NULL); 1930 1931 hashval += (hashval << 3); 1932 hashval ^= (hashval >> 11); 1933 hashval += (hashval << 15); 1934 1935 /* 1936 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1937 * comes out to be one of our two sentinel hash values. If this 1938 * actually happens, we set the hashval to be a value known to be a 1939 * non-sentinel value. 
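 *
 * For reference, the mixing performed above is Jenkins' "One-at-a-time"
 * construction; in its canonical byte-at-a-time form (illustrative
 * user-level code only, not used by this file) it reads:
 *
 *	uint32_t
 *	jenkins_one_at_a_time(const uint8_t *data, size_t len)
 *	{
 *		uint32_t h = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			h += data[i];
 *			h += (h << 10);
 *			h ^= (h >> 6);
 *		}
 *
 *		h += (h << 3);
 *		h ^= (h >> 11);
 *		h += (h << 15);
 *
 *		return (h);
 *	}
 *
 * The code above differs only in that by-value keys are folded in 16-bit
 * chunks and the running value is kept in a 64-bit variable.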
1940 */ 1941 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1942 hashval = DTRACE_DYNHASH_VALID; 1943 1944 /* 1945 * Yes, it's painful to do a divide here. If the cycle count becomes 1946 * important here, tricks can be pulled to reduce it. (However, it's 1947 * critical that hash collisions be kept to an absolute minimum; 1948 * they're much more painful than a divide.) It's better to have a 1949 * solution that generates few collisions and still keeps things 1950 * relatively simple. 1951 */ 1952 bucket = hashval % dstate->dtds_hashsize; 1953 1954 if (op == DTRACE_DYNVAR_DEALLOC) { 1955 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1956 1957 for (;;) { 1958 while ((lock = *lockp) & 1) 1959 continue; 1960 1961 if (dtrace_casptr((volatile void *)lockp, 1962 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1963 break; 1964 } 1965 1966 dtrace_membar_producer(); 1967 } 1968 1969 top: 1970 prev = NULL; 1971 lock = hash[bucket].dtdh_lock; 1972 1973 dtrace_membar_consumer(); 1974 1975 start = hash[bucket].dtdh_chain; 1976 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1977 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1978 op != DTRACE_DYNVAR_DEALLOC)); 1979 1980 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1981 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1982 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1983 1984 if (dvar->dtdv_hashval != hashval) { 1985 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1986 /* 1987 * We've reached the sink, and therefore the 1988 * end of the hash chain; we can kick out of 1989 * the loop knowing that we have seen a valid 1990 * snapshot of state. 1991 */ 1992 ASSERT(dvar->dtdv_next == NULL); 1993 ASSERT(dvar == &dtrace_dynhash_sink); 1994 break; 1995 } 1996 1997 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1998 /* 1999 * We've gone off the rails: somewhere along 2000 * the line, one of the members of this hash 2001 * chain was deleted. Note that we could also 2002 * detect this by simply letting this loop run 2003 * to completion, as we would eventually hit 2004 * the end of the dirty list. However, we 2005 * want to avoid running the length of the 2006 * dirty list unnecessarily (it might be quite 2007 * long), so we catch this as early as 2008 * possible by detecting the hash marker. In 2009 * this case, we simply set dvar to NULL and 2010 * break; the conditional after the loop will 2011 * send us back to top. 
2012 */ 2013 dvar = NULL; 2014 break; 2015 } 2016 2017 goto next; 2018 } 2019 2020 if (dtuple->dtt_nkeys != nkeys) 2021 goto next; 2022 2023 for (i = 0; i < nkeys; i++, dkey++) { 2024 if (dkey->dttk_size != key[i].dttk_size) 2025 goto next; /* size or type mismatch */ 2026 2027 if (dkey->dttk_size != 0) { 2028 if (dtrace_bcmp( 2029 (void *)(uintptr_t)key[i].dttk_value, 2030 (void *)(uintptr_t)dkey->dttk_value, 2031 dkey->dttk_size)) 2032 goto next; 2033 } else { 2034 if (dkey->dttk_value != key[i].dttk_value) 2035 goto next; 2036 } 2037 } 2038 2039 if (op != DTRACE_DYNVAR_DEALLOC) 2040 return (dvar); 2041 2042 ASSERT(dvar->dtdv_next == NULL || 2043 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 2044 2045 if (prev != NULL) { 2046 ASSERT(hash[bucket].dtdh_chain != dvar); 2047 ASSERT(start != dvar); 2048 ASSERT(prev->dtdv_next == dvar); 2049 prev->dtdv_next = dvar->dtdv_next; 2050 } else { 2051 if (dtrace_casptr(&hash[bucket].dtdh_chain, 2052 start, dvar->dtdv_next) != start) { 2053 /* 2054 * We have failed to atomically swing the 2055 * hash table head pointer, presumably because 2056 * of a conflicting allocation on another CPU. 2057 * We need to reread the hash chain and try 2058 * again. 2059 */ 2060 goto top; 2061 } 2062 } 2063 2064 dtrace_membar_producer(); 2065 2066 /* 2067 * Now set the hash value to indicate that it's free. 2068 */ 2069 ASSERT(hash[bucket].dtdh_chain != dvar); 2070 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2071 2072 dtrace_membar_producer(); 2073 2074 /* 2075 * Set the next pointer to point at the dirty list, and 2076 * atomically swing the dirty pointer to the newly freed dvar. 2077 */ 2078 do { 2079 next = dcpu->dtdsc_dirty; 2080 dvar->dtdv_next = next; 2081 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 2082 2083 /* 2084 * Finally, unlock this hash bucket. 2085 */ 2086 ASSERT(hash[bucket].dtdh_lock == lock); 2087 ASSERT(lock & 1); 2088 hash[bucket].dtdh_lock++; 2089 2090 return (NULL); 2091 next: 2092 prev = dvar; 2093 continue; 2094 } 2095 2096 if (dvar == NULL) { 2097 /* 2098 * If dvar is NULL, it is because we went off the rails: 2099 * one of the elements that we traversed in the hash chain 2100 * was deleted while we were traversing it. In this case, 2101 * we assert that we aren't doing a dealloc (deallocs lock 2102 * the hash bucket to prevent themselves from racing with 2103 * one another), and retry the hash chain traversal. 2104 */ 2105 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 2106 goto top; 2107 } 2108 2109 if (op != DTRACE_DYNVAR_ALLOC) { 2110 /* 2111 * If we are not to allocate a new variable, we want to 2112 * return NULL now. Before we return, check that the value 2113 * of the lock word hasn't changed. If it has, we may have 2114 * seen an inconsistent snapshot. 2115 */ 2116 if (op == DTRACE_DYNVAR_NOALLOC) { 2117 if (hash[bucket].dtdh_lock != lock) 2118 goto top; 2119 } else { 2120 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 2121 ASSERT(hash[bucket].dtdh_lock == lock); 2122 ASSERT(lock & 1); 2123 hash[bucket].dtdh_lock++; 2124 } 2125 2126 return (NULL); 2127 } 2128 2129 /* 2130 * We need to allocate a new dynamic variable. The size we need is the 2131 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 2132 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 2133 * the size of any referred-to data (dsize). We then round the final 2134 * size up to the chunksize for allocation. 
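 *
 * Concretely, the number of bytes needed is:
 *
 *	sizeof (dtrace_dynvar_t)		(includes the first dtrace_key_t)
 *	    + (nkeys - 1) * sizeof (dtrace_key_t)
 *	    + ksize				(by-reference key data, 8-byte aligned)
 *	    + dsize				(the variable's own data)
 *
 * For example (sizes illustrative): a two-key tuple whose first key is an
 * integer (by value, dttk_size == 0) and whose second key is a 13-byte
 * string (by reference, so P2ROUNDUP(13, 8) == 16), storing an 8-byte
 * value, needs sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) + 16 + 8
 * bytes; if that exceeds dtds_chunksize, the store below is counted as a
 * dynamic variable drop.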
2135 */ 2136 for (ksize = 0, i = 0; i < nkeys; i++) 2137 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 2138 2139 /* 2140 * This should be pretty much impossible, but could happen if, say, 2141 * strange DIF specified the tuple. Ideally, this should be an 2142 * assertion and not an error condition -- but that requires that the 2143 * chunksize calculation in dtrace_difo_chunksize() be absolutely 2144 * bullet-proof. (That is, it must not be able to be fooled by 2145 * malicious DIF.) Given the lack of backwards branches in DIF, 2146 * solving this would presumably not amount to solving the Halting 2147 * Problem -- but it still seems awfully hard. 2148 */ 2149 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 2150 ksize + dsize > chunksize) { 2151 dcpu->dtdsc_drops++; 2152 return (NULL); 2153 } 2154 2155 nstate = DTRACE_DSTATE_EMPTY; 2156 2157 do { 2158 retry: 2159 free = dcpu->dtdsc_free; 2160 2161 if (free == NULL) { 2162 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 2163 void *rval; 2164 2165 if (clean == NULL) { 2166 /* 2167 * We're out of dynamic variable space on 2168 * this CPU. Unless we have tried all CPUs, 2169 * we'll try to allocate from a different 2170 * CPU. 2171 */ 2172 switch (dstate->dtds_state) { 2173 case DTRACE_DSTATE_CLEAN: { 2174 void *sp = &dstate->dtds_state; 2175 2176 if (++cpu >= NCPU) 2177 cpu = 0; 2178 2179 if (dcpu->dtdsc_dirty != NULL && 2180 nstate == DTRACE_DSTATE_EMPTY) 2181 nstate = DTRACE_DSTATE_DIRTY; 2182 2183 if (dcpu->dtdsc_rinsing != NULL) 2184 nstate = DTRACE_DSTATE_RINSING; 2185 2186 dcpu = &dstate->dtds_percpu[cpu]; 2187 2188 if (cpu != me) 2189 goto retry; 2190 2191 (void) dtrace_cas32(sp, 2192 DTRACE_DSTATE_CLEAN, nstate); 2193 2194 /* 2195 * To increment the correct bean 2196 * counter, take another lap. 2197 */ 2198 goto retry; 2199 } 2200 2201 case DTRACE_DSTATE_DIRTY: 2202 dcpu->dtdsc_dirty_drops++; 2203 break; 2204 2205 case DTRACE_DSTATE_RINSING: 2206 dcpu->dtdsc_rinsing_drops++; 2207 break; 2208 2209 case DTRACE_DSTATE_EMPTY: 2210 dcpu->dtdsc_drops++; 2211 break; 2212 } 2213 2214 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 2215 return (NULL); 2216 } 2217 2218 /* 2219 * The clean list appears to be non-empty. We want to 2220 * move the clean list to the free list; we start by 2221 * moving the clean pointer aside. 2222 */ 2223 if (dtrace_casptr(&dcpu->dtdsc_clean, 2224 clean, NULL) != clean) { 2225 /* 2226 * We are in one of two situations: 2227 * 2228 * (a) The clean list was switched to the 2229 * free list by another CPU. 2230 * 2231 * (b) The clean list was added to by the 2232 * cleansing cyclic. 2233 * 2234 * In either of these situations, we can 2235 * just reattempt the free list allocation. 2236 */ 2237 goto retry; 2238 } 2239 2240 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 2241 2242 /* 2243 * Now we'll move the clean list to our free list. 2244 * It's impossible for this to fail: the only way 2245 * the free list can be updated is through this 2246 * code path, and only one CPU can own the clean list. 2247 * Thus, it would only be possible for this to fail if 2248 * this code were racing with dtrace_dynvar_clean(). 2249 * (That is, if dtrace_dynvar_clean() updated the clean 2250 * list, and we ended up racing to update the free 2251 * list.) This race is prevented by the dtrace_sync() 2252 * in dtrace_dynvar_clean() -- which flushes the 2253 * owners of the clean lists out before resetting 2254 * the clean lists. 
2255 */ 2256 dcpu = &dstate->dtds_percpu[me]; 2257 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 2258 ASSERT(rval == NULL); 2259 goto retry; 2260 } 2261 2262 dvar = free; 2263 new_free = dvar->dtdv_next; 2264 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 2265 2266 /* 2267 * We have now allocated a new chunk. We copy the tuple keys into the 2268 * tuple array and copy any referenced key data into the data space 2269 * following the tuple array. As we do this, we relocate dttk_value 2270 * in the final tuple to point to the key data address in the chunk. 2271 */ 2272 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 2273 dvar->dtdv_data = (void *)(kdata + ksize); 2274 dvar->dtdv_tuple.dtt_nkeys = nkeys; 2275 2276 for (i = 0; i < nkeys; i++) { 2277 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 2278 size_t kesize = key[i].dttk_size; 2279 2280 if (kesize != 0) { 2281 dtrace_bcopy( 2282 (const void *)(uintptr_t)key[i].dttk_value, 2283 (void *)kdata, kesize); 2284 dkey->dttk_value = kdata; 2285 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 2286 } else { 2287 dkey->dttk_value = key[i].dttk_value; 2288 } 2289 2290 dkey->dttk_size = kesize; 2291 } 2292 2293 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 2294 dvar->dtdv_hashval = hashval; 2295 dvar->dtdv_next = start; 2296 2297 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 2298 return (dvar); 2299 2300 /* 2301 * The cas has failed. Either another CPU is adding an element to 2302 * this hash chain, or another CPU is deleting an element from this 2303 * hash chain. The simplest way to deal with both of these cases 2304 * (though not necessarily the most efficient) is to free our 2305 * allocated block and re-attempt it all. Note that the free is 2306 * to the dirty list and _not_ to the free list. This is to prevent 2307 * races with allocators, above. 
2308 */ 2309 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2310 2311 dtrace_membar_producer(); 2312 2313 do { 2314 free = dcpu->dtdsc_dirty; 2315 dvar->dtdv_next = free; 2316 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 2317 2318 goto top; 2319 } 2320 2321 /*ARGSUSED*/ 2322 static void 2323 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2324 { 2325 if ((int64_t)nval < (int64_t)*oval) 2326 *oval = nval; 2327 } 2328 2329 /*ARGSUSED*/ 2330 static void 2331 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2332 { 2333 if ((int64_t)nval > (int64_t)*oval) 2334 *oval = nval; 2335 } 2336 2337 static void 2338 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2339 { 2340 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2341 int64_t val = (int64_t)nval; 2342 2343 if (val < 0) { 2344 for (i = 0; i < zero; i++) { 2345 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2346 quanta[i] += incr; 2347 return; 2348 } 2349 } 2350 } else { 2351 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2352 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2353 quanta[i - 1] += incr; 2354 return; 2355 } 2356 } 2357 2358 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2359 return; 2360 } 2361 2362 ASSERT(0); 2363 } 2364 2365 static void 2366 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2367 { 2368 uint64_t arg = *lquanta++; 2369 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2370 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2371 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2372 int32_t val = (int32_t)nval, level; 2373 2374 ASSERT(step != 0); 2375 ASSERT(levels != 0); 2376 2377 if (val < base) { 2378 /* 2379 * This is an underflow. 2380 */ 2381 lquanta[0] += incr; 2382 return; 2383 } 2384 2385 level = (val - base) / step; 2386 2387 if (level < levels) { 2388 lquanta[level + 1] += incr; 2389 return; 2390 } 2391 2392 /* 2393 * This is an overflow. 2394 */ 2395 lquanta[levels + 1] += incr; 2396 } 2397 2398 static int 2399 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2400 uint16_t high, uint16_t nsteps, int64_t value) 2401 { 2402 int64_t this = 1, last, next; 2403 int base = 1, order; 2404 2405 ASSERT(factor <= nsteps); 2406 ASSERT(nsteps % factor == 0); 2407 2408 for (order = 0; order < low; order++) 2409 this *= factor; 2410 2411 /* 2412 * If our value is less than our factor taken to the power of the 2413 * low order of magnitude, it goes into the zeroth bucket. 2414 */ 2415 if (value < (last = this)) 2416 return (0); 2417 2418 for (this *= factor; order <= high; order++) { 2419 int nbuckets = this > nsteps ? nsteps : this; 2420 2421 if ((next = this * factor) < this) { 2422 /* 2423 * We should not generally get log/linear quantizations 2424 * with a high magnitude that allows 64-bits to 2425 * overflow, but we nonetheless protect against this 2426 * by explicitly checking for overflow, and clamping 2427 * our value accordingly. 2428 */ 2429 value = this - 1; 2430 } 2431 2432 if (value < this) { 2433 /* 2434 * If our value lies within this order of magnitude, 2435 * determine its position by taking the offset within 2436 * the order of magnitude, dividing by the bucket 2437 * width, and adding to our (accumulated) base. 
2438 */ 2439 return (base + (value - last) / (this / nbuckets)); 2440 } 2441 2442 base += nbuckets - (nbuckets / factor); 2443 last = this; 2444 this = next; 2445 } 2446 2447 /* 2448 * Our value is greater than or equal to our factor taken to the 2449 * power of one plus the high magnitude -- return the top bucket. 2450 */ 2451 return (base); 2452 } 2453 2454 static void 2455 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2456 { 2457 uint64_t arg = *llquanta++; 2458 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2459 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2460 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2461 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2462 2463 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2464 low, high, nsteps, nval)] += incr; 2465 } 2466 2467 /*ARGSUSED*/ 2468 static void 2469 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2470 { 2471 data[0]++; 2472 data[1] += nval; 2473 } 2474 2475 /*ARGSUSED*/ 2476 static void 2477 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2478 { 2479 int64_t snval = (int64_t)nval; 2480 uint64_t tmp[2]; 2481 2482 data[0]++; 2483 data[1] += nval; 2484 2485 /* 2486 * What we want to say here is: 2487 * 2488 * data[2] += nval * nval; 2489 * 2490 * But given that nval is 64-bit, we could easily overflow, so 2491 * we do this as 128-bit arithmetic. 2492 */ 2493 if (snval < 0) 2494 snval = -snval; 2495 2496 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2497 dtrace_add_128(data + 2, tmp, data + 2); 2498 } 2499 2500 /*ARGSUSED*/ 2501 static void 2502 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2503 { 2504 *oval = *oval + 1; 2505 } 2506 2507 /*ARGSUSED*/ 2508 static void 2509 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2510 { 2511 *oval += nval; 2512 } 2513 2514 /* 2515 * Aggregate given the tuple in the principal data buffer, and the aggregating 2516 * action denoted by the specified dtrace_aggregation_t. The aggregation 2517 * buffer is specified as the buf parameter. This routine does not return 2518 * failure; if there is no space in the aggregation buffer, the data will be 2519 * dropped, and a corresponding counter incremented. 2520 */ 2521 static void 2522 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2523 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2524 { 2525 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2526 uint32_t i, ndx, size, fsize; 2527 uint32_t align = sizeof (uint64_t) - 1; 2528 dtrace_aggbuffer_t *agb; 2529 dtrace_aggkey_t *key; 2530 uint32_t hashval = 0, limit, isstr; 2531 caddr_t tomax, data, kdata; 2532 dtrace_actkind_t action; 2533 dtrace_action_t *act; 2534 uintptr_t offs; 2535 2536 if (buf == NULL) 2537 return; 2538 2539 if (!agg->dtag_hasarg) { 2540 /* 2541 * Currently, only quantize() and lquantize() take additional 2542 * arguments, and they have the same semantics: an increment 2543 * value that defaults to 1 when not present. If additional 2544 * aggregating actions take arguments, the setting of the 2545 * default argument value will presumably have to become more 2546 * sophisticated... 
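 *
 * As a worked example (parameters illustrative): with factor = 10, low = 0,
 * high = 2 and nsteps = 10, bucket 0 holds values below factor^low (that
 * is, below 1); a value of 7 lands in bucket 7 (the [1, 10) range is
 * resolved in steps of 1); a value of 37 lands in bucket 12 (the [30, 40)
 * step of the [10, 100) range); and any value of 1000 (factor^(high + 1))
 * or more lands in the final overflow bucket, bucket 28.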
2547 */ 2548 arg = 1; 2549 } 2550 2551 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2552 size = rec->dtrd_offset - agg->dtag_base; 2553 fsize = size + rec->dtrd_size; 2554 2555 ASSERT(dbuf->dtb_tomax != NULL); 2556 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2557 2558 if ((tomax = buf->dtb_tomax) == NULL) { 2559 dtrace_buffer_drop(buf); 2560 return; 2561 } 2562 2563 /* 2564 * The metastructure is always at the bottom of the buffer. 2565 */ 2566 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2567 sizeof (dtrace_aggbuffer_t)); 2568 2569 if (buf->dtb_offset == 0) { 2570 /* 2571 * We just kludge up approximately 1/8th of the size to be 2572 * buckets. If this guess ends up being routinely 2573 * off-the-mark, we may need to dynamically readjust this 2574 * based on past performance. 2575 */ 2576 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2577 2578 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2579 (uintptr_t)tomax || hashsize == 0) { 2580 /* 2581 * We've been given a ludicrously small buffer; 2582 * increment our drop count and leave. 2583 */ 2584 dtrace_buffer_drop(buf); 2585 return; 2586 } 2587 2588 /* 2589 * And now, a pathetic attempt to try to get a an odd (or 2590 * perchance, a prime) hash size for better hash distribution. 2591 */ 2592 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2593 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2594 2595 agb->dtagb_hashsize = hashsize; 2596 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2597 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2598 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2599 2600 for (i = 0; i < agb->dtagb_hashsize; i++) 2601 agb->dtagb_hash[i] = NULL; 2602 } 2603 2604 ASSERT(agg->dtag_first != NULL); 2605 ASSERT(agg->dtag_first->dta_intuple); 2606 2607 /* 2608 * Calculate the hash value based on the key. Note that we _don't_ 2609 * include the aggid in the hashing (but we will store it as part of 2610 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2611 * algorithm: a simple, quick algorithm that has no known funnels, and 2612 * gets good distribution in practice. The efficacy of the hashing 2613 * algorithm (and a comparison with other algorithms) may be found by 2614 * running the ::dtrace_aggstat MDB dcmd. 2615 */ 2616 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2617 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2618 limit = i + act->dta_rec.dtrd_size; 2619 ASSERT(limit <= size); 2620 isstr = DTRACEACT_ISSTRING(act); 2621 2622 for (; i < limit; i++) { 2623 hashval += data[i]; 2624 hashval += (hashval << 10); 2625 hashval ^= (hashval >> 6); 2626 2627 if (isstr && data[i] == '\0') 2628 break; 2629 } 2630 } 2631 2632 hashval += (hashval << 3); 2633 hashval ^= (hashval >> 11); 2634 hashval += (hashval << 15); 2635 2636 /* 2637 * Yes, the divide here is expensive -- but it's generally the least 2638 * of the performance issues given the amount of data that we iterate 2639 * over to compute hash values, compare data, etc. 
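 *
 * For orientation, the aggregation buffer searched below is laid out with
 * key tuples and their values at the bottom (growing up from dtb_offset),
 * the dtrace_aggbuffer_t metastructure at the very top, the hash bucket
 * array (dtagb_hash) immediately beneath it, and dtrace_aggkey_t structures
 * carved downward from dtagb_free as new keys are added; the buffer is
 * considered exhausted when the upward-growing data region and the
 * downward-growing key region would meet.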
2640 */ 2641 ndx = hashval % agb->dtagb_hashsize; 2642 2643 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2644 ASSERT((caddr_t)key >= tomax); 2645 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2646 2647 if (hashval != key->dtak_hashval || key->dtak_size != size) 2648 continue; 2649 2650 kdata = key->dtak_data; 2651 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2652 2653 for (act = agg->dtag_first; act->dta_intuple; 2654 act = act->dta_next) { 2655 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2656 limit = i + act->dta_rec.dtrd_size; 2657 ASSERT(limit <= size); 2658 isstr = DTRACEACT_ISSTRING(act); 2659 2660 for (; i < limit; i++) { 2661 if (kdata[i] != data[i]) 2662 goto next; 2663 2664 if (isstr && data[i] == '\0') 2665 break; 2666 } 2667 } 2668 2669 if (action != key->dtak_action) { 2670 /* 2671 * We are aggregating on the same value in the same 2672 * aggregation with two different aggregating actions. 2673 * (This should have been picked up in the compiler, 2674 * so we may be dealing with errant or devious DIF.) 2675 * This is an error condition; we indicate as much, 2676 * and return. 2677 */ 2678 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2679 return; 2680 } 2681 2682 /* 2683 * This is a hit: we need to apply the aggregator to 2684 * the value at this key. 2685 */ 2686 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2687 return; 2688 next: 2689 continue; 2690 } 2691 2692 /* 2693 * We didn't find it. We need to allocate some zero-filled space, 2694 * link it into the hash table appropriately, and apply the aggregator 2695 * to the (zero-filled) value. 2696 */ 2697 offs = buf->dtb_offset; 2698 while (offs & (align - 1)) 2699 offs += sizeof (uint32_t); 2700 2701 /* 2702 * If we don't have enough room to both allocate a new key _and_ 2703 * its associated data, increment the drop count and return. 2704 */ 2705 if ((uintptr_t)tomax + offs + fsize > 2706 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2707 dtrace_buffer_drop(buf); 2708 return; 2709 } 2710 2711 /*CONSTCOND*/ 2712 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2713 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2714 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2715 2716 key->dtak_data = kdata = tomax + offs; 2717 buf->dtb_offset = offs + fsize; 2718 2719 /* 2720 * Now copy the data across. 2721 */ 2722 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2723 2724 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2725 kdata[i] = data[i]; 2726 2727 /* 2728 * Because strings are not zeroed out by default, we need to iterate 2729 * looking for actions that store strings, and we need to explicitly 2730 * pad these strings out with zeroes. 2731 */ 2732 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2733 int nul; 2734 2735 if (!DTRACEACT_ISSTRING(act)) 2736 continue; 2737 2738 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2739 limit = i + act->dta_rec.dtrd_size; 2740 ASSERT(limit <= size); 2741 2742 for (nul = 0; i < limit; i++) { 2743 if (nul) { 2744 kdata[i] = '\0'; 2745 continue; 2746 } 2747 2748 if (data[i] != '\0') 2749 continue; 2750 2751 nul = 1; 2752 } 2753 } 2754 2755 for (i = size; i < fsize; i++) 2756 kdata[i] = 0; 2757 2758 key->dtak_hashval = hashval; 2759 key->dtak_size = size; 2760 key->dtak_action = action; 2761 key->dtak_next = agb->dtagb_hash[ndx]; 2762 agb->dtagb_hash[ndx] = key; 2763 2764 /* 2765 * Finally, apply the aggregator. 
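 *
 * For the simple aggregating actions defined earlier in this file, the
 * uint64_t's at dtak_data + size are used as follows (the final statistic
 * is derived later, at consume time, outside of this function):
 *
 *	count()		data[0] = number of firings
 *	sum()/min()/max()	data[0] = running sum / minimum / maximum
 *	avg()		data[0] = count, data[1] = running sum
 *			(the consumer computes data[1] / data[0])
 *	stddev()	data[0] = count, data[1] = running sum,
 *			data[2..3] = 128-bit running sum of squares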
2766 */ 2767 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2768 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2769 } 2770 2771 /* 2772 * Given consumer state, this routine finds a speculation in the INACTIVE 2773 * state and transitions it into the ACTIVE state. If there is no speculation 2774 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2775 * incremented -- it is up to the caller to take appropriate action. 2776 */ 2777 static int 2778 dtrace_speculation(dtrace_state_t *state) 2779 { 2780 int i = 0; 2781 dtrace_speculation_state_t current; 2782 uint32_t *stat = &state->dts_speculations_unavail, count; 2783 2784 while (i < state->dts_nspeculations) { 2785 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2786 2787 current = spec->dtsp_state; 2788 2789 if (current != DTRACESPEC_INACTIVE) { 2790 if (current == DTRACESPEC_COMMITTINGMANY || 2791 current == DTRACESPEC_COMMITTING || 2792 current == DTRACESPEC_DISCARDING) 2793 stat = &state->dts_speculations_busy; 2794 i++; 2795 continue; 2796 } 2797 2798 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2799 current, DTRACESPEC_ACTIVE) == current) 2800 return (i + 1); 2801 } 2802 2803 /* 2804 * We couldn't find a speculation. If we found as much as a single 2805 * busy speculation buffer, we'll attribute this failure as "busy" 2806 * instead of "unavail". 2807 */ 2808 do { 2809 count = *stat; 2810 } while (dtrace_cas32(stat, count, count + 1) != count); 2811 2812 return (0); 2813 } 2814 2815 /* 2816 * This routine commits an active speculation. If the specified speculation 2817 * is not in a valid state to perform a commit(), this routine will silently do 2818 * nothing. The state of the specified speculation is transitioned according 2819 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2820 */ 2821 static void 2822 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2823 dtrace_specid_t which) 2824 { 2825 dtrace_speculation_t *spec; 2826 dtrace_buffer_t *src, *dest; 2827 uintptr_t daddr, saddr, dlimit, slimit; 2828 dtrace_speculation_state_t current, new = 0; 2829 intptr_t offs; 2830 uint64_t timestamp; 2831 2832 if (which == 0) 2833 return; 2834 2835 if (which > state->dts_nspeculations) { 2836 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2837 return; 2838 } 2839 2840 spec = &state->dts_speculations[which - 1]; 2841 src = &spec->dtsp_buffer[cpu]; 2842 dest = &state->dts_buffer[cpu]; 2843 2844 do { 2845 current = spec->dtsp_state; 2846 2847 if (current == DTRACESPEC_COMMITTINGMANY) 2848 break; 2849 2850 switch (current) { 2851 case DTRACESPEC_INACTIVE: 2852 case DTRACESPEC_DISCARDING: 2853 return; 2854 2855 case DTRACESPEC_COMMITTING: 2856 /* 2857 * This is only possible if we are (a) commit()'ing 2858 * without having done a prior speculate() on this CPU 2859 * and (b) racing with another commit() on a different 2860 * CPU. There's nothing to do -- we just assert that 2861 * our offset is 0. 2862 */ 2863 ASSERT(src->dtb_offset == 0); 2864 return; 2865 2866 case DTRACESPEC_ACTIVE: 2867 new = DTRACESPEC_COMMITTING; 2868 break; 2869 2870 case DTRACESPEC_ACTIVEONE: 2871 /* 2872 * This speculation is active on one CPU. If our 2873 * buffer offset is non-zero, we know that the one CPU 2874 * must be us. Otherwise, we are committing on a 2875 * different CPU from the speculate(), and we must 2876 * rely on being asynchronously cleaned. 
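 *
 * In summary, the transitions effected by the surrounding loop are:
 *
 *	INACTIVE, DISCARDING		-> return (nothing to commit)
 *	COMMITTING			-> return (a racing commit; our offset is 0)
 *	COMMITTINGMANY			-> unchanged; we proceed to copy our buffer
 *	ACTIVE				-> COMMITTING
 *	ACTIVEONE, our buffer non-empty	-> COMMITTING
 *	ACTIVEONE, our buffer empty	-> COMMITTINGMANY
 *	ACTIVEMANY			-> COMMITTINGMANY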
2877 */ 2878 if (src->dtb_offset != 0) { 2879 new = DTRACESPEC_COMMITTING; 2880 break; 2881 } 2882 /*FALLTHROUGH*/ 2883 2884 case DTRACESPEC_ACTIVEMANY: 2885 new = DTRACESPEC_COMMITTINGMANY; 2886 break; 2887 2888 default: 2889 ASSERT(0); 2890 } 2891 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2892 current, new) != current); 2893 2894 /* 2895 * We have set the state to indicate that we are committing this 2896 * speculation. Now reserve the necessary space in the destination 2897 * buffer. 2898 */ 2899 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2900 sizeof (uint64_t), state, NULL)) < 0) { 2901 dtrace_buffer_drop(dest); 2902 goto out; 2903 } 2904 2905 /* 2906 * We have sufficient space to copy the speculative buffer into the 2907 * primary buffer. First, modify the speculative buffer, filling 2908 * in the timestamp of all entries with the current time. The data 2909 * must have the commit() time rather than the time it was traced, 2910 * so that all entries in the primary buffer are in timestamp order. 2911 */ 2912 timestamp = dtrace_gethrtime(); 2913 saddr = (uintptr_t)src->dtb_tomax; 2914 slimit = saddr + src->dtb_offset; 2915 while (saddr < slimit) { 2916 size_t size; 2917 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2918 2919 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2920 saddr += sizeof (dtrace_epid_t); 2921 continue; 2922 } 2923 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2924 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2925 2926 ASSERT3U(saddr + size, <=, slimit); 2927 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2928 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2929 2930 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2931 2932 saddr += size; 2933 } 2934 2935 /* 2936 * Copy the buffer across. (Note that this is a 2937 * highly subobtimal bcopy(); in the unlikely event that this becomes 2938 * a serious performance issue, a high-performance DTrace-specific 2939 * bcopy() should obviously be invented.) 2940 */ 2941 daddr = (uintptr_t)dest->dtb_tomax + offs; 2942 dlimit = daddr + src->dtb_offset; 2943 saddr = (uintptr_t)src->dtb_tomax; 2944 2945 /* 2946 * First, the aligned portion. 2947 */ 2948 while (dlimit - daddr >= sizeof (uint64_t)) { 2949 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2950 2951 daddr += sizeof (uint64_t); 2952 saddr += sizeof (uint64_t); 2953 } 2954 2955 /* 2956 * Now any left-over bit... 2957 */ 2958 while (dlimit - daddr) 2959 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2960 2961 /* 2962 * Finally, commit the reserved space in the destination buffer. 2963 */ 2964 dest->dtb_offset = offs + src->dtb_offset; 2965 2966 out: 2967 /* 2968 * If we're lucky enough to be the only active CPU on this speculation 2969 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2970 */ 2971 if (current == DTRACESPEC_ACTIVE || 2972 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2973 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2974 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2975 2976 ASSERT(rval == DTRACESPEC_COMMITTING); 2977 } 2978 2979 src->dtb_offset = 0; 2980 src->dtb_xamot_drops += src->dtb_drops; 2981 src->dtb_drops = 0; 2982 } 2983 2984 /* 2985 * This routine discards an active speculation. If the specified speculation 2986 * is not in a valid state to perform a discard(), this routine will silently 2987 * do nothing. 
The state of the specified speculation is transitioned 2988 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2989 */ 2990 static void 2991 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2992 dtrace_specid_t which) 2993 { 2994 dtrace_speculation_t *spec; 2995 dtrace_speculation_state_t current, new = 0; 2996 dtrace_buffer_t *buf; 2997 2998 if (which == 0) 2999 return; 3000 3001 if (which > state->dts_nspeculations) { 3002 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3003 return; 3004 } 3005 3006 spec = &state->dts_speculations[which - 1]; 3007 buf = &spec->dtsp_buffer[cpu]; 3008 3009 do { 3010 current = spec->dtsp_state; 3011 3012 switch (current) { 3013 case DTRACESPEC_INACTIVE: 3014 case DTRACESPEC_COMMITTINGMANY: 3015 case DTRACESPEC_COMMITTING: 3016 case DTRACESPEC_DISCARDING: 3017 return; 3018 3019 case DTRACESPEC_ACTIVE: 3020 case DTRACESPEC_ACTIVEMANY: 3021 new = DTRACESPEC_DISCARDING; 3022 break; 3023 3024 case DTRACESPEC_ACTIVEONE: 3025 if (buf->dtb_offset != 0) { 3026 new = DTRACESPEC_INACTIVE; 3027 } else { 3028 new = DTRACESPEC_DISCARDING; 3029 } 3030 break; 3031 3032 default: 3033 ASSERT(0); 3034 } 3035 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3036 current, new) != current); 3037 3038 buf->dtb_offset = 0; 3039 buf->dtb_drops = 0; 3040 } 3041 3042 /* 3043 * Note: not called from probe context. This function is called 3044 * asynchronously from cross call context to clean any speculations that are 3045 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 3046 * transitioned back to the INACTIVE state until all CPUs have cleaned the 3047 * speculation. 3048 */ 3049 static void 3050 dtrace_speculation_clean_here(dtrace_state_t *state) 3051 { 3052 dtrace_icookie_t cookie; 3053 processorid_t cpu = curcpu; 3054 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 3055 dtrace_specid_t i; 3056 3057 cookie = dtrace_interrupt_disable(); 3058 3059 if (dest->dtb_tomax == NULL) { 3060 dtrace_interrupt_enable(cookie); 3061 return; 3062 } 3063 3064 for (i = 0; i < state->dts_nspeculations; i++) { 3065 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3066 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 3067 3068 if (src->dtb_tomax == NULL) 3069 continue; 3070 3071 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 3072 src->dtb_offset = 0; 3073 continue; 3074 } 3075 3076 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 3077 continue; 3078 3079 if (src->dtb_offset == 0) 3080 continue; 3081 3082 dtrace_speculation_commit(state, cpu, i + 1); 3083 } 3084 3085 dtrace_interrupt_enable(cookie); 3086 } 3087 3088 /* 3089 * Note: not called from probe context. This function is called 3090 * asynchronously (and at a regular interval) to clean any speculations that 3091 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 3092 * is work to be done, it cross calls all CPUs to perform that work; 3093 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 3094 * INACTIVE state until they have been cleaned by all CPUs. 
3095 */ 3096 static void 3097 dtrace_speculation_clean(dtrace_state_t *state) 3098 { 3099 int work = 0, rv; 3100 dtrace_specid_t i; 3101 3102 for (i = 0; i < state->dts_nspeculations; i++) { 3103 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3104 3105 ASSERT(!spec->dtsp_cleaning); 3106 3107 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 3108 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 3109 continue; 3110 3111 work++; 3112 spec->dtsp_cleaning = 1; 3113 } 3114 3115 if (!work) 3116 return; 3117 3118 dtrace_xcall(DTRACE_CPUALL, 3119 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 3120 3121 /* 3122 * We now know that all CPUs have committed or discarded their 3123 * speculation buffers, as appropriate. We can now set the state 3124 * to inactive. 3125 */ 3126 for (i = 0; i < state->dts_nspeculations; i++) { 3127 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3128 dtrace_speculation_state_t current, new; 3129 3130 if (!spec->dtsp_cleaning) 3131 continue; 3132 3133 current = spec->dtsp_state; 3134 ASSERT(current == DTRACESPEC_DISCARDING || 3135 current == DTRACESPEC_COMMITTINGMANY); 3136 3137 new = DTRACESPEC_INACTIVE; 3138 3139 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 3140 ASSERT(rv == current); 3141 spec->dtsp_cleaning = 0; 3142 } 3143 } 3144 3145 /* 3146 * Called as part of a speculate() to get the speculative buffer associated 3147 * with a given speculation. Returns NULL if the specified speculation is not 3148 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 3149 * the active CPU is not the specified CPU -- the speculation will be 3150 * atomically transitioned into the ACTIVEMANY state. 3151 */ 3152 static dtrace_buffer_t * 3153 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 3154 dtrace_specid_t which) 3155 { 3156 dtrace_speculation_t *spec; 3157 dtrace_speculation_state_t current, new = 0; 3158 dtrace_buffer_t *buf; 3159 3160 if (which == 0) 3161 return (NULL); 3162 3163 if (which > state->dts_nspeculations) { 3164 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3165 return (NULL); 3166 } 3167 3168 spec = &state->dts_speculations[which - 1]; 3169 buf = &spec->dtsp_buffer[cpuid]; 3170 3171 do { 3172 current = spec->dtsp_state; 3173 3174 switch (current) { 3175 case DTRACESPEC_INACTIVE: 3176 case DTRACESPEC_COMMITTINGMANY: 3177 case DTRACESPEC_DISCARDING: 3178 return (NULL); 3179 3180 case DTRACESPEC_COMMITTING: 3181 ASSERT(buf->dtb_offset == 0); 3182 return (NULL); 3183 3184 case DTRACESPEC_ACTIVEONE: 3185 /* 3186 * This speculation is currently active on one CPU. 3187 * Check the offset in the buffer; if it's non-zero, 3188 * that CPU must be us (and we leave the state alone). 3189 * If it's zero, assume that we're starting on a new 3190 * CPU -- and change the state to indicate that the 3191 * speculation is active on more than one CPU. 3192 */ 3193 if (buf->dtb_offset != 0) 3194 return (buf); 3195 3196 new = DTRACESPEC_ACTIVEMANY; 3197 break; 3198 3199 case DTRACESPEC_ACTIVEMANY: 3200 return (buf); 3201 3202 case DTRACESPEC_ACTIVE: 3203 new = DTRACESPEC_ACTIVEONE; 3204 break; 3205 3206 default: 3207 ASSERT(0); 3208 } 3209 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3210 current, new) != current); 3211 3212 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 3213 return (buf); 3214 } 3215 3216 /* 3217 * Return a string. 
In the event that the user lacks the privilege to access 3218 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3219 * don't fail access checking. 3220 * 3221 * dtrace_dif_variable() uses this routine as a helper for various 3222 * builtin values such as 'execname' and 'probefunc.' 3223 */ 3224 uintptr_t 3225 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 3226 dtrace_mstate_t *mstate) 3227 { 3228 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3229 uintptr_t ret; 3230 size_t strsz; 3231 3232 /* 3233 * The easy case: this probe is allowed to read all of memory, so 3234 * we can just return this as a vanilla pointer. 3235 */ 3236 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 3237 return (addr); 3238 3239 /* 3240 * This is the tougher case: we copy the string in question from 3241 * kernel memory into scratch memory and return it that way: this 3242 * ensures that we won't trip up when access checking tests the 3243 * BYREF return value. 3244 */ 3245 strsz = dtrace_strlen((char *)addr, size) + 1; 3246 3247 if (mstate->dtms_scratch_ptr + strsz > 3248 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3249 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3250 return (0); 3251 } 3252 3253 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3254 strsz); 3255 ret = mstate->dtms_scratch_ptr; 3256 mstate->dtms_scratch_ptr += strsz; 3257 return (ret); 3258 } 3259 3260 /* 3261 * Return a string from a memory address which is known to have one or 3262 * more concatenated, individually zero-terminated, sub-strings. 3263 * In the event that the user lacks the privilege to access 3264 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3265 * don't fail access checking. 3266 * 3267 * dtrace_dif_variable() uses this routine as a helper for various 3268 * builtin values such as 'execargs'. 3269 */ 3270 static uintptr_t 3271 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 3272 dtrace_mstate_t *mstate) 3273 { 3274 char *p; 3275 size_t i; 3276 uintptr_t ret; 3277 3278 if (mstate->dtms_scratch_ptr + strsz > 3279 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3280 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3281 return (0); 3282 } 3283 3284 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3285 strsz); 3286 3287 /* Replace sub-string termination characters with a space. */ 3288 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 3289 p++, i++) 3290 if (*p == '\0') 3291 *p = ' '; 3292 3293 ret = mstate->dtms_scratch_ptr; 3294 mstate->dtms_scratch_ptr += strsz; 3295 return (ret); 3296 } 3297 3298 /* 3299 * This function implements the DIF emulator's variable lookups. The emulator 3300 * passes a reserved variable identifier and optional built-in array index. 3301 */ 3302 static uint64_t 3303 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 3304 uint64_t ndx) 3305 { 3306 /* 3307 * If we're accessing one of the uncached arguments, we'll turn this 3308 * into a reference in the args array.
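 *
 * For example (illustrative): a DIF lookup of arg3 arrives here as
 * v == DIF_VAR_ARG3 with ndx unused; it is rewritten to v = DIF_VAR_ARGS
 * and ndx = 3, and is then satisfied either from the cached
 * mstate->dtms_arg[] values or, for higher argument numbers, via the
 * provider's dtps_getargval() entry point or dtrace_getarg().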
3309 */ 3310 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 3311 ndx = v - DIF_VAR_ARG0; 3312 v = DIF_VAR_ARGS; 3313 } 3314 3315 switch (v) { 3316 case DIF_VAR_ARGS: 3317 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 3318 if (ndx >= sizeof (mstate->dtms_arg) / 3319 sizeof (mstate->dtms_arg[0])) { 3320 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3321 dtrace_provider_t *pv; 3322 uint64_t val; 3323 3324 pv = mstate->dtms_probe->dtpr_provider; 3325 if (pv->dtpv_pops.dtps_getargval != NULL) 3326 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 3327 mstate->dtms_probe->dtpr_id, 3328 mstate->dtms_probe->dtpr_arg, ndx, aframes); 3329 else 3330 val = dtrace_getarg(ndx, aframes); 3331 3332 /* 3333 * This is regrettably required to keep the compiler 3334 * from tail-optimizing the call to dtrace_getarg(). 3335 * The condition always evaluates to true, but the 3336 * compiler has no way of figuring that out a priori. 3337 * (None of this would be necessary if the compiler 3338 * could be relied upon to _always_ tail-optimize 3339 * the call to dtrace_getarg() -- but it can't.) 3340 */ 3341 if (mstate->dtms_probe != NULL) 3342 return (val); 3343 3344 ASSERT(0); 3345 } 3346 3347 return (mstate->dtms_arg[ndx]); 3348 3349 #ifdef illumos 3350 case DIF_VAR_UREGS: { 3351 klwp_t *lwp; 3352 3353 if (!dtrace_priv_proc(state)) 3354 return (0); 3355 3356 if ((lwp = curthread->t_lwp) == NULL) { 3357 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3358 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 3359 return (0); 3360 } 3361 3362 return (dtrace_getreg(lwp->lwp_regs, ndx)); 3363 return (0); 3364 } 3365 #else 3366 case DIF_VAR_UREGS: { 3367 struct trapframe *tframe; 3368 3369 if (!dtrace_priv_proc(state)) 3370 return (0); 3371 3372 if ((tframe = curthread->td_frame) == NULL) { 3373 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3374 cpu_core[curcpu].cpuc_dtrace_illval = 0; 3375 return (0); 3376 } 3377 3378 return (dtrace_getreg(tframe, ndx)); 3379 } 3380 #endif 3381 3382 case DIF_VAR_CURTHREAD: 3383 if (!dtrace_priv_proc(state)) 3384 return (0); 3385 return ((uint64_t)(uintptr_t)curthread); 3386 3387 case DIF_VAR_TIMESTAMP: 3388 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 3389 mstate->dtms_timestamp = dtrace_gethrtime(); 3390 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 3391 } 3392 return (mstate->dtms_timestamp); 3393 3394 case DIF_VAR_VTIMESTAMP: 3395 ASSERT(dtrace_vtime_references != 0); 3396 return (curthread->t_dtrace_vtime); 3397 3398 case DIF_VAR_WALLTIMESTAMP: 3399 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 3400 mstate->dtms_walltimestamp = dtrace_gethrestime(); 3401 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 3402 } 3403 return (mstate->dtms_walltimestamp); 3404 3405 #ifdef illumos 3406 case DIF_VAR_IPL: 3407 if (!dtrace_priv_kernel(state)) 3408 return (0); 3409 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 3410 mstate->dtms_ipl = dtrace_getipl(); 3411 mstate->dtms_present |= DTRACE_MSTATE_IPL; 3412 } 3413 return (mstate->dtms_ipl); 3414 #endif 3415 3416 case DIF_VAR_EPID: 3417 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 3418 return (mstate->dtms_epid); 3419 3420 case DIF_VAR_ID: 3421 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3422 return (mstate->dtms_probe->dtpr_id); 3423 3424 case DIF_VAR_STACKDEPTH: 3425 if (!dtrace_priv_kernel(state)) 3426 return (0); 3427 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 3428 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3429 3430 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 3431 
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3432 } 3433 return (mstate->dtms_stackdepth); 3434 3435 case DIF_VAR_USTACKDEPTH: 3436 if (!dtrace_priv_proc(state)) 3437 return (0); 3438 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3439 /* 3440 * See comment in DIF_VAR_PID. 3441 */ 3442 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3443 CPU_ON_INTR(CPU)) { 3444 mstate->dtms_ustackdepth = 0; 3445 } else { 3446 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3447 mstate->dtms_ustackdepth = 3448 dtrace_getustackdepth(); 3449 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3450 } 3451 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3452 } 3453 return (mstate->dtms_ustackdepth); 3454 3455 case DIF_VAR_CALLER: 3456 if (!dtrace_priv_kernel(state)) 3457 return (0); 3458 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3459 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3460 3461 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3462 /* 3463 * If this is an unanchored probe, we are 3464 * required to go through the slow path: 3465 * dtrace_caller() only guarantees correct 3466 * results for anchored probes. 3467 */ 3468 pc_t caller[2] = {0, 0}; 3469 3470 dtrace_getpcstack(caller, 2, aframes, 3471 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3472 mstate->dtms_caller = caller[1]; 3473 } else if ((mstate->dtms_caller = 3474 dtrace_caller(aframes)) == -1) { 3475 /* 3476 * We have failed to do this the quick way; 3477 * we must resort to the slower approach of 3478 * calling dtrace_getpcstack(). 3479 */ 3480 pc_t caller = 0; 3481 3482 dtrace_getpcstack(&caller, 1, aframes, NULL); 3483 mstate->dtms_caller = caller; 3484 } 3485 3486 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3487 } 3488 return (mstate->dtms_caller); 3489 3490 case DIF_VAR_UCALLER: 3491 if (!dtrace_priv_proc(state)) 3492 return (0); 3493 3494 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3495 uint64_t ustack[3]; 3496 3497 /* 3498 * dtrace_getupcstack() fills in the first uint64_t 3499 * with the current PID. The second uint64_t will 3500 * be the program counter at user-level. The third 3501 * uint64_t will contain the caller, which is what 3502 * we're after. 3503 */ 3504 ustack[2] = 0; 3505 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3506 dtrace_getupcstack(ustack, 3); 3507 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3508 mstate->dtms_ucaller = ustack[2]; 3509 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3510 } 3511 3512 return (mstate->dtms_ucaller); 3513 3514 case DIF_VAR_PROBEPROV: 3515 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3516 return (dtrace_dif_varstr( 3517 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3518 state, mstate)); 3519 3520 case DIF_VAR_PROBEMOD: 3521 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3522 return (dtrace_dif_varstr( 3523 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3524 state, mstate)); 3525 3526 case DIF_VAR_PROBEFUNC: 3527 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3528 return (dtrace_dif_varstr( 3529 (uintptr_t)mstate->dtms_probe->dtpr_func, 3530 state, mstate)); 3531 3532 case DIF_VAR_PROBENAME: 3533 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3534 return (dtrace_dif_varstr( 3535 (uintptr_t)mstate->dtms_probe->dtpr_name, 3536 state, mstate)); 3537 3538 case DIF_VAR_PID: 3539 if (!dtrace_priv_proc(state)) 3540 return (0); 3541 3542 #ifdef illumos 3543 /* 3544 * Note that we are assuming that an unanchored probe is 3545 * always due to a high-level interrupt. (And we're assuming 3546 * that there is only a single high level interrupt.) 
3547 */ 3548 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3549 return (pid0.pid_id); 3550 3551 /* 3552 * It is always safe to dereference one's own t_procp pointer: 3553 * it always points to a valid, allocated proc structure. 3554 * Further, it is always safe to dereference the p_pidp member 3555 * of one's own proc structure. (These are truisms becuase 3556 * threads and processes don't clean up their own state -- 3557 * they leave that task to whomever reaps them.) 3558 */ 3559 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3560 #else 3561 return ((uint64_t)curproc->p_pid); 3562 #endif 3563 3564 case DIF_VAR_PPID: 3565 if (!dtrace_priv_proc(state)) 3566 return (0); 3567 3568 #ifdef illumos 3569 /* 3570 * See comment in DIF_VAR_PID. 3571 */ 3572 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3573 return (pid0.pid_id); 3574 3575 /* 3576 * It is always safe to dereference one's own t_procp pointer: 3577 * it always points to a valid, allocated proc structure. 3578 * (This is true because threads don't clean up their own 3579 * state -- they leave that task to whomever reaps them.) 3580 */ 3581 return ((uint64_t)curthread->t_procp->p_ppid); 3582 #else 3583 if (curproc->p_pid == proc0.p_pid) 3584 return (curproc->p_pid); 3585 else 3586 return (curproc->p_pptr->p_pid); 3587 #endif 3588 3589 case DIF_VAR_TID: 3590 #ifdef illumos 3591 /* 3592 * See comment in DIF_VAR_PID. 3593 */ 3594 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3595 return (0); 3596 #endif 3597 3598 return ((uint64_t)curthread->t_tid); 3599 3600 case DIF_VAR_EXECARGS: { 3601 struct pargs *p_args = curthread->td_proc->p_args; 3602 3603 if (p_args == NULL) 3604 return(0); 3605 3606 return (dtrace_dif_varstrz( 3607 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3608 } 3609 3610 case DIF_VAR_EXECNAME: 3611 #ifdef illumos 3612 if (!dtrace_priv_proc(state)) 3613 return (0); 3614 3615 /* 3616 * See comment in DIF_VAR_PID. 3617 */ 3618 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3619 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3620 3621 /* 3622 * It is always safe to dereference one's own t_procp pointer: 3623 * it always points to a valid, allocated proc structure. 3624 * (This is true because threads don't clean up their own 3625 * state -- they leave that task to whomever reaps them.) 3626 */ 3627 return (dtrace_dif_varstr( 3628 (uintptr_t)curthread->t_procp->p_user.u_comm, 3629 state, mstate)); 3630 #else 3631 return (dtrace_dif_varstr( 3632 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3633 #endif 3634 3635 case DIF_VAR_ZONENAME: 3636 #ifdef illumos 3637 if (!dtrace_priv_proc(state)) 3638 return (0); 3639 3640 /* 3641 * See comment in DIF_VAR_PID. 3642 */ 3643 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3644 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3645 3646 /* 3647 * It is always safe to dereference one's own t_procp pointer: 3648 * it always points to a valid, allocated proc structure. 3649 * (This is true because threads don't clean up their own 3650 * state -- they leave that task to whomever reaps them.) 3651 */ 3652 return (dtrace_dif_varstr( 3653 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3654 state, mstate)); 3655 #else 3656 return (0); 3657 #endif 3658 3659 case DIF_VAR_UID: 3660 if (!dtrace_priv_proc(state)) 3661 return (0); 3662 3663 #ifdef illumos 3664 /* 3665 * See comment in DIF_VAR_PID. 
3666 */ 3667 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3668 return ((uint64_t)p0.p_cred->cr_uid); 3669 3670 /* 3671 * It is always safe to dereference one's own t_procp pointer: 3672 * it always points to a valid, allocated proc structure. 3673 * (This is true because threads don't clean up their own 3674 * state -- they leave that task to whomever reaps them.) 3675 * 3676 * Additionally, it is safe to dereference one's own process 3677 * credential, since this is never NULL after process birth. 3678 */ 3679 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3680 #else 3681 return ((uint64_t)curthread->td_ucred->cr_uid); 3682 #endif 3683 3684 case DIF_VAR_GID: 3685 if (!dtrace_priv_proc(state)) 3686 return (0); 3687 3688 #ifdef illumos 3689 /* 3690 * See comment in DIF_VAR_PID. 3691 */ 3692 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3693 return ((uint64_t)p0.p_cred->cr_gid); 3694 3695 /* 3696 * It is always safe to dereference one's own t_procp pointer: 3697 * it always points to a valid, allocated proc structure. 3698 * (This is true because threads don't clean up their own 3699 * state -- they leave that task to whomever reaps them.) 3700 * 3701 * Additionally, it is safe to dereference one's own process 3702 * credential, since this is never NULL after process birth. 3703 */ 3704 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3705 #else 3706 return ((uint64_t)curthread->td_ucred->cr_gid); 3707 #endif 3708 3709 case DIF_VAR_ERRNO: { 3710 #ifdef illumos 3711 klwp_t *lwp; 3712 if (!dtrace_priv_proc(state)) 3713 return (0); 3714 3715 /* 3716 * See comment in DIF_VAR_PID. 3717 */ 3718 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3719 return (0); 3720 3721 /* 3722 * It is always safe to dereference one's own t_lwp pointer in 3723 * the event that this pointer is non-NULL. (This is true 3724 * because threads and lwps don't clean up their own state -- 3725 * they leave that task to whomever reaps them.) 3726 */ 3727 if ((lwp = curthread->t_lwp) == NULL) 3728 return (0); 3729 3730 return ((uint64_t)lwp->lwp_errno); 3731 #else 3732 return (curthread->td_errno); 3733 #endif 3734 } 3735 #ifndef illumos 3736 case DIF_VAR_CPU: { 3737 return curcpu; 3738 } 3739 #endif 3740 default: 3741 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3742 return (0); 3743 } 3744 } 3745 3746 3747 typedef enum dtrace_json_state { 3748 DTRACE_JSON_REST = 1, 3749 DTRACE_JSON_OBJECT, 3750 DTRACE_JSON_STRING, 3751 DTRACE_JSON_STRING_ESCAPE, 3752 DTRACE_JSON_STRING_ESCAPE_UNICODE, 3753 DTRACE_JSON_COLON, 3754 DTRACE_JSON_COMMA, 3755 DTRACE_JSON_VALUE, 3756 DTRACE_JSON_IDENTIFIER, 3757 DTRACE_JSON_NUMBER, 3758 DTRACE_JSON_NUMBER_FRAC, 3759 DTRACE_JSON_NUMBER_EXP, 3760 DTRACE_JSON_COLLECT_OBJECT 3761 } dtrace_json_state_t; 3762 3763 /* 3764 * This function possesses just enough knowledge about JSON to extract a single 3765 * value from a JSON string and store it in the scratch buffer. It is able 3766 * to extract nested object values, and members of arrays by index. 3767 * 3768 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to 3769 * be looked up as we descend into the object tree. e.g. 3770 * 3771 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL 3772 * with nelems = 5. 3773 * 3774 * The run time of this function must be bounded above by strsize to limit the 3775 * amount of work done in probe context. 
As such, it is implemented as a 3776 * simple state machine, reading one character at a time using safe loads 3777 * until we find the requested element, hit a parsing error or run off the 3778 * end of the object or string. 3779 * 3780 * As there is no way for a subroutine to return an error without interrupting 3781 * clause execution, we simply return NULL in the event of a missing key or any 3782 * other error condition. Each NULL return in this function is commented with 3783 * the error condition it represents -- parsing or otherwise. 3784 * 3785 * The set of states for the state machine closely matches the JSON 3786 * specification (http://json.org/). Briefly: 3787 * 3788 * DTRACE_JSON_REST: 3789 * Skip whitespace until we find either a top-level Object, moving 3790 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. 3791 * 3792 * DTRACE_JSON_OBJECT: 3793 * Locate the next key String in an Object. Sets a flag to denote 3794 * the next String as a key string and moves to DTRACE_JSON_STRING. 3795 * 3796 * DTRACE_JSON_COLON: 3797 * Skip whitespace until we find the colon that separates key Strings 3798 * from their values. Once found, move to DTRACE_JSON_VALUE. 3799 * 3800 * DTRACE_JSON_VALUE: 3801 * Detects the type of the next value (String, Number, Identifier, Object 3802 * or Array) and routes to the states that process that type. Here we also 3803 * deal with the element selector list if we are requested to traverse down 3804 * into the object tree. 3805 * 3806 * DTRACE_JSON_COMMA: 3807 * Skip whitespace until we find the comma that separates key-value pairs 3808 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays 3809 * (similarly DTRACE_JSON_VALUE). All following literal value processing 3810 * states return to this state at the end of their value, unless otherwise 3811 * noted. 3812 * 3813 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: 3814 * Processes a Number literal from the JSON, including any exponent 3815 * component that may be present. Numbers are returned as strings, which 3816 * may be passed to strtoll() if an integer is required. 3817 * 3818 * DTRACE_JSON_IDENTIFIER: 3819 * Processes a "true", "false" or "null" literal in the JSON. 3820 * 3821 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, 3822 * DTRACE_JSON_STRING_ESCAPE_UNICODE: 3823 * Processes a String literal from the JSON, whether the String denotes 3824 * a key, a value or part of a larger Object. Handles all escape sequences 3825 * present in the specification, including four-digit unicode characters, 3826 * but merely includes the escape sequence without converting it to the 3827 * actual escaped character. If the String is flagged as a key, we 3828 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. 3829 * 3830 * DTRACE_JSON_COLLECT_OBJECT: 3831 * This state collects an entire Object (or Array), correctly handling 3832 * embedded strings. If the full element selector list matches this nested 3833 * object, we return the Object in full as a string. If not, we use this 3834 * state to skip to the next value at this level and continue processing. 3835 * 3836 * NOTE: This function uses various macros from strtolctype.h to manipulate 3837 * digit values, etc -- these have all been checked to ensure they make 3838 * no additional function calls. 
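 *
 * As a purely illustrative example (these values appear nowhere in the
 * code): given the JSON string
 *
 *	{"name": "dtrace", "stats": {"probes": 4, "ids": [10, 20]}}
 *
 * an elemlist of "stats" NUL "ids" NUL "1" NUL with nelems = 3 descends
 * into the nested object, indexes the array, and copies the string "20"
 * into dest; a selector naming an absent key (or malformed JSON) yields
 * NULL instead.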
3839 */ 3840 static char * 3841 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, 3842 char *dest) 3843 { 3844 dtrace_json_state_t state = DTRACE_JSON_REST; 3845 int64_t array_elem = INT64_MIN; 3846 int64_t array_pos = 0; 3847 uint8_t escape_unicount = 0; 3848 boolean_t string_is_key = B_FALSE; 3849 boolean_t collect_object = B_FALSE; 3850 boolean_t found_key = B_FALSE; 3851 boolean_t in_array = B_FALSE; 3852 uint32_t braces = 0, brackets = 0; 3853 char *elem = elemlist; 3854 char *dd = dest; 3855 uintptr_t cur; 3856 3857 for (cur = json; cur < json + size; cur++) { 3858 char cc = dtrace_load8(cur); 3859 if (cc == '\0') 3860 return (NULL); 3861 3862 switch (state) { 3863 case DTRACE_JSON_REST: 3864 if (isspace(cc)) 3865 break; 3866 3867 if (cc == '{') { 3868 state = DTRACE_JSON_OBJECT; 3869 break; 3870 } 3871 3872 if (cc == '[') { 3873 in_array = B_TRUE; 3874 array_pos = 0; 3875 array_elem = dtrace_strtoll(elem, 10, size); 3876 found_key = array_elem == 0 ? B_TRUE : B_FALSE; 3877 state = DTRACE_JSON_VALUE; 3878 break; 3879 } 3880 3881 /* 3882 * ERROR: expected to find a top-level object or array. 3883 */ 3884 return (NULL); 3885 case DTRACE_JSON_OBJECT: 3886 if (isspace(cc)) 3887 break; 3888 3889 if (cc == '"') { 3890 state = DTRACE_JSON_STRING; 3891 string_is_key = B_TRUE; 3892 break; 3893 } 3894 3895 /* 3896 * ERROR: either the object did not start with a key 3897 * string, or we've run off the end of the object 3898 * without finding the requested key. 3899 */ 3900 return (NULL); 3901 case DTRACE_JSON_STRING: 3902 if (cc == '\\') { 3903 *dd++ = '\\'; 3904 state = DTRACE_JSON_STRING_ESCAPE; 3905 break; 3906 } 3907 3908 if (cc == '"') { 3909 if (collect_object) { 3910 /* 3911 * We don't reset the dest here, as 3912 * the string is part of a larger 3913 * object being collected. 3914 */ 3915 *dd++ = cc; 3916 collect_object = B_FALSE; 3917 state = DTRACE_JSON_COLLECT_OBJECT; 3918 break; 3919 } 3920 *dd = '\0'; 3921 dd = dest; /* reset string buffer */ 3922 if (string_is_key) { 3923 if (dtrace_strncmp(dest, elem, 3924 size) == 0) 3925 found_key = B_TRUE; 3926 } else if (found_key) { 3927 if (nelems > 1) { 3928 /* 3929 * We expected an object, not 3930 * this string. 3931 */ 3932 return (NULL); 3933 } 3934 return (dest); 3935 } 3936 state = string_is_key ? DTRACE_JSON_COLON : 3937 DTRACE_JSON_COMMA; 3938 string_is_key = B_FALSE; 3939 break; 3940 } 3941 3942 *dd++ = cc; 3943 break; 3944 case DTRACE_JSON_STRING_ESCAPE: 3945 *dd++ = cc; 3946 if (cc == 'u') { 3947 escape_unicount = 0; 3948 state = DTRACE_JSON_STRING_ESCAPE_UNICODE; 3949 } else { 3950 state = DTRACE_JSON_STRING; 3951 } 3952 break; 3953 case DTRACE_JSON_STRING_ESCAPE_UNICODE: 3954 if (!isxdigit(cc)) { 3955 /* 3956 * ERROR: invalid unicode escape, expected 3957 * four valid hexidecimal digits. 3958 */ 3959 return (NULL); 3960 } 3961 3962 *dd++ = cc; 3963 if (++escape_unicount == 4) 3964 state = DTRACE_JSON_STRING; 3965 break; 3966 case DTRACE_JSON_COLON: 3967 if (isspace(cc)) 3968 break; 3969 3970 if (cc == ':') { 3971 state = DTRACE_JSON_VALUE; 3972 break; 3973 } 3974 3975 /* 3976 * ERROR: expected a colon. 
3977 */ 3978 return (NULL); 3979 case DTRACE_JSON_COMMA: 3980 if (isspace(cc)) 3981 break; 3982 3983 if (cc == ',') { 3984 if (in_array) { 3985 state = DTRACE_JSON_VALUE; 3986 if (++array_pos == array_elem) 3987 found_key = B_TRUE; 3988 } else { 3989 state = DTRACE_JSON_OBJECT; 3990 } 3991 break; 3992 } 3993 3994 /* 3995 * ERROR: either we hit an unexpected character, or 3996 * we reached the end of the object or array without 3997 * finding the requested key. 3998 */ 3999 return (NULL); 4000 case DTRACE_JSON_IDENTIFIER: 4001 if (islower(cc)) { 4002 *dd++ = cc; 4003 break; 4004 } 4005 4006 *dd = '\0'; 4007 dd = dest; /* reset string buffer */ 4008 4009 if (dtrace_strncmp(dest, "true", 5) == 0 || 4010 dtrace_strncmp(dest, "false", 6) == 0 || 4011 dtrace_strncmp(dest, "null", 5) == 0) { 4012 if (found_key) { 4013 if (nelems > 1) { 4014 /* 4015 * ERROR: We expected an object, 4016 * not this identifier. 4017 */ 4018 return (NULL); 4019 } 4020 return (dest); 4021 } else { 4022 cur--; 4023 state = DTRACE_JSON_COMMA; 4024 break; 4025 } 4026 } 4027 4028 /* 4029 * ERROR: we did not recognise the identifier as one 4030 * of those in the JSON specification. 4031 */ 4032 return (NULL); 4033 case DTRACE_JSON_NUMBER: 4034 if (cc == '.') { 4035 *dd++ = cc; 4036 state = DTRACE_JSON_NUMBER_FRAC; 4037 break; 4038 } 4039 4040 if (cc == 'x' || cc == 'X') { 4041 /* 4042 * ERROR: specification explicitly excludes 4043 * hexidecimal or octal numbers. 4044 */ 4045 return (NULL); 4046 } 4047 4048 /* FALLTHRU */ 4049 case DTRACE_JSON_NUMBER_FRAC: 4050 if (cc == 'e' || cc == 'E') { 4051 *dd++ = cc; 4052 state = DTRACE_JSON_NUMBER_EXP; 4053 break; 4054 } 4055 4056 if (cc == '+' || cc == '-') { 4057 /* 4058 * ERROR: expect sign as part of exponent only. 4059 */ 4060 return (NULL); 4061 } 4062 /* FALLTHRU */ 4063 case DTRACE_JSON_NUMBER_EXP: 4064 if (isdigit(cc) || cc == '+' || cc == '-') { 4065 *dd++ = cc; 4066 break; 4067 } 4068 4069 *dd = '\0'; 4070 dd = dest; /* reset string buffer */ 4071 if (found_key) { 4072 if (nelems > 1) { 4073 /* 4074 * ERROR: We expected an object, not 4075 * this number. 4076 */ 4077 return (NULL); 4078 } 4079 return (dest); 4080 } 4081 4082 cur--; 4083 state = DTRACE_JSON_COMMA; 4084 break; 4085 case DTRACE_JSON_VALUE: 4086 if (isspace(cc)) 4087 break; 4088 4089 if (cc == '{' || cc == '[') { 4090 if (nelems > 1 && found_key) { 4091 in_array = cc == '[' ? B_TRUE : B_FALSE; 4092 /* 4093 * If our element selector directs us 4094 * to descend into this nested object, 4095 * then move to the next selector 4096 * element in the list and restart the 4097 * state machine. 4098 */ 4099 while (*elem != '\0') 4100 elem++; 4101 elem++; /* skip the inter-element NUL */ 4102 nelems--; 4103 dd = dest; 4104 if (in_array) { 4105 state = DTRACE_JSON_VALUE; 4106 array_pos = 0; 4107 array_elem = dtrace_strtoll( 4108 elem, 10, size); 4109 found_key = array_elem == 0 ? 4110 B_TRUE : B_FALSE; 4111 } else { 4112 found_key = B_FALSE; 4113 state = DTRACE_JSON_OBJECT; 4114 } 4115 break; 4116 } 4117 4118 /* 4119 * Otherwise, we wish to either skip this 4120 * nested object or return it in full. 4121 */ 4122 if (cc == '[') 4123 brackets = 1; 4124 else 4125 braces = 1; 4126 *dd++ = cc; 4127 state = DTRACE_JSON_COLLECT_OBJECT; 4128 break; 4129 } 4130 4131 if (cc == '"') { 4132 state = DTRACE_JSON_STRING; 4133 break; 4134 } 4135 4136 if (islower(cc)) { 4137 /* 4138 * Here we deal with true, false and null. 
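 * Any other lower-case run (say, "nan") is collected the same way but
 * rejected by DTRACE_JSON_IDENTIFIER, which accepts only those three
 * literals.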
4139 */ 4140 *dd++ = cc; 4141 state = DTRACE_JSON_IDENTIFIER; 4142 break; 4143 } 4144 4145 if (cc == '-' || isdigit(cc)) { 4146 *dd++ = cc; 4147 state = DTRACE_JSON_NUMBER; 4148 break; 4149 } 4150 4151 /* 4152 * ERROR: unexpected character at start of value. 4153 */ 4154 return (NULL); 4155 case DTRACE_JSON_COLLECT_OBJECT: 4156 if (cc == '\0') 4157 /* 4158 * ERROR: unexpected end of input. 4159 */ 4160 return (NULL); 4161 4162 *dd++ = cc; 4163 if (cc == '"') { 4164 collect_object = B_TRUE; 4165 state = DTRACE_JSON_STRING; 4166 break; 4167 } 4168 4169 if (cc == ']') { 4170 if (brackets-- == 0) { 4171 /* 4172 * ERROR: unbalanced brackets. 4173 */ 4174 return (NULL); 4175 } 4176 } else if (cc == '}') { 4177 if (braces-- == 0) { 4178 /* 4179 * ERROR: unbalanced braces. 4180 */ 4181 return (NULL); 4182 } 4183 } else if (cc == '{') { 4184 braces++; 4185 } else if (cc == '[') { 4186 brackets++; 4187 } 4188 4189 if (brackets == 0 && braces == 0) { 4190 if (found_key) { 4191 *dd = '\0'; 4192 return (dest); 4193 } 4194 dd = dest; /* reset string buffer */ 4195 state = DTRACE_JSON_COMMA; 4196 } 4197 break; 4198 } 4199 } 4200 return (NULL); 4201 } 4202 4203 /* 4204 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 4205 * Notice that we don't bother validating the proper number of arguments or 4206 * their types in the tuple stack. This isn't needed because all argument 4207 * interpretation is safe because of our load safety -- the worst that can 4208 * happen is that a bogus program can obtain bogus results. 4209 */ 4210 static void 4211 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 4212 dtrace_key_t *tupregs, int nargs, 4213 dtrace_mstate_t *mstate, dtrace_state_t *state) 4214 { 4215 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4216 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4217 dtrace_vstate_t *vstate = &state->dts_vstate; 4218 4219 #ifdef illumos 4220 union { 4221 mutex_impl_t mi; 4222 uint64_t mx; 4223 } m; 4224 4225 union { 4226 krwlock_t ri; 4227 uintptr_t rw; 4228 } r; 4229 #else 4230 struct thread *lowner; 4231 union { 4232 struct lock_object *li; 4233 uintptr_t lx; 4234 } l; 4235 #endif 4236 4237 switch (subr) { 4238 case DIF_SUBR_RAND: 4239 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 4240 break; 4241 4242 #ifdef illumos 4243 case DIF_SUBR_MUTEX_OWNED: 4244 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4245 mstate, vstate)) { 4246 regs[rd] = 0; 4247 break; 4248 } 4249 4250 m.mx = dtrace_load64(tupregs[0].dttk_value); 4251 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 4252 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 4253 else 4254 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 4255 break; 4256 4257 case DIF_SUBR_MUTEX_OWNER: 4258 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4259 mstate, vstate)) { 4260 regs[rd] = 0; 4261 break; 4262 } 4263 4264 m.mx = dtrace_load64(tupregs[0].dttk_value); 4265 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 4266 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 4267 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 4268 else 4269 regs[rd] = 0; 4270 break; 4271 4272 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4273 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4274 mstate, vstate)) { 4275 regs[rd] = 0; 4276 break; 4277 } 4278 4279 m.mx = dtrace_load64(tupregs[0].dttk_value); 4280 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 4281 break; 4282 4283 case DIF_SUBR_MUTEX_TYPE_SPIN: 4284 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4285 mstate, vstate)) { 4286 
regs[rd] = 0; 4287 break; 4288 } 4289 4290 m.mx = dtrace_load64(tupregs[0].dttk_value); 4291 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 4292 break; 4293 4294 case DIF_SUBR_RW_READ_HELD: { 4295 uintptr_t tmp; 4296 4297 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4298 mstate, vstate)) { 4299 regs[rd] = 0; 4300 break; 4301 } 4302 4303 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4304 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 4305 break; 4306 } 4307 4308 case DIF_SUBR_RW_WRITE_HELD: 4309 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4310 mstate, vstate)) { 4311 regs[rd] = 0; 4312 break; 4313 } 4314 4315 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4316 regs[rd] = _RW_WRITE_HELD(&r.ri); 4317 break; 4318 4319 case DIF_SUBR_RW_ISWRITER: 4320 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4321 mstate, vstate)) { 4322 regs[rd] = 0; 4323 break; 4324 } 4325 4326 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4327 regs[rd] = _RW_ISWRITER(&r.ri); 4328 break; 4329 4330 #else /* !illumos */ 4331 case DIF_SUBR_MUTEX_OWNED: 4332 if (!dtrace_canload(tupregs[0].dttk_value, 4333 sizeof (struct lock_object), mstate, vstate)) { 4334 regs[rd] = 0; 4335 break; 4336 } 4337 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4338 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4339 break; 4340 4341 case DIF_SUBR_MUTEX_OWNER: 4342 if (!dtrace_canload(tupregs[0].dttk_value, 4343 sizeof (struct lock_object), mstate, vstate)) { 4344 regs[rd] = 0; 4345 break; 4346 } 4347 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4348 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4349 regs[rd] = (uintptr_t)lowner; 4350 break; 4351 4352 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4353 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4354 mstate, vstate)) { 4355 regs[rd] = 0; 4356 break; 4357 } 4358 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4359 /* XXX - should be only LC_SLEEPABLE? 
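 * (For context, and loosely speaking: on FreeBSD LC_SLEEPLOCK is set
 * for ordinary sleep mutexes, while LC_SLEEPABLE marks classes such as
 * sx locks that may be held across a sleep; testing either bit treats
 * both kinds as "adaptive" here.)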
*/ 4360 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 4361 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 4362 break; 4363 4364 case DIF_SUBR_MUTEX_TYPE_SPIN: 4365 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4366 mstate, vstate)) { 4367 regs[rd] = 0; 4368 break; 4369 } 4370 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4371 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 4372 break; 4373 4374 case DIF_SUBR_RW_READ_HELD: 4375 case DIF_SUBR_SX_SHARED_HELD: 4376 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4377 mstate, vstate)) { 4378 regs[rd] = 0; 4379 break; 4380 } 4381 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4382 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4383 lowner == NULL; 4384 break; 4385 4386 case DIF_SUBR_RW_WRITE_HELD: 4387 case DIF_SUBR_SX_EXCLUSIVE_HELD: 4388 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4389 mstate, vstate)) { 4390 regs[rd] = 0; 4391 break; 4392 } 4393 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4394 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4395 lowner != NULL; 4396 break; 4397 4398 case DIF_SUBR_RW_ISWRITER: 4399 case DIF_SUBR_SX_ISEXCLUSIVE: 4400 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4401 mstate, vstate)) { 4402 regs[rd] = 0; 4403 break; 4404 } 4405 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4406 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4407 regs[rd] = (lowner == curthread); 4408 break; 4409 #endif /* illumos */ 4410 4411 case DIF_SUBR_BCOPY: { 4412 /* 4413 * We need to be sure that the destination is in the scratch 4414 * region -- no other region is allowed. 4415 */ 4416 uintptr_t src = tupregs[0].dttk_value; 4417 uintptr_t dest = tupregs[1].dttk_value; 4418 size_t size = tupregs[2].dttk_value; 4419 4420 if (!dtrace_inscratch(dest, size, mstate)) { 4421 *flags |= CPU_DTRACE_BADADDR; 4422 *illval = regs[rd]; 4423 break; 4424 } 4425 4426 if (!dtrace_canload(src, size, mstate, vstate)) { 4427 regs[rd] = 0; 4428 break; 4429 } 4430 4431 dtrace_bcopy((void *)src, (void *)dest, size); 4432 break; 4433 } 4434 4435 case DIF_SUBR_ALLOCA: 4436 case DIF_SUBR_COPYIN: { 4437 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4438 uint64_t size = 4439 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 4440 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 4441 4442 /* 4443 * This action doesn't require any credential checks since 4444 * probes will not activate in user contexts to which the 4445 * enabling user does not have permissions. 4446 */ 4447 4448 /* 4449 * Rounding up the user allocation size could have overflowed 4450 * a large, bogus allocation (like -1ULL) to 0. 4451 */ 4452 if (scratch_size < size || 4453 !DTRACE_INSCRATCH(mstate, scratch_size)) { 4454 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4455 regs[rd] = 0; 4456 break; 4457 } 4458 4459 if (subr == DIF_SUBR_COPYIN) { 4460 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4461 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4462 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4463 } 4464 4465 mstate->dtms_scratch_ptr += scratch_size; 4466 regs[rd] = dest; 4467 break; 4468 } 4469 4470 case DIF_SUBR_COPYINTO: { 4471 uint64_t size = tupregs[1].dttk_value; 4472 uintptr_t dest = tupregs[2].dttk_value; 4473 4474 /* 4475 * This action doesn't require any credential checks since 4476 * probes will not activate in user contexts to which the 4477 * enabling user does not have permissions. 
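 *
 * (Illustrative D usage, not taken from this file: the destination is
 * typically scratch obtained from alloca(), as in
 *
 *	this->buf = alloca(this->len);
 *	copyinto(this->uaddr, this->len, this->buf);
 *
 * where this->uaddr and this->len stand for whatever user address and
 * length the clause has computed.)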
4478 */ 4479 if (!dtrace_inscratch(dest, size, mstate)) { 4480 *flags |= CPU_DTRACE_BADADDR; 4481 *illval = regs[rd]; 4482 break; 4483 } 4484 4485 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4486 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4487 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4488 break; 4489 } 4490 4491 case DIF_SUBR_COPYINSTR: { 4492 uintptr_t dest = mstate->dtms_scratch_ptr; 4493 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4494 4495 if (nargs > 1 && tupregs[1].dttk_value < size) 4496 size = tupregs[1].dttk_value + 1; 4497 4498 /* 4499 * This action doesn't require any credential checks since 4500 * probes will not activate in user contexts to which the 4501 * enabling user does not have permissions. 4502 */ 4503 if (!DTRACE_INSCRATCH(mstate, size)) { 4504 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4505 regs[rd] = 0; 4506 break; 4507 } 4508 4509 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4510 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 4511 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4512 4513 ((char *)dest)[size - 1] = '\0'; 4514 mstate->dtms_scratch_ptr += size; 4515 regs[rd] = dest; 4516 break; 4517 } 4518 4519 #ifdef illumos 4520 case DIF_SUBR_MSGSIZE: 4521 case DIF_SUBR_MSGDSIZE: { 4522 uintptr_t baddr = tupregs[0].dttk_value, daddr; 4523 uintptr_t wptr, rptr; 4524 size_t count = 0; 4525 int cont = 0; 4526 4527 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 4528 4529 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 4530 vstate)) { 4531 regs[rd] = 0; 4532 break; 4533 } 4534 4535 wptr = dtrace_loadptr(baddr + 4536 offsetof(mblk_t, b_wptr)); 4537 4538 rptr = dtrace_loadptr(baddr + 4539 offsetof(mblk_t, b_rptr)); 4540 4541 if (wptr < rptr) { 4542 *flags |= CPU_DTRACE_BADADDR; 4543 *illval = tupregs[0].dttk_value; 4544 break; 4545 } 4546 4547 daddr = dtrace_loadptr(baddr + 4548 offsetof(mblk_t, b_datap)); 4549 4550 baddr = dtrace_loadptr(baddr + 4551 offsetof(mblk_t, b_cont)); 4552 4553 /* 4554 * We want to prevent against denial-of-service here, 4555 * so we're only going to search the list for 4556 * dtrace_msgdsize_max mblks. 
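 * (For reference: msgsize() sums b_wptr - b_rptr over every mblk in
 * the chain, while msgdsize() does the same but, as below, skips any
 * mblk whose dblk type is not M_DATA.)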
4557 */ 4558 if (cont++ > dtrace_msgdsize_max) { 4559 *flags |= CPU_DTRACE_ILLOP; 4560 break; 4561 } 4562 4563 if (subr == DIF_SUBR_MSGDSIZE) { 4564 if (dtrace_load8(daddr + 4565 offsetof(dblk_t, db_type)) != M_DATA) 4566 continue; 4567 } 4568 4569 count += wptr - rptr; 4570 } 4571 4572 if (!(*flags & CPU_DTRACE_FAULT)) 4573 regs[rd] = count; 4574 4575 break; 4576 } 4577 #endif 4578 4579 case DIF_SUBR_PROGENYOF: { 4580 pid_t pid = tupregs[0].dttk_value; 4581 proc_t *p; 4582 int rval = 0; 4583 4584 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4585 4586 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 4587 #ifdef illumos 4588 if (p->p_pidp->pid_id == pid) { 4589 #else 4590 if (p->p_pid == pid) { 4591 #endif 4592 rval = 1; 4593 break; 4594 } 4595 } 4596 4597 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4598 4599 regs[rd] = rval; 4600 break; 4601 } 4602 4603 case DIF_SUBR_SPECULATION: 4604 regs[rd] = dtrace_speculation(state); 4605 break; 4606 4607 case DIF_SUBR_COPYOUT: { 4608 uintptr_t kaddr = tupregs[0].dttk_value; 4609 uintptr_t uaddr = tupregs[1].dttk_value; 4610 uint64_t size = tupregs[2].dttk_value; 4611 4612 if (!dtrace_destructive_disallow && 4613 dtrace_priv_proc_control(state) && 4614 !dtrace_istoxic(kaddr, size) && 4615 dtrace_canload(kaddr, size, mstate, vstate)) { 4616 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4617 dtrace_copyout(kaddr, uaddr, size, flags); 4618 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4619 } 4620 break; 4621 } 4622 4623 case DIF_SUBR_COPYOUTSTR: { 4624 uintptr_t kaddr = tupregs[0].dttk_value; 4625 uintptr_t uaddr = tupregs[1].dttk_value; 4626 uint64_t size = tupregs[2].dttk_value; 4627 size_t lim; 4628 4629 if (!dtrace_destructive_disallow && 4630 dtrace_priv_proc_control(state) && 4631 !dtrace_istoxic(kaddr, size) && 4632 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) { 4633 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4634 dtrace_copyoutstr(kaddr, uaddr, lim, flags); 4635 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4636 } 4637 break; 4638 } 4639 4640 case DIF_SUBR_STRLEN: { 4641 size_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4642 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 4643 size_t lim; 4644 4645 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { 4646 regs[rd] = 0; 4647 break; 4648 } 4649 4650 regs[rd] = dtrace_strlen((char *)addr, lim); 4651 break; 4652 } 4653 4654 case DIF_SUBR_STRCHR: 4655 case DIF_SUBR_STRRCHR: { 4656 /* 4657 * We're going to iterate over the string looking for the 4658 * specified character. We will iterate until we have reached 4659 * the string length or we have found the character. If this 4660 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 4661 * of the specified character instead of the first. 4662 */ 4663 uintptr_t addr = tupregs[0].dttk_value; 4664 uintptr_t addr_limit; 4665 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4666 size_t lim; 4667 char c, target = (char)tupregs[1].dttk_value; 4668 4669 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { 4670 regs[rd] = 0; 4671 break; 4672 } 4673 addr_limit = addr + lim; 4674 4675 for (regs[rd] = 0; addr < addr_limit; addr++) { 4676 if ((c = dtrace_load8(addr)) == target) { 4677 regs[rd] = addr; 4678 4679 if (subr == DIF_SUBR_STRCHR) 4680 break; 4681 } 4682 4683 if (c == '\0') 4684 break; 4685 } 4686 break; 4687 } 4688 4689 case DIF_SUBR_STRSTR: 4690 case DIF_SUBR_INDEX: 4691 case DIF_SUBR_RINDEX: { 4692 /* 4693 * We're going to iterate over the string looking for the 4694 * specified string. 
We will iterate until we have reached 4695 * the string length or we have found the string. (Yes, this 4696 * is done in the most naive way possible -- but considering 4697 * that the string we're searching for is likely to be 4698 * relatively short, the complexity of Rabin-Karp or similar 4699 * hardly seems merited.) 4700 */ 4701 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 4702 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 4703 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4704 size_t len = dtrace_strlen(addr, size); 4705 size_t sublen = dtrace_strlen(substr, size); 4706 char *limit = addr + len, *orig = addr; 4707 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 4708 int inc = 1; 4709 4710 regs[rd] = notfound; 4711 4712 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 4713 regs[rd] = 0; 4714 break; 4715 } 4716 4717 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 4718 vstate)) { 4719 regs[rd] = 0; 4720 break; 4721 } 4722 4723 /* 4724 * strstr() and index()/rindex() have similar semantics if 4725 * both strings are the empty string: strstr() returns a 4726 * pointer to the (empty) string, and index() and rindex() 4727 * both return index 0 (regardless of any position argument). 4728 */ 4729 if (sublen == 0 && len == 0) { 4730 if (subr == DIF_SUBR_STRSTR) 4731 regs[rd] = (uintptr_t)addr; 4732 else 4733 regs[rd] = 0; 4734 break; 4735 } 4736 4737 if (subr != DIF_SUBR_STRSTR) { 4738 if (subr == DIF_SUBR_RINDEX) { 4739 limit = orig - 1; 4740 addr += len; 4741 inc = -1; 4742 } 4743 4744 /* 4745 * Both index() and rindex() take an optional position 4746 * argument that denotes the starting position. 4747 */ 4748 if (nargs == 3) { 4749 int64_t pos = (int64_t)tupregs[2].dttk_value; 4750 4751 /* 4752 * If the position argument to index() is 4753 * negative, Perl implicitly clamps it at 4754 * zero. This semantic is a little surprising 4755 * given the special meaning of negative 4756 * positions to similar Perl functions like 4757 * substr(), but it appears to reflect a 4758 * notion that index() can start from a 4759 * negative index and increment its way up to 4760 * the string. Given this notion, Perl's 4761 * rindex() is at least self-consistent in 4762 * that it implicitly clamps positions greater 4763 * than the string length to be the string 4764 * length. Where Perl completely loses 4765 * coherence, however, is when the specified 4766 * substring is the empty string (""). In 4767 * this case, even if the position is 4768 * negative, rindex() returns 0 -- and even if 4769 * the position is greater than the length, 4770 * index() returns the string length. These 4771 * semantics violate the notion that index() 4772 * should never return a value less than the 4773 * specified position and that rindex() should 4774 * never return a value greater than the 4775 * specified position. (One assumes that 4776 * these semantics are artifacts of Perl's 4777 * implementation and not the results of 4778 * deliberate design -- it beggars belief that 4779 * even Larry Wall could desire such oddness.) 4780 * While in the abstract one would wish for 4781 * consistent position semantics across 4782 * substr(), index() and rindex() -- or at the 4783 * very least self-consistent position 4784 * semantics for index() and rindex() -- we 4785 * instead opt to keep with the extant Perl 4786 * semantics, in all their broken glory. (Do 4787 * we have more desire to maintain Perl's 4788 * semantics than Perl does? Probably.) 
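 *
 * A few concrete cases, evaluated per the rules above (shown purely
 * for illustration; none of these literals appears in the code):
 *
 *	index("foobar", "o", -5)	=> 1	(negative pos clamped to 0)
 *	index("foobar", "", 100)	=> 6	(empty substring, pos > len)
 *	rindex("foobar", "o", 100)	=> 2	(pos clamped to strlen)
 *	rindex("foobar", "", -2)	=> 0	(empty substring, pos < 0)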
4789 */ 4790 if (subr == DIF_SUBR_RINDEX) { 4791 if (pos < 0) { 4792 if (sublen == 0) 4793 regs[rd] = 0; 4794 break; 4795 } 4796 4797 if (pos > len) 4798 pos = len; 4799 } else { 4800 if (pos < 0) 4801 pos = 0; 4802 4803 if (pos >= len) { 4804 if (sublen == 0) 4805 regs[rd] = len; 4806 break; 4807 } 4808 } 4809 4810 addr = orig + pos; 4811 } 4812 } 4813 4814 for (regs[rd] = notfound; addr != limit; addr += inc) { 4815 if (dtrace_strncmp(addr, substr, sublen) == 0) { 4816 if (subr != DIF_SUBR_STRSTR) { 4817 /* 4818 * As D index() and rindex() are 4819 * modeled on Perl (and not on awk), 4820 * we return a zero-based (and not a 4821 * one-based) index. (For you Perl 4822 * weenies: no, we're not going to add 4823 * $[ -- and shouldn't you be at a con 4824 * or something?) 4825 */ 4826 regs[rd] = (uintptr_t)(addr - orig); 4827 break; 4828 } 4829 4830 ASSERT(subr == DIF_SUBR_STRSTR); 4831 regs[rd] = (uintptr_t)addr; 4832 break; 4833 } 4834 } 4835 4836 break; 4837 } 4838 4839 case DIF_SUBR_STRTOK: { 4840 uintptr_t addr = tupregs[0].dttk_value; 4841 uintptr_t tokaddr = tupregs[1].dttk_value; 4842 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4843 uintptr_t limit, toklimit; 4844 size_t clim; 4845 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 4846 char *dest = (char *)mstate->dtms_scratch_ptr; 4847 int i; 4848 4849 /* 4850 * Check both the token buffer and (later) the input buffer, 4851 * since both could be non-scratch addresses. 4852 */ 4853 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) { 4854 regs[rd] = 0; 4855 break; 4856 } 4857 toklimit = tokaddr + clim; 4858 4859 if (!DTRACE_INSCRATCH(mstate, size)) { 4860 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4861 regs[rd] = 0; 4862 break; 4863 } 4864 4865 if (addr == 0) { 4866 /* 4867 * If the address specified is NULL, we use our saved 4868 * strtok pointer from the mstate. Note that this 4869 * means that the saved strtok pointer is _only_ 4870 * valid within multiple enablings of the same probe -- 4871 * it behaves like an implicit clause-local variable. 4872 */ 4873 addr = mstate->dtms_strtok; 4874 limit = mstate->dtms_strtok_limit; 4875 } else { 4876 /* 4877 * If the user-specified address is non-NULL we must 4878 * access check it. This is the only time we have 4879 * a chance to do so, since this address may reside 4880 * in the string table of this clause-- future calls 4881 * (when we fetch addr from mstate->dtms_strtok) 4882 * would fail this access check. 4883 */ 4884 if (!dtrace_strcanload(addr, size, &clim, mstate, 4885 vstate)) { 4886 regs[rd] = 0; 4887 break; 4888 } 4889 limit = addr + clim; 4890 } 4891 4892 /* 4893 * First, zero the token map, and then process the token 4894 * string -- setting a bit in the map for every character 4895 * found in the token string. 4896 */ 4897 for (i = 0; i < sizeof (tokmap); i++) 4898 tokmap[i] = 0; 4899 4900 for (; tokaddr < toklimit; tokaddr++) { 4901 if ((c = dtrace_load8(tokaddr)) == '\0') 4902 break; 4903 4904 ASSERT((c >> 3) < sizeof (tokmap)); 4905 tokmap[c >> 3] |= (1 << (c & 0x7)); 4906 } 4907 4908 for (; addr < limit; addr++) { 4909 /* 4910 * We're looking for a character that is _not_ 4911 * contained in the token string. 4912 */ 4913 if ((c = dtrace_load8(addr)) == '\0') 4914 break; 4915 4916 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 4917 break; 4918 } 4919 4920 if (c == '\0') { 4921 /* 4922 * We reached the end of the string without finding 4923 * any character that was not in the token string. 
4924 * We return NULL in this case, and we set the saved 4925 * address to NULL as well. 4926 */ 4927 regs[rd] = 0; 4928 mstate->dtms_strtok = 0; 4929 mstate->dtms_strtok_limit = 0; 4930 break; 4931 } 4932 4933 /* 4934 * From here on, we're copying into the destination string. 4935 */ 4936 for (i = 0; addr < limit && i < size - 1; addr++) { 4937 if ((c = dtrace_load8(addr)) == '\0') 4938 break; 4939 4940 if (tokmap[c >> 3] & (1 << (c & 0x7))) 4941 break; 4942 4943 ASSERT(i < size); 4944 dest[i++] = c; 4945 } 4946 4947 ASSERT(i < size); 4948 dest[i] = '\0'; 4949 regs[rd] = (uintptr_t)dest; 4950 mstate->dtms_scratch_ptr += size; 4951 mstate->dtms_strtok = addr; 4952 mstate->dtms_strtok_limit = limit; 4953 break; 4954 } 4955 4956 case DIF_SUBR_SUBSTR: { 4957 uintptr_t s = tupregs[0].dttk_value; 4958 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4959 char *d = (char *)mstate->dtms_scratch_ptr; 4960 int64_t index = (int64_t)tupregs[1].dttk_value; 4961 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4962 size_t len = dtrace_strlen((char *)s, size); 4963 int64_t i; 4964 4965 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4966 regs[rd] = 0; 4967 break; 4968 } 4969 4970 if (!DTRACE_INSCRATCH(mstate, size)) { 4971 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4972 regs[rd] = 0; 4973 break; 4974 } 4975 4976 if (nargs <= 2) 4977 remaining = (int64_t)size; 4978 4979 if (index < 0) { 4980 index += len; 4981 4982 if (index < 0 && index + remaining > 0) { 4983 remaining += index; 4984 index = 0; 4985 } 4986 } 4987 4988 if (index >= len || index < 0) { 4989 remaining = 0; 4990 } else if (remaining < 0) { 4991 remaining += len - index; 4992 } else if (index + remaining > size) { 4993 remaining = size - index; 4994 } 4995 4996 for (i = 0; i < remaining; i++) { 4997 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4998 break; 4999 } 5000 5001 d[i] = '\0'; 5002 5003 mstate->dtms_scratch_ptr += size; 5004 regs[rd] = (uintptr_t)d; 5005 break; 5006 } 5007 5008 case DIF_SUBR_JSON: { 5009 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5010 uintptr_t json = tupregs[0].dttk_value; 5011 size_t jsonlen = dtrace_strlen((char *)json, size); 5012 uintptr_t elem = tupregs[1].dttk_value; 5013 size_t elemlen = dtrace_strlen((char *)elem, size); 5014 5015 char *dest = (char *)mstate->dtms_scratch_ptr; 5016 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; 5017 char *ee = elemlist; 5018 int nelems = 1; 5019 uintptr_t cur; 5020 5021 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || 5022 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { 5023 regs[rd] = 0; 5024 break; 5025 } 5026 5027 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { 5028 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5029 regs[rd] = 0; 5030 break; 5031 } 5032 5033 /* 5034 * Read the element selector and split it up into a packed list 5035 * of strings. 5036 */ 5037 for (cur = elem; cur < elem + elemlen; cur++) { 5038 char cc = dtrace_load8(cur); 5039 5040 if (cur == elem && cc == '[') { 5041 /* 5042 * If the first element selector key is 5043 * actually an array index then ignore the 5044 * bracket. 5045 */ 5046 continue; 5047 } 5048 5049 if (cc == ']') 5050 continue; 5051 5052 if (cc == '.' 
|| cc == '[') { 5053 nelems++; 5054 cc = '\0'; 5055 } 5056 5057 *ee++ = cc; 5058 } 5059 *ee++ = '\0'; 5060 5061 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, 5062 nelems, dest)) != 0) 5063 mstate->dtms_scratch_ptr += jsonlen + 1; 5064 break; 5065 } 5066 5067 case DIF_SUBR_TOUPPER: 5068 case DIF_SUBR_TOLOWER: { 5069 uintptr_t s = tupregs[0].dttk_value; 5070 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5071 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5072 size_t len = dtrace_strlen((char *)s, size); 5073 char lower, upper, convert; 5074 int64_t i; 5075 5076 if (subr == DIF_SUBR_TOUPPER) { 5077 lower = 'a'; 5078 upper = 'z'; 5079 convert = 'A'; 5080 } else { 5081 lower = 'A'; 5082 upper = 'Z'; 5083 convert = 'a'; 5084 } 5085 5086 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 5087 regs[rd] = 0; 5088 break; 5089 } 5090 5091 if (!DTRACE_INSCRATCH(mstate, size)) { 5092 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5093 regs[rd] = 0; 5094 break; 5095 } 5096 5097 for (i = 0; i < size - 1; i++) { 5098 if ((c = dtrace_load8(s + i)) == '\0') 5099 break; 5100 5101 if (c >= lower && c <= upper) 5102 c = convert + (c - lower); 5103 5104 dest[i] = c; 5105 } 5106 5107 ASSERT(i < size); 5108 dest[i] = '\0'; 5109 regs[rd] = (uintptr_t)dest; 5110 mstate->dtms_scratch_ptr += size; 5111 break; 5112 } 5113 5114 #ifdef illumos 5115 case DIF_SUBR_GETMAJOR: 5116 #ifdef _LP64 5117 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 5118 #else 5119 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 5120 #endif 5121 break; 5122 5123 case DIF_SUBR_GETMINOR: 5124 #ifdef _LP64 5125 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 5126 #else 5127 regs[rd] = tupregs[0].dttk_value & MAXMIN; 5128 #endif 5129 break; 5130 5131 case DIF_SUBR_DDI_PATHNAME: { 5132 /* 5133 * This one is a galactic mess. We are going to roughly 5134 * emulate ddi_pathname(), but it's made more complicated 5135 * by the fact that we (a) want to include the minor name and 5136 * (b) must proceed iteratively instead of recursively. 5137 */ 5138 uintptr_t dest = mstate->dtms_scratch_ptr; 5139 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5140 char *start = (char *)dest, *end = start + size - 1; 5141 uintptr_t daddr = tupregs[0].dttk_value; 5142 int64_t minor = (int64_t)tupregs[1].dttk_value; 5143 char *s; 5144 int i, len, depth = 0; 5145 5146 /* 5147 * Due to all the pointer jumping we do and context we must 5148 * rely upon, we just mandate that the user must have kernel 5149 * read privileges to use this routine. 5150 */ 5151 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 5152 *flags |= CPU_DTRACE_KPRIV; 5153 *illval = daddr; 5154 regs[rd] = 0; 5155 } 5156 5157 if (!DTRACE_INSCRATCH(mstate, size)) { 5158 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5159 regs[rd] = 0; 5160 break; 5161 } 5162 5163 *end = '\0'; 5164 5165 /* 5166 * We want to have a name for the minor. In order to do this, 5167 * we need to walk the minor list from the devinfo. We want 5168 * to be sure that we don't infinitely walk a circular list, 5169 * so we check for circularity by sending a scout pointer 5170 * ahead two elements for every element that we iterate over; 5171 * if the list is circular, these will ultimately point to the 5172 * same element. 
You may recognize this little trick as the 5173 * answer to a stupid interview question -- one that always 5174 * seems to be asked by those who had to have it laboriously 5175 * explained to them, and who can't even concisely describe 5176 * the conditions under which one would be forced to resort to 5177 * this technique. Needless to say, those conditions are 5178 * found here -- and probably only here. Is this the only use 5179 * of this infamous trick in shipping, production code? If it 5180 * isn't, it probably should be... 5181 */ 5182 if (minor != -1) { 5183 uintptr_t maddr = dtrace_loadptr(daddr + 5184 offsetof(struct dev_info, devi_minor)); 5185 5186 uintptr_t next = offsetof(struct ddi_minor_data, next); 5187 uintptr_t name = offsetof(struct ddi_minor_data, 5188 d_minor) + offsetof(struct ddi_minor, name); 5189 uintptr_t dev = offsetof(struct ddi_minor_data, 5190 d_minor) + offsetof(struct ddi_minor, dev); 5191 uintptr_t scout; 5192 5193 if (maddr != NULL) 5194 scout = dtrace_loadptr(maddr + next); 5195 5196 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5197 uint64_t m; 5198 #ifdef _LP64 5199 m = dtrace_load64(maddr + dev) & MAXMIN64; 5200 #else 5201 m = dtrace_load32(maddr + dev) & MAXMIN; 5202 #endif 5203 if (m != minor) { 5204 maddr = dtrace_loadptr(maddr + next); 5205 5206 if (scout == NULL) 5207 continue; 5208 5209 scout = dtrace_loadptr(scout + next); 5210 5211 if (scout == NULL) 5212 continue; 5213 5214 scout = dtrace_loadptr(scout + next); 5215 5216 if (scout == NULL) 5217 continue; 5218 5219 if (scout == maddr) { 5220 *flags |= CPU_DTRACE_ILLOP; 5221 break; 5222 } 5223 5224 continue; 5225 } 5226 5227 /* 5228 * We have the minor data. Now we need to 5229 * copy the minor's name into the end of the 5230 * pathname. 5231 */ 5232 s = (char *)dtrace_loadptr(maddr + name); 5233 len = dtrace_strlen(s, size); 5234 5235 if (*flags & CPU_DTRACE_FAULT) 5236 break; 5237 5238 if (len != 0) { 5239 if ((end -= (len + 1)) < start) 5240 break; 5241 5242 *end = ':'; 5243 } 5244 5245 for (i = 1; i <= len; i++) 5246 end[i] = dtrace_load8((uintptr_t)s++); 5247 break; 5248 } 5249 } 5250 5251 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5252 ddi_node_state_t devi_state; 5253 5254 devi_state = dtrace_load32(daddr + 5255 offsetof(struct dev_info, devi_node_state)); 5256 5257 if (*flags & CPU_DTRACE_FAULT) 5258 break; 5259 5260 if (devi_state >= DS_INITIALIZED) { 5261 s = (char *)dtrace_loadptr(daddr + 5262 offsetof(struct dev_info, devi_addr)); 5263 len = dtrace_strlen(s, size); 5264 5265 if (*flags & CPU_DTRACE_FAULT) 5266 break; 5267 5268 if (len != 0) { 5269 if ((end -= (len + 1)) < start) 5270 break; 5271 5272 *end = '@'; 5273 } 5274 5275 for (i = 1; i <= len; i++) 5276 end[i] = dtrace_load8((uintptr_t)s++); 5277 } 5278 5279 /* 5280 * Now for the node name... 5281 */ 5282 s = (char *)dtrace_loadptr(daddr + 5283 offsetof(struct dev_info, devi_node_name)); 5284 5285 daddr = dtrace_loadptr(daddr + 5286 offsetof(struct dev_info, devi_parent)); 5287 5288 /* 5289 * If our parent is NULL (that is, if we're the root 5290 * node), we're going to use the special path 5291 * "devices". 
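 * The pathname is assembled back-to-front, so the result resembles a
 * /devices path -- something like "/devices/pci@0,0/disk@1:a" (the
 * exact form here is purely illustrative) -- with the minor name, if
 * any, appended after a ':'.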
5292 */ 5293 if (daddr == 0) 5294 s = "devices"; 5295 5296 len = dtrace_strlen(s, size); 5297 if (*flags & CPU_DTRACE_FAULT) 5298 break; 5299 5300 if ((end -= (len + 1)) < start) 5301 break; 5302 5303 for (i = 1; i <= len; i++) 5304 end[i] = dtrace_load8((uintptr_t)s++); 5305 *end = '/'; 5306 5307 if (depth++ > dtrace_devdepth_max) { 5308 *flags |= CPU_DTRACE_ILLOP; 5309 break; 5310 } 5311 } 5312 5313 if (end < start) 5314 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5315 5316 if (daddr == 0) { 5317 regs[rd] = (uintptr_t)end; 5318 mstate->dtms_scratch_ptr += size; 5319 } 5320 5321 break; 5322 } 5323 #endif 5324 5325 case DIF_SUBR_STRJOIN: { 5326 char *d = (char *)mstate->dtms_scratch_ptr; 5327 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5328 uintptr_t s1 = tupregs[0].dttk_value; 5329 uintptr_t s2 = tupregs[1].dttk_value; 5330 int i = 0, j = 0; 5331 size_t lim1, lim2; 5332 char c; 5333 5334 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) || 5335 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) { 5336 regs[rd] = 0; 5337 break; 5338 } 5339 5340 if (!DTRACE_INSCRATCH(mstate, size)) { 5341 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5342 regs[rd] = 0; 5343 break; 5344 } 5345 5346 for (;;) { 5347 if (i >= size) { 5348 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5349 regs[rd] = 0; 5350 break; 5351 } 5352 c = (i >= lim1) ? '\0' : dtrace_load8(s1++); 5353 if ((d[i++] = c) == '\0') { 5354 i--; 5355 break; 5356 } 5357 } 5358 5359 for (;;) { 5360 if (i >= size) { 5361 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5362 regs[rd] = 0; 5363 break; 5364 } 5365 5366 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++); 5367 if ((d[i++] = c) == '\0') 5368 break; 5369 } 5370 5371 if (i < size) { 5372 mstate->dtms_scratch_ptr += i; 5373 regs[rd] = (uintptr_t)d; 5374 } 5375 5376 break; 5377 } 5378 5379 case DIF_SUBR_STRTOLL: { 5380 uintptr_t s = tupregs[0].dttk_value; 5381 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5382 size_t lim; 5383 int base = 10; 5384 5385 if (nargs > 1) { 5386 if ((base = tupregs[1].dttk_value) <= 1 || 5387 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5388 *flags |= CPU_DTRACE_ILLOP; 5389 break; 5390 } 5391 } 5392 5393 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) { 5394 regs[rd] = INT64_MIN; 5395 break; 5396 } 5397 5398 regs[rd] = dtrace_strtoll((char *)s, base, lim); 5399 break; 5400 } 5401 5402 case DIF_SUBR_LLTOSTR: { 5403 int64_t i = (int64_t)tupregs[0].dttk_value; 5404 uint64_t val, digit; 5405 uint64_t size = 65; /* enough room for 2^64 in binary */ 5406 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 5407 int base = 10; 5408 5409 if (nargs > 1) { 5410 if ((base = tupregs[1].dttk_value) <= 1 || 5411 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5412 *flags |= CPU_DTRACE_ILLOP; 5413 break; 5414 } 5415 } 5416 5417 val = (base == 10 && i < 0) ? 
i * -1 : i; 5418 5419 if (!DTRACE_INSCRATCH(mstate, size)) { 5420 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5421 regs[rd] = 0; 5422 break; 5423 } 5424 5425 for (*end-- = '\0'; val; val /= base) { 5426 if ((digit = val % base) <= '9' - '0') { 5427 *end-- = '0' + digit; 5428 } else { 5429 *end-- = 'a' + (digit - ('9' - '0') - 1); 5430 } 5431 } 5432 5433 if (i == 0 && base == 16) 5434 *end-- = '0'; 5435 5436 if (base == 16) 5437 *end-- = 'x'; 5438 5439 if (i == 0 || base == 8 || base == 16) 5440 *end-- = '0'; 5441 5442 if (i < 0 && base == 10) 5443 *end-- = '-'; 5444 5445 regs[rd] = (uintptr_t)end + 1; 5446 mstate->dtms_scratch_ptr += size; 5447 break; 5448 } 5449 5450 case DIF_SUBR_HTONS: 5451 case DIF_SUBR_NTOHS: 5452 #if BYTE_ORDER == BIG_ENDIAN 5453 regs[rd] = (uint16_t)tupregs[0].dttk_value; 5454 #else 5455 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 5456 #endif 5457 break; 5458 5459 5460 case DIF_SUBR_HTONL: 5461 case DIF_SUBR_NTOHL: 5462 #if BYTE_ORDER == BIG_ENDIAN 5463 regs[rd] = (uint32_t)tupregs[0].dttk_value; 5464 #else 5465 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 5466 #endif 5467 break; 5468 5469 5470 case DIF_SUBR_HTONLL: 5471 case DIF_SUBR_NTOHLL: 5472 #if BYTE_ORDER == BIG_ENDIAN 5473 regs[rd] = (uint64_t)tupregs[0].dttk_value; 5474 #else 5475 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 5476 #endif 5477 break; 5478 5479 5480 case DIF_SUBR_DIRNAME: 5481 case DIF_SUBR_BASENAME: { 5482 char *dest = (char *)mstate->dtms_scratch_ptr; 5483 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5484 uintptr_t src = tupregs[0].dttk_value; 5485 int i, j, len = dtrace_strlen((char *)src, size); 5486 int lastbase = -1, firstbase = -1, lastdir = -1; 5487 int start, end; 5488 5489 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 5490 regs[rd] = 0; 5491 break; 5492 } 5493 5494 if (!DTRACE_INSCRATCH(mstate, size)) { 5495 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5496 regs[rd] = 0; 5497 break; 5498 } 5499 5500 /* 5501 * The basename and dirname for a zero-length string is 5502 * defined to be "." 5503 */ 5504 if (len == 0) { 5505 len = 1; 5506 src = (uintptr_t)"."; 5507 } 5508 5509 /* 5510 * Start from the back of the string, moving back toward the 5511 * front until we see a character that isn't a slash. That 5512 * character is the last character in the basename. 5513 */ 5514 for (i = len - 1; i >= 0; i--) { 5515 if (dtrace_load8(src + i) != '/') 5516 break; 5517 } 5518 5519 if (i >= 0) 5520 lastbase = i; 5521 5522 /* 5523 * Starting from the last character in the basename, move 5524 * towards the front until we find a slash. The character 5525 * that we processed immediately before that is the first 5526 * character in the basename. 5527 */ 5528 for (; i >= 0; i--) { 5529 if (dtrace_load8(src + i) == '/') 5530 break; 5531 } 5532 5533 if (i >= 0) 5534 firstbase = i + 1; 5535 5536 /* 5537 * Now keep going until we find a non-slash character. That 5538 * character is the last character in the dirname. 5539 */ 5540 for (; i >= 0; i--) { 5541 if (dtrace_load8(src + i) != '/') 5542 break; 5543 } 5544 5545 if (i >= 0) 5546 lastdir = i; 5547 5548 ASSERT(!(lastbase == -1 && firstbase != -1)); 5549 ASSERT(!(firstbase == -1 && lastdir != -1)); 5550 5551 if (lastbase == -1) { 5552 /* 5553 * We didn't find a non-slash character. We know that 5554 * the length is non-zero, so the whole string must be 5555 * slashes. In either the dirname or the basename 5556 * case, we return '/'. 
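 * (For example, both dirname("///") and basename("///") come out as
 * "/", just as they do for the one-character input "/".)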
5557 */ 5558 ASSERT(firstbase == -1); 5559 firstbase = lastbase = lastdir = 0; 5560 } 5561 5562 if (firstbase == -1) { 5563 /* 5564 * The entire string consists only of a basename 5565 * component. If we're looking for dirname, we need 5566 * to change our string to be just "."; if we're 5567 * looking for a basename, we'll just set the first 5568 * character of the basename to be 0. 5569 */ 5570 if (subr == DIF_SUBR_DIRNAME) { 5571 ASSERT(lastdir == -1); 5572 src = (uintptr_t)"."; 5573 lastdir = 0; 5574 } else { 5575 firstbase = 0; 5576 } 5577 } 5578 5579 if (subr == DIF_SUBR_DIRNAME) { 5580 if (lastdir == -1) { 5581 /* 5582 * We know that we have a slash in the name -- 5583 * or lastdir would be set to 0, above. And 5584 * because lastdir is -1, we know that this 5585 * slash must be the first character. (That 5586 * is, the full string must be of the form 5587 * "/basename".) In this case, the last 5588 * character of the directory name is 0. 5589 */ 5590 lastdir = 0; 5591 } 5592 5593 start = 0; 5594 end = lastdir; 5595 } else { 5596 ASSERT(subr == DIF_SUBR_BASENAME); 5597 ASSERT(firstbase != -1 && lastbase != -1); 5598 start = firstbase; 5599 end = lastbase; 5600 } 5601 5602 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 5603 dest[j] = dtrace_load8(src + i); 5604 5605 dest[j] = '\0'; 5606 regs[rd] = (uintptr_t)dest; 5607 mstate->dtms_scratch_ptr += size; 5608 break; 5609 } 5610 5611 case DIF_SUBR_GETF: { 5612 uintptr_t fd = tupregs[0].dttk_value; 5613 struct filedesc *fdp; 5614 file_t *fp; 5615 5616 if (!dtrace_priv_proc(state)) { 5617 regs[rd] = 0; 5618 break; 5619 } 5620 fdp = curproc->p_fd; 5621 FILEDESC_SLOCK(fdp); 5622 fp = fget_locked(fdp, fd); 5623 mstate->dtms_getf = fp; 5624 regs[rd] = (uintptr_t)fp; 5625 FILEDESC_SUNLOCK(fdp); 5626 break; 5627 } 5628 5629 case DIF_SUBR_CLEANPATH: { 5630 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5631 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5632 uintptr_t src = tupregs[0].dttk_value; 5633 size_t lim; 5634 int i = 0, j = 0; 5635 #ifdef illumos 5636 zone_t *z; 5637 #endif 5638 5639 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) { 5640 regs[rd] = 0; 5641 break; 5642 } 5643 5644 if (!DTRACE_INSCRATCH(mstate, size)) { 5645 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5646 regs[rd] = 0; 5647 break; 5648 } 5649 5650 /* 5651 * Move forward, loading each character. 5652 */ 5653 do { 5654 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5655 next: 5656 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 5657 break; 5658 5659 if (c != '/') { 5660 dest[j++] = c; 5661 continue; 5662 } 5663 5664 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5665 5666 if (c == '/') { 5667 /* 5668 * We have two slashes -- we can just advance 5669 * to the next character. 5670 */ 5671 goto next; 5672 } 5673 5674 if (c != '.') { 5675 /* 5676 * This is not "." and it's not ".." -- we can 5677 * just store the "/" and this character and 5678 * drive on. 5679 */ 5680 dest[j++] = '/'; 5681 dest[j++] = c; 5682 continue; 5683 } 5684 5685 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5686 5687 if (c == '/') { 5688 /* 5689 * This is a "/./" component. We're not going 5690 * to store anything in the destination buffer; 5691 * we're just going to go to the next component. 5692 */ 5693 goto next; 5694 } 5695 5696 if (c != '.') { 5697 /* 5698 * This is not ".." -- we can just store the 5699 * "/." and this character and continue 5700 * processing. 
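 * (That is, a name that merely begins with a dot -- the ".tmp" in
 * "/var/.tmp", say -- is copied through unchanged; only a complete
 * "/./" component is dropped, as handled above.)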
5701 */ 5702 dest[j++] = '/'; 5703 dest[j++] = '.'; 5704 dest[j++] = c; 5705 continue; 5706 } 5707 5708 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5709 5710 if (c != '/' && c != '\0') { 5711 /* 5712 * This is not ".." -- it's "..[mumble]". 5713 * We'll store the "/.." and this character 5714 * and continue processing. 5715 */ 5716 dest[j++] = '/'; 5717 dest[j++] = '.'; 5718 dest[j++] = '.'; 5719 dest[j++] = c; 5720 continue; 5721 } 5722 5723 /* 5724 * This is "/../" or "/..\0". We need to back up 5725 * our destination pointer until we find a "/". 5726 */ 5727 i--; 5728 while (j != 0 && dest[--j] != '/') 5729 continue; 5730 5731 if (c == '\0') 5732 dest[++j] = '/'; 5733 } while (c != '\0'); 5734 5735 dest[j] = '\0'; 5736 5737 #ifdef illumos 5738 if (mstate->dtms_getf != NULL && 5739 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 5740 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 5741 /* 5742 * If we've done a getf() as a part of this ECB and we 5743 * don't have kernel access (and we're not in the global 5744 * zone), check if the path we cleaned up begins with 5745 * the zone's root path, and trim it off if so. Note 5746 * that this is an output cleanliness issue, not a 5747 * security issue: knowing one's zone root path does 5748 * not enable privilege escalation. 5749 */ 5750 if (strstr(dest, z->zone_rootpath) == dest) 5751 dest += strlen(z->zone_rootpath) - 1; 5752 } 5753 #endif 5754 5755 regs[rd] = (uintptr_t)dest; 5756 mstate->dtms_scratch_ptr += size; 5757 break; 5758 } 5759 5760 case DIF_SUBR_INET_NTOA: 5761 case DIF_SUBR_INET_NTOA6: 5762 case DIF_SUBR_INET_NTOP: { 5763 size_t size; 5764 int af, argi, i; 5765 char *base, *end; 5766 5767 if (subr == DIF_SUBR_INET_NTOP) { 5768 af = (int)tupregs[0].dttk_value; 5769 argi = 1; 5770 } else { 5771 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 5772 argi = 0; 5773 } 5774 5775 if (af == AF_INET) { 5776 ipaddr_t ip4; 5777 uint8_t *ptr8, val; 5778 5779 if (!dtrace_canload(tupregs[argi].dttk_value, 5780 sizeof (ipaddr_t), mstate, vstate)) { 5781 regs[rd] = 0; 5782 break; 5783 } 5784 5785 /* 5786 * Safely load the IPv4 address. 5787 */ 5788 ip4 = dtrace_load32(tupregs[argi].dttk_value); 5789 5790 /* 5791 * Check an IPv4 string will fit in scratch. 5792 */ 5793 size = INET_ADDRSTRLEN; 5794 if (!DTRACE_INSCRATCH(mstate, size)) { 5795 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5796 regs[rd] = 0; 5797 break; 5798 } 5799 base = (char *)mstate->dtms_scratch_ptr; 5800 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5801 5802 /* 5803 * Stringify as a dotted decimal quad. 5804 */ 5805 *end-- = '\0'; 5806 ptr8 = (uint8_t *)&ip4; 5807 for (i = 3; i >= 0; i--) { 5808 val = ptr8[i]; 5809 5810 if (val == 0) { 5811 *end-- = '0'; 5812 } else { 5813 for (; val; val /= 10) { 5814 *end-- = '0' + (val % 10); 5815 } 5816 } 5817 5818 if (i > 0) 5819 *end-- = '.'; 5820 } 5821 ASSERT(end + 1 >= base); 5822 5823 } else if (af == AF_INET6) { 5824 struct in6_addr ip6; 5825 int firstzero, tryzero, numzero, v6end; 5826 uint16_t val; 5827 const char digits[] = "0123456789abcdef"; 5828 5829 /* 5830 * Stringify using RFC 1884 convention 2 - 16 bit 5831 * hexadecimal values with a zero-run compression. 5832 * Lower case hexadecimal digits are used. 5833 * eg, fe80::214:4fff:fe0b:76c8. 5834 * The IPv4 embedded form is returned for inet_ntop, 5835 * just the IPv4 string is returned for inet_ntoa6. 
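 * For instance (values chosen purely for illustration), the V4-mapped
 * address ::ffff:10.1.2.3 is rendered as "::ffff:10.1.2.3" by
 * inet_ntop(AF_INET6, ...) but as just "10.1.2.3" by inet_ntoa6().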
5836 */ 5837 5838 if (!dtrace_canload(tupregs[argi].dttk_value, 5839 sizeof (struct in6_addr), mstate, vstate)) { 5840 regs[rd] = 0; 5841 break; 5842 } 5843 5844 /* 5845 * Safely load the IPv6 address. 5846 */ 5847 dtrace_bcopy( 5848 (void *)(uintptr_t)tupregs[argi].dttk_value, 5849 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 5850 5851 /* 5852 * Check an IPv6 string will fit in scratch. 5853 */ 5854 size = INET6_ADDRSTRLEN; 5855 if (!DTRACE_INSCRATCH(mstate, size)) { 5856 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5857 regs[rd] = 0; 5858 break; 5859 } 5860 base = (char *)mstate->dtms_scratch_ptr; 5861 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5862 *end-- = '\0'; 5863 5864 /* 5865 * Find the longest run of 16 bit zero values 5866 * for the single allowed zero compression - "::". 5867 */ 5868 firstzero = -1; 5869 tryzero = -1; 5870 numzero = 1; 5871 for (i = 0; i < sizeof (struct in6_addr); i++) { 5872 #ifdef illumos 5873 if (ip6._S6_un._S6_u8[i] == 0 && 5874 #else 5875 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5876 #endif 5877 tryzero == -1 && i % 2 == 0) { 5878 tryzero = i; 5879 continue; 5880 } 5881 5882 if (tryzero != -1 && 5883 #ifdef illumos 5884 (ip6._S6_un._S6_u8[i] != 0 || 5885 #else 5886 (ip6.__u6_addr.__u6_addr8[i] != 0 || 5887 #endif 5888 i == sizeof (struct in6_addr) - 1)) { 5889 5890 if (i - tryzero <= numzero) { 5891 tryzero = -1; 5892 continue; 5893 } 5894 5895 firstzero = tryzero; 5896 numzero = i - i % 2 - tryzero; 5897 tryzero = -1; 5898 5899 #ifdef illumos 5900 if (ip6._S6_un._S6_u8[i] == 0 && 5901 #else 5902 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5903 #endif 5904 i == sizeof (struct in6_addr) - 1) 5905 numzero += 2; 5906 } 5907 } 5908 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 5909 5910 /* 5911 * Check for an IPv4 embedded address. 5912 */ 5913 v6end = sizeof (struct in6_addr) - 2; 5914 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 5915 IN6_IS_ADDR_V4COMPAT(&ip6)) { 5916 for (i = sizeof (struct in6_addr) - 1; 5917 i >= DTRACE_V4MAPPED_OFFSET; i--) { 5918 ASSERT(end >= base); 5919 5920 #ifdef illumos 5921 val = ip6._S6_un._S6_u8[i]; 5922 #else 5923 val = ip6.__u6_addr.__u6_addr8[i]; 5924 #endif 5925 5926 if (val == 0) { 5927 *end-- = '0'; 5928 } else { 5929 for (; val; val /= 10) { 5930 *end-- = '0' + val % 10; 5931 } 5932 } 5933 5934 if (i > DTRACE_V4MAPPED_OFFSET) 5935 *end-- = '.'; 5936 } 5937 5938 if (subr == DIF_SUBR_INET_NTOA6) 5939 goto inetout; 5940 5941 /* 5942 * Set v6end to skip the IPv4 address that 5943 * we have already stringified. 5944 */ 5945 v6end = 10; 5946 } 5947 5948 /* 5949 * Build the IPv6 string by working through the 5950 * address in reverse. 5951 */ 5952 for (i = v6end; i >= 0; i -= 2) { 5953 ASSERT(end >= base); 5954 5955 if (i == firstzero + numzero - 2) { 5956 *end-- = ':'; 5957 *end-- = ':'; 5958 i -= numzero - 2; 5959 continue; 5960 } 5961 5962 if (i < 14 && i != firstzero - 2) 5963 *end-- = ':'; 5964 5965 #ifdef illumos 5966 val = (ip6._S6_un._S6_u8[i] << 8) + 5967 ip6._S6_un._S6_u8[i + 1]; 5968 #else 5969 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 5970 ip6.__u6_addr.__u6_addr8[i + 1]; 5971 #endif 5972 5973 if (val == 0) { 5974 *end-- = '0'; 5975 } else { 5976 for (; val; val /= 16) { 5977 *end-- = digits[val % 16]; 5978 } 5979 } 5980 } 5981 ASSERT(end + 1 >= base); 5982 5983 } else { 5984 /* 5985 * The user didn't use AH_INET or AH_INET6. 
5986 */ 5987 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5988 regs[rd] = 0; 5989 break; 5990 } 5991 5992 inetout: regs[rd] = (uintptr_t)end + 1; 5993 mstate->dtms_scratch_ptr += size; 5994 break; 5995 } 5996 5997 case DIF_SUBR_MEMREF: { 5998 uintptr_t size = 2 * sizeof(uintptr_t); 5999 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 6000 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 6001 6002 /* address and length */ 6003 memref[0] = tupregs[0].dttk_value; 6004 memref[1] = tupregs[1].dttk_value; 6005 6006 regs[rd] = (uintptr_t) memref; 6007 mstate->dtms_scratch_ptr += scratch_size; 6008 break; 6009 } 6010 6011 #ifndef illumos 6012 case DIF_SUBR_MEMSTR: { 6013 char *str = (char *)mstate->dtms_scratch_ptr; 6014 uintptr_t mem = tupregs[0].dttk_value; 6015 char c = tupregs[1].dttk_value; 6016 size_t size = tupregs[2].dttk_value; 6017 uint8_t n; 6018 int i; 6019 6020 regs[rd] = 0; 6021 6022 if (size == 0) 6023 break; 6024 6025 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 6026 break; 6027 6028 if (!DTRACE_INSCRATCH(mstate, size)) { 6029 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6030 break; 6031 } 6032 6033 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 6034 *flags |= CPU_DTRACE_ILLOP; 6035 break; 6036 } 6037 6038 for (i = 0; i < size - 1; i++) { 6039 n = dtrace_load8(mem++); 6040 str[i] = (n == 0) ? c : n; 6041 } 6042 str[size - 1] = 0; 6043 6044 regs[rd] = (uintptr_t)str; 6045 mstate->dtms_scratch_ptr += size; 6046 break; 6047 } 6048 #endif 6049 } 6050 } 6051 6052 /* 6053 * Emulate the execution of DTrace IR instructions specified by the given 6054 * DIF object. This function is deliberately void of assertions as all of 6055 * the necessary checks are handled by a call to dtrace_difo_validate(). 6056 */ 6057 static uint64_t 6058 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 6059 dtrace_vstate_t *vstate, dtrace_state_t *state) 6060 { 6061 const dif_instr_t *text = difo->dtdo_buf; 6062 const uint_t textlen = difo->dtdo_len; 6063 const char *strtab = difo->dtdo_strtab; 6064 const uint64_t *inttab = difo->dtdo_inttab; 6065 6066 uint64_t rval = 0; 6067 dtrace_statvar_t *svar; 6068 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 6069 dtrace_difv_t *v; 6070 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 6071 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 6072 6073 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 6074 uint64_t regs[DIF_DIR_NREGS]; 6075 uint64_t *tmp; 6076 6077 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 6078 int64_t cc_r; 6079 uint_t pc = 0, id, opc = 0; 6080 uint8_t ttop = 0; 6081 dif_instr_t instr; 6082 uint_t r1, r2, rd; 6083 6084 /* 6085 * We stash the current DIF object into the machine state: we need it 6086 * for subsequent access checking. 
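* (dtrace_canload(), for instance, consults dtms_difo so that loads
* falling within the DIF object's own string table can be permitted.)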
6087 */ 6088 mstate->dtms_difo = difo; 6089 6090 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 6091 6092 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 6093 opc = pc; 6094 6095 instr = text[pc++]; 6096 r1 = DIF_INSTR_R1(instr); 6097 r2 = DIF_INSTR_R2(instr); 6098 rd = DIF_INSTR_RD(instr); 6099 6100 switch (DIF_INSTR_OP(instr)) { 6101 case DIF_OP_OR: 6102 regs[rd] = regs[r1] | regs[r2]; 6103 break; 6104 case DIF_OP_XOR: 6105 regs[rd] = regs[r1] ^ regs[r2]; 6106 break; 6107 case DIF_OP_AND: 6108 regs[rd] = regs[r1] & regs[r2]; 6109 break; 6110 case DIF_OP_SLL: 6111 regs[rd] = regs[r1] << regs[r2]; 6112 break; 6113 case DIF_OP_SRL: 6114 regs[rd] = regs[r1] >> regs[r2]; 6115 break; 6116 case DIF_OP_SUB: 6117 regs[rd] = regs[r1] - regs[r2]; 6118 break; 6119 case DIF_OP_ADD: 6120 regs[rd] = regs[r1] + regs[r2]; 6121 break; 6122 case DIF_OP_MUL: 6123 regs[rd] = regs[r1] * regs[r2]; 6124 break; 6125 case DIF_OP_SDIV: 6126 if (regs[r2] == 0) { 6127 regs[rd] = 0; 6128 *flags |= CPU_DTRACE_DIVZERO; 6129 } else { 6130 regs[rd] = (int64_t)regs[r1] / 6131 (int64_t)regs[r2]; 6132 } 6133 break; 6134 6135 case DIF_OP_UDIV: 6136 if (regs[r2] == 0) { 6137 regs[rd] = 0; 6138 *flags |= CPU_DTRACE_DIVZERO; 6139 } else { 6140 regs[rd] = regs[r1] / regs[r2]; 6141 } 6142 break; 6143 6144 case DIF_OP_SREM: 6145 if (regs[r2] == 0) { 6146 regs[rd] = 0; 6147 *flags |= CPU_DTRACE_DIVZERO; 6148 } else { 6149 regs[rd] = (int64_t)regs[r1] % 6150 (int64_t)regs[r2]; 6151 } 6152 break; 6153 6154 case DIF_OP_UREM: 6155 if (regs[r2] == 0) { 6156 regs[rd] = 0; 6157 *flags |= CPU_DTRACE_DIVZERO; 6158 } else { 6159 regs[rd] = regs[r1] % regs[r2]; 6160 } 6161 break; 6162 6163 case DIF_OP_NOT: 6164 regs[rd] = ~regs[r1]; 6165 break; 6166 case DIF_OP_MOV: 6167 regs[rd] = regs[r1]; 6168 break; 6169 case DIF_OP_CMP: 6170 cc_r = regs[r1] - regs[r2]; 6171 cc_n = cc_r < 0; 6172 cc_z = cc_r == 0; 6173 cc_v = 0; 6174 cc_c = regs[r1] < regs[r2]; 6175 break; 6176 case DIF_OP_TST: 6177 cc_n = cc_v = cc_c = 0; 6178 cc_z = regs[r1] == 0; 6179 break; 6180 case DIF_OP_BA: 6181 pc = DIF_INSTR_LABEL(instr); 6182 break; 6183 case DIF_OP_BE: 6184 if (cc_z) 6185 pc = DIF_INSTR_LABEL(instr); 6186 break; 6187 case DIF_OP_BNE: 6188 if (cc_z == 0) 6189 pc = DIF_INSTR_LABEL(instr); 6190 break; 6191 case DIF_OP_BG: 6192 if ((cc_z | (cc_n ^ cc_v)) == 0) 6193 pc = DIF_INSTR_LABEL(instr); 6194 break; 6195 case DIF_OP_BGU: 6196 if ((cc_c | cc_z) == 0) 6197 pc = DIF_INSTR_LABEL(instr); 6198 break; 6199 case DIF_OP_BGE: 6200 if ((cc_n ^ cc_v) == 0) 6201 pc = DIF_INSTR_LABEL(instr); 6202 break; 6203 case DIF_OP_BGEU: 6204 if (cc_c == 0) 6205 pc = DIF_INSTR_LABEL(instr); 6206 break; 6207 case DIF_OP_BL: 6208 if (cc_n ^ cc_v) 6209 pc = DIF_INSTR_LABEL(instr); 6210 break; 6211 case DIF_OP_BLU: 6212 if (cc_c) 6213 pc = DIF_INSTR_LABEL(instr); 6214 break; 6215 case DIF_OP_BLE: 6216 if (cc_z | (cc_n ^ cc_v)) 6217 pc = DIF_INSTR_LABEL(instr); 6218 break; 6219 case DIF_OP_BLEU: 6220 if (cc_c | cc_z) 6221 pc = DIF_INSTR_LABEL(instr); 6222 break; 6223 case DIF_OP_RLDSB: 6224 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6225 break; 6226 /*FALLTHROUGH*/ 6227 case DIF_OP_LDSB: 6228 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 6229 break; 6230 case DIF_OP_RLDSH: 6231 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6232 break; 6233 /*FALLTHROUGH*/ 6234 case DIF_OP_LDSH: 6235 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 6236 break; 6237 case DIF_OP_RLDSW: 6238 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6239 break; 6240 /*FALLTHROUGH*/ 6241 case DIF_OP_LDSW: 
6242 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 6243 break; 6244 case DIF_OP_RLDUB: 6245 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6246 break; 6247 /*FALLTHROUGH*/ 6248 case DIF_OP_LDUB: 6249 regs[rd] = dtrace_load8(regs[r1]); 6250 break; 6251 case DIF_OP_RLDUH: 6252 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6253 break; 6254 /*FALLTHROUGH*/ 6255 case DIF_OP_LDUH: 6256 regs[rd] = dtrace_load16(regs[r1]); 6257 break; 6258 case DIF_OP_RLDUW: 6259 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6260 break; 6261 /*FALLTHROUGH*/ 6262 case DIF_OP_LDUW: 6263 regs[rd] = dtrace_load32(regs[r1]); 6264 break; 6265 case DIF_OP_RLDX: 6266 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 6267 break; 6268 /*FALLTHROUGH*/ 6269 case DIF_OP_LDX: 6270 regs[rd] = dtrace_load64(regs[r1]); 6271 break; 6272 case DIF_OP_ULDSB: 6273 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6274 regs[rd] = (int8_t) 6275 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6276 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6277 break; 6278 case DIF_OP_ULDSH: 6279 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6280 regs[rd] = (int16_t) 6281 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6282 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6283 break; 6284 case DIF_OP_ULDSW: 6285 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6286 regs[rd] = (int32_t) 6287 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6288 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6289 break; 6290 case DIF_OP_ULDUB: 6291 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6292 regs[rd] = 6293 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6294 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6295 break; 6296 case DIF_OP_ULDUH: 6297 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6298 regs[rd] = 6299 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6300 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6301 break; 6302 case DIF_OP_ULDUW: 6303 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6304 regs[rd] = 6305 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6306 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6307 break; 6308 case DIF_OP_ULDX: 6309 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6310 regs[rd] = 6311 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 6312 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6313 break; 6314 case DIF_OP_RET: 6315 rval = regs[rd]; 6316 pc = textlen; 6317 break; 6318 case DIF_OP_NOP: 6319 break; 6320 case DIF_OP_SETX: 6321 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 6322 break; 6323 case DIF_OP_SETS: 6324 regs[rd] = (uint64_t)(uintptr_t) 6325 (strtab + DIF_INSTR_STRING(instr)); 6326 break; 6327 case DIF_OP_SCMP: { 6328 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 6329 uintptr_t s1 = regs[r1]; 6330 uintptr_t s2 = regs[r2]; 6331 size_t lim1, lim2; 6332 6333 if (s1 != 0 && 6334 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate)) 6335 break; 6336 if (s2 != 0 && 6337 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate)) 6338 break; 6339 6340 cc_r = dtrace_strncmp((char *)s1, (char *)s2, 6341 MIN(lim1, lim2)); 6342 6343 cc_n = cc_r < 0; 6344 cc_z = cc_r == 0; 6345 cc_v = cc_c = 0; 6346 break; 6347 } 6348 case DIF_OP_LDGA: 6349 regs[rd] = dtrace_dif_variable(mstate, state, 6350 r1, regs[r2]); 6351 break; 6352 case DIF_OP_LDGS: 6353 id = DIF_INSTR_VAR(instr); 6354 6355 if (id >= DIF_VAR_OTHER_UBASE) { 6356 uintptr_t a; 6357 6358 id -= DIF_VAR_OTHER_UBASE; 6359 svar = vstate->dtvs_globals[id]; 6360 ASSERT(svar != NULL); 6361 v = &svar->dtsv_var; 6362 6363 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 6364 regs[rd] = svar->dtsv_data; 6365 break; 6366 } 6367 6368 a = (uintptr_t)svar->dtsv_data; 6369 6370 if (*(uint8_t 
*)a == UINT8_MAX) { 6371 /* 6372 * If the 0th byte is set to UINT8_MAX 6373 * then this is to be treated as a 6374 * reference to a NULL variable. 6375 */ 6376 regs[rd] = 0; 6377 } else { 6378 regs[rd] = a + sizeof (uint64_t); 6379 } 6380 6381 break; 6382 } 6383 6384 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 6385 break; 6386 6387 case DIF_OP_STGS: 6388 id = DIF_INSTR_VAR(instr); 6389 6390 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6391 id -= DIF_VAR_OTHER_UBASE; 6392 6393 VERIFY(id < vstate->dtvs_nglobals); 6394 svar = vstate->dtvs_globals[id]; 6395 ASSERT(svar != NULL); 6396 v = &svar->dtsv_var; 6397 6398 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6399 uintptr_t a = (uintptr_t)svar->dtsv_data; 6400 size_t lim; 6401 6402 ASSERT(a != 0); 6403 ASSERT(svar->dtsv_size != 0); 6404 6405 if (regs[rd] == 0) { 6406 *(uint8_t *)a = UINT8_MAX; 6407 break; 6408 } else { 6409 *(uint8_t *)a = 0; 6410 a += sizeof (uint64_t); 6411 } 6412 if (!dtrace_vcanload( 6413 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6414 &lim, mstate, vstate)) 6415 break; 6416 6417 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6418 (void *)a, &v->dtdv_type, lim); 6419 break; 6420 } 6421 6422 svar->dtsv_data = regs[rd]; 6423 break; 6424 6425 case DIF_OP_LDTA: 6426 /* 6427 * There are no DTrace built-in thread-local arrays at 6428 * present. This opcode is saved for future work. 6429 */ 6430 *flags |= CPU_DTRACE_ILLOP; 6431 regs[rd] = 0; 6432 break; 6433 6434 case DIF_OP_LDLS: 6435 id = DIF_INSTR_VAR(instr); 6436 6437 if (id < DIF_VAR_OTHER_UBASE) { 6438 /* 6439 * For now, this has no meaning. 6440 */ 6441 regs[rd] = 0; 6442 break; 6443 } 6444 6445 id -= DIF_VAR_OTHER_UBASE; 6446 6447 ASSERT(id < vstate->dtvs_nlocals); 6448 ASSERT(vstate->dtvs_locals != NULL); 6449 6450 svar = vstate->dtvs_locals[id]; 6451 ASSERT(svar != NULL); 6452 v = &svar->dtsv_var; 6453 6454 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6455 uintptr_t a = (uintptr_t)svar->dtsv_data; 6456 size_t sz = v->dtdv_type.dtdt_size; 6457 size_t lim; 6458 6459 sz += sizeof (uint64_t); 6460 ASSERT(svar->dtsv_size == NCPU * sz); 6461 a += curcpu * sz; 6462 6463 if (*(uint8_t *)a == UINT8_MAX) { 6464 /* 6465 * If the 0th byte is set to UINT8_MAX 6466 * then this is to be treated as a 6467 * reference to a NULL variable. 
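* (By-ref static variables reserve a leading uint64_t whose first
* byte is this NULL flag; the data itself begins one uint64_t past
* the flag, which is the address returned below.)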
6468 */ 6469 regs[rd] = 0; 6470 } else { 6471 regs[rd] = a + sizeof (uint64_t); 6472 } 6473 6474 break; 6475 } 6476 6477 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6478 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6479 regs[rd] = tmp[curcpu]; 6480 break; 6481 6482 case DIF_OP_STLS: 6483 id = DIF_INSTR_VAR(instr); 6484 6485 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6486 id -= DIF_VAR_OTHER_UBASE; 6487 VERIFY(id < vstate->dtvs_nlocals); 6488 6489 ASSERT(vstate->dtvs_locals != NULL); 6490 svar = vstate->dtvs_locals[id]; 6491 ASSERT(svar != NULL); 6492 v = &svar->dtsv_var; 6493 6494 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6495 uintptr_t a = (uintptr_t)svar->dtsv_data; 6496 size_t sz = v->dtdv_type.dtdt_size; 6497 size_t lim; 6498 6499 sz += sizeof (uint64_t); 6500 ASSERT(svar->dtsv_size == NCPU * sz); 6501 a += curcpu * sz; 6502 6503 if (regs[rd] == 0) { 6504 *(uint8_t *)a = UINT8_MAX; 6505 break; 6506 } else { 6507 *(uint8_t *)a = 0; 6508 a += sizeof (uint64_t); 6509 } 6510 6511 if (!dtrace_vcanload( 6512 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6513 &lim, mstate, vstate)) 6514 break; 6515 6516 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6517 (void *)a, &v->dtdv_type, lim); 6518 break; 6519 } 6520 6521 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6522 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6523 tmp[curcpu] = regs[rd]; 6524 break; 6525 6526 case DIF_OP_LDTS: { 6527 dtrace_dynvar_t *dvar; 6528 dtrace_key_t *key; 6529 6530 id = DIF_INSTR_VAR(instr); 6531 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6532 id -= DIF_VAR_OTHER_UBASE; 6533 v = &vstate->dtvs_tlocals[id]; 6534 6535 key = &tupregs[DIF_DTR_NREGS]; 6536 key[0].dttk_value = (uint64_t)id; 6537 key[0].dttk_size = 0; 6538 DTRACE_TLS_THRKEY(key[1].dttk_value); 6539 key[1].dttk_size = 0; 6540 6541 dvar = dtrace_dynvar(dstate, 2, key, 6542 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 6543 mstate, vstate); 6544 6545 if (dvar == NULL) { 6546 regs[rd] = 0; 6547 break; 6548 } 6549 6550 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6551 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6552 } else { 6553 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6554 } 6555 6556 break; 6557 } 6558 6559 case DIF_OP_STTS: { 6560 dtrace_dynvar_t *dvar; 6561 dtrace_key_t *key; 6562 6563 id = DIF_INSTR_VAR(instr); 6564 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6565 id -= DIF_VAR_OTHER_UBASE; 6566 VERIFY(id < vstate->dtvs_ntlocals); 6567 6568 key = &tupregs[DIF_DTR_NREGS]; 6569 key[0].dttk_value = (uint64_t)id; 6570 key[0].dttk_size = 0; 6571 DTRACE_TLS_THRKEY(key[1].dttk_value); 6572 key[1].dttk_size = 0; 6573 v = &vstate->dtvs_tlocals[id]; 6574 6575 dvar = dtrace_dynvar(dstate, 2, key, 6576 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6577 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6578 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6579 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6580 6581 /* 6582 * Given that we're storing to thread-local data, 6583 * we need to flush our predicate cache. 
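* (A cached "known false" predicate result may have depended on the
* thread-local we just stored to, so it can no longer be trusted.)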
6584 */ 6585 curthread->t_predcache = 0; 6586 6587 if (dvar == NULL) 6588 break; 6589 6590 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6591 size_t lim; 6592 6593 if (!dtrace_vcanload( 6594 (void *)(uintptr_t)regs[rd], 6595 &v->dtdv_type, &lim, mstate, vstate)) 6596 break; 6597 6598 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6599 dvar->dtdv_data, &v->dtdv_type, lim); 6600 } else { 6601 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6602 } 6603 6604 break; 6605 } 6606 6607 case DIF_OP_SRA: 6608 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 6609 break; 6610 6611 case DIF_OP_CALL: 6612 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 6613 regs, tupregs, ttop, mstate, state); 6614 break; 6615 6616 case DIF_OP_PUSHTR: 6617 if (ttop == DIF_DTR_NREGS) { 6618 *flags |= CPU_DTRACE_TUPOFLOW; 6619 break; 6620 } 6621 6622 if (r1 == DIF_TYPE_STRING) { 6623 /* 6624 * If this is a string type and the size is 0, 6625 * we'll use the system-wide default string 6626 * size. Note that we are _not_ looking at 6627 * the value of the DTRACEOPT_STRSIZE option; 6628 * had this been set, we would expect to have 6629 * a non-zero size value in the "pushtr". 6630 */ 6631 tupregs[ttop].dttk_size = 6632 dtrace_strlen((char *)(uintptr_t)regs[rd], 6633 regs[r2] ? regs[r2] : 6634 dtrace_strsize_default) + 1; 6635 } else { 6636 if (regs[r2] > LONG_MAX) { 6637 *flags |= CPU_DTRACE_ILLOP; 6638 break; 6639 } 6640 6641 tupregs[ttop].dttk_size = regs[r2]; 6642 } 6643 6644 tupregs[ttop++].dttk_value = regs[rd]; 6645 break; 6646 6647 case DIF_OP_PUSHTV: 6648 if (ttop == DIF_DTR_NREGS) { 6649 *flags |= CPU_DTRACE_TUPOFLOW; 6650 break; 6651 } 6652 6653 tupregs[ttop].dttk_value = regs[rd]; 6654 tupregs[ttop++].dttk_size = 0; 6655 break; 6656 6657 case DIF_OP_POPTS: 6658 if (ttop != 0) 6659 ttop--; 6660 break; 6661 6662 case DIF_OP_FLUSHTS: 6663 ttop = 0; 6664 break; 6665 6666 case DIF_OP_LDGAA: 6667 case DIF_OP_LDTAA: { 6668 dtrace_dynvar_t *dvar; 6669 dtrace_key_t *key = tupregs; 6670 uint_t nkeys = ttop; 6671 6672 id = DIF_INSTR_VAR(instr); 6673 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6674 id -= DIF_VAR_OTHER_UBASE; 6675 6676 key[nkeys].dttk_value = (uint64_t)id; 6677 key[nkeys++].dttk_size = 0; 6678 6679 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 6680 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6681 key[nkeys++].dttk_size = 0; 6682 VERIFY(id < vstate->dtvs_ntlocals); 6683 v = &vstate->dtvs_tlocals[id]; 6684 } else { 6685 VERIFY(id < vstate->dtvs_nglobals); 6686 v = &vstate->dtvs_globals[id]->dtsv_var; 6687 } 6688 6689 dvar = dtrace_dynvar(dstate, nkeys, key, 6690 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 
6691 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6692 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 6693 6694 if (dvar == NULL) { 6695 regs[rd] = 0; 6696 break; 6697 } 6698 6699 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6700 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6701 } else { 6702 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6703 } 6704 6705 break; 6706 } 6707 6708 case DIF_OP_STGAA: 6709 case DIF_OP_STTAA: { 6710 dtrace_dynvar_t *dvar; 6711 dtrace_key_t *key = tupregs; 6712 uint_t nkeys = ttop; 6713 6714 id = DIF_INSTR_VAR(instr); 6715 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6716 id -= DIF_VAR_OTHER_UBASE; 6717 6718 key[nkeys].dttk_value = (uint64_t)id; 6719 key[nkeys++].dttk_size = 0; 6720 6721 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 6722 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6723 key[nkeys++].dttk_size = 0; 6724 VERIFY(id < vstate->dtvs_ntlocals); 6725 v = &vstate->dtvs_tlocals[id]; 6726 } else { 6727 VERIFY(id < vstate->dtvs_nglobals); 6728 v = &vstate->dtvs_globals[id]->dtsv_var; 6729 } 6730 6731 dvar = dtrace_dynvar(dstate, nkeys, key, 6732 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6733 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6734 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6735 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6736 6737 if (dvar == NULL) 6738 break; 6739 6740 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6741 size_t lim; 6742 6743 if (!dtrace_vcanload( 6744 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6745 &lim, mstate, vstate)) 6746 break; 6747 6748 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6749 dvar->dtdv_data, &v->dtdv_type, lim); 6750 } else { 6751 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6752 } 6753 6754 break; 6755 } 6756 6757 case DIF_OP_ALLOCS: { 6758 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6759 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 6760 6761 /* 6762 * Rounding up the user allocation size could have 6763 * overflowed large, bogus allocations (like -1ULL) to 6764 * 0. 
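* (The size < regs[r1] comparison below catches exactly that
* wrap-around case.)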
6765 */ 6766 if (size < regs[r1] || 6767 !DTRACE_INSCRATCH(mstate, size)) { 6768 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6769 regs[rd] = 0; 6770 break; 6771 } 6772 6773 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 6774 mstate->dtms_scratch_ptr += size; 6775 regs[rd] = ptr; 6776 break; 6777 } 6778 6779 case DIF_OP_COPYS: 6780 if (!dtrace_canstore(regs[rd], regs[r2], 6781 mstate, vstate)) { 6782 *flags |= CPU_DTRACE_BADADDR; 6783 *illval = regs[rd]; 6784 break; 6785 } 6786 6787 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 6788 break; 6789 6790 dtrace_bcopy((void *)(uintptr_t)regs[r1], 6791 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 6792 break; 6793 6794 case DIF_OP_STB: 6795 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 6796 *flags |= CPU_DTRACE_BADADDR; 6797 *illval = regs[rd]; 6798 break; 6799 } 6800 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 6801 break; 6802 6803 case DIF_OP_STH: 6804 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 6805 *flags |= CPU_DTRACE_BADADDR; 6806 *illval = regs[rd]; 6807 break; 6808 } 6809 if (regs[rd] & 1) { 6810 *flags |= CPU_DTRACE_BADALIGN; 6811 *illval = regs[rd]; 6812 break; 6813 } 6814 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 6815 break; 6816 6817 case DIF_OP_STW: 6818 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 6819 *flags |= CPU_DTRACE_BADADDR; 6820 *illval = regs[rd]; 6821 break; 6822 } 6823 if (regs[rd] & 3) { 6824 *flags |= CPU_DTRACE_BADALIGN; 6825 *illval = regs[rd]; 6826 break; 6827 } 6828 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 6829 break; 6830 6831 case DIF_OP_STX: 6832 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 6833 *flags |= CPU_DTRACE_BADADDR; 6834 *illval = regs[rd]; 6835 break; 6836 } 6837 if (regs[rd] & 7) { 6838 *flags |= CPU_DTRACE_BADALIGN; 6839 *illval = regs[rd]; 6840 break; 6841 } 6842 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 6843 break; 6844 } 6845 } 6846 6847 if (!(*flags & CPU_DTRACE_FAULT)) 6848 return (rval); 6849 6850 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 6851 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 6852 6853 return (0); 6854 } 6855 6856 static void 6857 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 6858 { 6859 dtrace_probe_t *probe = ecb->dte_probe; 6860 dtrace_provider_t *prov = probe->dtpr_provider; 6861 char c[DTRACE_FULLNAMELEN + 80], *str; 6862 char *msg = "dtrace: breakpoint action at probe "; 6863 char *ecbmsg = " (ecb "; 6864 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 6865 uintptr_t val = (uintptr_t)ecb; 6866 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 6867 6868 if (dtrace_destructive_disallow) 6869 return; 6870 6871 /* 6872 * It's impossible to be taking action on the NULL probe. 6873 */ 6874 ASSERT(probe != NULL); 6875 6876 /* 6877 * This is a poor man's (destitute man's?) sprintf(): we want to 6878 * print the provider name, module name, function name and name of 6879 * the probe, along with the hex address of the ECB with the breakpoint 6880 * action -- all of which we must place in the character buffer by 6881 * hand. 
6882 */ 6883 while (*msg != '\0') 6884 c[i++] = *msg++; 6885 6886 for (str = prov->dtpv_name; *str != '\0'; str++) 6887 c[i++] = *str; 6888 c[i++] = ':'; 6889 6890 for (str = probe->dtpr_mod; *str != '\0'; str++) 6891 c[i++] = *str; 6892 c[i++] = ':'; 6893 6894 for (str = probe->dtpr_func; *str != '\0'; str++) 6895 c[i++] = *str; 6896 c[i++] = ':'; 6897 6898 for (str = probe->dtpr_name; *str != '\0'; str++) 6899 c[i++] = *str; 6900 6901 while (*ecbmsg != '\0') 6902 c[i++] = *ecbmsg++; 6903 6904 while (shift >= 0) { 6905 mask = (uintptr_t)0xf << shift; 6906 6907 if (val >= ((uintptr_t)1 << shift)) 6908 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 6909 shift -= 4; 6910 } 6911 6912 c[i++] = ')'; 6913 c[i] = '\0'; 6914 6915 #ifdef illumos 6916 debug_enter(c); 6917 #else 6918 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 6919 #endif 6920 } 6921 6922 static void 6923 dtrace_action_panic(dtrace_ecb_t *ecb) 6924 { 6925 dtrace_probe_t *probe = ecb->dte_probe; 6926 6927 /* 6928 * It's impossible to be taking action on the NULL probe. 6929 */ 6930 ASSERT(probe != NULL); 6931 6932 if (dtrace_destructive_disallow) 6933 return; 6934 6935 if (dtrace_panicked != NULL) 6936 return; 6937 6938 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 6939 return; 6940 6941 /* 6942 * We won the right to panic. (We want to be sure that only one 6943 * thread calls panic() from dtrace_probe(), and that panic() is 6944 * called exactly once.) 6945 */ 6946 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 6947 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 6948 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 6949 } 6950 6951 static void 6952 dtrace_action_raise(uint64_t sig) 6953 { 6954 if (dtrace_destructive_disallow) 6955 return; 6956 6957 if (sig >= NSIG) { 6958 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6959 return; 6960 } 6961 6962 #ifdef illumos 6963 /* 6964 * raise() has a queue depth of 1 -- we ignore all subsequent 6965 * invocations of the raise() action. 6966 */ 6967 if (curthread->t_dtrace_sig == 0) 6968 curthread->t_dtrace_sig = (uint8_t)sig; 6969 6970 curthread->t_sig_check = 1; 6971 aston(curthread); 6972 #else 6973 struct proc *p = curproc; 6974 PROC_LOCK(p); 6975 kern_psignal(p, sig); 6976 PROC_UNLOCK(p); 6977 #endif 6978 } 6979 6980 static void 6981 dtrace_action_stop(void) 6982 { 6983 if (dtrace_destructive_disallow) 6984 return; 6985 6986 #ifdef illumos 6987 if (!curthread->t_dtrace_stop) { 6988 curthread->t_dtrace_stop = 1; 6989 curthread->t_sig_check = 1; 6990 aston(curthread); 6991 } 6992 #else 6993 struct proc *p = curproc; 6994 PROC_LOCK(p); 6995 kern_psignal(p, SIGSTOP); 6996 PROC_UNLOCK(p); 6997 #endif 6998 } 6999 7000 static void 7001 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 7002 { 7003 hrtime_t now; 7004 volatile uint16_t *flags; 7005 #ifdef illumos 7006 cpu_t *cpu = CPU; 7007 #else 7008 cpu_t *cpu = &solaris_cpu[curcpu]; 7009 #endif 7010 7011 if (dtrace_destructive_disallow) 7012 return; 7013 7014 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7015 7016 now = dtrace_gethrtime(); 7017 7018 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 7019 /* 7020 * We need to advance the mark to the current time. 7021 */ 7022 cpu->cpu_dtrace_chillmark = now; 7023 cpu->cpu_dtrace_chilled = 0; 7024 } 7025 7026 /* 7027 * Now check to see if the requested chill time would take us over 7028 * the maximum amount of time allowed in the chill interval. (Or 7029 * worse, if the calculation itself induces overflow.) 
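* (The second comparison below detects the overflow case: the
* accumulated chill time would appear to shrink after adding val.)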
7030 */ 7031 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 7032 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 7033 *flags |= CPU_DTRACE_ILLOP; 7034 return; 7035 } 7036 7037 while (dtrace_gethrtime() - now < val) 7038 continue; 7039 7040 /* 7041 * Normally, we assure that the value of the variable "timestamp" does 7042 * not change within an ECB. The presence of chill() represents an 7043 * exception to this rule, however. 7044 */ 7045 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 7046 cpu->cpu_dtrace_chilled += val; 7047 } 7048 7049 static void 7050 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 7051 uint64_t *buf, uint64_t arg) 7052 { 7053 int nframes = DTRACE_USTACK_NFRAMES(arg); 7054 int strsize = DTRACE_USTACK_STRSIZE(arg); 7055 uint64_t *pcs = &buf[1], *fps; 7056 char *str = (char *)&pcs[nframes]; 7057 int size, offs = 0, i, j; 7058 size_t rem; 7059 uintptr_t old = mstate->dtms_scratch_ptr, saved; 7060 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 7061 char *sym; 7062 7063 /* 7064 * Should be taking a faster path if string space has not been 7065 * allocated. 7066 */ 7067 ASSERT(strsize != 0); 7068 7069 /* 7070 * We will first allocate some temporary space for the frame pointers. 7071 */ 7072 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 7073 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 7074 (nframes * sizeof (uint64_t)); 7075 7076 if (!DTRACE_INSCRATCH(mstate, size)) { 7077 /* 7078 * Not enough room for our frame pointers -- need to indicate 7079 * that we ran out of scratch space. 7080 */ 7081 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 7082 return; 7083 } 7084 7085 mstate->dtms_scratch_ptr += size; 7086 saved = mstate->dtms_scratch_ptr; 7087 7088 /* 7089 * Now get a stack with both program counters and frame pointers. 7090 */ 7091 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7092 dtrace_getufpstack(buf, fps, nframes + 1); 7093 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7094 7095 /* 7096 * If that faulted, we're cooked. 7097 */ 7098 if (*flags & CPU_DTRACE_FAULT) 7099 goto out; 7100 7101 /* 7102 * Now we want to walk up the stack, calling the USTACK helper. For 7103 * each iteration, we restore the scratch pointer. 7104 */ 7105 for (i = 0; i < nframes; i++) { 7106 mstate->dtms_scratch_ptr = saved; 7107 7108 if (offs >= strsize) 7109 break; 7110 7111 sym = (char *)(uintptr_t)dtrace_helper( 7112 DTRACE_HELPER_ACTION_USTACK, 7113 mstate, state, pcs[i], fps[i]); 7114 7115 /* 7116 * If we faulted while running the helper, we're going to 7117 * clear the fault and null out the corresponding string. 7118 */ 7119 if (*flags & CPU_DTRACE_FAULT) { 7120 *flags &= ~CPU_DTRACE_FAULT; 7121 str[offs++] = '\0'; 7122 continue; 7123 } 7124 7125 if (sym == NULL) { 7126 str[offs++] = '\0'; 7127 continue; 7128 } 7129 7130 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate, 7131 &(state->dts_vstate))) { 7132 str[offs++] = '\0'; 7133 continue; 7134 } 7135 7136 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7137 7138 /* 7139 * Now copy in the string that the helper returned to us. 
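* (The copy is bounded both by the remaining string space and by the
* loadable limit, rem, established by dtrace_strcanload() above.)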
7140 */ 7141 for (j = 0; offs + j < strsize && j < rem; j++) { 7142 if ((str[offs + j] = sym[j]) == '\0') 7143 break; 7144 } 7145 7146 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7147 7148 offs += j + 1; 7149 } 7150 7151 if (offs >= strsize) { 7152 /* 7153 * If we didn't have room for all of the strings, we don't 7154 * abort processing -- this needn't be a fatal error -- but we 7155 * still want to increment a counter (dts_stkstroverflows) to 7156 * allow this condition to be warned about. (If this is from 7157 * a jstack() action, it is easily tuned via jstackstrsize.) 7158 */ 7159 dtrace_error(&state->dts_stkstroverflows); 7160 } 7161 7162 while (offs < strsize) 7163 str[offs++] = '\0'; 7164 7165 out: 7166 mstate->dtms_scratch_ptr = old; 7167 } 7168 7169 static void 7170 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, 7171 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) 7172 { 7173 volatile uint16_t *flags; 7174 uint64_t val = *valp; 7175 size_t valoffs = *valoffsp; 7176 7177 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7178 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); 7179 7180 /* 7181 * If this is a string, we're going to only load until we find the zero 7182 * byte -- after which we'll store zero bytes. 7183 */ 7184 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 7185 char c = '\0' + 1; 7186 size_t s; 7187 7188 for (s = 0; s < size; s++) { 7189 if (c != '\0' && dtkind == DIF_TF_BYREF) { 7190 c = dtrace_load8(val++); 7191 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { 7192 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7193 c = dtrace_fuword8((void *)(uintptr_t)val++); 7194 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7195 if (*flags & CPU_DTRACE_FAULT) 7196 break; 7197 } 7198 7199 DTRACE_STORE(uint8_t, tomax, valoffs++, c); 7200 7201 if (c == '\0' && intuple) 7202 break; 7203 } 7204 } else { 7205 uint8_t c; 7206 while (valoffs < end) { 7207 if (dtkind == DIF_TF_BYREF) { 7208 c = dtrace_load8(val++); 7209 } else if (dtkind == DIF_TF_BYUREF) { 7210 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7211 c = dtrace_fuword8((void *)(uintptr_t)val++); 7212 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7213 if (*flags & CPU_DTRACE_FAULT) 7214 break; 7215 } 7216 7217 DTRACE_STORE(uint8_t, tomax, 7218 valoffs++, c); 7219 } 7220 } 7221 7222 *valp = val; 7223 *valoffsp = valoffs; 7224 } 7225 7226 /* 7227 * If you're looking for the epicenter of DTrace, you just found it. This 7228 * is the function called by the provider to fire a probe -- from which all 7229 * subsequent probe-context DTrace activity emanates. 7230 */ 7231 void 7232 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 7233 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 7234 { 7235 processorid_t cpuid; 7236 dtrace_icookie_t cookie; 7237 dtrace_probe_t *probe; 7238 dtrace_mstate_t mstate; 7239 dtrace_ecb_t *ecb; 7240 dtrace_action_t *act; 7241 intptr_t offs; 7242 size_t size; 7243 int vtime, onintr; 7244 volatile uint16_t *flags; 7245 hrtime_t now; 7246 7247 if (panicstr != NULL) 7248 return; 7249 7250 #ifdef illumos 7251 /* 7252 * Kick out immediately if this CPU is still being born (in which case 7253 * curthread will be set to -1) or the current thread can't allow 7254 * probes in its current context. 
7255 */ 7256 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 7257 return; 7258 #endif 7259 7260 cookie = dtrace_interrupt_disable(); 7261 probe = dtrace_probes[id - 1]; 7262 cpuid = curcpu; 7263 onintr = CPU_ON_INTR(CPU); 7264 7265 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 7266 probe->dtpr_predcache == curthread->t_predcache) { 7267 /* 7268 * We have hit in the predicate cache; we know that 7269 * this predicate would evaluate to be false. 7270 */ 7271 dtrace_interrupt_enable(cookie); 7272 return; 7273 } 7274 7275 #ifdef illumos 7276 if (panic_quiesce) { 7277 #else 7278 if (panicstr != NULL) { 7279 #endif 7280 /* 7281 * We don't trace anything if we're panicking. 7282 */ 7283 dtrace_interrupt_enable(cookie); 7284 return; 7285 } 7286 7287 now = mstate.dtms_timestamp = dtrace_gethrtime(); 7288 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7289 vtime = dtrace_vtime_references != 0; 7290 7291 if (vtime && curthread->t_dtrace_start) 7292 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 7293 7294 mstate.dtms_difo = NULL; 7295 mstate.dtms_probe = probe; 7296 mstate.dtms_strtok = 0; 7297 mstate.dtms_arg[0] = arg0; 7298 mstate.dtms_arg[1] = arg1; 7299 mstate.dtms_arg[2] = arg2; 7300 mstate.dtms_arg[3] = arg3; 7301 mstate.dtms_arg[4] = arg4; 7302 7303 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 7304 7305 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 7306 dtrace_predicate_t *pred = ecb->dte_predicate; 7307 dtrace_state_t *state = ecb->dte_state; 7308 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 7309 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 7310 dtrace_vstate_t *vstate = &state->dts_vstate; 7311 dtrace_provider_t *prov = probe->dtpr_provider; 7312 uint64_t tracememsize = 0; 7313 int committed = 0; 7314 caddr_t tomax; 7315 7316 /* 7317 * A little subtlety with the following (seemingly innocuous) 7318 * declaration of the automatic 'val': by looking at the 7319 * code, you might think that it could be declared in the 7320 * action processing loop, below. (That is, it's only used in 7321 * the action processing loop.) However, it must be declared 7322 * out of that scope because in the case of DIF expression 7323 * arguments to aggregating actions, one iteration of the 7324 * action loop will use the last iteration's value. 7325 */ 7326 uint64_t val = 0; 7327 7328 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 7329 mstate.dtms_getf = NULL; 7330 7331 *flags &= ~CPU_DTRACE_ERROR; 7332 7333 if (prov == dtrace_provider) { 7334 /* 7335 * If dtrace itself is the provider of this probe, 7336 * we're only going to continue processing the ECB if 7337 * arg0 (the dtrace_state_t) is equal to the ECB's 7338 * creating state. (This prevents disjoint consumers 7339 * from seeing one another's metaprobes.) 7340 */ 7341 if (arg0 != (uint64_t)(uintptr_t)state) 7342 continue; 7343 } 7344 7345 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 7346 /* 7347 * We're not currently active. If our provider isn't 7348 * the dtrace pseudo provider, we're not interested. 7349 */ 7350 if (prov != dtrace_provider) 7351 continue; 7352 7353 /* 7354 * Now we must further check if we are in the BEGIN 7355 * probe. If we are, we will only continue processing 7356 * if we're still in WARMUP -- if one BEGIN enabling 7357 * has invoked the exit() action, we don't want to 7358 * evaluate subsequent BEGIN enablings. 
7359 */ 7360 if (probe->dtpr_id == dtrace_probeid_begin && 7361 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 7362 ASSERT(state->dts_activity == 7363 DTRACE_ACTIVITY_DRAINING); 7364 continue; 7365 } 7366 } 7367 7368 if (ecb->dte_cond) { 7369 /* 7370 * If the dte_cond bits indicate that this 7371 * consumer is only allowed to see user-mode firings 7372 * of this probe, call the provider's dtps_usermode() 7373 * entry point to check that the probe was fired 7374 * while in a user context. Skip this ECB if that's 7375 * not the case. 7376 */ 7377 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 7378 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 7379 probe->dtpr_id, probe->dtpr_arg) == 0) 7380 continue; 7381 7382 #ifdef illumos 7383 /* 7384 * This is more subtle than it looks. We have to be 7385 * absolutely certain that CRED() isn't going to 7386 * change out from under us so it's only legit to 7387 * examine that structure if we're in constrained 7388 * situations. Currently, the only time we'll do this 7389 * check is if a non-super-user has enabled the 7390 * profile or syscall providers -- providers that 7391 * allow visibility of all processes. For the 7392 * profile case, the check above will ensure that 7393 * we're examining a user context. 7394 */ 7395 if (ecb->dte_cond & DTRACE_COND_OWNER) { 7396 cred_t *cr; 7397 cred_t *s_cr = 7398 ecb->dte_state->dts_cred.dcr_cred; 7399 proc_t *proc; 7400 7401 ASSERT(s_cr != NULL); 7402 7403 if ((cr = CRED()) == NULL || 7404 s_cr->cr_uid != cr->cr_uid || 7405 s_cr->cr_uid != cr->cr_ruid || 7406 s_cr->cr_uid != cr->cr_suid || 7407 s_cr->cr_gid != cr->cr_gid || 7408 s_cr->cr_gid != cr->cr_rgid || 7409 s_cr->cr_gid != cr->cr_sgid || 7410 (proc = ttoproc(curthread)) == NULL || 7411 (proc->p_flag & SNOCD)) 7412 continue; 7413 } 7414 7415 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 7416 cred_t *cr; 7417 cred_t *s_cr = 7418 ecb->dte_state->dts_cred.dcr_cred; 7419 7420 ASSERT(s_cr != NULL); 7421 7422 if ((cr = CRED()) == NULL || 7423 s_cr->cr_zone->zone_id != 7424 cr->cr_zone->zone_id) 7425 continue; 7426 } 7427 #endif 7428 } 7429 7430 if (now - state->dts_alive > dtrace_deadman_timeout) { 7431 /* 7432 * We seem to be dead. Unless we (a) have kernel 7433 * destructive permissions (b) have explicitly enabled 7434 * destructive actions and (c) destructive actions have 7435 * not been disabled, we're going to transition into 7436 * the KILLED state, from which no further processing 7437 * on this state will be performed.
7438 */ 7439 if (!dtrace_priv_kernel_destructive(state) || 7440 !state->dts_cred.dcr_destructive || 7441 dtrace_destructive_disallow) { 7442 void *activity = &state->dts_activity; 7443 dtrace_activity_t current; 7444 7445 do { 7446 current = state->dts_activity; 7447 } while (dtrace_cas32(activity, current, 7448 DTRACE_ACTIVITY_KILLED) != current); 7449 7450 continue; 7451 } 7452 } 7453 7454 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 7455 ecb->dte_alignment, state, &mstate)) < 0) 7456 continue; 7457 7458 tomax = buf->dtb_tomax; 7459 ASSERT(tomax != NULL); 7460 7461 if (ecb->dte_size != 0) { 7462 dtrace_rechdr_t dtrh; 7463 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 7464 mstate.dtms_timestamp = dtrace_gethrtime(); 7465 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7466 } 7467 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 7468 dtrh.dtrh_epid = ecb->dte_epid; 7469 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 7470 mstate.dtms_timestamp); 7471 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 7472 } 7473 7474 mstate.dtms_epid = ecb->dte_epid; 7475 mstate.dtms_present |= DTRACE_MSTATE_EPID; 7476 7477 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 7478 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 7479 else 7480 mstate.dtms_access = 0; 7481 7482 if (pred != NULL) { 7483 dtrace_difo_t *dp = pred->dtp_difo; 7484 uint64_t rval; 7485 7486 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 7487 7488 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 7489 dtrace_cacheid_t cid = probe->dtpr_predcache; 7490 7491 if (cid != DTRACE_CACHEIDNONE && !onintr) { 7492 /* 7493 * Update the predicate cache... 7494 */ 7495 ASSERT(cid == pred->dtp_cacheid); 7496 curthread->t_predcache = cid; 7497 } 7498 7499 continue; 7500 } 7501 } 7502 7503 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 7504 act != NULL; act = act->dta_next) { 7505 size_t valoffs; 7506 dtrace_difo_t *dp; 7507 dtrace_recdesc_t *rec = &act->dta_rec; 7508 7509 size = rec->dtrd_size; 7510 valoffs = offs + rec->dtrd_offset; 7511 7512 if (DTRACEACT_ISAGG(act->dta_kind)) { 7513 uint64_t v = 0xbad; 7514 dtrace_aggregation_t *agg; 7515 7516 agg = (dtrace_aggregation_t *)act; 7517 7518 if ((dp = act->dta_difo) != NULL) 7519 v = dtrace_dif_emulate(dp, 7520 &mstate, vstate, state); 7521 7522 if (*flags & CPU_DTRACE_ERROR) 7523 continue; 7524 7525 /* 7526 * Note that we always pass the expression 7527 * value from the previous iteration of the 7528 * action loop. This value will only be used 7529 * if there is an expression argument to the 7530 * aggregating action, denoted by the 7531 * dtag_hasarg field. 7532 */ 7533 dtrace_aggregate(agg, buf, 7534 offs, aggbuf, v, val); 7535 continue; 7536 } 7537 7538 switch (act->dta_kind) { 7539 case DTRACEACT_STOP: 7540 if (dtrace_priv_proc_destructive(state)) 7541 dtrace_action_stop(); 7542 continue; 7543 7544 case DTRACEACT_BREAKPOINT: 7545 if (dtrace_priv_kernel_destructive(state)) 7546 dtrace_action_breakpoint(ecb); 7547 continue; 7548 7549 case DTRACEACT_PANIC: 7550 if (dtrace_priv_kernel_destructive(state)) 7551 dtrace_action_panic(ecb); 7552 continue; 7553 7554 case DTRACEACT_STACK: 7555 if (!dtrace_priv_kernel(state)) 7556 continue; 7557 7558 dtrace_getpcstack((pc_t *)(tomax + valoffs), 7559 size / sizeof (pc_t), probe->dtpr_aframes, 7560 DTRACE_ANCHORED(probe) ? NULL : 7561 (uint32_t *)arg0); 7562 continue; 7563 7564 case DTRACEACT_JSTACK: 7565 case DTRACEACT_USTACK: 7566 if (!dtrace_priv_proc(state)) 7567 continue; 7568 7569 /* 7570 * See comment in DIF_VAR_PID. 
7571 */ 7572 if (DTRACE_ANCHORED(mstate.dtms_probe) && 7573 CPU_ON_INTR(CPU)) { 7574 int depth = DTRACE_USTACK_NFRAMES( 7575 rec->dtrd_arg) + 1; 7576 7577 dtrace_bzero((void *)(tomax + valoffs), 7578 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 7579 + depth * sizeof (uint64_t)); 7580 7581 continue; 7582 } 7583 7584 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 7585 curproc->p_dtrace_helpers != NULL) { 7586 /* 7587 * This is the slow path -- we have 7588 * allocated string space, and we're 7589 * getting the stack of a process that 7590 * has helpers. Call into a separate 7591 * routine to perform this processing. 7592 */ 7593 dtrace_action_ustack(&mstate, state, 7594 (uint64_t *)(tomax + valoffs), 7595 rec->dtrd_arg); 7596 continue; 7597 } 7598 7599 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7600 dtrace_getupcstack((uint64_t *) 7601 (tomax + valoffs), 7602 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 7603 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7604 continue; 7605 7606 default: 7607 break; 7608 } 7609 7610 dp = act->dta_difo; 7611 ASSERT(dp != NULL); 7612 7613 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 7614 7615 if (*flags & CPU_DTRACE_ERROR) 7616 continue; 7617 7618 switch (act->dta_kind) { 7619 case DTRACEACT_SPECULATE: { 7620 dtrace_rechdr_t *dtrh; 7621 7622 ASSERT(buf == &state->dts_buffer[cpuid]); 7623 buf = dtrace_speculation_buffer(state, 7624 cpuid, val); 7625 7626 if (buf == NULL) { 7627 *flags |= CPU_DTRACE_DROP; 7628 continue; 7629 } 7630 7631 offs = dtrace_buffer_reserve(buf, 7632 ecb->dte_needed, ecb->dte_alignment, 7633 state, NULL); 7634 7635 if (offs < 0) { 7636 *flags |= CPU_DTRACE_DROP; 7637 continue; 7638 } 7639 7640 tomax = buf->dtb_tomax; 7641 ASSERT(tomax != NULL); 7642 7643 if (ecb->dte_size == 0) 7644 continue; 7645 7646 ASSERT3U(ecb->dte_size, >=, 7647 sizeof (dtrace_rechdr_t)); 7648 dtrh = ((void *)(tomax + offs)); 7649 dtrh->dtrh_epid = ecb->dte_epid; 7650 /* 7651 * When the speculation is committed, all of 7652 * the records in the speculative buffer will 7653 * have their timestamps set to the commit 7654 * time. Until then, it is set to a sentinel 7655 * value, for debugability. 7656 */ 7657 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 7658 continue; 7659 } 7660 7661 case DTRACEACT_PRINTM: { 7662 /* The DIF returns a 'memref'. */ 7663 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 7664 7665 /* Get the size from the memref. */ 7666 size = memref[1]; 7667 7668 /* 7669 * Check if the size exceeds the allocated 7670 * buffer size. 7671 */ 7672 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7673 /* Flag a drop! */ 7674 *flags |= CPU_DTRACE_DROP; 7675 continue; 7676 } 7677 7678 /* Store the size in the buffer first. */ 7679 DTRACE_STORE(uintptr_t, tomax, 7680 valoffs, size); 7681 7682 /* 7683 * Offset the buffer address to the start 7684 * of the data. 7685 */ 7686 valoffs += sizeof(uintptr_t); 7687 7688 /* 7689 * Reset to the memory address rather than 7690 * the memref array, then let the BYREF 7691 * code below do the work to store the 7692 * memory data in the buffer. 7693 */ 7694 val = memref[0]; 7695 break; 7696 } 7697 7698 case DTRACEACT_CHILL: 7699 if (dtrace_priv_kernel_destructive(state)) 7700 dtrace_action_chill(&mstate, val); 7701 continue; 7702 7703 case DTRACEACT_RAISE: 7704 if (dtrace_priv_proc_destructive(state)) 7705 dtrace_action_raise(val); 7706 continue; 7707 7708 case DTRACEACT_COMMIT: 7709 ASSERT(!committed); 7710 7711 /* 7712 * We need to commit our buffer state. 
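* (That is, finalize the offset of the buffer we have been writing
* to -- possibly a speculative buffer -- before switching back to
* the principal buffer and committing the speculation.)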
7713 */ 7714 if (ecb->dte_size) 7715 buf->dtb_offset = offs + ecb->dte_size; 7716 buf = &state->dts_buffer[cpuid]; 7717 dtrace_speculation_commit(state, cpuid, val); 7718 committed = 1; 7719 continue; 7720 7721 case DTRACEACT_DISCARD: 7722 dtrace_speculation_discard(state, cpuid, val); 7723 continue; 7724 7725 case DTRACEACT_DIFEXPR: 7726 case DTRACEACT_LIBACT: 7727 case DTRACEACT_PRINTF: 7728 case DTRACEACT_PRINTA: 7729 case DTRACEACT_SYSTEM: 7730 case DTRACEACT_FREOPEN: 7731 case DTRACEACT_TRACEMEM: 7732 break; 7733 7734 case DTRACEACT_TRACEMEM_DYNSIZE: 7735 tracememsize = val; 7736 break; 7737 7738 case DTRACEACT_SYM: 7739 case DTRACEACT_MOD: 7740 if (!dtrace_priv_kernel(state)) 7741 continue; 7742 break; 7743 7744 case DTRACEACT_USYM: 7745 case DTRACEACT_UMOD: 7746 case DTRACEACT_UADDR: { 7747 #ifdef illumos 7748 struct pid *pid = curthread->t_procp->p_pidp; 7749 #endif 7750 7751 if (!dtrace_priv_proc(state)) 7752 continue; 7753 7754 DTRACE_STORE(uint64_t, tomax, 7755 #ifdef illumos 7756 valoffs, (uint64_t)pid->pid_id); 7757 #else 7758 valoffs, (uint64_t) curproc->p_pid); 7759 #endif 7760 DTRACE_STORE(uint64_t, tomax, 7761 valoffs + sizeof (uint64_t), val); 7762 7763 continue; 7764 } 7765 7766 case DTRACEACT_EXIT: { 7767 /* 7768 * For the exit action, we are going to attempt 7769 * to atomically set our activity to be 7770 * draining. If this fails (either because 7771 * another CPU has beat us to the exit action, 7772 * or because our current activity is something 7773 * other than ACTIVE or WARMUP), we will 7774 * continue. This assures that the exit action 7775 * can be successfully recorded at most once 7776 * when we're in the ACTIVE state. If we're 7777 * encountering the exit() action while in 7778 * COOLDOWN, however, we want to honor the new 7779 * status code. (We know that we're the only 7780 * thread in COOLDOWN, so there is no race.) 7781 */ 7782 void *activity = &state->dts_activity; 7783 dtrace_activity_t current = state->dts_activity; 7784 7785 if (current == DTRACE_ACTIVITY_COOLDOWN) 7786 break; 7787 7788 if (current != DTRACE_ACTIVITY_WARMUP) 7789 current = DTRACE_ACTIVITY_ACTIVE; 7790 7791 if (dtrace_cas32(activity, current, 7792 DTRACE_ACTIVITY_DRAINING) != current) { 7793 *flags |= CPU_DTRACE_DROP; 7794 continue; 7795 } 7796 7797 break; 7798 } 7799 7800 default: 7801 ASSERT(0); 7802 } 7803 7804 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || 7805 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { 7806 uintptr_t end = valoffs + size; 7807 7808 if (tracememsize != 0 && 7809 valoffs + tracememsize < end) { 7810 end = valoffs + tracememsize; 7811 tracememsize = 0; 7812 } 7813 7814 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && 7815 !dtrace_vcanload((void *)(uintptr_t)val, 7816 &dp->dtdo_rtype, NULL, &mstate, vstate)) 7817 continue; 7818 7819 dtrace_store_by_ref(dp, tomax, size, &valoffs, 7820 &val, end, act->dta_intuple, 7821 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? 7822 DIF_TF_BYREF: DIF_TF_BYUREF); 7823 continue; 7824 } 7825 7826 switch (size) { 7827 case 0: 7828 break; 7829 7830 case sizeof (uint8_t): 7831 DTRACE_STORE(uint8_t, tomax, valoffs, val); 7832 break; 7833 case sizeof (uint16_t): 7834 DTRACE_STORE(uint16_t, tomax, valoffs, val); 7835 break; 7836 case sizeof (uint32_t): 7837 DTRACE_STORE(uint32_t, tomax, valoffs, val); 7838 break; 7839 case sizeof (uint64_t): 7840 DTRACE_STORE(uint64_t, tomax, valoffs, val); 7841 break; 7842 default: 7843 /* 7844 * Any other size should have been returned by 7845 * reference, not by value. 
7846 */ 7847 ASSERT(0); 7848 break; 7849 } 7850 } 7851 7852 if (*flags & CPU_DTRACE_DROP) 7853 continue; 7854 7855 if (*flags & CPU_DTRACE_FAULT) { 7856 int ndx; 7857 dtrace_action_t *err; 7858 7859 buf->dtb_errors++; 7860 7861 if (probe->dtpr_id == dtrace_probeid_error) { 7862 /* 7863 * There's nothing we can do -- we had an 7864 * error on the error probe. We bump an 7865 * error counter to at least indicate that 7866 * this condition happened. 7867 */ 7868 dtrace_error(&state->dts_dblerrors); 7869 continue; 7870 } 7871 7872 if (vtime) { 7873 /* 7874 * Before recursing on dtrace_probe(), we 7875 * need to explicitly clear out our start 7876 * time to prevent it from being accumulated 7877 * into t_dtrace_vtime. 7878 */ 7879 curthread->t_dtrace_start = 0; 7880 } 7881 7882 /* 7883 * Iterate over the actions to figure out which action 7884 * we were processing when we experienced the error. 7885 * Note that act points _past_ the faulting action; if 7886 * act is ecb->dte_action, the fault was in the 7887 * predicate, if it's ecb->dte_action->dta_next it's 7888 * in action #1, and so on. 7889 */ 7890 for (err = ecb->dte_action, ndx = 0; 7891 err != act; err = err->dta_next, ndx++) 7892 continue; 7893 7894 dtrace_probe_error(state, ecb->dte_epid, ndx, 7895 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 7896 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 7897 cpu_core[cpuid].cpuc_dtrace_illval); 7898 7899 continue; 7900 } 7901 7902 if (!committed) 7903 buf->dtb_offset = offs + ecb->dte_size; 7904 } 7905 7906 if (vtime) 7907 curthread->t_dtrace_start = dtrace_gethrtime(); 7908 7909 dtrace_interrupt_enable(cookie); 7910 } 7911 7912 /* 7913 * DTrace Probe Hashing Functions 7914 * 7915 * The functions in this section (and indeed, the functions in remaining 7916 * sections) are not _called_ from probe context. (Any exceptions to this are 7917 * marked with a "Note:".) Rather, they are called from elsewhere in the 7918 * DTrace framework to look-up probes in, add probes to and remove probes from 7919 * the DTrace probe hashes. (Each probe is hashed by each element of the 7920 * probe tuple -- allowing for fast lookups, regardless of what was 7921 * specified.) 
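* (dtrace_hash_str(), below, is essentially the classic ELF/PJW
* string hash; each hash table starts with a single bucket slot and
* doubles via dtrace_hash_resize() as probes are added.)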
7922 */ 7923 static uint_t 7924 dtrace_hash_str(const char *p) 7925 { 7926 unsigned int g; 7927 uint_t hval = 0; 7928 7929 while (*p) { 7930 hval = (hval << 4) + *p++; 7931 if ((g = (hval & 0xf0000000)) != 0) 7932 hval ^= g >> 24; 7933 hval &= ~g; 7934 } 7935 return (hval); 7936 } 7937 7938 static dtrace_hash_t * 7939 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 7940 { 7941 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 7942 7943 hash->dth_stroffs = stroffs; 7944 hash->dth_nextoffs = nextoffs; 7945 hash->dth_prevoffs = prevoffs; 7946 7947 hash->dth_size = 1; 7948 hash->dth_mask = hash->dth_size - 1; 7949 7950 hash->dth_tab = kmem_zalloc(hash->dth_size * 7951 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 7952 7953 return (hash); 7954 } 7955 7956 static void 7957 dtrace_hash_destroy(dtrace_hash_t *hash) 7958 { 7959 #ifdef DEBUG 7960 int i; 7961 7962 for (i = 0; i < hash->dth_size; i++) 7963 ASSERT(hash->dth_tab[i] == NULL); 7964 #endif 7965 7966 kmem_free(hash->dth_tab, 7967 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 7968 kmem_free(hash, sizeof (dtrace_hash_t)); 7969 } 7970 7971 static void 7972 dtrace_hash_resize(dtrace_hash_t *hash) 7973 { 7974 int size = hash->dth_size, i, ndx; 7975 int new_size = hash->dth_size << 1; 7976 int new_mask = new_size - 1; 7977 dtrace_hashbucket_t **new_tab, *bucket, *next; 7978 7979 ASSERT((new_size & new_mask) == 0); 7980 7981 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 7982 7983 for (i = 0; i < size; i++) { 7984 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 7985 dtrace_probe_t *probe = bucket->dthb_chain; 7986 7987 ASSERT(probe != NULL); 7988 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 7989 7990 next = bucket->dthb_next; 7991 bucket->dthb_next = new_tab[ndx]; 7992 new_tab[ndx] = bucket; 7993 } 7994 } 7995 7996 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 7997 hash->dth_tab = new_tab; 7998 hash->dth_size = new_size; 7999 hash->dth_mask = new_mask; 8000 } 8001 8002 static void 8003 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 8004 { 8005 int hashval = DTRACE_HASHSTR(hash, new); 8006 int ndx = hashval & hash->dth_mask; 8007 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8008 dtrace_probe_t **nextp, **prevp; 8009 8010 for (; bucket != NULL; bucket = bucket->dthb_next) { 8011 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 8012 goto add; 8013 } 8014 8015 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 8016 dtrace_hash_resize(hash); 8017 dtrace_hash_add(hash, new); 8018 return; 8019 } 8020 8021 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 8022 bucket->dthb_next = hash->dth_tab[ndx]; 8023 hash->dth_tab[ndx] = bucket; 8024 hash->dth_nbuckets++; 8025 8026 add: 8027 nextp = DTRACE_HASHNEXT(hash, new); 8028 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 8029 *nextp = bucket->dthb_chain; 8030 8031 if (bucket->dthb_chain != NULL) { 8032 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 8033 ASSERT(*prevp == NULL); 8034 *prevp = new; 8035 } 8036 8037 bucket->dthb_chain = new; 8038 bucket->dthb_len++; 8039 } 8040 8041 static dtrace_probe_t * 8042 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 8043 { 8044 int hashval = DTRACE_HASHSTR(hash, template); 8045 int ndx = hashval & hash->dth_mask; 8046 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8047 8048 for (; bucket != NULL; bucket = bucket->dthb_next) { 8049 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 8050 return 
(bucket->dthb_chain); 8051 } 8052 8053 return (NULL); 8054 } 8055 8056 static int 8057 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 8058 { 8059 int hashval = DTRACE_HASHSTR(hash, template); 8060 int ndx = hashval & hash->dth_mask; 8061 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8062 8063 for (; bucket != NULL; bucket = bucket->dthb_next) { 8064 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 8065 return (bucket->dthb_len); 8066 } 8067 8068 return (0); 8069 } 8070 8071 static void 8072 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 8073 { 8074 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 8075 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8076 8077 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 8078 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 8079 8080 /* 8081 * Find the bucket that we're removing this probe from. 8082 */ 8083 for (; bucket != NULL; bucket = bucket->dthb_next) { 8084 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 8085 break; 8086 } 8087 8088 ASSERT(bucket != NULL); 8089 8090 if (*prevp == NULL) { 8091 if (*nextp == NULL) { 8092 /* 8093 * The removed probe was the only probe on this 8094 * bucket; we need to remove the bucket. 8095 */ 8096 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 8097 8098 ASSERT(bucket->dthb_chain == probe); 8099 ASSERT(b != NULL); 8100 8101 if (b == bucket) { 8102 hash->dth_tab[ndx] = bucket->dthb_next; 8103 } else { 8104 while (b->dthb_next != bucket) 8105 b = b->dthb_next; 8106 b->dthb_next = bucket->dthb_next; 8107 } 8108 8109 ASSERT(hash->dth_nbuckets > 0); 8110 hash->dth_nbuckets--; 8111 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 8112 return; 8113 } 8114 8115 bucket->dthb_chain = *nextp; 8116 } else { 8117 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 8118 } 8119 8120 if (*nextp != NULL) 8121 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 8122 } 8123 8124 /* 8125 * DTrace Utility Functions 8126 * 8127 * These are random utility functions that are _not_ called from probe context. 8128 */ 8129 static int 8130 dtrace_badattr(const dtrace_attribute_t *a) 8131 { 8132 return (a->dtat_name > DTRACE_STABILITY_MAX || 8133 a->dtat_data > DTRACE_STABILITY_MAX || 8134 a->dtat_class > DTRACE_CLASS_MAX); 8135 } 8136 8137 /* 8138 * Return a duplicate copy of a string. If the specified string is NULL, 8139 * this function returns a zero-length string. 8140 */ 8141 static char * 8142 dtrace_strdup(const char *str) 8143 { 8144 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 8145 8146 if (str != NULL) 8147 (void) strcpy(new, str); 8148 8149 return (new); 8150 } 8151 8152 #define DTRACE_ISALPHA(c) \ 8153 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 8154 8155 static int 8156 dtrace_badname(const char *s) 8157 { 8158 char c; 8159 8160 if (s == NULL || (c = *s++) == '\0') 8161 return (0); 8162 8163 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 8164 return (1); 8165 8166 while ((c = *s++) != '\0') { 8167 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 8168 c != '-' && c != '_' && c != '.' && c != '`') 8169 return (1); 8170 } 8171 8172 return (0); 8173 } 8174 8175 static void 8176 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 8177 { 8178 uint32_t priv; 8179 8180 #ifdef illumos 8181 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 8182 /* 8183 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
8184 */ 8185 priv = DTRACE_PRIV_ALL; 8186 } else { 8187 *uidp = crgetuid(cr); 8188 *zoneidp = crgetzoneid(cr); 8189 8190 priv = 0; 8191 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 8192 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 8193 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 8194 priv |= DTRACE_PRIV_USER; 8195 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 8196 priv |= DTRACE_PRIV_PROC; 8197 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 8198 priv |= DTRACE_PRIV_OWNER; 8199 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 8200 priv |= DTRACE_PRIV_ZONEOWNER; 8201 } 8202 #else 8203 priv = DTRACE_PRIV_ALL; 8204 #endif 8205 8206 *privp = priv; 8207 } 8208 8209 #ifdef DTRACE_ERRDEBUG 8210 static void 8211 dtrace_errdebug(const char *str) 8212 { 8213 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 8214 int occupied = 0; 8215 8216 mutex_enter(&dtrace_errlock); 8217 dtrace_errlast = str; 8218 dtrace_errthread = curthread; 8219 8220 while (occupied++ < DTRACE_ERRHASHSZ) { 8221 if (dtrace_errhash[hval].dter_msg == str) { 8222 dtrace_errhash[hval].dter_count++; 8223 goto out; 8224 } 8225 8226 if (dtrace_errhash[hval].dter_msg != NULL) { 8227 hval = (hval + 1) % DTRACE_ERRHASHSZ; 8228 continue; 8229 } 8230 8231 dtrace_errhash[hval].dter_msg = str; 8232 dtrace_errhash[hval].dter_count = 1; 8233 goto out; 8234 } 8235 8236 panic("dtrace: undersized error hash"); 8237 out: 8238 mutex_exit(&dtrace_errlock); 8239 } 8240 #endif 8241 8242 /* 8243 * DTrace Matching Functions 8244 * 8245 * These functions are used to match groups of probes, given some elements of 8246 * a probe tuple, or some globbed expressions for elements of a probe tuple. 8247 */ 8248 static int 8249 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 8250 zoneid_t zoneid) 8251 { 8252 if (priv != DTRACE_PRIV_ALL) { 8253 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 8254 uint32_t match = priv & ppriv; 8255 8256 /* 8257 * No PRIV_DTRACE_* privileges... 8258 */ 8259 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 8260 DTRACE_PRIV_KERNEL)) == 0) 8261 return (0); 8262 8263 /* 8264 * No matching bits, but there were bits to match... 8265 */ 8266 if (match == 0 && ppriv != 0) 8267 return (0); 8268 8269 /* 8270 * Need to have permissions to the process, but don't... 8271 */ 8272 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 8273 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 8274 return (0); 8275 } 8276 8277 /* 8278 * Need to be in the same zone unless we possess the 8279 * privilege to examine all zones. 8280 */ 8281 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 8282 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 8283 return (0); 8284 } 8285 } 8286 8287 return (1); 8288 } 8289 8290 /* 8291 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 8292 * consists of input pattern strings and an ops-vector to evaluate them. 8293 * This function returns >0 for match, 0 for no match, and <0 for error. 
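 *
 * Informally (a sketch of the evaluation below, not additional logic):
 *
 *	provider is defunct                        ->  no match (0)
 *	pmatch(provider name, dtpk_prov)  <= 0     ->  return that value
 *	mmatch(module name, dtpk_mod)     <= 0     ->  return that value
 *	fmatch(function name, dtpk_func)  <= 0     ->  return that value
 *	nmatch(probe name, dtpk_name)     <= 0     ->  return that value
 *	privilege check fails                      ->  no match (0)
 *	otherwise                                  ->  match (> 0)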
8294 */ 8295 static int 8296 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 8297 uint32_t priv, uid_t uid, zoneid_t zoneid) 8298 { 8299 dtrace_provider_t *pvp = prp->dtpr_provider; 8300 int rv; 8301 8302 if (pvp->dtpv_defunct) 8303 return (0); 8304 8305 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 8306 return (rv); 8307 8308 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 8309 return (rv); 8310 8311 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 8312 return (rv); 8313 8314 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 8315 return (rv); 8316 8317 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 8318 return (0); 8319 8320 return (rv); 8321 } 8322 8323 /* 8324 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 8325 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 8326 * libc's version, the kernel version only applies to 8-bit ASCII strings. 8327 * In addition, all of the recursion cases except for '*' matching have been 8328 * unwound. For '*', we still implement recursive evaluation, but a depth 8329 * counter is maintained and matching is aborted if we recurse too deep. 8330 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 8331 */ 8332 static int 8333 dtrace_match_glob(const char *s, const char *p, int depth) 8334 { 8335 const char *olds; 8336 char s1, c; 8337 int gs; 8338 8339 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 8340 return (-1); 8341 8342 if (s == NULL) 8343 s = ""; /* treat NULL as empty string */ 8344 8345 top: 8346 olds = s; 8347 s1 = *s++; 8348 8349 if (p == NULL) 8350 return (0); 8351 8352 if ((c = *p++) == '\0') 8353 return (s1 == '\0'); 8354 8355 switch (c) { 8356 case '[': { 8357 int ok = 0, notflag = 0; 8358 char lc = '\0'; 8359 8360 if (s1 == '\0') 8361 return (0); 8362 8363 if (*p == '!') { 8364 notflag = 1; 8365 p++; 8366 } 8367 8368 if ((c = *p++) == '\0') 8369 return (0); 8370 8371 do { 8372 if (c == '-' && lc != '\0' && *p != ']') { 8373 if ((c = *p++) == '\0') 8374 return (0); 8375 if (c == '\\' && (c = *p++) == '\0') 8376 return (0); 8377 8378 if (notflag) { 8379 if (s1 < lc || s1 > c) 8380 ok++; 8381 else 8382 return (0); 8383 } else if (lc <= s1 && s1 <= c) 8384 ok++; 8385 8386 } else if (c == '\\' && (c = *p++) == '\0') 8387 return (0); 8388 8389 lc = c; /* save left-hand 'c' for next iteration */ 8390 8391 if (notflag) { 8392 if (s1 != c) 8393 ok++; 8394 else 8395 return (0); 8396 } else if (s1 == c) 8397 ok++; 8398 8399 if ((c = *p++) == '\0') 8400 return (0); 8401 8402 } while (c != ']'); 8403 8404 if (ok) 8405 goto top; 8406 8407 return (0); 8408 } 8409 8410 case '\\': 8411 if ((c = *p++) == '\0') 8412 return (0); 8413 /*FALLTHRU*/ 8414 8415 default: 8416 if (c != s1) 8417 return (0); 8418 /*FALLTHRU*/ 8419 8420 case '?': 8421 if (s1 != '\0') 8422 goto top; 8423 return (0); 8424 8425 case '*': 8426 while (*p == '*') 8427 p++; /* consecutive *'s are identical to a single one */ 8428 8429 if (*p == '\0') 8430 return (1); 8431 8432 for (s = olds; *s != '\0'; s++) { 8433 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 8434 return (gs); 8435 } 8436 8437 return (0); 8438 } 8439 } 8440 8441 /*ARGSUSED*/ 8442 static int 8443 dtrace_match_string(const char *s, const char *p, int depth) 8444 { 8445 return (s != NULL && strcmp(s, p) == 0); 8446 } 8447 8448 /*ARGSUSED*/ 8449 static int 8450 dtrace_match_nul(const char *s, const char *p, int depth) 8451 { 8452 return (1); /* 
always match the empty pattern */ 8453 } 8454 8455 /*ARGSUSED*/ 8456 static int 8457 dtrace_match_nonzero(const char *s, const char *p, int depth) 8458 { 8459 return (s != NULL && s[0] != '\0'); 8460 } 8461 8462 static int 8463 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 8464 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 8465 { 8466 dtrace_probe_t template, *probe; 8467 dtrace_hash_t *hash = NULL; 8468 int len, best = INT_MAX, nmatched = 0; 8469 dtrace_id_t i; 8470 8471 ASSERT(MUTEX_HELD(&dtrace_lock)); 8472 8473 /* 8474 * If the probe ID is specified in the key, just lookup by ID and 8475 * invoke the match callback once if a matching probe is found. 8476 */ 8477 if (pkp->dtpk_id != DTRACE_IDNONE) { 8478 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 8479 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 8480 (void) (*matched)(probe, arg); 8481 nmatched++; 8482 } 8483 return (nmatched); 8484 } 8485 8486 template.dtpr_mod = (char *)pkp->dtpk_mod; 8487 template.dtpr_func = (char *)pkp->dtpk_func; 8488 template.dtpr_name = (char *)pkp->dtpk_name; 8489 8490 /* 8491 * We want to find the most distinct of the module name, function 8492 * name, and name. So for each one that is not a glob pattern or 8493 * empty string, we perform a lookup in the corresponding hash and 8494 * use the hash table with the fewest collisions to do our search. 8495 */ 8496 if (pkp->dtpk_mmatch == &dtrace_match_string && 8497 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 8498 best = len; 8499 hash = dtrace_bymod; 8500 } 8501 8502 if (pkp->dtpk_fmatch == &dtrace_match_string && 8503 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 8504 best = len; 8505 hash = dtrace_byfunc; 8506 } 8507 8508 if (pkp->dtpk_nmatch == &dtrace_match_string && 8509 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 8510 best = len; 8511 hash = dtrace_byname; 8512 } 8513 8514 /* 8515 * If we did not select a hash table, iterate over every probe and 8516 * invoke our callback for each one that matches our input probe key. 8517 */ 8518 if (hash == NULL) { 8519 for (i = 0; i < dtrace_nprobes; i++) { 8520 if ((probe = dtrace_probes[i]) == NULL || 8521 dtrace_match_probe(probe, pkp, priv, uid, 8522 zoneid) <= 0) 8523 continue; 8524 8525 nmatched++; 8526 8527 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8528 break; 8529 } 8530 8531 return (nmatched); 8532 } 8533 8534 /* 8535 * If we selected a hash table, iterate over each probe of the same key 8536 * name and invoke the callback for every probe that matches the other 8537 * attributes of our input probe key. 8538 */ 8539 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 8540 probe = *(DTRACE_HASHNEXT(hash, probe))) { 8541 8542 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 8543 continue; 8544 8545 nmatched++; 8546 8547 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8548 break; 8549 } 8550 8551 return (nmatched); 8552 } 8553 8554 /* 8555 * Return the function pointer dtrace_probecmp() should use to compare the 8556 * specified pattern with a string. For NULL or empty patterns, we select 8557 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 8558 * For non-empty non-glob strings, we use dtrace_match_string(). 
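 *
 * For example (illustrative values only):
 *
 *	dtrace_probekey_func(NULL)       == &dtrace_match_nul
 *	dtrace_probekey_func("")         == &dtrace_match_nul
 *	dtrace_probekey_func("entry")    == &dtrace_match_string
 *	dtrace_probekey_func("read*")    == &dtrace_match_glob
 *	dtrace_probekey_func("[gs]et")   == &dtrace_match_glob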
8559 */ 8560 static dtrace_probekey_f * 8561 dtrace_probekey_func(const char *p) 8562 { 8563 char c; 8564 8565 if (p == NULL || *p == '\0') 8566 return (&dtrace_match_nul); 8567 8568 while ((c = *p++) != '\0') { 8569 if (c == '[' || c == '?' || c == '*' || c == '\\') 8570 return (&dtrace_match_glob); 8571 } 8572 8573 return (&dtrace_match_string); 8574 } 8575 8576 /* 8577 * Build a probe comparison key for use with dtrace_match_probe() from the 8578 * given probe description. By convention, a null key only matches anchored 8579 * probes: if each field is the empty string, reset dtpk_fmatch to 8580 * dtrace_match_nonzero(). 8581 */ 8582 static void 8583 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 8584 { 8585 pkp->dtpk_prov = pdp->dtpd_provider; 8586 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 8587 8588 pkp->dtpk_mod = pdp->dtpd_mod; 8589 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 8590 8591 pkp->dtpk_func = pdp->dtpd_func; 8592 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 8593 8594 pkp->dtpk_name = pdp->dtpd_name; 8595 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 8596 8597 pkp->dtpk_id = pdp->dtpd_id; 8598 8599 if (pkp->dtpk_id == DTRACE_IDNONE && 8600 pkp->dtpk_pmatch == &dtrace_match_nul && 8601 pkp->dtpk_mmatch == &dtrace_match_nul && 8602 pkp->dtpk_fmatch == &dtrace_match_nul && 8603 pkp->dtpk_nmatch == &dtrace_match_nul) 8604 pkp->dtpk_fmatch = &dtrace_match_nonzero; 8605 } 8606 8607 /* 8608 * DTrace Provider-to-Framework API Functions 8609 * 8610 * These functions implement much of the Provider-to-Framework API, as 8611 * described in <sys/dtrace.h>. The parts of the API not in this section are 8612 * the functions in the API for probe management (found below), and 8613 * dtrace_probe() itself (found above). 8614 */ 8615 8616 /* 8617 * Register the calling provider with the DTrace framework. This should 8618 * generally be called by DTrace providers in their attach(9E) entry point. 8619 */ 8620 int 8621 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 8622 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 8623 { 8624 dtrace_provider_t *provider; 8625 8626 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 8627 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8628 "arguments", name ? 
name : "<NULL>"); 8629 return (EINVAL); 8630 } 8631 8632 if (name[0] == '\0' || dtrace_badname(name)) { 8633 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8634 "provider name", name); 8635 return (EINVAL); 8636 } 8637 8638 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 8639 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 8640 pops->dtps_destroy == NULL || 8641 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 8642 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8643 "provider ops", name); 8644 return (EINVAL); 8645 } 8646 8647 if (dtrace_badattr(&pap->dtpa_provider) || 8648 dtrace_badattr(&pap->dtpa_mod) || 8649 dtrace_badattr(&pap->dtpa_func) || 8650 dtrace_badattr(&pap->dtpa_name) || 8651 dtrace_badattr(&pap->dtpa_args)) { 8652 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8653 "provider attributes", name); 8654 return (EINVAL); 8655 } 8656 8657 if (priv & ~DTRACE_PRIV_ALL) { 8658 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8659 "privilege attributes", name); 8660 return (EINVAL); 8661 } 8662 8663 if ((priv & DTRACE_PRIV_KERNEL) && 8664 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 8665 pops->dtps_usermode == NULL) { 8666 cmn_err(CE_WARN, "failed to register provider '%s': need " 8667 "dtps_usermode() op for given privilege attributes", name); 8668 return (EINVAL); 8669 } 8670 8671 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 8672 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8673 (void) strcpy(provider->dtpv_name, name); 8674 8675 provider->dtpv_attr = *pap; 8676 provider->dtpv_priv.dtpp_flags = priv; 8677 if (cr != NULL) { 8678 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 8679 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 8680 } 8681 provider->dtpv_pops = *pops; 8682 8683 if (pops->dtps_provide == NULL) { 8684 ASSERT(pops->dtps_provide_module != NULL); 8685 provider->dtpv_pops.dtps_provide = 8686 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 8687 } 8688 8689 if (pops->dtps_provide_module == NULL) { 8690 ASSERT(pops->dtps_provide != NULL); 8691 provider->dtpv_pops.dtps_provide_module = 8692 (void (*)(void *, modctl_t *))dtrace_nullop; 8693 } 8694 8695 if (pops->dtps_suspend == NULL) { 8696 ASSERT(pops->dtps_resume == NULL); 8697 provider->dtpv_pops.dtps_suspend = 8698 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8699 provider->dtpv_pops.dtps_resume = 8700 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8701 } 8702 8703 provider->dtpv_arg = arg; 8704 *idp = (dtrace_provider_id_t)provider; 8705 8706 if (pops == &dtrace_provider_ops) { 8707 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8708 ASSERT(MUTEX_HELD(&dtrace_lock)); 8709 ASSERT(dtrace_anon.dta_enabling == NULL); 8710 8711 /* 8712 * We make sure that the DTrace provider is at the head of 8713 * the provider chain. 8714 */ 8715 provider->dtpv_next = dtrace_provider; 8716 dtrace_provider = provider; 8717 return (0); 8718 } 8719 8720 mutex_enter(&dtrace_provider_lock); 8721 mutex_enter(&dtrace_lock); 8722 8723 /* 8724 * If there is at least one provider registered, we'll add this 8725 * provider after the first provider. 
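 *
 * As a sketch of the resulting ordering: with only the "dtrace" provider
 * present, registering provider A and then provider B yields the chain
 *
 *	dtrace -> B -> A
 *
 * that is, each newly registered provider is linked in immediately after
 * the framework's own provider.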
8726 */ 8727 if (dtrace_provider != NULL) { 8728 provider->dtpv_next = dtrace_provider->dtpv_next; 8729 dtrace_provider->dtpv_next = provider; 8730 } else { 8731 dtrace_provider = provider; 8732 } 8733 8734 if (dtrace_retained != NULL) { 8735 dtrace_enabling_provide(provider); 8736 8737 /* 8738 * Now we need to call dtrace_enabling_matchall() -- which 8739 * will acquire cpu_lock and dtrace_lock. We therefore need 8740 * to drop all of our locks before calling into it... 8741 */ 8742 mutex_exit(&dtrace_lock); 8743 mutex_exit(&dtrace_provider_lock); 8744 dtrace_enabling_matchall(); 8745 8746 return (0); 8747 } 8748 8749 mutex_exit(&dtrace_lock); 8750 mutex_exit(&dtrace_provider_lock); 8751 8752 return (0); 8753 } 8754 8755 /* 8756 * Unregister the specified provider from the DTrace framework. This should 8757 * generally be called by DTrace providers in their detach(9E) entry point. 8758 */ 8759 int 8760 dtrace_unregister(dtrace_provider_id_t id) 8761 { 8762 dtrace_provider_t *old = (dtrace_provider_t *)id; 8763 dtrace_provider_t *prev = NULL; 8764 int i, self = 0, noreap = 0; 8765 dtrace_probe_t *probe, *first = NULL; 8766 8767 if (old->dtpv_pops.dtps_enable == 8768 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 8769 /* 8770 * If DTrace itself is the provider, we're called with locks 8771 * already held. 8772 */ 8773 ASSERT(old == dtrace_provider); 8774 #ifdef illumos 8775 ASSERT(dtrace_devi != NULL); 8776 #endif 8777 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8778 ASSERT(MUTEX_HELD(&dtrace_lock)); 8779 self = 1; 8780 8781 if (dtrace_provider->dtpv_next != NULL) { 8782 /* 8783 * There's another provider here; return failure. 8784 */ 8785 return (EBUSY); 8786 } 8787 } else { 8788 mutex_enter(&dtrace_provider_lock); 8789 #ifdef illumos 8790 mutex_enter(&mod_lock); 8791 #endif 8792 mutex_enter(&dtrace_lock); 8793 } 8794 8795 /* 8796 * If anyone has /dev/dtrace open, or if there are anonymous enabled 8797 * probes, we refuse to let providers slither away, unless this 8798 * provider has already been explicitly invalidated. 8799 */ 8800 if (!old->dtpv_defunct && 8801 (dtrace_opens || (dtrace_anon.dta_state != NULL && 8802 dtrace_anon.dta_state->dts_necbs > 0))) { 8803 if (!self) { 8804 mutex_exit(&dtrace_lock); 8805 #ifdef illumos 8806 mutex_exit(&mod_lock); 8807 #endif 8808 mutex_exit(&dtrace_provider_lock); 8809 } 8810 return (EBUSY); 8811 } 8812 8813 /* 8814 * Attempt to destroy the probes associated with this provider. 8815 */ 8816 for (i = 0; i < dtrace_nprobes; i++) { 8817 if ((probe = dtrace_probes[i]) == NULL) 8818 continue; 8819 8820 if (probe->dtpr_provider != old) 8821 continue; 8822 8823 if (probe->dtpr_ecb == NULL) 8824 continue; 8825 8826 /* 8827 * If we are trying to unregister a defunct provider, and the 8828 * provider was made defunct within the interval dictated by 8829 * dtrace_unregister_defunct_reap, we'll (asynchronously) 8830 * attempt to reap our enablings. To denote that the provider 8831 * should reattempt to unregister itself at some point in the 8832 * future, we will return a differentiable error code (EAGAIN 8833 * instead of EBUSY) in this case. 
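 *
 * A provider's detach(9E) routine can therefore treat the two codes
 * differently, along these (hypothetical) lines, where "foo_id" stands
 * in for the provider's registration id:
 *
 *	switch (dtrace_unregister(foo_id)) {
 *	case 0:
 *		break;			/* unregistered; free our state */
 *	case EAGAIN:
 *		return (DDI_FAILURE);	/* reap in progress; retry later */
 *	default:
 *		return (DDI_FAILURE);	/* still in use */
 *	}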
8834 */ 8835 if (dtrace_gethrtime() - old->dtpv_defunct > 8836 dtrace_unregister_defunct_reap) 8837 noreap = 1; 8838 8839 if (!self) { 8840 mutex_exit(&dtrace_lock); 8841 #ifdef illumos 8842 mutex_exit(&mod_lock); 8843 #endif 8844 mutex_exit(&dtrace_provider_lock); 8845 } 8846 8847 if (noreap) 8848 return (EBUSY); 8849 8850 (void) taskq_dispatch(dtrace_taskq, 8851 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 8852 8853 return (EAGAIN); 8854 } 8855 8856 /* 8857 * All of the probes for this provider are disabled; we can safely 8858 * remove all of them from their hash chains and from the probe array. 8859 */ 8860 for (i = 0; i < dtrace_nprobes; i++) { 8861 if ((probe = dtrace_probes[i]) == NULL) 8862 continue; 8863 8864 if (probe->dtpr_provider != old) 8865 continue; 8866 8867 dtrace_probes[i] = NULL; 8868 8869 dtrace_hash_remove(dtrace_bymod, probe); 8870 dtrace_hash_remove(dtrace_byfunc, probe); 8871 dtrace_hash_remove(dtrace_byname, probe); 8872 8873 if (first == NULL) { 8874 first = probe; 8875 probe->dtpr_nextmod = NULL; 8876 } else { 8877 probe->dtpr_nextmod = first; 8878 first = probe; 8879 } 8880 } 8881 8882 /* 8883 * The provider's probes have been removed from the hash chains and 8884 * from the probe array. Now issue a dtrace_sync() to be sure that 8885 * everyone has cleared out from any probe array processing. 8886 */ 8887 dtrace_sync(); 8888 8889 for (probe = first; probe != NULL; probe = first) { 8890 first = probe->dtpr_nextmod; 8891 8892 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 8893 probe->dtpr_arg); 8894 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8895 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8896 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8897 #ifdef illumos 8898 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 8899 #else 8900 free_unr(dtrace_arena, probe->dtpr_id); 8901 #endif 8902 kmem_free(probe, sizeof (dtrace_probe_t)); 8903 } 8904 8905 if ((prev = dtrace_provider) == old) { 8906 #ifdef illumos 8907 ASSERT(self || dtrace_devi == NULL); 8908 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 8909 #endif 8910 dtrace_provider = old->dtpv_next; 8911 } else { 8912 while (prev != NULL && prev->dtpv_next != old) 8913 prev = prev->dtpv_next; 8914 8915 if (prev == NULL) { 8916 panic("attempt to unregister non-existent " 8917 "dtrace provider %p\n", (void *)id); 8918 } 8919 8920 prev->dtpv_next = old->dtpv_next; 8921 } 8922 8923 if (!self) { 8924 mutex_exit(&dtrace_lock); 8925 #ifdef illumos 8926 mutex_exit(&mod_lock); 8927 #endif 8928 mutex_exit(&dtrace_provider_lock); 8929 } 8930 8931 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 8932 kmem_free(old, sizeof (dtrace_provider_t)); 8933 8934 return (0); 8935 } 8936 8937 /* 8938 * Invalidate the specified provider. All subsequent probe lookups for the 8939 * specified provider will fail, but its probes will not be removed. 8940 */ 8941 void 8942 dtrace_invalidate(dtrace_provider_id_t id) 8943 { 8944 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 8945 8946 ASSERT(pvp->dtpv_pops.dtps_enable != 8947 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8948 8949 mutex_enter(&dtrace_provider_lock); 8950 mutex_enter(&dtrace_lock); 8951 8952 pvp->dtpv_defunct = dtrace_gethrtime(); 8953 8954 mutex_exit(&dtrace_lock); 8955 mutex_exit(&dtrace_provider_lock); 8956 } 8957 8958 /* 8959 * Indicate whether or not DTrace has attached. 
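 *
 * A typical caller (sketch only) simply checks this before doing any
 * DTrace-related work:
 *
 *	if (!dtrace_attached())
 *		return;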
8960 */ 8961 int 8962 dtrace_attached(void) 8963 { 8964 /* 8965 * dtrace_provider will be non-NULL iff the DTrace driver has 8966 * attached. (It's non-NULL because DTrace is always itself a 8967 * provider.) 8968 */ 8969 return (dtrace_provider != NULL); 8970 } 8971 8972 /* 8973 * Remove all the unenabled probes for the given provider. This function is 8974 * not unlike dtrace_unregister(), except that it doesn't remove the provider 8975 * -- just as many of its associated probes as it can. 8976 */ 8977 int 8978 dtrace_condense(dtrace_provider_id_t id) 8979 { 8980 dtrace_provider_t *prov = (dtrace_provider_t *)id; 8981 int i; 8982 dtrace_probe_t *probe; 8983 8984 /* 8985 * Make sure this isn't the dtrace provider itself. 8986 */ 8987 ASSERT(prov->dtpv_pops.dtps_enable != 8988 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8989 8990 mutex_enter(&dtrace_provider_lock); 8991 mutex_enter(&dtrace_lock); 8992 8993 /* 8994 * Attempt to destroy the probes associated with this provider. 8995 */ 8996 for (i = 0; i < dtrace_nprobes; i++) { 8997 if ((probe = dtrace_probes[i]) == NULL) 8998 continue; 8999 9000 if (probe->dtpr_provider != prov) 9001 continue; 9002 9003 if (probe->dtpr_ecb != NULL) 9004 continue; 9005 9006 dtrace_probes[i] = NULL; 9007 9008 dtrace_hash_remove(dtrace_bymod, probe); 9009 dtrace_hash_remove(dtrace_byfunc, probe); 9010 dtrace_hash_remove(dtrace_byname, probe); 9011 9012 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 9013 probe->dtpr_arg); 9014 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 9015 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 9016 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 9017 kmem_free(probe, sizeof (dtrace_probe_t)); 9018 #ifdef illumos 9019 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 9020 #else 9021 free_unr(dtrace_arena, i + 1); 9022 #endif 9023 } 9024 9025 mutex_exit(&dtrace_lock); 9026 mutex_exit(&dtrace_provider_lock); 9027 9028 return (0); 9029 } 9030 9031 /* 9032 * DTrace Probe Management Functions 9033 * 9034 * The functions in this section perform the DTrace probe management, 9035 * including functions to create probes, look-up probes, and call into the 9036 * providers to request that probes be provided. Some of these functions are 9037 * in the Provider-to-Framework API; these functions can be identified by the 9038 * fact that they are not declared "static". 9039 */ 9040 9041 /* 9042 * Create a probe with the specified module name, function name, and name. 
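 *
 * A provider typically calls this from its dtps_provide() entry point,
 * guarding against duplicates, e.g. (hypothetical provider "foo"):
 *
 *	if (dtrace_probe_lookup(foo_id, "foomod", "foofunc", "entry") == 0)
 *		(void) dtrace_probe_create(foo_id, "foomod", "foofunc",
 *		    "entry", 0, NULL);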
9043 */ 9044 dtrace_id_t 9045 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 9046 const char *func, const char *name, int aframes, void *arg) 9047 { 9048 dtrace_probe_t *probe, **probes; 9049 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 9050 dtrace_id_t id; 9051 9052 if (provider == dtrace_provider) { 9053 ASSERT(MUTEX_HELD(&dtrace_lock)); 9054 } else { 9055 mutex_enter(&dtrace_lock); 9056 } 9057 9058 #ifdef illumos 9059 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 9060 VM_BESTFIT | VM_SLEEP); 9061 #else 9062 id = alloc_unr(dtrace_arena); 9063 #endif 9064 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 9065 9066 probe->dtpr_id = id; 9067 probe->dtpr_gen = dtrace_probegen++; 9068 probe->dtpr_mod = dtrace_strdup(mod); 9069 probe->dtpr_func = dtrace_strdup(func); 9070 probe->dtpr_name = dtrace_strdup(name); 9071 probe->dtpr_arg = arg; 9072 probe->dtpr_aframes = aframes; 9073 probe->dtpr_provider = provider; 9074 9075 dtrace_hash_add(dtrace_bymod, probe); 9076 dtrace_hash_add(dtrace_byfunc, probe); 9077 dtrace_hash_add(dtrace_byname, probe); 9078 9079 if (id - 1 >= dtrace_nprobes) { 9080 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 9081 size_t nsize = osize << 1; 9082 9083 if (nsize == 0) { 9084 ASSERT(osize == 0); 9085 ASSERT(dtrace_probes == NULL); 9086 nsize = sizeof (dtrace_probe_t *); 9087 } 9088 9089 probes = kmem_zalloc(nsize, KM_SLEEP); 9090 9091 if (dtrace_probes == NULL) { 9092 ASSERT(osize == 0); 9093 dtrace_probes = probes; 9094 dtrace_nprobes = 1; 9095 } else { 9096 dtrace_probe_t **oprobes = dtrace_probes; 9097 9098 bcopy(oprobes, probes, osize); 9099 dtrace_membar_producer(); 9100 dtrace_probes = probes; 9101 9102 dtrace_sync(); 9103 9104 /* 9105 * All CPUs are now seeing the new probes array; we can 9106 * safely free the old array. 9107 */ 9108 kmem_free(oprobes, osize); 9109 dtrace_nprobes <<= 1; 9110 } 9111 9112 ASSERT(id - 1 < dtrace_nprobes); 9113 } 9114 9115 ASSERT(dtrace_probes[id - 1] == NULL); 9116 dtrace_probes[id - 1] = probe; 9117 9118 if (provider != dtrace_provider) 9119 mutex_exit(&dtrace_lock); 9120 9121 return (id); 9122 } 9123 9124 static dtrace_probe_t * 9125 dtrace_probe_lookup_id(dtrace_id_t id) 9126 { 9127 ASSERT(MUTEX_HELD(&dtrace_lock)); 9128 9129 if (id == 0 || id > dtrace_nprobes) 9130 return (NULL); 9131 9132 return (dtrace_probes[id - 1]); 9133 } 9134 9135 static int 9136 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 9137 { 9138 *((dtrace_id_t *)arg) = probe->dtpr_id; 9139 9140 return (DTRACE_MATCH_DONE); 9141 } 9142 9143 /* 9144 * Look up a probe based on provider and one or more of module name, function 9145 * name and probe name. 9146 */ 9147 dtrace_id_t 9148 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 9149 char *func, char *name) 9150 { 9151 dtrace_probekey_t pkey; 9152 dtrace_id_t id; 9153 int match; 9154 9155 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 9156 pkey.dtpk_pmatch = &dtrace_match_string; 9157 pkey.dtpk_mod = mod; 9158 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 9159 pkey.dtpk_func = func; 9160 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 9161 pkey.dtpk_name = name; 9162 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul; 9163 pkey.dtpk_id = DTRACE_IDNONE; 9164 9165 mutex_enter(&dtrace_lock); 9166 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 9167 dtrace_probe_lookup_match, &id); 9168 mutex_exit(&dtrace_lock); 9169 9170 ASSERT(match == 1 || match == 0); 9171 return (match ? id : 0); 9172 } 9173 9174 /* 9175 * Returns the probe argument associated with the specified probe. 9176 */ 9177 void * 9178 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 9179 { 9180 dtrace_probe_t *probe; 9181 void *rval = NULL; 9182 9183 mutex_enter(&dtrace_lock); 9184 9185 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 9186 probe->dtpr_provider == (dtrace_provider_t *)id) 9187 rval = probe->dtpr_arg; 9188 9189 mutex_exit(&dtrace_lock); 9190 9191 return (rval); 9192 } 9193 9194 /* 9195 * Copy a probe into a probe description. 9196 */ 9197 static void 9198 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 9199 { 9200 bzero(pdp, sizeof (dtrace_probedesc_t)); 9201 pdp->dtpd_id = prp->dtpr_id; 9202 9203 (void) strncpy(pdp->dtpd_provider, 9204 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 9205 9206 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 9207 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 9208 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 9209 } 9210 9211 /* 9212 * Called to indicate that a probe -- or probes -- should be provided by a 9213 * specfied provider. If the specified description is NULL, the provider will 9214 * be told to provide all of its probes. (This is done whenever a new 9215 * consumer comes along, or whenever a retained enabling is to be matched.) If 9216 * the specified description is non-NULL, the provider is given the 9217 * opportunity to dynamically provide the specified probe, allowing providers 9218 * to support the creation of probes on-the-fly. (So-called _autocreated_ 9219 * probes.) If the provider is NULL, the operations will be applied to all 9220 * providers; if the provider is non-NULL the operations will only be applied 9221 * to the specified provider. The dtrace_provider_lock must be held, and the 9222 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 9223 * will need to grab the dtrace_lock when it reenters the framework through 9224 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 9225 */ 9226 static void 9227 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 9228 { 9229 #ifdef illumos 9230 modctl_t *ctl; 9231 #endif 9232 int all = 0; 9233 9234 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9235 9236 if (prv == NULL) { 9237 all = 1; 9238 prv = dtrace_provider; 9239 } 9240 9241 do { 9242 /* 9243 * First, call the blanket provide operation. 9244 */ 9245 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 9246 9247 #ifdef illumos 9248 /* 9249 * Now call the per-module provide operation. We will grab 9250 * mod_lock to prevent the list from being modified. Note 9251 * that this also prevents the mod_busy bits from changing. 9252 * (mod_busy can only be changed with mod_lock held.) 
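 *
 * Note that modules that are currently busy, or that have no loaded
 * image (mod_mp == NULL), are simply skipped in the walk below.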
9253 */ 9254 mutex_enter(&mod_lock); 9255 9256 ctl = &modules; 9257 do { 9258 if (ctl->mod_busy || ctl->mod_mp == NULL) 9259 continue; 9260 9261 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 9262 9263 } while ((ctl = ctl->mod_next) != &modules); 9264 9265 mutex_exit(&mod_lock); 9266 #endif 9267 } while (all && (prv = prv->dtpv_next) != NULL); 9268 } 9269 9270 #ifdef illumos 9271 /* 9272 * Iterate over each probe, and call the Framework-to-Provider API function 9273 * denoted by offs. 9274 */ 9275 static void 9276 dtrace_probe_foreach(uintptr_t offs) 9277 { 9278 dtrace_provider_t *prov; 9279 void (*func)(void *, dtrace_id_t, void *); 9280 dtrace_probe_t *probe; 9281 dtrace_icookie_t cookie; 9282 int i; 9283 9284 /* 9285 * We disable interrupts to walk through the probe array. This is 9286 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 9287 * won't see stale data. 9288 */ 9289 cookie = dtrace_interrupt_disable(); 9290 9291 for (i = 0; i < dtrace_nprobes; i++) { 9292 if ((probe = dtrace_probes[i]) == NULL) 9293 continue; 9294 9295 if (probe->dtpr_ecb == NULL) { 9296 /* 9297 * This probe isn't enabled -- don't call the function. 9298 */ 9299 continue; 9300 } 9301 9302 prov = probe->dtpr_provider; 9303 func = *((void(**)(void *, dtrace_id_t, void *)) 9304 ((uintptr_t)&prov->dtpv_pops + offs)); 9305 9306 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 9307 } 9308 9309 dtrace_interrupt_enable(cookie); 9310 } 9311 #endif 9312 9313 static int 9314 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 9315 { 9316 dtrace_probekey_t pkey; 9317 uint32_t priv; 9318 uid_t uid; 9319 zoneid_t zoneid; 9320 9321 ASSERT(MUTEX_HELD(&dtrace_lock)); 9322 dtrace_ecb_create_cache = NULL; 9323 9324 if (desc == NULL) { 9325 /* 9326 * If we're passed a NULL description, we're being asked to 9327 * create an ECB with a NULL probe. 
9328 */ 9329 (void) dtrace_ecb_create_enable(NULL, enab); 9330 return (0); 9331 } 9332 9333 dtrace_probekey(desc, &pkey); 9334 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 9335 &priv, &uid, &zoneid); 9336 9337 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 9338 enab)); 9339 } 9340 9341 /* 9342 * DTrace Helper Provider Functions 9343 */ 9344 static void 9345 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 9346 { 9347 attr->dtat_name = DOF_ATTR_NAME(dofattr); 9348 attr->dtat_data = DOF_ATTR_DATA(dofattr); 9349 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 9350 } 9351 9352 static void 9353 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 9354 const dof_provider_t *dofprov, char *strtab) 9355 { 9356 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 9357 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 9358 dofprov->dofpv_provattr); 9359 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 9360 dofprov->dofpv_modattr); 9361 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 9362 dofprov->dofpv_funcattr); 9363 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 9364 dofprov->dofpv_nameattr); 9365 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 9366 dofprov->dofpv_argsattr); 9367 } 9368 9369 static void 9370 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9371 { 9372 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9373 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9374 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 9375 dof_provider_t *provider; 9376 dof_probe_t *probe; 9377 uint32_t *off, *enoff; 9378 uint8_t *arg; 9379 char *strtab; 9380 uint_t i, nprobes; 9381 dtrace_helper_provdesc_t dhpv; 9382 dtrace_helper_probedesc_t dhpb; 9383 dtrace_meta_t *meta = dtrace_meta_pid; 9384 dtrace_mops_t *mops = &meta->dtm_mops; 9385 void *parg; 9386 9387 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9388 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9389 provider->dofpv_strtab * dof->dofh_secsize); 9390 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9391 provider->dofpv_probes * dof->dofh_secsize); 9392 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9393 provider->dofpv_prargs * dof->dofh_secsize); 9394 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9395 provider->dofpv_proffs * dof->dofh_secsize); 9396 9397 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9398 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 9399 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 9400 enoff = NULL; 9401 9402 /* 9403 * See dtrace_helper_provider_validate(). 9404 */ 9405 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 9406 provider->dofpv_prenoffs != DOF_SECT_NONE) { 9407 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9408 provider->dofpv_prenoffs * dof->dofh_secsize); 9409 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 9410 } 9411 9412 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 9413 9414 /* 9415 * Create the provider. 9416 */ 9417 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9418 9419 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 9420 return; 9421 9422 meta->dtm_count++; 9423 9424 /* 9425 * Create the probes. 9426 */ 9427 for (i = 0; i < nprobes; i++) { 9428 probe = (dof_probe_t *)(uintptr_t)(daddr + 9429 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 9430 9431 /* See the check in dtrace_helper_provider_validate(). 
*/ 9432 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) 9433 continue; 9434 9435 dhpb.dthpb_mod = dhp->dofhp_mod; 9436 dhpb.dthpb_func = strtab + probe->dofpr_func; 9437 dhpb.dthpb_name = strtab + probe->dofpr_name; 9438 dhpb.dthpb_base = probe->dofpr_addr; 9439 dhpb.dthpb_offs = off + probe->dofpr_offidx; 9440 dhpb.dthpb_noffs = probe->dofpr_noffs; 9441 if (enoff != NULL) { 9442 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 9443 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 9444 } else { 9445 dhpb.dthpb_enoffs = NULL; 9446 dhpb.dthpb_nenoffs = 0; 9447 } 9448 dhpb.dthpb_args = arg + probe->dofpr_argidx; 9449 dhpb.dthpb_nargc = probe->dofpr_nargc; 9450 dhpb.dthpb_xargc = probe->dofpr_xargc; 9451 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 9452 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 9453 9454 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 9455 } 9456 } 9457 9458 static void 9459 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 9460 { 9461 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9462 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9463 int i; 9464 9465 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9466 9467 for (i = 0; i < dof->dofh_secnum; i++) { 9468 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9469 dof->dofh_secoff + i * dof->dofh_secsize); 9470 9471 if (sec->dofs_type != DOF_SECT_PROVIDER) 9472 continue; 9473 9474 dtrace_helper_provide_one(dhp, sec, pid); 9475 } 9476 9477 /* 9478 * We may have just created probes, so we must now rematch against 9479 * any retained enablings. Note that this call will acquire both 9480 * cpu_lock and dtrace_lock; the fact that we are holding 9481 * dtrace_meta_lock now is what defines the ordering with respect to 9482 * these three locks. 9483 */ 9484 dtrace_enabling_matchall(); 9485 } 9486 9487 static void 9488 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9489 { 9490 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9491 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9492 dof_sec_t *str_sec; 9493 dof_provider_t *provider; 9494 char *strtab; 9495 dtrace_helper_provdesc_t dhpv; 9496 dtrace_meta_t *meta = dtrace_meta_pid; 9497 dtrace_mops_t *mops = &meta->dtm_mops; 9498 9499 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9500 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9501 provider->dofpv_strtab * dof->dofh_secsize); 9502 9503 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9504 9505 /* 9506 * Create the provider. 9507 */ 9508 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9509 9510 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 9511 9512 meta->dtm_count--; 9513 } 9514 9515 static void 9516 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 9517 { 9518 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9519 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9520 int i; 9521 9522 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9523 9524 for (i = 0; i < dof->dofh_secnum; i++) { 9525 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9526 dof->dofh_secoff + i * dof->dofh_secsize); 9527 9528 if (sec->dofs_type != DOF_SECT_PROVIDER) 9529 continue; 9530 9531 dtrace_helper_provider_remove_one(dhp, sec, pid); 9532 } 9533 } 9534 9535 /* 9536 * DTrace Meta Provider-to-Framework API Functions 9537 * 9538 * These functions implement the Meta Provider-to-Framework API, as described 9539 * in <sys/dtrace.h>. 
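 *
 * A user-land meta provider registers itself roughly as follows
 * (hypothetical sketch; "mymeta" and its callbacks are illustrative):
 *
 *	dtrace_mops_t mops;
 *	dtrace_meta_provider_id_t mymeta_id;
 *
 *	bzero(&mops, sizeof (mops));
 *	mops.dtms_create_probe = mymeta_create_probe;
 *	mops.dtms_provide_pid = mymeta_provide_pid;
 *	mops.dtms_remove_pid = mymeta_remove_pid;
 *
 *	if (dtrace_meta_register("mymeta", &mops, NULL, &mymeta_id) != 0)
 *		return (DDI_FAILURE);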
9540 */ 9541 int 9542 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 9543 dtrace_meta_provider_id_t *idp) 9544 { 9545 dtrace_meta_t *meta; 9546 dtrace_helpers_t *help, *next; 9547 int i; 9548 9549 *idp = DTRACE_METAPROVNONE; 9550 9551 /* 9552 * We strictly don't need the name, but we hold onto it for 9553 * debuggability. All hail error queues! 9554 */ 9555 if (name == NULL) { 9556 cmn_err(CE_WARN, "failed to register meta-provider: " 9557 "invalid name"); 9558 return (EINVAL); 9559 } 9560 9561 if (mops == NULL || 9562 mops->dtms_create_probe == NULL || 9563 mops->dtms_provide_pid == NULL || 9564 mops->dtms_remove_pid == NULL) { 9565 cmn_err(CE_WARN, "failed to register meta-register %s: " 9566 "invalid ops", name); 9567 return (EINVAL); 9568 } 9569 9570 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 9571 meta->dtm_mops = *mops; 9572 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 9573 (void) strcpy(meta->dtm_name, name); 9574 meta->dtm_arg = arg; 9575 9576 mutex_enter(&dtrace_meta_lock); 9577 mutex_enter(&dtrace_lock); 9578 9579 if (dtrace_meta_pid != NULL) { 9580 mutex_exit(&dtrace_lock); 9581 mutex_exit(&dtrace_meta_lock); 9582 cmn_err(CE_WARN, "failed to register meta-register %s: " 9583 "user-land meta-provider exists", name); 9584 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 9585 kmem_free(meta, sizeof (dtrace_meta_t)); 9586 return (EINVAL); 9587 } 9588 9589 dtrace_meta_pid = meta; 9590 *idp = (dtrace_meta_provider_id_t)meta; 9591 9592 /* 9593 * If there are providers and probes ready to go, pass them 9594 * off to the new meta provider now. 9595 */ 9596 9597 help = dtrace_deferred_pid; 9598 dtrace_deferred_pid = NULL; 9599 9600 mutex_exit(&dtrace_lock); 9601 9602 while (help != NULL) { 9603 for (i = 0; i < help->dthps_nprovs; i++) { 9604 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 9605 help->dthps_pid); 9606 } 9607 9608 next = help->dthps_next; 9609 help->dthps_next = NULL; 9610 help->dthps_prev = NULL; 9611 help->dthps_deferred = 0; 9612 help = next; 9613 } 9614 9615 mutex_exit(&dtrace_meta_lock); 9616 9617 return (0); 9618 } 9619 9620 int 9621 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 9622 { 9623 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 9624 9625 mutex_enter(&dtrace_meta_lock); 9626 mutex_enter(&dtrace_lock); 9627 9628 if (old == dtrace_meta_pid) { 9629 pp = &dtrace_meta_pid; 9630 } else { 9631 panic("attempt to unregister non-existent " 9632 "dtrace meta-provider %p\n", (void *)old); 9633 } 9634 9635 if (old->dtm_count != 0) { 9636 mutex_exit(&dtrace_lock); 9637 mutex_exit(&dtrace_meta_lock); 9638 return (EBUSY); 9639 } 9640 9641 *pp = NULL; 9642 9643 mutex_exit(&dtrace_lock); 9644 mutex_exit(&dtrace_meta_lock); 9645 9646 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 9647 kmem_free(old, sizeof (dtrace_meta_t)); 9648 9649 return (0); 9650 } 9651 9652 9653 /* 9654 * DTrace DIF Object Functions 9655 */ 9656 static int 9657 dtrace_difo_err(uint_t pc, const char *format, ...) 9658 { 9659 if (dtrace_err_verbose) { 9660 va_list alist; 9661 9662 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 9663 va_start(alist, format); 9664 (void) vuprintf(format, alist); 9665 va_end(alist); 9666 } 9667 9668 #ifdef DTRACE_ERRDEBUG 9669 dtrace_errdebug(format); 9670 #endif 9671 return (1); 9672 } 9673 9674 /* 9675 * Validate a DTrace DIF object by checking the IR instructions. The following 9676 * rules are currently enforced by dtrace_difo_validate(): 9677 * 9678 * 1. 
Each instruction must have a valid opcode 9679 * 2. Each register, string, variable, or subroutine reference must be valid 9680 * 3. No instruction can modify register %r0 (must be zero) 9681 * 4. All instruction reserved bits must be set to zero 9682 * 5. The last instruction must be a "ret" instruction 9683 * 6. All branch targets must reference a valid instruction _after_ the branch 9684 */ 9685 static int 9686 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 9687 cred_t *cr) 9688 { 9689 int err = 0, i; 9690 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 9691 int kcheckload; 9692 uint_t pc; 9693 int maxglobal = -1, maxlocal = -1, maxtlocal = -1; 9694 9695 kcheckload = cr == NULL || 9696 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 9697 9698 dp->dtdo_destructive = 0; 9699 9700 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 9701 dif_instr_t instr = dp->dtdo_buf[pc]; 9702 9703 uint_t r1 = DIF_INSTR_R1(instr); 9704 uint_t r2 = DIF_INSTR_R2(instr); 9705 uint_t rd = DIF_INSTR_RD(instr); 9706 uint_t rs = DIF_INSTR_RS(instr); 9707 uint_t label = DIF_INSTR_LABEL(instr); 9708 uint_t v = DIF_INSTR_VAR(instr); 9709 uint_t subr = DIF_INSTR_SUBR(instr); 9710 uint_t type = DIF_INSTR_TYPE(instr); 9711 uint_t op = DIF_INSTR_OP(instr); 9712 9713 switch (op) { 9714 case DIF_OP_OR: 9715 case DIF_OP_XOR: 9716 case DIF_OP_AND: 9717 case DIF_OP_SLL: 9718 case DIF_OP_SRL: 9719 case DIF_OP_SRA: 9720 case DIF_OP_SUB: 9721 case DIF_OP_ADD: 9722 case DIF_OP_MUL: 9723 case DIF_OP_SDIV: 9724 case DIF_OP_UDIV: 9725 case DIF_OP_SREM: 9726 case DIF_OP_UREM: 9727 case DIF_OP_COPYS: 9728 if (r1 >= nregs) 9729 err += efunc(pc, "invalid register %u\n", r1); 9730 if (r2 >= nregs) 9731 err += efunc(pc, "invalid register %u\n", r2); 9732 if (rd >= nregs) 9733 err += efunc(pc, "invalid register %u\n", rd); 9734 if (rd == 0) 9735 err += efunc(pc, "cannot write to %r0\n"); 9736 break; 9737 case DIF_OP_NOT: 9738 case DIF_OP_MOV: 9739 case DIF_OP_ALLOCS: 9740 if (r1 >= nregs) 9741 err += efunc(pc, "invalid register %u\n", r1); 9742 if (r2 != 0) 9743 err += efunc(pc, "non-zero reserved bits\n"); 9744 if (rd >= nregs) 9745 err += efunc(pc, "invalid register %u\n", rd); 9746 if (rd == 0) 9747 err += efunc(pc, "cannot write to %r0\n"); 9748 break; 9749 case DIF_OP_LDSB: 9750 case DIF_OP_LDSH: 9751 case DIF_OP_LDSW: 9752 case DIF_OP_LDUB: 9753 case DIF_OP_LDUH: 9754 case DIF_OP_LDUW: 9755 case DIF_OP_LDX: 9756 if (r1 >= nregs) 9757 err += efunc(pc, "invalid register %u\n", r1); 9758 if (r2 != 0) 9759 err += efunc(pc, "non-zero reserved bits\n"); 9760 if (rd >= nregs) 9761 err += efunc(pc, "invalid register %u\n", rd); 9762 if (rd == 0) 9763 err += efunc(pc, "cannot write to %r0\n"); 9764 if (kcheckload) 9765 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 9766 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 9767 break; 9768 case DIF_OP_RLDSB: 9769 case DIF_OP_RLDSH: 9770 case DIF_OP_RLDSW: 9771 case DIF_OP_RLDUB: 9772 case DIF_OP_RLDUH: 9773 case DIF_OP_RLDUW: 9774 case DIF_OP_RLDX: 9775 if (r1 >= nregs) 9776 err += efunc(pc, "invalid register %u\n", r1); 9777 if (r2 != 0) 9778 err += efunc(pc, "non-zero reserved bits\n"); 9779 if (rd >= nregs) 9780 err += efunc(pc, "invalid register %u\n", rd); 9781 if (rd == 0) 9782 err += efunc(pc, "cannot write to %r0\n"); 9783 break; 9784 case DIF_OP_ULDSB: 9785 case DIF_OP_ULDSH: 9786 case DIF_OP_ULDSW: 9787 case DIF_OP_ULDUB: 9788 case DIF_OP_ULDUH: 9789 case DIF_OP_ULDUW: 9790 case DIF_OP_ULDX: 9791 if (r1 >= nregs) 9792 err += efunc(pc, 
"invalid register %u\n", r1); 9793 if (r2 != 0) 9794 err += efunc(pc, "non-zero reserved bits\n"); 9795 if (rd >= nregs) 9796 err += efunc(pc, "invalid register %u\n", rd); 9797 if (rd == 0) 9798 err += efunc(pc, "cannot write to %r0\n"); 9799 break; 9800 case DIF_OP_STB: 9801 case DIF_OP_STH: 9802 case DIF_OP_STW: 9803 case DIF_OP_STX: 9804 if (r1 >= nregs) 9805 err += efunc(pc, "invalid register %u\n", r1); 9806 if (r2 != 0) 9807 err += efunc(pc, "non-zero reserved bits\n"); 9808 if (rd >= nregs) 9809 err += efunc(pc, "invalid register %u\n", rd); 9810 if (rd == 0) 9811 err += efunc(pc, "cannot write to 0 address\n"); 9812 break; 9813 case DIF_OP_CMP: 9814 case DIF_OP_SCMP: 9815 if (r1 >= nregs) 9816 err += efunc(pc, "invalid register %u\n", r1); 9817 if (r2 >= nregs) 9818 err += efunc(pc, "invalid register %u\n", r2); 9819 if (rd != 0) 9820 err += efunc(pc, "non-zero reserved bits\n"); 9821 break; 9822 case DIF_OP_TST: 9823 if (r1 >= nregs) 9824 err += efunc(pc, "invalid register %u\n", r1); 9825 if (r2 != 0 || rd != 0) 9826 err += efunc(pc, "non-zero reserved bits\n"); 9827 break; 9828 case DIF_OP_BA: 9829 case DIF_OP_BE: 9830 case DIF_OP_BNE: 9831 case DIF_OP_BG: 9832 case DIF_OP_BGU: 9833 case DIF_OP_BGE: 9834 case DIF_OP_BGEU: 9835 case DIF_OP_BL: 9836 case DIF_OP_BLU: 9837 case DIF_OP_BLE: 9838 case DIF_OP_BLEU: 9839 if (label >= dp->dtdo_len) { 9840 err += efunc(pc, "invalid branch target %u\n", 9841 label); 9842 } 9843 if (label <= pc) { 9844 err += efunc(pc, "backward branch to %u\n", 9845 label); 9846 } 9847 break; 9848 case DIF_OP_RET: 9849 if (r1 != 0 || r2 != 0) 9850 err += efunc(pc, "non-zero reserved bits\n"); 9851 if (rd >= nregs) 9852 err += efunc(pc, "invalid register %u\n", rd); 9853 break; 9854 case DIF_OP_NOP: 9855 case DIF_OP_POPTS: 9856 case DIF_OP_FLUSHTS: 9857 if (r1 != 0 || r2 != 0 || rd != 0) 9858 err += efunc(pc, "non-zero reserved bits\n"); 9859 break; 9860 case DIF_OP_SETX: 9861 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 9862 err += efunc(pc, "invalid integer ref %u\n", 9863 DIF_INSTR_INTEGER(instr)); 9864 } 9865 if (rd >= nregs) 9866 err += efunc(pc, "invalid register %u\n", rd); 9867 if (rd == 0) 9868 err += efunc(pc, "cannot write to %r0\n"); 9869 break; 9870 case DIF_OP_SETS: 9871 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 9872 err += efunc(pc, "invalid string ref %u\n", 9873 DIF_INSTR_STRING(instr)); 9874 } 9875 if (rd >= nregs) 9876 err += efunc(pc, "invalid register %u\n", rd); 9877 if (rd == 0) 9878 err += efunc(pc, "cannot write to %r0\n"); 9879 break; 9880 case DIF_OP_LDGA: 9881 case DIF_OP_LDTA: 9882 if (r1 > DIF_VAR_ARRAY_MAX) 9883 err += efunc(pc, "invalid array %u\n", r1); 9884 if (r2 >= nregs) 9885 err += efunc(pc, "invalid register %u\n", r2); 9886 if (rd >= nregs) 9887 err += efunc(pc, "invalid register %u\n", rd); 9888 if (rd == 0) 9889 err += efunc(pc, "cannot write to %r0\n"); 9890 break; 9891 case DIF_OP_LDGS: 9892 case DIF_OP_LDTS: 9893 case DIF_OP_LDLS: 9894 case DIF_OP_LDGAA: 9895 case DIF_OP_LDTAA: 9896 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 9897 err += efunc(pc, "invalid variable %u\n", v); 9898 if (rd >= nregs) 9899 err += efunc(pc, "invalid register %u\n", rd); 9900 if (rd == 0) 9901 err += efunc(pc, "cannot write to %r0\n"); 9902 break; 9903 case DIF_OP_STGS: 9904 case DIF_OP_STTS: 9905 case DIF_OP_STLS: 9906 case DIF_OP_STGAA: 9907 case DIF_OP_STTAA: 9908 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 9909 err += efunc(pc, "invalid variable %u\n", v); 9910 if (rs >= nregs) 9911 err += efunc(pc, 
"invalid register %u\n", rd); 9912 break; 9913 case DIF_OP_CALL: 9914 if (subr > DIF_SUBR_MAX) 9915 err += efunc(pc, "invalid subr %u\n", subr); 9916 if (rd >= nregs) 9917 err += efunc(pc, "invalid register %u\n", rd); 9918 if (rd == 0) 9919 err += efunc(pc, "cannot write to %r0\n"); 9920 9921 if (subr == DIF_SUBR_COPYOUT || 9922 subr == DIF_SUBR_COPYOUTSTR) { 9923 dp->dtdo_destructive = 1; 9924 } 9925 9926 if (subr == DIF_SUBR_GETF) { 9927 /* 9928 * If we have a getf() we need to record that 9929 * in our state. Note that our state can be 9930 * NULL if this is a helper -- but in that 9931 * case, the call to getf() is itself illegal, 9932 * and will be caught (slightly later) when 9933 * the helper is validated. 9934 */ 9935 if (vstate->dtvs_state != NULL) 9936 vstate->dtvs_state->dts_getf++; 9937 } 9938 9939 break; 9940 case DIF_OP_PUSHTR: 9941 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 9942 err += efunc(pc, "invalid ref type %u\n", type); 9943 if (r2 >= nregs) 9944 err += efunc(pc, "invalid register %u\n", r2); 9945 if (rs >= nregs) 9946 err += efunc(pc, "invalid register %u\n", rs); 9947 break; 9948 case DIF_OP_PUSHTV: 9949 if (type != DIF_TYPE_CTF) 9950 err += efunc(pc, "invalid val type %u\n", type); 9951 if (r2 >= nregs) 9952 err += efunc(pc, "invalid register %u\n", r2); 9953 if (rs >= nregs) 9954 err += efunc(pc, "invalid register %u\n", rs); 9955 break; 9956 default: 9957 err += efunc(pc, "invalid opcode %u\n", 9958 DIF_INSTR_OP(instr)); 9959 } 9960 } 9961 9962 if (dp->dtdo_len != 0 && 9963 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 9964 err += efunc(dp->dtdo_len - 1, 9965 "expected 'ret' as last DIF instruction\n"); 9966 } 9967 9968 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { 9969 /* 9970 * If we're not returning by reference, the size must be either 9971 * 0 or the size of one of the base types. 9972 */ 9973 switch (dp->dtdo_rtype.dtdt_size) { 9974 case 0: 9975 case sizeof (uint8_t): 9976 case sizeof (uint16_t): 9977 case sizeof (uint32_t): 9978 case sizeof (uint64_t): 9979 break; 9980 9981 default: 9982 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 9983 } 9984 } 9985 9986 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 9987 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 9988 dtrace_diftype_t *vt, *et; 9989 uint_t id, ndx; 9990 9991 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 9992 v->dtdv_scope != DIFV_SCOPE_THREAD && 9993 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 9994 err += efunc(i, "unrecognized variable scope %d\n", 9995 v->dtdv_scope); 9996 break; 9997 } 9998 9999 if (v->dtdv_kind != DIFV_KIND_ARRAY && 10000 v->dtdv_kind != DIFV_KIND_SCALAR) { 10001 err += efunc(i, "unrecognized variable type %d\n", 10002 v->dtdv_kind); 10003 break; 10004 } 10005 10006 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 10007 err += efunc(i, "%d exceeds variable id limit\n", id); 10008 break; 10009 } 10010 10011 if (id < DIF_VAR_OTHER_UBASE) 10012 continue; 10013 10014 /* 10015 * For user-defined variables, we need to check that this 10016 * definition is identical to any previous definition that we 10017 * encountered. 
10018 */ 10019 ndx = id - DIF_VAR_OTHER_UBASE; 10020 10021 switch (v->dtdv_scope) { 10022 case DIFV_SCOPE_GLOBAL: 10023 if (maxglobal == -1 || ndx > maxglobal) 10024 maxglobal = ndx; 10025 10026 if (ndx < vstate->dtvs_nglobals) { 10027 dtrace_statvar_t *svar; 10028 10029 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 10030 existing = &svar->dtsv_var; 10031 } 10032 10033 break; 10034 10035 case DIFV_SCOPE_THREAD: 10036 if (maxtlocal == -1 || ndx > maxtlocal) 10037 maxtlocal = ndx; 10038 10039 if (ndx < vstate->dtvs_ntlocals) 10040 existing = &vstate->dtvs_tlocals[ndx]; 10041 break; 10042 10043 case DIFV_SCOPE_LOCAL: 10044 if (maxlocal == -1 || ndx > maxlocal) 10045 maxlocal = ndx; 10046 10047 if (ndx < vstate->dtvs_nlocals) { 10048 dtrace_statvar_t *svar; 10049 10050 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 10051 existing = &svar->dtsv_var; 10052 } 10053 10054 break; 10055 } 10056 10057 vt = &v->dtdv_type; 10058 10059 if (vt->dtdt_flags & DIF_TF_BYREF) { 10060 if (vt->dtdt_size == 0) { 10061 err += efunc(i, "zero-sized variable\n"); 10062 break; 10063 } 10064 10065 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL || 10066 v->dtdv_scope == DIFV_SCOPE_LOCAL) && 10067 vt->dtdt_size > dtrace_statvar_maxsize) { 10068 err += efunc(i, "oversized by-ref static\n"); 10069 break; 10070 } 10071 } 10072 10073 if (existing == NULL || existing->dtdv_id == 0) 10074 continue; 10075 10076 ASSERT(existing->dtdv_id == v->dtdv_id); 10077 ASSERT(existing->dtdv_scope == v->dtdv_scope); 10078 10079 if (existing->dtdv_kind != v->dtdv_kind) 10080 err += efunc(i, "%d changed variable kind\n", id); 10081 10082 et = &existing->dtdv_type; 10083 10084 if (vt->dtdt_flags != et->dtdt_flags) { 10085 err += efunc(i, "%d changed variable type flags\n", id); 10086 break; 10087 } 10088 10089 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 10090 err += efunc(i, "%d changed variable type size\n", id); 10091 break; 10092 } 10093 } 10094 10095 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 10096 dif_instr_t instr = dp->dtdo_buf[pc]; 10097 10098 uint_t v = DIF_INSTR_VAR(instr); 10099 uint_t op = DIF_INSTR_OP(instr); 10100 10101 switch (op) { 10102 case DIF_OP_LDGS: 10103 case DIF_OP_LDGAA: 10104 case DIF_OP_STGS: 10105 case DIF_OP_STGAA: 10106 if (v > DIF_VAR_OTHER_UBASE + maxglobal) 10107 err += efunc(pc, "invalid variable %u\n", v); 10108 break; 10109 case DIF_OP_LDTS: 10110 case DIF_OP_LDTAA: 10111 case DIF_OP_STTS: 10112 case DIF_OP_STTAA: 10113 if (v > DIF_VAR_OTHER_UBASE + maxtlocal) 10114 err += efunc(pc, "invalid variable %u\n", v); 10115 break; 10116 case DIF_OP_LDLS: 10117 case DIF_OP_STLS: 10118 if (v > DIF_VAR_OTHER_UBASE + maxlocal) 10119 err += efunc(pc, "invalid variable %u\n", v); 10120 break; 10121 default: 10122 break; 10123 } 10124 } 10125 10126 return (err); 10127 } 10128 10129 /* 10130 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 10131 * are much more constrained than normal DIFOs. Specifically, they may 10132 * not: 10133 * 10134 * 1. Make calls to subroutines other than copyin(), copyinstr() or 10135 * miscellaneous string routines 10136 * 2. Access DTrace variables other than the args[] array, and the 10137 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 10138 * 3. Have thread-local variables. 10139 * 4. Have dynamic variables. 10140 */ 10141 static int 10142 dtrace_difo_validate_helper(dtrace_difo_t *dp) 10143 { 10144 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 10145 int err = 0; 10146 uint_t pc; 10147 10148 for (pc = 0; pc < dp->dtdo_len; pc++) { 10149 dif_instr_t instr = dp->dtdo_buf[pc]; 10150 10151 uint_t v = DIF_INSTR_VAR(instr); 10152 uint_t subr = DIF_INSTR_SUBR(instr); 10153 uint_t op = DIF_INSTR_OP(instr); 10154 10155 switch (op) { 10156 case DIF_OP_OR: 10157 case DIF_OP_XOR: 10158 case DIF_OP_AND: 10159 case DIF_OP_SLL: 10160 case DIF_OP_SRL: 10161 case DIF_OP_SRA: 10162 case DIF_OP_SUB: 10163 case DIF_OP_ADD: 10164 case DIF_OP_MUL: 10165 case DIF_OP_SDIV: 10166 case DIF_OP_UDIV: 10167 case DIF_OP_SREM: 10168 case DIF_OP_UREM: 10169 case DIF_OP_COPYS: 10170 case DIF_OP_NOT: 10171 case DIF_OP_MOV: 10172 case DIF_OP_RLDSB: 10173 case DIF_OP_RLDSH: 10174 case DIF_OP_RLDSW: 10175 case DIF_OP_RLDUB: 10176 case DIF_OP_RLDUH: 10177 case DIF_OP_RLDUW: 10178 case DIF_OP_RLDX: 10179 case DIF_OP_ULDSB: 10180 case DIF_OP_ULDSH: 10181 case DIF_OP_ULDSW: 10182 case DIF_OP_ULDUB: 10183 case DIF_OP_ULDUH: 10184 case DIF_OP_ULDUW: 10185 case DIF_OP_ULDX: 10186 case DIF_OP_STB: 10187 case DIF_OP_STH: 10188 case DIF_OP_STW: 10189 case DIF_OP_STX: 10190 case DIF_OP_ALLOCS: 10191 case DIF_OP_CMP: 10192 case DIF_OP_SCMP: 10193 case DIF_OP_TST: 10194 case DIF_OP_BA: 10195 case DIF_OP_BE: 10196 case DIF_OP_BNE: 10197 case DIF_OP_BG: 10198 case DIF_OP_BGU: 10199 case DIF_OP_BGE: 10200 case DIF_OP_BGEU: 10201 case DIF_OP_BL: 10202 case DIF_OP_BLU: 10203 case DIF_OP_BLE: 10204 case DIF_OP_BLEU: 10205 case DIF_OP_RET: 10206 case DIF_OP_NOP: 10207 case DIF_OP_POPTS: 10208 case DIF_OP_FLUSHTS: 10209 case DIF_OP_SETX: 10210 case DIF_OP_SETS: 10211 case DIF_OP_LDGA: 10212 case DIF_OP_LDLS: 10213 case DIF_OP_STGS: 10214 case DIF_OP_STLS: 10215 case DIF_OP_PUSHTR: 10216 case DIF_OP_PUSHTV: 10217 break; 10218 10219 case DIF_OP_LDGS: 10220 if (v >= DIF_VAR_OTHER_UBASE) 10221 break; 10222 10223 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 10224 break; 10225 10226 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 10227 v == DIF_VAR_PPID || v == DIF_VAR_TID || 10228 v == DIF_VAR_EXECARGS || 10229 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 10230 v == DIF_VAR_UID || v == DIF_VAR_GID) 10231 break; 10232 10233 err += efunc(pc, "illegal variable %u\n", v); 10234 break; 10235 10236 case DIF_OP_LDTA: 10237 case DIF_OP_LDTS: 10238 case DIF_OP_LDGAA: 10239 case DIF_OP_LDTAA: 10240 err += efunc(pc, "illegal dynamic variable load\n"); 10241 break; 10242 10243 case DIF_OP_STTS: 10244 case DIF_OP_STGAA: 10245 case DIF_OP_STTAA: 10246 err += efunc(pc, "illegal dynamic variable store\n"); 10247 break; 10248 10249 case DIF_OP_CALL: 10250 if (subr == DIF_SUBR_ALLOCA || 10251 subr == DIF_SUBR_BCOPY || 10252 subr == DIF_SUBR_COPYIN || 10253 subr == DIF_SUBR_COPYINTO || 10254 subr == DIF_SUBR_COPYINSTR || 10255 subr == DIF_SUBR_INDEX || 10256 subr == DIF_SUBR_INET_NTOA || 10257 subr == DIF_SUBR_INET_NTOA6 || 10258 subr == DIF_SUBR_INET_NTOP || 10259 subr == DIF_SUBR_JSON || 10260 subr == DIF_SUBR_LLTOSTR || 10261 subr == DIF_SUBR_STRTOLL || 10262 subr == DIF_SUBR_RINDEX || 10263 subr == DIF_SUBR_STRCHR || 10264 subr == DIF_SUBR_STRJOIN || 10265 subr == DIF_SUBR_STRRCHR || 10266 subr == DIF_SUBR_STRSTR || 10267 subr == DIF_SUBR_HTONS || 10268 subr == DIF_SUBR_HTONL || 10269 subr == DIF_SUBR_HTONLL || 10270 subr == DIF_SUBR_NTOHS || 10271 subr == DIF_SUBR_NTOHL || 10272 subr == DIF_SUBR_NTOHLL || 10273 subr == DIF_SUBR_MEMREF) 10274 break; 10275 #ifdef __FreeBSD__ 10276 if (subr == DIF_SUBR_MEMSTR) 10277 break; 10278 #endif 10279 10280 err += efunc(pc, "invalid subr 
%u\n", subr); 10281 break; 10282 10283 default: 10284 err += efunc(pc, "invalid opcode %u\n", 10285 DIF_INSTR_OP(instr)); 10286 } 10287 } 10288 10289 return (err); 10290 } 10291 10292 /* 10293 * Returns 1 if the expression in the DIF object can be cached on a per-thread 10294 * basis; 0 if not. 10295 */ 10296 static int 10297 dtrace_difo_cacheable(dtrace_difo_t *dp) 10298 { 10299 int i; 10300 10301 if (dp == NULL) 10302 return (0); 10303 10304 for (i = 0; i < dp->dtdo_varlen; i++) { 10305 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10306 10307 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 10308 continue; 10309 10310 switch (v->dtdv_id) { 10311 case DIF_VAR_CURTHREAD: 10312 case DIF_VAR_PID: 10313 case DIF_VAR_TID: 10314 case DIF_VAR_EXECARGS: 10315 case DIF_VAR_EXECNAME: 10316 case DIF_VAR_ZONENAME: 10317 break; 10318 10319 default: 10320 return (0); 10321 } 10322 } 10323 10324 /* 10325 * This DIF object may be cacheable. Now we need to look for any 10326 * array loading instructions, any memory loading instructions, or 10327 * any stores to thread-local variables. 10328 */ 10329 for (i = 0; i < dp->dtdo_len; i++) { 10330 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 10331 10332 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 10333 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 10334 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 10335 op == DIF_OP_LDGA || op == DIF_OP_STTS) 10336 return (0); 10337 } 10338 10339 return (1); 10340 } 10341 10342 static void 10343 dtrace_difo_hold(dtrace_difo_t *dp) 10344 { 10345 int i; 10346 10347 ASSERT(MUTEX_HELD(&dtrace_lock)); 10348 10349 dp->dtdo_refcnt++; 10350 ASSERT(dp->dtdo_refcnt != 0); 10351 10352 /* 10353 * We need to check this DIF object for references to the variable 10354 * DIF_VAR_VTIMESTAMP. 10355 */ 10356 for (i = 0; i < dp->dtdo_varlen; i++) { 10357 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10358 10359 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10360 continue; 10361 10362 if (dtrace_vtime_references++ == 0) 10363 dtrace_vtime_enable(); 10364 } 10365 } 10366 10367 /* 10368 * This routine calculates the dynamic variable chunksize for a given DIF 10369 * object. The calculation is not fool-proof, and can probably be tricked by 10370 * malicious DIF -- but it works for all compiler-generated DIF. Because this 10371 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 10372 * if a dynamic variable size exceeds the chunksize. 
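 *
 * Roughly speaking, for each dynamic variable store the loop below
 * computes a candidate chunk size of the form:
 *
 *	sizeof (dtrace_dynvar_t)
 *	    + sizeof (dtrace_key_t) * (nkeys - 1)
 *	    + the key sizes (each rounded up to a uint64_t boundary)
 *	    + the size of the stored value
 *
 * rounds it up to a uint64_t boundary, and grows the state's dynamic
 * variable chunksize to the largest such value seen.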
10373 */ 10374 static void 10375 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10376 { 10377 uint64_t sval = 0; 10378 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 10379 const dif_instr_t *text = dp->dtdo_buf; 10380 uint_t pc, srd = 0; 10381 uint_t ttop = 0; 10382 size_t size, ksize; 10383 uint_t id, i; 10384 10385 for (pc = 0; pc < dp->dtdo_len; pc++) { 10386 dif_instr_t instr = text[pc]; 10387 uint_t op = DIF_INSTR_OP(instr); 10388 uint_t rd = DIF_INSTR_RD(instr); 10389 uint_t r1 = DIF_INSTR_R1(instr); 10390 uint_t nkeys = 0; 10391 uchar_t scope = 0; 10392 10393 dtrace_key_t *key = tupregs; 10394 10395 switch (op) { 10396 case DIF_OP_SETX: 10397 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 10398 srd = rd; 10399 continue; 10400 10401 case DIF_OP_STTS: 10402 key = &tupregs[DIF_DTR_NREGS]; 10403 key[0].dttk_size = 0; 10404 key[1].dttk_size = 0; 10405 nkeys = 2; 10406 scope = DIFV_SCOPE_THREAD; 10407 break; 10408 10409 case DIF_OP_STGAA: 10410 case DIF_OP_STTAA: 10411 nkeys = ttop; 10412 10413 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 10414 key[nkeys++].dttk_size = 0; 10415 10416 key[nkeys++].dttk_size = 0; 10417 10418 if (op == DIF_OP_STTAA) { 10419 scope = DIFV_SCOPE_THREAD; 10420 } else { 10421 scope = DIFV_SCOPE_GLOBAL; 10422 } 10423 10424 break; 10425 10426 case DIF_OP_PUSHTR: 10427 if (ttop == DIF_DTR_NREGS) 10428 return; 10429 10430 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 10431 /* 10432 * If the register for the size of the "pushtr" 10433 * is %r0 (or the value is 0) and the type is 10434 * a string, we'll use the system-wide default 10435 * string size. 10436 */ 10437 tupregs[ttop++].dttk_size = 10438 dtrace_strsize_default; 10439 } else { 10440 if (srd == 0) 10441 return; 10442 10443 if (sval > LONG_MAX) 10444 return; 10445 10446 tupregs[ttop++].dttk_size = sval; 10447 } 10448 10449 break; 10450 10451 case DIF_OP_PUSHTV: 10452 if (ttop == DIF_DTR_NREGS) 10453 return; 10454 10455 tupregs[ttop++].dttk_size = 0; 10456 break; 10457 10458 case DIF_OP_FLUSHTS: 10459 ttop = 0; 10460 break; 10461 10462 case DIF_OP_POPTS: 10463 if (ttop != 0) 10464 ttop--; 10465 break; 10466 } 10467 10468 sval = 0; 10469 srd = 0; 10470 10471 if (nkeys == 0) 10472 continue; 10473 10474 /* 10475 * We have a dynamic variable allocation; calculate its size. 10476 */ 10477 for (ksize = 0, i = 0; i < nkeys; i++) 10478 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 10479 10480 size = sizeof (dtrace_dynvar_t); 10481 size += sizeof (dtrace_key_t) * (nkeys - 1); 10482 size += ksize; 10483 10484 /* 10485 * Now we need to determine the size of the stored data. 10486 */ 10487 id = DIF_INSTR_VAR(instr); 10488 10489 for (i = 0; i < dp->dtdo_varlen; i++) { 10490 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10491 10492 if (v->dtdv_id == id && v->dtdv_scope == scope) { 10493 size += v->dtdv_type.dtdt_size; 10494 break; 10495 } 10496 } 10497 10498 if (i == dp->dtdo_varlen) 10499 return; 10500 10501 /* 10502 * We have the size. If this is larger than the chunk size 10503 * for our dynamic variable state, reset the chunk size. 10504 */ 10505 size = P2ROUNDUP(size, sizeof (uint64_t)); 10506 10507 /* 10508 * Before setting the chunk size, check that we're not going 10509 * to set it to a negative value... 10510 */ 10511 if (size > LONG_MAX) 10512 return; 10513 10514 /* 10515 * ...and make certain that we didn't badly overflow. 
10516 */ 10517 if (size < ksize || size < sizeof (dtrace_dynvar_t)) 10518 return; 10519 10520 if (size > vstate->dtvs_dynvars.dtds_chunksize) 10521 vstate->dtvs_dynvars.dtds_chunksize = size; 10522 } 10523 } 10524 10525 static void 10526 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10527 { 10528 int i, oldsvars, osz, nsz, otlocals, ntlocals; 10529 uint_t id; 10530 10531 ASSERT(MUTEX_HELD(&dtrace_lock)); 10532 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 10533 10534 for (i = 0; i < dp->dtdo_varlen; i++) { 10535 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10536 dtrace_statvar_t *svar, ***svarp = NULL; 10537 size_t dsize = 0; 10538 uint8_t scope = v->dtdv_scope; 10539 int *np = NULL; 10540 10541 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10542 continue; 10543 10544 id -= DIF_VAR_OTHER_UBASE; 10545 10546 switch (scope) { 10547 case DIFV_SCOPE_THREAD: 10548 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 10549 dtrace_difv_t *tlocals; 10550 10551 if ((ntlocals = (otlocals << 1)) == 0) 10552 ntlocals = 1; 10553 10554 osz = otlocals * sizeof (dtrace_difv_t); 10555 nsz = ntlocals * sizeof (dtrace_difv_t); 10556 10557 tlocals = kmem_zalloc(nsz, KM_SLEEP); 10558 10559 if (osz != 0) { 10560 bcopy(vstate->dtvs_tlocals, 10561 tlocals, osz); 10562 kmem_free(vstate->dtvs_tlocals, osz); 10563 } 10564 10565 vstate->dtvs_tlocals = tlocals; 10566 vstate->dtvs_ntlocals = ntlocals; 10567 } 10568 10569 vstate->dtvs_tlocals[id] = *v; 10570 continue; 10571 10572 case DIFV_SCOPE_LOCAL: 10573 np = &vstate->dtvs_nlocals; 10574 svarp = &vstate->dtvs_locals; 10575 10576 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10577 dsize = NCPU * (v->dtdv_type.dtdt_size + 10578 sizeof (uint64_t)); 10579 else 10580 dsize = NCPU * sizeof (uint64_t); 10581 10582 break; 10583 10584 case DIFV_SCOPE_GLOBAL: 10585 np = &vstate->dtvs_nglobals; 10586 svarp = &vstate->dtvs_globals; 10587 10588 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10589 dsize = v->dtdv_type.dtdt_size + 10590 sizeof (uint64_t); 10591 10592 break; 10593 10594 default: 10595 ASSERT(0); 10596 } 10597 10598 while (id >= (oldsvars = *np)) { 10599 dtrace_statvar_t **statics; 10600 int newsvars, oldsize, newsize; 10601 10602 if ((newsvars = (oldsvars << 1)) == 0) 10603 newsvars = 1; 10604 10605 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 10606 newsize = newsvars * sizeof (dtrace_statvar_t *); 10607 10608 statics = kmem_zalloc(newsize, KM_SLEEP); 10609 10610 if (oldsize != 0) { 10611 bcopy(*svarp, statics, oldsize); 10612 kmem_free(*svarp, oldsize); 10613 } 10614 10615 *svarp = statics; 10616 *np = newsvars; 10617 } 10618 10619 if ((svar = (*svarp)[id]) == NULL) { 10620 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 10621 svar->dtsv_var = *v; 10622 10623 if ((svar->dtsv_size = dsize) != 0) { 10624 svar->dtsv_data = (uint64_t)(uintptr_t) 10625 kmem_zalloc(dsize, KM_SLEEP); 10626 } 10627 10628 (*svarp)[id] = svar; 10629 } 10630 10631 svar->dtsv_refcnt++; 10632 } 10633 10634 dtrace_difo_chunksize(dp, vstate); 10635 dtrace_difo_hold(dp); 10636 } 10637 10638 static dtrace_difo_t * 10639 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10640 { 10641 dtrace_difo_t *new; 10642 size_t sz; 10643 10644 ASSERT(dp->dtdo_buf != NULL); 10645 ASSERT(dp->dtdo_refcnt != 0); 10646 10647 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10648 10649 ASSERT(dp->dtdo_buf != NULL); 10650 sz = dp->dtdo_len * sizeof (dif_instr_t); 10651 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 10652 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 10653 new->dtdo_len = 
dp->dtdo_len; 10654 10655 if (dp->dtdo_strtab != NULL) { 10656 ASSERT(dp->dtdo_strlen != 0); 10657 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 10658 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 10659 new->dtdo_strlen = dp->dtdo_strlen; 10660 } 10661 10662 if (dp->dtdo_inttab != NULL) { 10663 ASSERT(dp->dtdo_intlen != 0); 10664 sz = dp->dtdo_intlen * sizeof (uint64_t); 10665 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 10666 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 10667 new->dtdo_intlen = dp->dtdo_intlen; 10668 } 10669 10670 if (dp->dtdo_vartab != NULL) { 10671 ASSERT(dp->dtdo_varlen != 0); 10672 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 10673 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 10674 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 10675 new->dtdo_varlen = dp->dtdo_varlen; 10676 } 10677 10678 dtrace_difo_init(new, vstate); 10679 return (new); 10680 } 10681 10682 static void 10683 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10684 { 10685 int i; 10686 10687 ASSERT(dp->dtdo_refcnt == 0); 10688 10689 for (i = 0; i < dp->dtdo_varlen; i++) { 10690 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10691 dtrace_statvar_t *svar, **svarp = NULL; 10692 uint_t id; 10693 uint8_t scope = v->dtdv_scope; 10694 int *np = NULL; 10695 10696 switch (scope) { 10697 case DIFV_SCOPE_THREAD: 10698 continue; 10699 10700 case DIFV_SCOPE_LOCAL: 10701 np = &vstate->dtvs_nlocals; 10702 svarp = vstate->dtvs_locals; 10703 break; 10704 10705 case DIFV_SCOPE_GLOBAL: 10706 np = &vstate->dtvs_nglobals; 10707 svarp = vstate->dtvs_globals; 10708 break; 10709 10710 default: 10711 ASSERT(0); 10712 } 10713 10714 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10715 continue; 10716 10717 id -= DIF_VAR_OTHER_UBASE; 10718 ASSERT(id < *np); 10719 10720 svar = svarp[id]; 10721 ASSERT(svar != NULL); 10722 ASSERT(svar->dtsv_refcnt > 0); 10723 10724 if (--svar->dtsv_refcnt > 0) 10725 continue; 10726 10727 if (svar->dtsv_size != 0) { 10728 ASSERT(svar->dtsv_data != 0); 10729 kmem_free((void *)(uintptr_t)svar->dtsv_data, 10730 svar->dtsv_size); 10731 } 10732 10733 kmem_free(svar, sizeof (dtrace_statvar_t)); 10734 svarp[id] = NULL; 10735 } 10736 10737 if (dp->dtdo_buf != NULL) 10738 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10739 if (dp->dtdo_inttab != NULL) 10740 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10741 if (dp->dtdo_strtab != NULL) 10742 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10743 if (dp->dtdo_vartab != NULL) 10744 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10745 10746 kmem_free(dp, sizeof (dtrace_difo_t)); 10747 } 10748 10749 static void 10750 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10751 { 10752 int i; 10753 10754 ASSERT(MUTEX_HELD(&dtrace_lock)); 10755 ASSERT(dp->dtdo_refcnt != 0); 10756 10757 for (i = 0; i < dp->dtdo_varlen; i++) { 10758 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10759 10760 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10761 continue; 10762 10763 ASSERT(dtrace_vtime_references > 0); 10764 if (--dtrace_vtime_references == 0) 10765 dtrace_vtime_disable(); 10766 } 10767 10768 if (--dp->dtdo_refcnt == 0) 10769 dtrace_difo_destroy(dp, vstate); 10770 } 10771 10772 /* 10773 * DTrace Format Functions 10774 */ 10775 static uint16_t 10776 dtrace_format_add(dtrace_state_t *state, char *str) 10777 { 10778 char *fmt, **new; 10779 uint16_t ndx, len = strlen(str) + 1; 10780 10781 fmt = kmem_zalloc(len, KM_SLEEP); 10782 bcopy(str, fmt, len); 10783 10784 for (ndx = 0; ndx < 
state->dts_nformats; ndx++) { 10785 if (state->dts_formats[ndx] == NULL) { 10786 state->dts_formats[ndx] = fmt; 10787 return (ndx + 1); 10788 } 10789 } 10790 10791 if (state->dts_nformats == USHRT_MAX) { 10792 /* 10793 * This is only likely if a denial-of-service attack is being 10794 * attempted. As such, it's okay to fail silently here. 10795 */ 10796 kmem_free(fmt, len); 10797 return (0); 10798 } 10799 10800 /* 10801 * For simplicity, we always resize the formats array to be exactly the 10802 * number of formats. 10803 */ 10804 ndx = state->dts_nformats++; 10805 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 10806 10807 if (state->dts_formats != NULL) { 10808 ASSERT(ndx != 0); 10809 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 10810 kmem_free(state->dts_formats, ndx * sizeof (char *)); 10811 } 10812 10813 state->dts_formats = new; 10814 state->dts_formats[ndx] = fmt; 10815 10816 return (ndx + 1); 10817 } 10818 10819 static void 10820 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 10821 { 10822 char *fmt; 10823 10824 ASSERT(state->dts_formats != NULL); 10825 ASSERT(format <= state->dts_nformats); 10826 ASSERT(state->dts_formats[format - 1] != NULL); 10827 10828 fmt = state->dts_formats[format - 1]; 10829 kmem_free(fmt, strlen(fmt) + 1); 10830 state->dts_formats[format - 1] = NULL; 10831 } 10832 10833 static void 10834 dtrace_format_destroy(dtrace_state_t *state) 10835 { 10836 int i; 10837 10838 if (state->dts_nformats == 0) { 10839 ASSERT(state->dts_formats == NULL); 10840 return; 10841 } 10842 10843 ASSERT(state->dts_formats != NULL); 10844 10845 for (i = 0; i < state->dts_nformats; i++) { 10846 char *fmt = state->dts_formats[i]; 10847 10848 if (fmt == NULL) 10849 continue; 10850 10851 kmem_free(fmt, strlen(fmt) + 1); 10852 } 10853 10854 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 10855 state->dts_nformats = 0; 10856 state->dts_formats = NULL; 10857 } 10858 10859 /* 10860 * DTrace Predicate Functions 10861 */ 10862 static dtrace_predicate_t * 10863 dtrace_predicate_create(dtrace_difo_t *dp) 10864 { 10865 dtrace_predicate_t *pred; 10866 10867 ASSERT(MUTEX_HELD(&dtrace_lock)); 10868 ASSERT(dp->dtdo_refcnt != 0); 10869 10870 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 10871 pred->dtp_difo = dp; 10872 pred->dtp_refcnt = 1; 10873 10874 if (!dtrace_difo_cacheable(dp)) 10875 return (pred); 10876 10877 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 10878 /* 10879 * This is only theoretically possible -- we have had 2^32 10880 * cacheable predicates on this machine. We cannot allow any 10881 * more predicates to become cacheable: as unlikely as it is, 10882 * there may be a thread caching a (now stale) predicate cache 10883 * ID. 
(N.B.: the temptation is being successfully resisted to 10884 * have this cmn_err() "Holy shit -- we executed this code!") 10885 */ 10886 return (pred); 10887 } 10888 10889 pred->dtp_cacheid = dtrace_predcache_id++; 10890 10891 return (pred); 10892 } 10893 10894 static void 10895 dtrace_predicate_hold(dtrace_predicate_t *pred) 10896 { 10897 ASSERT(MUTEX_HELD(&dtrace_lock)); 10898 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 10899 ASSERT(pred->dtp_refcnt > 0); 10900 10901 pred->dtp_refcnt++; 10902 } 10903 10904 static void 10905 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 10906 { 10907 dtrace_difo_t *dp = pred->dtp_difo; 10908 10909 ASSERT(MUTEX_HELD(&dtrace_lock)); 10910 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 10911 ASSERT(pred->dtp_refcnt > 0); 10912 10913 if (--pred->dtp_refcnt == 0) { 10914 dtrace_difo_release(pred->dtp_difo, vstate); 10915 kmem_free(pred, sizeof (dtrace_predicate_t)); 10916 } 10917 } 10918 10919 /* 10920 * DTrace Action Description Functions 10921 */ 10922 static dtrace_actdesc_t * 10923 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 10924 uint64_t uarg, uint64_t arg) 10925 { 10926 dtrace_actdesc_t *act; 10927 10928 #ifdef illumos 10929 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 10930 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 10931 #endif 10932 10933 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 10934 act->dtad_kind = kind; 10935 act->dtad_ntuple = ntuple; 10936 act->dtad_uarg = uarg; 10937 act->dtad_arg = arg; 10938 act->dtad_refcnt = 1; 10939 10940 return (act); 10941 } 10942 10943 static void 10944 dtrace_actdesc_hold(dtrace_actdesc_t *act) 10945 { 10946 ASSERT(act->dtad_refcnt >= 1); 10947 act->dtad_refcnt++; 10948 } 10949 10950 static void 10951 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 10952 { 10953 dtrace_actkind_t kind = act->dtad_kind; 10954 dtrace_difo_t *dp; 10955 10956 ASSERT(act->dtad_refcnt >= 1); 10957 10958 if (--act->dtad_refcnt != 0) 10959 return; 10960 10961 if ((dp = act->dtad_difo) != NULL) 10962 dtrace_difo_release(dp, vstate); 10963 10964 if (DTRACEACT_ISPRINTFLIKE(kind)) { 10965 char *str = (char *)(uintptr_t)act->dtad_arg; 10966 10967 #ifdef illumos 10968 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 10969 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 10970 #endif 10971 10972 if (str != NULL) 10973 kmem_free(str, strlen(str) + 1); 10974 } 10975 10976 kmem_free(act, sizeof (dtrace_actdesc_t)); 10977 } 10978 10979 /* 10980 * DTrace ECB Functions 10981 */ 10982 static dtrace_ecb_t * 10983 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 10984 { 10985 dtrace_ecb_t *ecb; 10986 dtrace_epid_t epid; 10987 10988 ASSERT(MUTEX_HELD(&dtrace_lock)); 10989 10990 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 10991 ecb->dte_predicate = NULL; 10992 ecb->dte_probe = probe; 10993 10994 /* 10995 * The default size is the size of the default action: recording 10996 * the header. 
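 * (Concretely: dte_size and dte_needed both start out as
 * sizeof (dtrace_rechdr_t), aligned on a dtrace_epid_t boundary;
 * dtrace_ecb_resize() recomputes them once actions are added.)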
10997 */ 10998 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 10999 ecb->dte_alignment = sizeof (dtrace_epid_t); 11000 11001 epid = state->dts_epid++; 11002 11003 if (epid - 1 >= state->dts_necbs) { 11004 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 11005 int necbs = state->dts_necbs << 1; 11006 11007 ASSERT(epid == state->dts_necbs + 1); 11008 11009 if (necbs == 0) { 11010 ASSERT(oecbs == NULL); 11011 necbs = 1; 11012 } 11013 11014 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 11015 11016 if (oecbs != NULL) 11017 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 11018 11019 dtrace_membar_producer(); 11020 state->dts_ecbs = ecbs; 11021 11022 if (oecbs != NULL) { 11023 /* 11024 * If this state is active, we must dtrace_sync() 11025 * before we can free the old dts_ecbs array: we're 11026 * coming in hot, and there may be active ring 11027 * buffer processing (which indexes into the dts_ecbs 11028 * array) on another CPU. 11029 */ 11030 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11031 dtrace_sync(); 11032 11033 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 11034 } 11035 11036 dtrace_membar_producer(); 11037 state->dts_necbs = necbs; 11038 } 11039 11040 ecb->dte_state = state; 11041 11042 ASSERT(state->dts_ecbs[epid - 1] == NULL); 11043 dtrace_membar_producer(); 11044 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 11045 11046 return (ecb); 11047 } 11048 11049 static void 11050 dtrace_ecb_enable(dtrace_ecb_t *ecb) 11051 { 11052 dtrace_probe_t *probe = ecb->dte_probe; 11053 11054 ASSERT(MUTEX_HELD(&cpu_lock)); 11055 ASSERT(MUTEX_HELD(&dtrace_lock)); 11056 ASSERT(ecb->dte_next == NULL); 11057 11058 if (probe == NULL) { 11059 /* 11060 * This is the NULL probe -- there's nothing to do. 11061 */ 11062 return; 11063 } 11064 11065 if (probe->dtpr_ecb == NULL) { 11066 dtrace_provider_t *prov = probe->dtpr_provider; 11067 11068 /* 11069 * We're the first ECB on this probe. 11070 */ 11071 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 11072 11073 if (ecb->dte_predicate != NULL) 11074 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 11075 11076 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 11077 probe->dtpr_id, probe->dtpr_arg); 11078 } else { 11079 /* 11080 * This probe is already active. Swing the last pointer to 11081 * point to the new ECB, and issue a dtrace_sync() to assure 11082 * that all CPUs have seen the change. 11083 */ 11084 ASSERT(probe->dtpr_ecb_last != NULL); 11085 probe->dtpr_ecb_last->dte_next = ecb; 11086 probe->dtpr_ecb_last = ecb; 11087 probe->dtpr_predcache = 0; 11088 11089 dtrace_sync(); 11090 } 11091 } 11092 11093 static int 11094 dtrace_ecb_resize(dtrace_ecb_t *ecb) 11095 { 11096 dtrace_action_t *act; 11097 uint32_t curneeded = UINT32_MAX; 11098 uint32_t aggbase = UINT32_MAX; 11099 11100 /* 11101 * If we record anything, we always record the dtrace_rechdr_t. (And 11102 * we always record it first.) 
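 *
 * In outline: the ECB's alignment becomes the maximum alignment of any
 * record; actions that are part of an aggregation tuple accumulate into
 * a separate region (tracked by curneeded, with the aggregation's base
 * offset remembered in aggbase); all other actions are appended at the
 * current dte_size, with dte_needed tracking the worst-case space
 * required.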
11103 */ 11104 ecb->dte_size = sizeof (dtrace_rechdr_t); 11105 ecb->dte_alignment = sizeof (dtrace_epid_t); 11106 11107 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11108 dtrace_recdesc_t *rec = &act->dta_rec; 11109 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 11110 11111 ecb->dte_alignment = MAX(ecb->dte_alignment, 11112 rec->dtrd_alignment); 11113 11114 if (DTRACEACT_ISAGG(act->dta_kind)) { 11115 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11116 11117 ASSERT(rec->dtrd_size != 0); 11118 ASSERT(agg->dtag_first != NULL); 11119 ASSERT(act->dta_prev->dta_intuple); 11120 ASSERT(aggbase != UINT32_MAX); 11121 ASSERT(curneeded != UINT32_MAX); 11122 11123 agg->dtag_base = aggbase; 11124 11125 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11126 rec->dtrd_offset = curneeded; 11127 if (curneeded + rec->dtrd_size < curneeded) 11128 return (EINVAL); 11129 curneeded += rec->dtrd_size; 11130 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 11131 11132 aggbase = UINT32_MAX; 11133 curneeded = UINT32_MAX; 11134 } else if (act->dta_intuple) { 11135 if (curneeded == UINT32_MAX) { 11136 /* 11137 * This is the first record in a tuple. Align 11138 * curneeded to be at offset 4 in an 8-byte 11139 * aligned block. 11140 */ 11141 ASSERT(act->dta_prev == NULL || 11142 !act->dta_prev->dta_intuple); 11143 ASSERT3U(aggbase, ==, UINT32_MAX); 11144 curneeded = P2PHASEUP(ecb->dte_size, 11145 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 11146 11147 aggbase = curneeded - sizeof (dtrace_aggid_t); 11148 ASSERT(IS_P2ALIGNED(aggbase, 11149 sizeof (uint64_t))); 11150 } 11151 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11152 rec->dtrd_offset = curneeded; 11153 if (curneeded + rec->dtrd_size < curneeded) 11154 return (EINVAL); 11155 curneeded += rec->dtrd_size; 11156 } else { 11157 /* tuples must be followed by an aggregation */ 11158 ASSERT(act->dta_prev == NULL || 11159 !act->dta_prev->dta_intuple); 11160 11161 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 11162 rec->dtrd_alignment); 11163 rec->dtrd_offset = ecb->dte_size; 11164 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size) 11165 return (EINVAL); 11166 ecb->dte_size += rec->dtrd_size; 11167 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 11168 } 11169 } 11170 11171 if ((act = ecb->dte_action) != NULL && 11172 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 11173 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 11174 /* 11175 * If the size is still sizeof (dtrace_rechdr_t), then all 11176 * actions store no data; set the size to 0. 
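 * (With dte_size zero, dtrace_probe() stores nothing at all for this
 * ECB -- not even a record header -- when it fires.)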
11177 */ 11178 ecb->dte_size = 0; 11179 } 11180 11181 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 11182 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 11183 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 11184 ecb->dte_needed); 11185 return (0); 11186 } 11187 11188 static dtrace_action_t * 11189 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11190 { 11191 dtrace_aggregation_t *agg; 11192 size_t size = sizeof (uint64_t); 11193 int ntuple = desc->dtad_ntuple; 11194 dtrace_action_t *act; 11195 dtrace_recdesc_t *frec; 11196 dtrace_aggid_t aggid; 11197 dtrace_state_t *state = ecb->dte_state; 11198 11199 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 11200 agg->dtag_ecb = ecb; 11201 11202 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 11203 11204 switch (desc->dtad_kind) { 11205 case DTRACEAGG_MIN: 11206 agg->dtag_initial = INT64_MAX; 11207 agg->dtag_aggregate = dtrace_aggregate_min; 11208 break; 11209 11210 case DTRACEAGG_MAX: 11211 agg->dtag_initial = INT64_MIN; 11212 agg->dtag_aggregate = dtrace_aggregate_max; 11213 break; 11214 11215 case DTRACEAGG_COUNT: 11216 agg->dtag_aggregate = dtrace_aggregate_count; 11217 break; 11218 11219 case DTRACEAGG_QUANTIZE: 11220 agg->dtag_aggregate = dtrace_aggregate_quantize; 11221 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 11222 sizeof (uint64_t); 11223 break; 11224 11225 case DTRACEAGG_LQUANTIZE: { 11226 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 11227 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 11228 11229 agg->dtag_initial = desc->dtad_arg; 11230 agg->dtag_aggregate = dtrace_aggregate_lquantize; 11231 11232 if (step == 0 || levels == 0) 11233 goto err; 11234 11235 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 11236 break; 11237 } 11238 11239 case DTRACEAGG_LLQUANTIZE: { 11240 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 11241 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 11242 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 11243 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 11244 int64_t v; 11245 11246 agg->dtag_initial = desc->dtad_arg; 11247 agg->dtag_aggregate = dtrace_aggregate_llquantize; 11248 11249 if (factor < 2 || low >= high || nsteps < factor) 11250 goto err; 11251 11252 /* 11253 * Now check that the number of steps evenly divides a power 11254 * of the factor. (This assures both integer bucket size and 11255 * linearity within each magnitude.) 11256 */ 11257 for (v = factor; v < nsteps; v *= factor) 11258 continue; 11259 11260 if ((v % nsteps) || (nsteps % factor)) 11261 goto err; 11262 11263 size = (dtrace_aggregate_llquantize_bucket(factor, 11264 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 11265 break; 11266 } 11267 11268 case DTRACEAGG_AVG: 11269 agg->dtag_aggregate = dtrace_aggregate_avg; 11270 size = sizeof (uint64_t) * 2; 11271 break; 11272 11273 case DTRACEAGG_STDDEV: 11274 agg->dtag_aggregate = dtrace_aggregate_stddev; 11275 size = sizeof (uint64_t) * 4; 11276 break; 11277 11278 case DTRACEAGG_SUM: 11279 agg->dtag_aggregate = dtrace_aggregate_sum; 11280 break; 11281 11282 default: 11283 goto err; 11284 } 11285 11286 agg->dtag_action.dta_rec.dtrd_size = size; 11287 11288 if (ntuple == 0) 11289 goto err; 11290 11291 /* 11292 * We must make sure that we have enough actions for the n-tuple. 
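 * (We walk backward from the last action; if we reach another
 * aggregating action before consuming all ntuple elements, or simply
 * run out of actions, the n-tuple is short and the aggregation fails.)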
11293 */ 11294 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 11295 if (DTRACEACT_ISAGG(act->dta_kind)) 11296 break; 11297 11298 if (--ntuple == 0) { 11299 /* 11300 * This is the action with which our n-tuple begins. 11301 */ 11302 agg->dtag_first = act; 11303 goto success; 11304 } 11305 } 11306 11307 /* 11308 * This n-tuple is short by ntuple elements. Return failure. 11309 */ 11310 ASSERT(ntuple != 0); 11311 err: 11312 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11313 return (NULL); 11314 11315 success: 11316 /* 11317 * If the last action in the tuple has a size of zero, it's actually 11318 * an expression argument for the aggregating action. 11319 */ 11320 ASSERT(ecb->dte_action_last != NULL); 11321 act = ecb->dte_action_last; 11322 11323 if (act->dta_kind == DTRACEACT_DIFEXPR) { 11324 ASSERT(act->dta_difo != NULL); 11325 11326 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 11327 agg->dtag_hasarg = 1; 11328 } 11329 11330 /* 11331 * We need to allocate an id for this aggregation. 11332 */ 11333 #ifdef illumos 11334 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 11335 VM_BESTFIT | VM_SLEEP); 11336 #else 11337 aggid = alloc_unr(state->dts_aggid_arena); 11338 #endif 11339 11340 if (aggid - 1 >= state->dts_naggregations) { 11341 dtrace_aggregation_t **oaggs = state->dts_aggregations; 11342 dtrace_aggregation_t **aggs; 11343 int naggs = state->dts_naggregations << 1; 11344 int onaggs = state->dts_naggregations; 11345 11346 ASSERT(aggid == state->dts_naggregations + 1); 11347 11348 if (naggs == 0) { 11349 ASSERT(oaggs == NULL); 11350 naggs = 1; 11351 } 11352 11353 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 11354 11355 if (oaggs != NULL) { 11356 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 11357 kmem_free(oaggs, onaggs * sizeof (*aggs)); 11358 } 11359 11360 state->dts_aggregations = aggs; 11361 state->dts_naggregations = naggs; 11362 } 11363 11364 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 11365 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 11366 11367 frec = &agg->dtag_first->dta_rec; 11368 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 11369 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 11370 11371 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 11372 ASSERT(!act->dta_intuple); 11373 act->dta_intuple = 1; 11374 } 11375 11376 return (&agg->dtag_action); 11377 } 11378 11379 static void 11380 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 11381 { 11382 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11383 dtrace_state_t *state = ecb->dte_state; 11384 dtrace_aggid_t aggid = agg->dtag_id; 11385 11386 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 11387 #ifdef illumos 11388 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 11389 #else 11390 free_unr(state->dts_aggid_arena, aggid); 11391 #endif 11392 11393 ASSERT(state->dts_aggregations[aggid - 1] == agg); 11394 state->dts_aggregations[aggid - 1] = NULL; 11395 11396 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11397 } 11398 11399 static int 11400 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11401 { 11402 dtrace_action_t *action, *last; 11403 dtrace_difo_t *dp = desc->dtad_difo; 11404 uint32_t size = 0, align = sizeof (uint8_t), mask; 11405 uint16_t format = 0; 11406 dtrace_recdesc_t *rec; 11407 dtrace_state_t *state = ecb->dte_state; 11408 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 11409 uint64_t arg = desc->dtad_arg; 11410 11411 
ASSERT(MUTEX_HELD(&dtrace_lock)); 11412 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 11413 11414 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 11415 /* 11416 * If this is an aggregating action, there must be neither 11417 * a speculate nor a commit on the action chain. 11418 */ 11419 dtrace_action_t *act; 11420 11421 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11422 if (act->dta_kind == DTRACEACT_COMMIT) 11423 return (EINVAL); 11424 11425 if (act->dta_kind == DTRACEACT_SPECULATE) 11426 return (EINVAL); 11427 } 11428 11429 action = dtrace_ecb_aggregation_create(ecb, desc); 11430 11431 if (action == NULL) 11432 return (EINVAL); 11433 } else { 11434 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 11435 (desc->dtad_kind == DTRACEACT_DIFEXPR && 11436 dp != NULL && dp->dtdo_destructive)) { 11437 state->dts_destructive = 1; 11438 } 11439 11440 switch (desc->dtad_kind) { 11441 case DTRACEACT_PRINTF: 11442 case DTRACEACT_PRINTA: 11443 case DTRACEACT_SYSTEM: 11444 case DTRACEACT_FREOPEN: 11445 case DTRACEACT_DIFEXPR: 11446 /* 11447 * We know that our arg is a string -- turn it into a 11448 * format. 11449 */ 11450 if (arg == 0) { 11451 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 11452 desc->dtad_kind == DTRACEACT_DIFEXPR); 11453 format = 0; 11454 } else { 11455 ASSERT(arg != 0); 11456 #ifdef illumos 11457 ASSERT(arg > KERNELBASE); 11458 #endif 11459 format = dtrace_format_add(state, 11460 (char *)(uintptr_t)arg); 11461 } 11462 11463 /*FALLTHROUGH*/ 11464 case DTRACEACT_LIBACT: 11465 case DTRACEACT_TRACEMEM: 11466 case DTRACEACT_TRACEMEM_DYNSIZE: 11467 if (dp == NULL) 11468 return (EINVAL); 11469 11470 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 11471 break; 11472 11473 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 11474 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11475 return (EINVAL); 11476 11477 size = opt[DTRACEOPT_STRSIZE]; 11478 } 11479 11480 break; 11481 11482 case DTRACEACT_STACK: 11483 if ((nframes = arg) == 0) { 11484 nframes = opt[DTRACEOPT_STACKFRAMES]; 11485 ASSERT(nframes > 0); 11486 arg = nframes; 11487 } 11488 11489 size = nframes * sizeof (pc_t); 11490 break; 11491 11492 case DTRACEACT_JSTACK: 11493 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 11494 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 11495 11496 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 11497 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 11498 11499 arg = DTRACE_USTACK_ARG(nframes, strsize); 11500 11501 /*FALLTHROUGH*/ 11502 case DTRACEACT_USTACK: 11503 if (desc->dtad_kind != DTRACEACT_JSTACK && 11504 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 11505 strsize = DTRACE_USTACK_STRSIZE(arg); 11506 nframes = opt[DTRACEOPT_USTACKFRAMES]; 11507 ASSERT(nframes > 0); 11508 arg = DTRACE_USTACK_ARG(nframes, strsize); 11509 } 11510 11511 /* 11512 * Save a slot for the pid. 
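 * (The ustack()/jstack() record is therefore (nframes + 1) 64-bit
 * slots -- the pid followed by the frames -- plus any requested string
 * space, rounded up to a pointer-sized boundary.)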
11513 */ 11514 size = (nframes + 1) * sizeof (uint64_t); 11515 size += DTRACE_USTACK_STRSIZE(arg); 11516 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 11517 11518 break; 11519 11520 case DTRACEACT_SYM: 11521 case DTRACEACT_MOD: 11522 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 11523 sizeof (uint64_t)) || 11524 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11525 return (EINVAL); 11526 break; 11527 11528 case DTRACEACT_USYM: 11529 case DTRACEACT_UMOD: 11530 case DTRACEACT_UADDR: 11531 if (dp == NULL || 11532 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 11533 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11534 return (EINVAL); 11535 11536 /* 11537 * We have a slot for the pid, plus a slot for the 11538 * argument. To keep things simple (aligned with 11539 * bitness-neutral sizing), we store each as a 64-bit 11540 * quantity. 11541 */ 11542 size = 2 * sizeof (uint64_t); 11543 break; 11544 11545 case DTRACEACT_STOP: 11546 case DTRACEACT_BREAKPOINT: 11547 case DTRACEACT_PANIC: 11548 break; 11549 11550 case DTRACEACT_CHILL: 11551 case DTRACEACT_DISCARD: 11552 case DTRACEACT_RAISE: 11553 if (dp == NULL) 11554 return (EINVAL); 11555 break; 11556 11557 case DTRACEACT_EXIT: 11558 if (dp == NULL || 11559 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 11560 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11561 return (EINVAL); 11562 break; 11563 11564 case DTRACEACT_SPECULATE: 11565 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 11566 return (EINVAL); 11567 11568 if (dp == NULL) 11569 return (EINVAL); 11570 11571 state->dts_speculates = 1; 11572 break; 11573 11574 case DTRACEACT_PRINTM: 11575 size = dp->dtdo_rtype.dtdt_size; 11576 break; 11577 11578 case DTRACEACT_COMMIT: { 11579 dtrace_action_t *act = ecb->dte_action; 11580 11581 for (; act != NULL; act = act->dta_next) { 11582 if (act->dta_kind == DTRACEACT_COMMIT) 11583 return (EINVAL); 11584 } 11585 11586 if (dp == NULL) 11587 return (EINVAL); 11588 break; 11589 } 11590 11591 default: 11592 return (EINVAL); 11593 } 11594 11595 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 11596 /* 11597 * If this is a data-storing action or a speculate, 11598 * we must be sure that there isn't a commit on the 11599 * action chain. 
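 * (This mirrors the documented restriction that a clause containing a
 * commit() may not also record data or speculate.)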
11600 */ 11601 dtrace_action_t *act = ecb->dte_action; 11602 11603 for (; act != NULL; act = act->dta_next) { 11604 if (act->dta_kind == DTRACEACT_COMMIT) 11605 return (EINVAL); 11606 } 11607 } 11608 11609 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 11610 action->dta_rec.dtrd_size = size; 11611 } 11612 11613 action->dta_refcnt = 1; 11614 rec = &action->dta_rec; 11615 size = rec->dtrd_size; 11616 11617 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 11618 if (!(size & mask)) { 11619 align = mask + 1; 11620 break; 11621 } 11622 } 11623 11624 action->dta_kind = desc->dtad_kind; 11625 11626 if ((action->dta_difo = dp) != NULL) 11627 dtrace_difo_hold(dp); 11628 11629 rec->dtrd_action = action->dta_kind; 11630 rec->dtrd_arg = arg; 11631 rec->dtrd_uarg = desc->dtad_uarg; 11632 rec->dtrd_alignment = (uint16_t)align; 11633 rec->dtrd_format = format; 11634 11635 if ((last = ecb->dte_action_last) != NULL) { 11636 ASSERT(ecb->dte_action != NULL); 11637 action->dta_prev = last; 11638 last->dta_next = action; 11639 } else { 11640 ASSERT(ecb->dte_action == NULL); 11641 ecb->dte_action = action; 11642 } 11643 11644 ecb->dte_action_last = action; 11645 11646 return (0); 11647 } 11648 11649 static void 11650 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 11651 { 11652 dtrace_action_t *act = ecb->dte_action, *next; 11653 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 11654 dtrace_difo_t *dp; 11655 uint16_t format; 11656 11657 if (act != NULL && act->dta_refcnt > 1) { 11658 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 11659 act->dta_refcnt--; 11660 } else { 11661 for (; act != NULL; act = next) { 11662 next = act->dta_next; 11663 ASSERT(next != NULL || act == ecb->dte_action_last); 11664 ASSERT(act->dta_refcnt == 1); 11665 11666 if ((format = act->dta_rec.dtrd_format) != 0) 11667 dtrace_format_remove(ecb->dte_state, format); 11668 11669 if ((dp = act->dta_difo) != NULL) 11670 dtrace_difo_release(dp, vstate); 11671 11672 if (DTRACEACT_ISAGG(act->dta_kind)) { 11673 dtrace_ecb_aggregation_destroy(ecb, act); 11674 } else { 11675 kmem_free(act, sizeof (dtrace_action_t)); 11676 } 11677 } 11678 } 11679 11680 ecb->dte_action = NULL; 11681 ecb->dte_action_last = NULL; 11682 ecb->dte_size = 0; 11683 } 11684 11685 static void 11686 dtrace_ecb_disable(dtrace_ecb_t *ecb) 11687 { 11688 /* 11689 * We disable the ECB by removing it from its probe. 11690 */ 11691 dtrace_ecb_t *pecb, *prev = NULL; 11692 dtrace_probe_t *probe = ecb->dte_probe; 11693 11694 ASSERT(MUTEX_HELD(&dtrace_lock)); 11695 11696 if (probe == NULL) { 11697 /* 11698 * This is the NULL probe; there is nothing to disable. 11699 */ 11700 return; 11701 } 11702 11703 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 11704 if (pecb == ecb) 11705 break; 11706 prev = pecb; 11707 } 11708 11709 ASSERT(pecb != NULL); 11710 11711 if (prev == NULL) { 11712 probe->dtpr_ecb = ecb->dte_next; 11713 } else { 11714 prev->dte_next = ecb->dte_next; 11715 } 11716 11717 if (ecb == probe->dtpr_ecb_last) { 11718 ASSERT(ecb->dte_next == NULL); 11719 probe->dtpr_ecb_last = prev; 11720 } 11721 11722 /* 11723 * The ECB has been disconnected from the probe; now sync to assure 11724 * that all CPUs have seen the change before returning. 11725 */ 11726 dtrace_sync(); 11727 11728 if (probe->dtpr_ecb == NULL) { 11729 /* 11730 * That was the last ECB on the probe; clear the predicate 11731 * cache ID for the probe, disable it and sync one more time 11732 * to assure that we'll never hit it again. 
11733 */ 11734 dtrace_provider_t *prov = probe->dtpr_provider; 11735 11736 ASSERT(ecb->dte_next == NULL); 11737 ASSERT(probe->dtpr_ecb_last == NULL); 11738 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 11739 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 11740 probe->dtpr_id, probe->dtpr_arg); 11741 dtrace_sync(); 11742 } else { 11743 /* 11744 * There is at least one ECB remaining on the probe. If there 11745 * is _exactly_ one, set the probe's predicate cache ID to be 11746 * the predicate cache ID of the remaining ECB. 11747 */ 11748 ASSERT(probe->dtpr_ecb_last != NULL); 11749 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 11750 11751 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 11752 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 11753 11754 ASSERT(probe->dtpr_ecb->dte_next == NULL); 11755 11756 if (p != NULL) 11757 probe->dtpr_predcache = p->dtp_cacheid; 11758 } 11759 11760 ecb->dte_next = NULL; 11761 } 11762 } 11763 11764 static void 11765 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 11766 { 11767 dtrace_state_t *state = ecb->dte_state; 11768 dtrace_vstate_t *vstate = &state->dts_vstate; 11769 dtrace_predicate_t *pred; 11770 dtrace_epid_t epid = ecb->dte_epid; 11771 11772 ASSERT(MUTEX_HELD(&dtrace_lock)); 11773 ASSERT(ecb->dte_next == NULL); 11774 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 11775 11776 if ((pred = ecb->dte_predicate) != NULL) 11777 dtrace_predicate_release(pred, vstate); 11778 11779 dtrace_ecb_action_remove(ecb); 11780 11781 ASSERT(state->dts_ecbs[epid - 1] == ecb); 11782 state->dts_ecbs[epid - 1] = NULL; 11783 11784 kmem_free(ecb, sizeof (dtrace_ecb_t)); 11785 } 11786 11787 static dtrace_ecb_t * 11788 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 11789 dtrace_enabling_t *enab) 11790 { 11791 dtrace_ecb_t *ecb; 11792 dtrace_predicate_t *pred; 11793 dtrace_actdesc_t *act; 11794 dtrace_provider_t *prov; 11795 dtrace_ecbdesc_t *desc = enab->dten_current; 11796 11797 ASSERT(MUTEX_HELD(&dtrace_lock)); 11798 ASSERT(state != NULL); 11799 11800 ecb = dtrace_ecb_add(state, probe); 11801 ecb->dte_uarg = desc->dted_uarg; 11802 11803 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 11804 dtrace_predicate_hold(pred); 11805 ecb->dte_predicate = pred; 11806 } 11807 11808 if (probe != NULL) { 11809 /* 11810 * If the provider shows more leg than the consumer is old 11811 * enough to see, we need to enable the appropriate implicit 11812 * predicate bits to prevent the ecb from activating at 11813 * revealing times. 11814 * 11815 * Providers specifying DTRACE_PRIV_USER at register time 11816 * are stating that they need the /proc-style privilege 11817 * model to be enforced, and this is what DTRACE_COND_OWNER 11818 * and DTRACE_COND_ZONEOWNER will then do at probe time. 11819 */ 11820 prov = probe->dtpr_provider; 11821 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 11822 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11823 ecb->dte_cond |= DTRACE_COND_OWNER; 11824 11825 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 11826 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11827 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 11828 11829 /* 11830 * If the provider shows us kernel innards and the user 11831 * is lacking sufficient privilege, enable the 11832 * DTRACE_COND_USERMODE implicit predicate. 
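 * (At probe time, DTRACE_COND_USERMODE causes the ECB to be skipped
 * unless the provider reports that the probe fired while executing in
 * user mode.)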
11833 */ 11834 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 11835 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 11836 ecb->dte_cond |= DTRACE_COND_USERMODE; 11837 } 11838 11839 if (dtrace_ecb_create_cache != NULL) { 11840 /* 11841 * If we have a cached ecb, we'll use its action list instead 11842 * of creating our own (saving both time and space). 11843 */ 11844 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 11845 dtrace_action_t *act = cached->dte_action; 11846 11847 if (act != NULL) { 11848 ASSERT(act->dta_refcnt > 0); 11849 act->dta_refcnt++; 11850 ecb->dte_action = act; 11851 ecb->dte_action_last = cached->dte_action_last; 11852 ecb->dte_needed = cached->dte_needed; 11853 ecb->dte_size = cached->dte_size; 11854 ecb->dte_alignment = cached->dte_alignment; 11855 } 11856 11857 return (ecb); 11858 } 11859 11860 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 11861 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 11862 dtrace_ecb_destroy(ecb); 11863 return (NULL); 11864 } 11865 } 11866 11867 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) { 11868 dtrace_ecb_destroy(ecb); 11869 return (NULL); 11870 } 11871 11872 return (dtrace_ecb_create_cache = ecb); 11873 } 11874 11875 static int 11876 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 11877 { 11878 dtrace_ecb_t *ecb; 11879 dtrace_enabling_t *enab = arg; 11880 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 11881 11882 ASSERT(state != NULL); 11883 11884 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 11885 /* 11886 * This probe was created in a generation for which this 11887 * enabling has previously created ECBs; we don't want to 11888 * enable it again, so just kick out. 11889 */ 11890 return (DTRACE_MATCH_NEXT); 11891 } 11892 11893 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 11894 return (DTRACE_MATCH_DONE); 11895 11896 dtrace_ecb_enable(ecb); 11897 return (DTRACE_MATCH_NEXT); 11898 } 11899 11900 static dtrace_ecb_t * 11901 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 11902 { 11903 dtrace_ecb_t *ecb; 11904 11905 ASSERT(MUTEX_HELD(&dtrace_lock)); 11906 11907 if (id == 0 || id > state->dts_necbs) 11908 return (NULL); 11909 11910 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 11911 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 11912 11913 return (state->dts_ecbs[id - 1]); 11914 } 11915 11916 static dtrace_aggregation_t * 11917 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 11918 { 11919 dtrace_aggregation_t *agg; 11920 11921 ASSERT(MUTEX_HELD(&dtrace_lock)); 11922 11923 if (id == 0 || id > state->dts_naggregations) 11924 return (NULL); 11925 11926 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 11927 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 11928 agg->dtag_id == id); 11929 11930 return (state->dts_aggregations[id - 1]); 11931 } 11932 11933 /* 11934 * DTrace Buffer Functions 11935 * 11936 * The following functions manipulate DTrace buffers. Most of these functions 11937 * are called in the context of establishing or processing consumer state; 11938 * exceptions are explicitly noted. 11939 */ 11940 11941 /* 11942 * Note: called from cross call context. This function switches the two 11943 * buffers on a given CPU. 
The atomicity of this operation is assured by 11944 * disabling interrupts while the actual switch takes place; the disabling of 11945 * interrupts serializes the execution with any execution of dtrace_probe() on 11946 * the same CPU. 11947 */ 11948 static void 11949 dtrace_buffer_switch(dtrace_buffer_t *buf) 11950 { 11951 caddr_t tomax = buf->dtb_tomax; 11952 caddr_t xamot = buf->dtb_xamot; 11953 dtrace_icookie_t cookie; 11954 hrtime_t now; 11955 11956 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11957 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 11958 11959 cookie = dtrace_interrupt_disable(); 11960 now = dtrace_gethrtime(); 11961 buf->dtb_tomax = xamot; 11962 buf->dtb_xamot = tomax; 11963 buf->dtb_xamot_drops = buf->dtb_drops; 11964 buf->dtb_xamot_offset = buf->dtb_offset; 11965 buf->dtb_xamot_errors = buf->dtb_errors; 11966 buf->dtb_xamot_flags = buf->dtb_flags; 11967 buf->dtb_offset = 0; 11968 buf->dtb_drops = 0; 11969 buf->dtb_errors = 0; 11970 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 11971 buf->dtb_interval = now - buf->dtb_switched; 11972 buf->dtb_switched = now; 11973 dtrace_interrupt_enable(cookie); 11974 } 11975 11976 /* 11977 * Note: called from cross call context. This function activates a buffer 11978 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 11979 * is guaranteed by the disabling of interrupts. 11980 */ 11981 static void 11982 dtrace_buffer_activate(dtrace_state_t *state) 11983 { 11984 dtrace_buffer_t *buf; 11985 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 11986 11987 buf = &state->dts_buffer[curcpu]; 11988 11989 if (buf->dtb_tomax != NULL) { 11990 /* 11991 * We might like to assert that the buffer is marked inactive, 11992 * but this isn't necessarily true: the buffer for the CPU 11993 * that processes the BEGIN probe has its buffer activated 11994 * manually. In this case, we take the (harmless) action 11995 * re-clearing the bit INACTIVE bit. 11996 */ 11997 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 11998 } 11999 12000 dtrace_interrupt_enable(cookie); 12001 } 12002 12003 #ifdef __FreeBSD__ 12004 /* 12005 * Activate the specified per-CPU buffer. This is used instead of 12006 * dtrace_buffer_activate() when APs have not yet started, i.e. when 12007 * activating anonymous state. 12008 */ 12009 static void 12010 dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu) 12011 { 12012 12013 if (state->dts_buffer[cpu].dtb_tomax != NULL) 12014 state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12015 } 12016 #endif 12017 12018 static int 12019 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 12020 processorid_t cpu, int *factor) 12021 { 12022 #ifdef illumos 12023 cpu_t *cp; 12024 #endif 12025 dtrace_buffer_t *buf; 12026 int allocated = 0, desired = 0; 12027 12028 #ifdef illumos 12029 ASSERT(MUTEX_HELD(&cpu_lock)); 12030 ASSERT(MUTEX_HELD(&dtrace_lock)); 12031 12032 *factor = 1; 12033 12034 if (size > dtrace_nonroot_maxsize && 12035 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 12036 return (EFBIG); 12037 12038 cp = cpu_list; 12039 12040 do { 12041 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 12042 continue; 12043 12044 buf = &bufs[cp->cpu_id]; 12045 12046 /* 12047 * If there is already a buffer allocated for this CPU, it 12048 * is only possible that this is a DR event. 
In this case, 12049 */ 12050 if (buf->dtb_tomax != NULL) { 12051 ASSERT(buf->dtb_size == size); 12052 continue; 12053 } 12054 12055 ASSERT(buf->dtb_xamot == NULL); 12056 12057 if ((buf->dtb_tomax = kmem_zalloc(size, 12058 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12059 goto err; 12060 12061 buf->dtb_size = size; 12062 buf->dtb_flags = flags; 12063 buf->dtb_offset = 0; 12064 buf->dtb_drops = 0; 12065 12066 if (flags & DTRACEBUF_NOSWITCH) 12067 continue; 12068 12069 if ((buf->dtb_xamot = kmem_zalloc(size, 12070 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12071 goto err; 12072 } while ((cp = cp->cpu_next) != cpu_list); 12073 12074 return (0); 12075 12076 err: 12077 cp = cpu_list; 12078 12079 do { 12080 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 12081 continue; 12082 12083 buf = &bufs[cp->cpu_id]; 12084 desired += 2; 12085 12086 if (buf->dtb_xamot != NULL) { 12087 ASSERT(buf->dtb_tomax != NULL); 12088 ASSERT(buf->dtb_size == size); 12089 kmem_free(buf->dtb_xamot, size); 12090 allocated++; 12091 } 12092 12093 if (buf->dtb_tomax != NULL) { 12094 ASSERT(buf->dtb_size == size); 12095 kmem_free(buf->dtb_tomax, size); 12096 allocated++; 12097 } 12098 12099 buf->dtb_tomax = NULL; 12100 buf->dtb_xamot = NULL; 12101 buf->dtb_size = 0; 12102 } while ((cp = cp->cpu_next) != cpu_list); 12103 #else 12104 int i; 12105 12106 *factor = 1; 12107 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ 12108 defined(__mips__) || defined(__powerpc__) || defined(__riscv__) 12109 /* 12110 * FreeBSD isn't good at limiting the amount of memory we 12111 * ask to malloc, so let's place a limit here before trying 12112 * to do something that might well end in tears at bedtime. 12113 */ 12114 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 12115 return (ENOMEM); 12116 #endif 12117 12118 ASSERT(MUTEX_HELD(&dtrace_lock)); 12119 CPU_FOREACH(i) { 12120 if (cpu != DTRACE_CPUALL && cpu != i) 12121 continue; 12122 12123 buf = &bufs[i]; 12124 12125 /* 12126 * If there is already a buffer allocated for this CPU, it 12127 * is only possible that this is a DR event. In this case, 12128 * the buffer size must match our specified size. 12129 */ 12130 if (buf->dtb_tomax != NULL) { 12131 ASSERT(buf->dtb_size == size); 12132 continue; 12133 } 12134 12135 ASSERT(buf->dtb_xamot == NULL); 12136 12137 if ((buf->dtb_tomax = kmem_zalloc(size, 12138 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12139 goto err; 12140 12141 buf->dtb_size = size; 12142 buf->dtb_flags = flags; 12143 buf->dtb_offset = 0; 12144 buf->dtb_drops = 0; 12145 12146 if (flags & DTRACEBUF_NOSWITCH) 12147 continue; 12148 12149 if ((buf->dtb_xamot = kmem_zalloc(size, 12150 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12151 goto err; 12152 } 12153 12154 return (0); 12155 12156 err: 12157 /* 12158 * Error allocating memory, so free the buffers that were 12159 * allocated before the failed allocation. 12160 */ 12161 CPU_FOREACH(i) { 12162 if (cpu != DTRACE_CPUALL && cpu != i) 12163 continue; 12164 12165 buf = &bufs[i]; 12166 desired += 2; 12167 12168 if (buf->dtb_xamot != NULL) { 12169 ASSERT(buf->dtb_tomax != NULL); 12170 ASSERT(buf->dtb_size == size); 12171 kmem_free(buf->dtb_xamot, size); 12172 allocated++; 12173 } 12174 12175 if (buf->dtb_tomax != NULL) { 12176 ASSERT(buf->dtb_size == size); 12177 kmem_free(buf->dtb_tomax, size); 12178 allocated++; 12179 } 12180 12181 buf->dtb_tomax = NULL; 12182 buf->dtb_xamot = NULL; 12183 buf->dtb_size = 0; 12184 12185 } 12186 #endif 12187 *factor = desired / (allocated > 0 ? 
allocated : 1); 12188 12189 return (ENOMEM); 12190 } 12191 12192 /* 12193 * Note: called from probe context. This function just increments the drop 12194 * count on a buffer. It has been made a function to allow for the 12195 * possibility of understanding the source of mysterious drop counts. (A 12196 * problem for which one may be particularly disappointed that DTrace cannot 12197 * be used to understand DTrace.) 12198 */ 12199 static void 12200 dtrace_buffer_drop(dtrace_buffer_t *buf) 12201 { 12202 buf->dtb_drops++; 12203 } 12204 12205 /* 12206 * Note: called from probe context. This function is called to reserve space 12207 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 12208 * mstate. Returns the new offset in the buffer, or a negative value if an 12209 * error has occurred. 12210 */ 12211 static intptr_t 12212 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 12213 dtrace_state_t *state, dtrace_mstate_t *mstate) 12214 { 12215 intptr_t offs = buf->dtb_offset, soffs; 12216 intptr_t woffs; 12217 caddr_t tomax; 12218 size_t total; 12219 12220 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 12221 return (-1); 12222 12223 if ((tomax = buf->dtb_tomax) == NULL) { 12224 dtrace_buffer_drop(buf); 12225 return (-1); 12226 } 12227 12228 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 12229 while (offs & (align - 1)) { 12230 /* 12231 * Assert that our alignment is off by a number which 12232 * is itself sizeof (uint32_t) aligned. 12233 */ 12234 ASSERT(!((align - (offs & (align - 1))) & 12235 (sizeof (uint32_t) - 1))); 12236 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12237 offs += sizeof (uint32_t); 12238 } 12239 12240 if ((soffs = offs + needed) > buf->dtb_size) { 12241 dtrace_buffer_drop(buf); 12242 return (-1); 12243 } 12244 12245 if (mstate == NULL) 12246 return (offs); 12247 12248 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 12249 mstate->dtms_scratch_size = buf->dtb_size - soffs; 12250 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12251 12252 return (offs); 12253 } 12254 12255 if (buf->dtb_flags & DTRACEBUF_FILL) { 12256 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 12257 (buf->dtb_flags & DTRACEBUF_FULL)) 12258 return (-1); 12259 goto out; 12260 } 12261 12262 total = needed + (offs & (align - 1)); 12263 12264 /* 12265 * For a ring buffer, life is quite a bit more complicated. Before 12266 * we can store any padding, we need to adjust our wrapping offset. 12267 * (If we've never before wrapped or we're not about to, no adjustment 12268 * is required.) 12269 */ 12270 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 12271 offs + total > buf->dtb_size) { 12272 woffs = buf->dtb_xamot_offset; 12273 12274 if (offs + total > buf->dtb_size) { 12275 /* 12276 * We can't fit in the end of the buffer. First, a 12277 * sanity check that we can fit in the buffer at all. 12278 */ 12279 if (total > buf->dtb_size) { 12280 dtrace_buffer_drop(buf); 12281 return (-1); 12282 } 12283 12284 /* 12285 * We're going to be storing at the top of the buffer, 12286 * so now we need to deal with the wrapped offset. We 12287 * only reset our wrapped offset to 0 if it is 12288 * currently greater than the current offset. If it 12289 * is less than the current offset, it is because a 12290 * previous allocation induced a wrap -- but the 12291 * allocation didn't subsequently take the space due 12292 * to an error or false predicate evaluation. 
In this 12293 * case, we'll just leave the wrapped offset alone: if 12294 * the wrapped offset hasn't been advanced far enough 12295 * for this allocation, it will be adjusted in the 12296 * lower loop. 12297 */ 12298 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 12299 if (woffs >= offs) 12300 woffs = 0; 12301 } else { 12302 woffs = 0; 12303 } 12304 12305 /* 12306 * Now we know that we're going to be storing to the 12307 * top of the buffer and that there is room for us 12308 * there. We need to clear the buffer from the current 12309 * offset to the end (there may be old gunk there). 12310 */ 12311 while (offs < buf->dtb_size) 12312 tomax[offs++] = 0; 12313 12314 /* 12315 * We need to set our offset to zero. And because we 12316 * are wrapping, we need to set the bit indicating as 12317 * much. We can also adjust our needed space back 12318 * down to the space required by the ECB -- we know 12319 * that the top of the buffer is aligned. 12320 */ 12321 offs = 0; 12322 total = needed; 12323 buf->dtb_flags |= DTRACEBUF_WRAPPED; 12324 } else { 12325 /* 12326 * There is room for us in the buffer, so we simply 12327 * need to check the wrapped offset. 12328 */ 12329 if (woffs < offs) { 12330 /* 12331 * The wrapped offset is less than the offset. 12332 * This can happen if we allocated buffer space 12333 * that induced a wrap, but then we didn't 12334 * subsequently take the space due to an error 12335 * or false predicate evaluation. This is 12336 * okay; we know that _this_ allocation isn't 12337 * going to induce a wrap. We still can't 12338 * reset the wrapped offset to be zero, 12339 * however: the space may have been trashed in 12340 * the previous failed probe attempt. But at 12341 * least the wrapped offset doesn't need to 12342 * be adjusted at all... 12343 */ 12344 goto out; 12345 } 12346 } 12347 12348 while (offs + total > woffs) { 12349 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 12350 size_t size; 12351 12352 if (epid == DTRACE_EPIDNONE) { 12353 size = sizeof (uint32_t); 12354 } else { 12355 ASSERT3U(epid, <=, state->dts_necbs); 12356 ASSERT(state->dts_ecbs[epid - 1] != NULL); 12357 12358 size = state->dts_ecbs[epid - 1]->dte_size; 12359 } 12360 12361 ASSERT(woffs + size <= buf->dtb_size); 12362 ASSERT(size != 0); 12363 12364 if (woffs + size == buf->dtb_size) { 12365 /* 12366 * We've reached the end of the buffer; we want 12367 * to set the wrapped offset to 0 and break 12368 * out. However, if the offs is 0, then we're 12369 * in a strange edge-condition: the amount of 12370 * space that we want to reserve plus the size 12371 * of the record that we're overwriting is 12372 * greater than the size of the buffer. This 12373 * is problematic because if we reserve the 12374 * space but subsequently don't consume it (due 12375 * to a failed predicate or error) the wrapped 12376 * offset will be 0 -- yet the EPID at offset 0 12377 * will not be committed. This situation is 12378 * relatively easy to deal with: if we're in 12379 * this case, the buffer is indistinguishable 12380 * from one that hasn't wrapped; we need only 12381 * finish the job by clearing the wrapped bit, 12382 * explicitly setting the offset to be 0, and 12383 * zero'ing out the old data in the buffer. 
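/*
 * Illustrative sketch (not part of the build): a simplified, stand-alone
 * model of the eviction scan performed by the loop above.  Each record
 * begins with a 32-bit EPID; EPID 0 (DTRACE_EPIDNONE) marks a one-word
 * alignment pad, and rec_size[] is a hypothetical stand-in for the
 * per-ECB sizes that the real code finds via dts_ecbs[].  The offs == 0
 * edge condition described above is handled separately by the code that
 * follows.
 */
static size_t
example_evict_until_fits(const unsigned char *ring, size_t bufsize,
    const size_t *rec_size, size_t offs, size_t woffs, size_t total)
{
	while (offs + total > woffs) {
		uint32_t epid;
		size_t size;

		bcopy(ring + woffs, &epid, sizeof (epid));
		size = (epid == 0) ? sizeof (uint32_t) : rec_size[epid];

		if (woffs + size == bufsize) {
			woffs = 0;	/* oldest record is now at the top */
			break;
		}

		woffs += size;
	}

	return (woffs);
}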
12384 */ 12385 if (offs == 0) { 12386 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 12387 buf->dtb_offset = 0; 12388 woffs = total; 12389 12390 while (woffs < buf->dtb_size) 12391 tomax[woffs++] = 0; 12392 } 12393 12394 woffs = 0; 12395 break; 12396 } 12397 12398 woffs += size; 12399 } 12400 12401 /* 12402 * We have a wrapped offset. It may be that the wrapped offset 12403 * has become zero -- that's okay. 12404 */ 12405 buf->dtb_xamot_offset = woffs; 12406 } 12407 12408 out: 12409 /* 12410 * Now we can plow the buffer with any necessary padding. 12411 */ 12412 while (offs & (align - 1)) { 12413 /* 12414 * Assert that our alignment is off by a number which 12415 * is itself sizeof (uint32_t) aligned. 12416 */ 12417 ASSERT(!((align - (offs & (align - 1))) & 12418 (sizeof (uint32_t) - 1))); 12419 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12420 offs += sizeof (uint32_t); 12421 } 12422 12423 if (buf->dtb_flags & DTRACEBUF_FILL) { 12424 if (offs + needed > buf->dtb_size - state->dts_reserve) { 12425 buf->dtb_flags |= DTRACEBUF_FULL; 12426 return (-1); 12427 } 12428 } 12429 12430 if (mstate == NULL) 12431 return (offs); 12432 12433 /* 12434 * For ring buffers and fill buffers, the scratch space is always 12435 * the inactive buffer. 12436 */ 12437 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 12438 mstate->dtms_scratch_size = buf->dtb_size; 12439 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12440 12441 return (offs); 12442 } 12443 12444 static void 12445 dtrace_buffer_polish(dtrace_buffer_t *buf) 12446 { 12447 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 12448 ASSERT(MUTEX_HELD(&dtrace_lock)); 12449 12450 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 12451 return; 12452 12453 /* 12454 * We need to polish the ring buffer. There are three cases: 12455 * 12456 * - The first (and presumably most common) is that there is no gap 12457 * between the buffer offset and the wrapped offset. In this case, 12458 * there is nothing in the buffer that isn't valid data; we can 12459 * mark the buffer as polished and return. 12460 * 12461 * - The second (less common than the first but still more common 12462 * than the third) is that there is a gap between the buffer offset 12463 * and the wrapped offset, and the wrapped offset is larger than the 12464 * buffer offset. This can happen because of an alignment issue, or 12465 * can happen because of a call to dtrace_buffer_reserve() that 12466 * didn't subsequently consume the buffer space. In this case, 12467 * we need to zero the data from the buffer offset to the wrapped 12468 * offset. 12469 * 12470 * - The third (and least common) is that there is a gap between the 12471 * buffer offset and the wrapped offset, but the wrapped offset is 12472 * _less_ than the buffer offset. This can only happen because a 12473 * call to dtrace_buffer_reserve() induced a wrap, but the space 12474 * was not subsequently consumed. In this case, we need to zero the 12475 * space from the offset to the end of the buffer _and_ from the 12476 * top of the buffer to the wrapped offset. 
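/*
 * A concrete illustration of the cases above, using hypothetical numbers:
 * with dtb_size = 64, dtb_offset = 16 and dtb_xamot_offset = 40, only
 * bytes [16, 40) are zeroed (the second case); with dtb_offset = 40 and
 * dtb_xamot_offset = 16, bytes [40, 64) and then [0, 16) are zeroed (the
 * third case).  A minimal stand-alone model of that zeroing follows; the
 * kernel code below is the authoritative version.
 */
static void
example_polish(unsigned char *ring, size_t size, size_t offset, size_t woffs)
{
	if (offset < woffs)			/* second case */
		bzero(ring + offset, woffs - offset);

	if (offset > woffs) {			/* third case */
		bzero(ring + offset, size - offset);
		bzero(ring, woffs);
	}
}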
12477 */ 12478 if (buf->dtb_offset < buf->dtb_xamot_offset) { 12479 bzero(buf->dtb_tomax + buf->dtb_offset, 12480 buf->dtb_xamot_offset - buf->dtb_offset); 12481 } 12482 12483 if (buf->dtb_offset > buf->dtb_xamot_offset) { 12484 bzero(buf->dtb_tomax + buf->dtb_offset, 12485 buf->dtb_size - buf->dtb_offset); 12486 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 12487 } 12488 } 12489 12490 /* 12491 * This routine determines if data generated at the specified time has likely 12492 * been entirely consumed at user-level. This routine is called to determine 12493 * if an ECB on a defunct probe (but for an active enabling) can be safely 12494 * disabled and destroyed. 12495 */ 12496 static int 12497 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 12498 { 12499 int i; 12500 12501 for (i = 0; i < NCPU; i++) { 12502 dtrace_buffer_t *buf = &bufs[i]; 12503 12504 if (buf->dtb_size == 0) 12505 continue; 12506 12507 if (buf->dtb_flags & DTRACEBUF_RING) 12508 return (0); 12509 12510 if (!buf->dtb_switched && buf->dtb_offset != 0) 12511 return (0); 12512 12513 if (buf->dtb_switched - buf->dtb_interval < when) 12514 return (0); 12515 } 12516 12517 return (1); 12518 } 12519 12520 static void 12521 dtrace_buffer_free(dtrace_buffer_t *bufs) 12522 { 12523 int i; 12524 12525 for (i = 0; i < NCPU; i++) { 12526 dtrace_buffer_t *buf = &bufs[i]; 12527 12528 if (buf->dtb_tomax == NULL) { 12529 ASSERT(buf->dtb_xamot == NULL); 12530 ASSERT(buf->dtb_size == 0); 12531 continue; 12532 } 12533 12534 if (buf->dtb_xamot != NULL) { 12535 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12536 kmem_free(buf->dtb_xamot, buf->dtb_size); 12537 } 12538 12539 kmem_free(buf->dtb_tomax, buf->dtb_size); 12540 buf->dtb_size = 0; 12541 buf->dtb_tomax = NULL; 12542 buf->dtb_xamot = NULL; 12543 } 12544 } 12545 12546 /* 12547 * DTrace Enabling Functions 12548 */ 12549 static dtrace_enabling_t * 12550 dtrace_enabling_create(dtrace_vstate_t *vstate) 12551 { 12552 dtrace_enabling_t *enab; 12553 12554 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 12555 enab->dten_vstate = vstate; 12556 12557 return (enab); 12558 } 12559 12560 static void 12561 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 12562 { 12563 dtrace_ecbdesc_t **ndesc; 12564 size_t osize, nsize; 12565 12566 /* 12567 * We can't add to enablings after we've enabled them, or after we've 12568 * retained them. 
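/*
 * dtrace_enabling_add(), below, grows the dten_desc[] array by doubling
 * dten_maxdesc whenever it fills; doubling keeps the total copying cost
 * linear in the number of descriptors added.  A hedged sketch of the same
 * append-with-doubling pattern, using generic pointers and hypothetical
 * example_* names:
 */
static void
example_append(void ***arrayp, int *ndescp, int *maxdescp, void *elem)
{
	if (*ndescp == *maxdescp) {
		int nmax = (*maxdescp == 0) ? 1 : (*maxdescp << 1);
		size_t osize = *maxdescp * sizeof (void *);
		size_t nsize = nmax * sizeof (void *);
		void **narray = kmem_zalloc(nsize, KM_SLEEP);

		bcopy(*arrayp, narray, osize);
		if (*arrayp != NULL)
			kmem_free(*arrayp, osize);

		*arrayp = narray;
		*maxdescp = nmax;
	}

	(*arrayp)[(*ndescp)++] = elem;
}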
12569 */ 12570 ASSERT(enab->dten_probegen == 0); 12571 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12572 12573 if (enab->dten_ndesc < enab->dten_maxdesc) { 12574 enab->dten_desc[enab->dten_ndesc++] = ecb; 12575 return; 12576 } 12577 12578 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12579 12580 if (enab->dten_maxdesc == 0) { 12581 enab->dten_maxdesc = 1; 12582 } else { 12583 enab->dten_maxdesc <<= 1; 12584 } 12585 12586 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 12587 12588 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12589 ndesc = kmem_zalloc(nsize, KM_SLEEP); 12590 bcopy(enab->dten_desc, ndesc, osize); 12591 if (enab->dten_desc != NULL) 12592 kmem_free(enab->dten_desc, osize); 12593 12594 enab->dten_desc = ndesc; 12595 enab->dten_desc[enab->dten_ndesc++] = ecb; 12596 } 12597 12598 static void 12599 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 12600 dtrace_probedesc_t *pd) 12601 { 12602 dtrace_ecbdesc_t *new; 12603 dtrace_predicate_t *pred; 12604 dtrace_actdesc_t *act; 12605 12606 /* 12607 * We're going to create a new ECB description that matches the 12608 * specified ECB in every way, but has the specified probe description. 12609 */ 12610 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12611 12612 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 12613 dtrace_predicate_hold(pred); 12614 12615 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 12616 dtrace_actdesc_hold(act); 12617 12618 new->dted_action = ecb->dted_action; 12619 new->dted_pred = ecb->dted_pred; 12620 new->dted_probe = *pd; 12621 new->dted_uarg = ecb->dted_uarg; 12622 12623 dtrace_enabling_add(enab, new); 12624 } 12625 12626 static void 12627 dtrace_enabling_dump(dtrace_enabling_t *enab) 12628 { 12629 int i; 12630 12631 for (i = 0; i < enab->dten_ndesc; i++) { 12632 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 12633 12634 #ifdef __FreeBSD__ 12635 printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i, 12636 desc->dtpd_provider, desc->dtpd_mod, 12637 desc->dtpd_func, desc->dtpd_name); 12638 #else 12639 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 12640 desc->dtpd_provider, desc->dtpd_mod, 12641 desc->dtpd_func, desc->dtpd_name); 12642 #endif 12643 } 12644 } 12645 12646 static void 12647 dtrace_enabling_destroy(dtrace_enabling_t *enab) 12648 { 12649 int i; 12650 dtrace_ecbdesc_t *ep; 12651 dtrace_vstate_t *vstate = enab->dten_vstate; 12652 12653 ASSERT(MUTEX_HELD(&dtrace_lock)); 12654 12655 for (i = 0; i < enab->dten_ndesc; i++) { 12656 dtrace_actdesc_t *act, *next; 12657 dtrace_predicate_t *pred; 12658 12659 ep = enab->dten_desc[i]; 12660 12661 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 12662 dtrace_predicate_release(pred, vstate); 12663 12664 for (act = ep->dted_action; act != NULL; act = next) { 12665 next = act->dtad_next; 12666 dtrace_actdesc_release(act, vstate); 12667 } 12668 12669 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12670 } 12671 12672 if (enab->dten_desc != NULL) 12673 kmem_free(enab->dten_desc, 12674 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 12675 12676 /* 12677 * If this was a retained enabling, decrement the dts_nretained count 12678 * and take it off of the dtrace_retained list. 
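/*
 * The removal described above is an unlink from a NULL-terminated, doubly
 * linked list whose head is dtrace_retained.  A minimal sketch using a
 * hypothetical node type; the real code below operates on dten_prev and
 * dten_next, and must also tolerate an enabling that was never retained.
 */
typedef struct example_node {
	struct example_node *prev, *next;
} example_node_t;

static void
example_unlink(example_node_t **headp, example_node_t *node)
{
	if (node->prev == NULL) {
		if (*headp == node)
			*headp = node->next;
	} else {
		node->prev->next = node->next;
	}

	if (node->next != NULL)
		node->next->prev = node->prev;
}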
12679 */ 12680 if (enab->dten_prev != NULL || enab->dten_next != NULL || 12681 dtrace_retained == enab) { 12682 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12683 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 12684 enab->dten_vstate->dtvs_state->dts_nretained--; 12685 dtrace_retained_gen++; 12686 } 12687 12688 if (enab->dten_prev == NULL) { 12689 if (dtrace_retained == enab) { 12690 dtrace_retained = enab->dten_next; 12691 12692 if (dtrace_retained != NULL) 12693 dtrace_retained->dten_prev = NULL; 12694 } 12695 } else { 12696 ASSERT(enab != dtrace_retained); 12697 ASSERT(dtrace_retained != NULL); 12698 enab->dten_prev->dten_next = enab->dten_next; 12699 } 12700 12701 if (enab->dten_next != NULL) { 12702 ASSERT(dtrace_retained != NULL); 12703 enab->dten_next->dten_prev = enab->dten_prev; 12704 } 12705 12706 kmem_free(enab, sizeof (dtrace_enabling_t)); 12707 } 12708 12709 static int 12710 dtrace_enabling_retain(dtrace_enabling_t *enab) 12711 { 12712 dtrace_state_t *state; 12713 12714 ASSERT(MUTEX_HELD(&dtrace_lock)); 12715 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12716 ASSERT(enab->dten_vstate != NULL); 12717 12718 state = enab->dten_vstate->dtvs_state; 12719 ASSERT(state != NULL); 12720 12721 /* 12722 * We only allow each state to retain dtrace_retain_max enablings. 12723 */ 12724 if (state->dts_nretained >= dtrace_retain_max) 12725 return (ENOSPC); 12726 12727 state->dts_nretained++; 12728 dtrace_retained_gen++; 12729 12730 if (dtrace_retained == NULL) { 12731 dtrace_retained = enab; 12732 return (0); 12733 } 12734 12735 enab->dten_next = dtrace_retained; 12736 dtrace_retained->dten_prev = enab; 12737 dtrace_retained = enab; 12738 12739 return (0); 12740 } 12741 12742 static int 12743 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 12744 dtrace_probedesc_t *create) 12745 { 12746 dtrace_enabling_t *new, *enab; 12747 int found = 0, err = ENOENT; 12748 12749 ASSERT(MUTEX_HELD(&dtrace_lock)); 12750 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 12751 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 12752 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 12753 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 12754 12755 new = dtrace_enabling_create(&state->dts_vstate); 12756 12757 /* 12758 * Iterate over all retained enablings, looking for enablings that 12759 * match the specified state. 12760 */ 12761 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12762 int i; 12763 12764 /* 12765 * dtvs_state can only be NULL for helper enablings -- and 12766 * helper enablings can't be retained. 12767 */ 12768 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12769 12770 if (enab->dten_vstate->dtvs_state != state) 12771 continue; 12772 12773 /* 12774 * Now iterate over each probe description; we're looking for 12775 * an exact match to the specified probe description. 12776 */ 12777 for (i = 0; i < enab->dten_ndesc; i++) { 12778 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12779 dtrace_probedesc_t *pd = &ep->dted_probe; 12780 12781 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 12782 continue; 12783 12784 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 12785 continue; 12786 12787 if (strcmp(pd->dtpd_func, match->dtpd_func)) 12788 continue; 12789 12790 if (strcmp(pd->dtpd_name, match->dtpd_name)) 12791 continue; 12792 12793 /* 12794 * We have a winning probe! Add it to our growing 12795 * enabling. 
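/*
 * The exact-match test applied above, condensed into a hypothetical
 * helper purely for clarity: all four components of the probe
 * description must compare equal.
 */
static int
example_probedesc_match(const dtrace_probedesc_t *pd,
    const dtrace_probedesc_t *match)
{
	return (strcmp(pd->dtpd_provider, match->dtpd_provider) == 0 &&
	    strcmp(pd->dtpd_mod, match->dtpd_mod) == 0 &&
	    strcmp(pd->dtpd_func, match->dtpd_func) == 0 &&
	    strcmp(pd->dtpd_name, match->dtpd_name) == 0);
}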
12796 */ 12797 found = 1; 12798 dtrace_enabling_addlike(new, ep, create); 12799 } 12800 } 12801 12802 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 12803 dtrace_enabling_destroy(new); 12804 return (err); 12805 } 12806 12807 return (0); 12808 } 12809 12810 static void 12811 dtrace_enabling_retract(dtrace_state_t *state) 12812 { 12813 dtrace_enabling_t *enab, *next; 12814 12815 ASSERT(MUTEX_HELD(&dtrace_lock)); 12816 12817 /* 12818 * Iterate over all retained enablings, destroy the enablings retained 12819 * for the specified state. 12820 */ 12821 for (enab = dtrace_retained; enab != NULL; enab = next) { 12822 next = enab->dten_next; 12823 12824 /* 12825 * dtvs_state can only be NULL for helper enablings -- and 12826 * helper enablings can't be retained. 12827 */ 12828 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12829 12830 if (enab->dten_vstate->dtvs_state == state) { 12831 ASSERT(state->dts_nretained > 0); 12832 dtrace_enabling_destroy(enab); 12833 } 12834 } 12835 12836 ASSERT(state->dts_nretained == 0); 12837 } 12838 12839 static int 12840 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 12841 { 12842 int i = 0; 12843 int matched = 0; 12844 12845 ASSERT(MUTEX_HELD(&cpu_lock)); 12846 ASSERT(MUTEX_HELD(&dtrace_lock)); 12847 12848 for (i = 0; i < enab->dten_ndesc; i++) { 12849 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12850 12851 enab->dten_current = ep; 12852 enab->dten_error = 0; 12853 12854 matched += dtrace_probe_enable(&ep->dted_probe, enab); 12855 12856 if (enab->dten_error != 0) { 12857 /* 12858 * If we get an error half-way through enabling the 12859 * probes, we kick out -- perhaps with some number of 12860 * them enabled. Leaving enabled probes enabled may 12861 * be slightly confusing for user-level, but we expect 12862 * that no one will attempt to actually drive on in 12863 * the face of such errors. If this is an anonymous 12864 * enabling (indicated with a NULL nmatched pointer), 12865 * we cmn_err() a message. We aren't expecting to 12866 * get such an error -- such as it can exist at all, 12867 * it would be a result of corrupted DOF in the driver 12868 * properties. 12869 */ 12870 if (nmatched == NULL) { 12871 cmn_err(CE_WARN, "dtrace_enabling_match() " 12872 "error on %p: %d", (void *)ep, 12873 enab->dten_error); 12874 } 12875 12876 return (enab->dten_error); 12877 } 12878 } 12879 12880 enab->dten_probegen = dtrace_probegen; 12881 if (nmatched != NULL) 12882 *nmatched = matched; 12883 12884 return (0); 12885 } 12886 12887 static void 12888 dtrace_enabling_matchall(void) 12889 { 12890 dtrace_enabling_t *enab; 12891 12892 mutex_enter(&cpu_lock); 12893 mutex_enter(&dtrace_lock); 12894 12895 /* 12896 * Iterate over all retained enablings to see if any probes match 12897 * against them. We only perform this operation on enablings for which 12898 * we have sufficient permissions by virtue of being in the global zone 12899 * or in the same zone as the DTrace client. Because we can be called 12900 * after dtrace_detach() has been called, we cannot assert that there 12901 * are retained enablings. We can safely load from dtrace_retained, 12902 * however: the taskq_destroy() at the end of dtrace_detach() will 12903 * block pending our completion. 
12904 */ 12905 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12906 #ifdef illumos 12907 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 12908 12909 if (INGLOBALZONE(curproc) || 12910 cr != NULL && getzoneid() == crgetzoneid(cr)) 12911 #endif 12912 (void) dtrace_enabling_match(enab, NULL); 12913 } 12914 12915 mutex_exit(&dtrace_lock); 12916 mutex_exit(&cpu_lock); 12917 } 12918 12919 /* 12920 * If an enabling is to be enabled without having matched probes (that is, if 12921 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 12922 * enabling must be _primed_ by creating an ECB for every ECB description. 12923 * This must be done to assure that we know the number of speculations, the 12924 * number of aggregations, the minimum buffer size needed, etc. before we 12925 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 12926 * enabling any probes, we create ECBs for every ECB decription, but with a 12927 * NULL probe -- which is exactly what this function does. 12928 */ 12929 static void 12930 dtrace_enabling_prime(dtrace_state_t *state) 12931 { 12932 dtrace_enabling_t *enab; 12933 int i; 12934 12935 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12936 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12937 12938 if (enab->dten_vstate->dtvs_state != state) 12939 continue; 12940 12941 /* 12942 * We don't want to prime an enabling more than once, lest 12943 * we allow a malicious user to induce resource exhaustion. 12944 * (The ECBs that result from priming an enabling aren't 12945 * leaked -- but they also aren't deallocated until the 12946 * consumer state is destroyed.) 12947 */ 12948 if (enab->dten_primed) 12949 continue; 12950 12951 for (i = 0; i < enab->dten_ndesc; i++) { 12952 enab->dten_current = enab->dten_desc[i]; 12953 (void) dtrace_probe_enable(NULL, enab); 12954 } 12955 12956 enab->dten_primed = 1; 12957 } 12958 } 12959 12960 /* 12961 * Called to indicate that probes should be provided due to retained 12962 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 12963 * must take an initial lap through the enabling calling the dtps_provide() 12964 * entry point explicitly to allow for autocreated probes. 12965 */ 12966 static void 12967 dtrace_enabling_provide(dtrace_provider_t *prv) 12968 { 12969 int i, all = 0; 12970 dtrace_probedesc_t desc; 12971 dtrace_genid_t gen; 12972 12973 ASSERT(MUTEX_HELD(&dtrace_lock)); 12974 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 12975 12976 if (prv == NULL) { 12977 all = 1; 12978 prv = dtrace_provider; 12979 } 12980 12981 do { 12982 dtrace_enabling_t *enab; 12983 void *parg = prv->dtpv_arg; 12984 12985 retry: 12986 gen = dtrace_retained_gen; 12987 for (enab = dtrace_retained; enab != NULL; 12988 enab = enab->dten_next) { 12989 for (i = 0; i < enab->dten_ndesc; i++) { 12990 desc = enab->dten_desc[i]->dted_probe; 12991 mutex_exit(&dtrace_lock); 12992 prv->dtpv_pops.dtps_provide(parg, &desc); 12993 mutex_enter(&dtrace_lock); 12994 /* 12995 * Process the retained enablings again if 12996 * they have changed while we weren't holding 12997 * dtrace_lock. 12998 */ 12999 if (gen != dtrace_retained_gen) 13000 goto retry; 13001 } 13002 } 13003 } while (all && (prv = prv->dtpv_next) != NULL); 13004 13005 mutex_exit(&dtrace_lock); 13006 dtrace_probe_provide(NULL, all ? NULL : prv); 13007 mutex_enter(&dtrace_lock); 13008 } 13009 13010 /* 13011 * Called to reap ECBs that are attached to probes from defunct providers. 
13012 */ 13013 static void 13014 dtrace_enabling_reap(void) 13015 { 13016 dtrace_provider_t *prov; 13017 dtrace_probe_t *probe; 13018 dtrace_ecb_t *ecb; 13019 hrtime_t when; 13020 int i; 13021 13022 mutex_enter(&cpu_lock); 13023 mutex_enter(&dtrace_lock); 13024 13025 for (i = 0; i < dtrace_nprobes; i++) { 13026 if ((probe = dtrace_probes[i]) == NULL) 13027 continue; 13028 13029 if (probe->dtpr_ecb == NULL) 13030 continue; 13031 13032 prov = probe->dtpr_provider; 13033 13034 if ((when = prov->dtpv_defunct) == 0) 13035 continue; 13036 13037 /* 13038 * We have ECBs on a defunct provider: we want to reap these 13039 * ECBs to allow the provider to unregister. The destruction 13040 * of these ECBs must be done carefully: if we destroy the ECB 13041 * and the consumer later wishes to consume an EPID that 13042 * corresponds to the destroyed ECB (and if the EPID metadata 13043 * has not been previously consumed), the consumer will abort 13044 * processing on the unknown EPID. To reduce (but not, sadly, 13045 * eliminate) the possibility of this, we will only destroy an 13046 * ECB for a defunct provider if, for the state that 13047 * corresponds to the ECB: 13048 * 13049 * (a) There is no speculative tracing (which can effectively 13050 * cache an EPID for an arbitrary amount of time). 13051 * 13052 * (b) The principal buffers have been switched twice since the 13053 * provider became defunct. 13054 * 13055 * (c) The aggregation buffers are of zero size or have been 13056 * switched twice since the provider became defunct. 13057 * 13058 * We use dts_speculates to determine (a) and call a function 13059 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 13060 * that as soon as we've been unable to destroy one of the ECBs 13061 * associated with the probe, we quit trying -- reaping is only 13062 * fruitful in as much as we can destroy all ECBs associated 13063 * with the defunct provider's probes. 13064 */ 13065 while ((ecb = probe->dtpr_ecb) != NULL) { 13066 dtrace_state_t *state = ecb->dte_state; 13067 dtrace_buffer_t *buf = state->dts_buffer; 13068 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 13069 13070 if (state->dts_speculates) 13071 break; 13072 13073 if (!dtrace_buffer_consumed(buf, when)) 13074 break; 13075 13076 if (!dtrace_buffer_consumed(aggbuf, when)) 13077 break; 13078 13079 dtrace_ecb_disable(ecb); 13080 ASSERT(probe->dtpr_ecb != ecb); 13081 dtrace_ecb_destroy(ecb); 13082 } 13083 } 13084 13085 mutex_exit(&dtrace_lock); 13086 mutex_exit(&cpu_lock); 13087 } 13088 13089 /* 13090 * DTrace DOF Functions 13091 */ 13092 /*ARGSUSED*/ 13093 static void 13094 dtrace_dof_error(dof_hdr_t *dof, const char *str) 13095 { 13096 if (dtrace_err_verbose) 13097 cmn_err(CE_WARN, "failed to process DOF: %s", str); 13098 13099 #ifdef DTRACE_ERRDEBUG 13100 dtrace_errdebug(str); 13101 #endif 13102 } 13103 13104 /* 13105 * Create DOF out of a currently enabled state. Right now, we only create 13106 * DOF containing the run-time options -- but this could be expanded to create 13107 * complete DOF representing the enabled state. 
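/*
 * A hedged sketch of the consumer's side of the DOF produced by
 * dtrace_dof_create() below: locating the DOF_SECT_OPTDESC section and
 * its option descriptions.  It assumes the DOF has already been
 * validated; the example_* name is hypothetical.
 */
static const dof_optdesc_t *
example_find_options(const dof_hdr_t *dof, uint32_t *nopts)
{
	uintptr_t daddr = (uintptr_t)dof;
	uint32_t i;

	for (i = 0; i < dof->dofh_secnum; i++) {
		const dof_sec_t *sec = (const dof_sec_t *)(daddr +
		    dof->dofh_secoff + i * dof->dofh_secsize);

		if (sec->dofs_type != DOF_SECT_OPTDESC)
			continue;

		*nopts = sec->dofs_size / sec->dofs_entsize;
		return ((const dof_optdesc_t *)(daddr + sec->dofs_offset));
	}

	*nopts = 0;
	return (NULL);
}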
13108 */ 13109 static dof_hdr_t * 13110 dtrace_dof_create(dtrace_state_t *state) 13111 { 13112 dof_hdr_t *dof; 13113 dof_sec_t *sec; 13114 dof_optdesc_t *opt; 13115 int i, len = sizeof (dof_hdr_t) + 13116 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 13117 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13118 13119 ASSERT(MUTEX_HELD(&dtrace_lock)); 13120 13121 dof = kmem_zalloc(len, KM_SLEEP); 13122 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 13123 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 13124 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 13125 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 13126 13127 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 13128 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 13129 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 13130 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 13131 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 13132 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 13133 13134 dof->dofh_flags = 0; 13135 dof->dofh_hdrsize = sizeof (dof_hdr_t); 13136 dof->dofh_secsize = sizeof (dof_sec_t); 13137 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 13138 dof->dofh_secoff = sizeof (dof_hdr_t); 13139 dof->dofh_loadsz = len; 13140 dof->dofh_filesz = len; 13141 dof->dofh_pad = 0; 13142 13143 /* 13144 * Fill in the option section header... 13145 */ 13146 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 13147 sec->dofs_type = DOF_SECT_OPTDESC; 13148 sec->dofs_align = sizeof (uint64_t); 13149 sec->dofs_flags = DOF_SECF_LOAD; 13150 sec->dofs_entsize = sizeof (dof_optdesc_t); 13151 13152 opt = (dof_optdesc_t *)((uintptr_t)sec + 13153 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 13154 13155 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 13156 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13157 13158 for (i = 0; i < DTRACEOPT_MAX; i++) { 13159 opt[i].dofo_option = i; 13160 opt[i].dofo_strtab = DOF_SECIDX_NONE; 13161 opt[i].dofo_value = state->dts_options[i]; 13162 } 13163 13164 return (dof); 13165 } 13166 13167 static dof_hdr_t * 13168 dtrace_dof_copyin(uintptr_t uarg, int *errp) 13169 { 13170 dof_hdr_t hdr, *dof; 13171 13172 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13173 13174 /* 13175 * First, we're going to copyin() the sizeof (dof_hdr_t). 13176 */ 13177 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 13178 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13179 *errp = EFAULT; 13180 return (NULL); 13181 } 13182 13183 /* 13184 * Now we'll allocate the entire DOF and copy it in -- provided 13185 * that the length isn't outrageous. 13186 */ 13187 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13188 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13189 *errp = E2BIG; 13190 return (NULL); 13191 } 13192 13193 if (hdr.dofh_loadsz < sizeof (hdr)) { 13194 dtrace_dof_error(&hdr, "invalid load size"); 13195 *errp = EINVAL; 13196 return (NULL); 13197 } 13198 13199 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 13200 13201 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 13202 dof->dofh_loadsz != hdr.dofh_loadsz) { 13203 kmem_free(dof, hdr.dofh_loadsz); 13204 *errp = EFAULT; 13205 return (NULL); 13206 } 13207 13208 return (dof); 13209 } 13210 13211 #ifdef __FreeBSD__ 13212 static dof_hdr_t * 13213 dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp) 13214 { 13215 dof_hdr_t hdr, *dof; 13216 struct thread *td; 13217 size_t loadsz; 13218 13219 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13220 13221 td = curthread; 13222 13223 /* 13224 * First, we're going to copyin() the sizeof (dof_hdr_t). 
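/*
 * Both dtrace_dof_copyin() above and this function follow the same
 * two-step pattern: fetch only the header, sanity-check the advertised
 * load size, then fetch (and re-check) the entire object.  A hedged
 * sketch of that pattern with a hypothetical transfer callback standing
 * in for copyin()/proc_readmem():
 */
static dof_hdr_t *
example_fetch_dof(int (*xfer)(uintptr_t, void *, size_t), uintptr_t src,
    size_t maxsize, int *errp)
{
	dof_hdr_t hdr, *dof;

	if (xfer(src, &hdr, sizeof (hdr)) != 0) {
		*errp = EFAULT;
		return (NULL);
	}

	if (hdr.dofh_loadsz >= maxsize) {
		*errp = E2BIG;
		return (NULL);
	}

	if (hdr.dofh_loadsz < sizeof (hdr)) {
		*errp = EINVAL;
		return (NULL);
	}

	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);

	if (xfer(src, dof, hdr.dofh_loadsz) != 0 ||
	    dof->dofh_loadsz != hdr.dofh_loadsz) {
		kmem_free(dof, hdr.dofh_loadsz);
		*errp = EFAULT;
		return (NULL);
	}

	return (dof);
}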
13225 */ 13226 if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) { 13227 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13228 *errp = EFAULT; 13229 return (NULL); 13230 } 13231 13232 /* 13233 * Now we'll allocate the entire DOF and copy it in -- provided 13234 * that the length isn't outrageous. 13235 */ 13236 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13237 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13238 *errp = E2BIG; 13239 return (NULL); 13240 } 13241 loadsz = (size_t)hdr.dofh_loadsz; 13242 13243 if (loadsz < sizeof (hdr)) { 13244 dtrace_dof_error(&hdr, "invalid load size"); 13245 *errp = EINVAL; 13246 return (NULL); 13247 } 13248 13249 dof = kmem_alloc(loadsz, KM_SLEEP); 13250 13251 if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz || 13252 dof->dofh_loadsz != loadsz) { 13253 kmem_free(dof, hdr.dofh_loadsz); 13254 *errp = EFAULT; 13255 return (NULL); 13256 } 13257 13258 return (dof); 13259 } 13260 13261 static __inline uchar_t 13262 dtrace_dof_char(char c) 13263 { 13264 13265 switch (c) { 13266 case '0': 13267 case '1': 13268 case '2': 13269 case '3': 13270 case '4': 13271 case '5': 13272 case '6': 13273 case '7': 13274 case '8': 13275 case '9': 13276 return (c - '0'); 13277 case 'A': 13278 case 'B': 13279 case 'C': 13280 case 'D': 13281 case 'E': 13282 case 'F': 13283 return (c - 'A' + 10); 13284 case 'a': 13285 case 'b': 13286 case 'c': 13287 case 'd': 13288 case 'e': 13289 case 'f': 13290 return (c - 'a' + 10); 13291 } 13292 /* Should not reach here. */ 13293 return (UCHAR_MAX); 13294 } 13295 #endif /* __FreeBSD__ */ 13296 13297 static dof_hdr_t * 13298 dtrace_dof_property(const char *name) 13299 { 13300 #ifdef __FreeBSD__ 13301 uint8_t *dofbuf; 13302 u_char *data, *eol; 13303 caddr_t doffile; 13304 size_t bytes, len, i; 13305 dof_hdr_t *dof; 13306 u_char c1, c2; 13307 13308 dof = NULL; 13309 13310 doffile = preload_search_by_type("dtrace_dof"); 13311 if (doffile == NULL) 13312 return (NULL); 13313 13314 data = preload_fetch_addr(doffile); 13315 len = preload_fetch_size(doffile); 13316 for (;;) { 13317 /* Look for the end of the line. All lines end in a newline. */ 13318 eol = memchr(data, '\n', len); 13319 if (eol == NULL) 13320 return (NULL); 13321 13322 if (strncmp(name, data, strlen(name)) == 0) 13323 break; 13324 13325 eol++; /* skip past the newline */ 13326 len -= eol - data; 13327 data = eol; 13328 } 13329 13330 /* We've found the data corresponding to the specified key. */ 13331 13332 data += strlen(name) + 1; /* skip past the '=' */ 13333 len = eol - data; 13334 bytes = len / 2; 13335 13336 if (bytes < sizeof(dof_hdr_t)) { 13337 dtrace_dof_error(NULL, "truncated header"); 13338 goto doferr; 13339 } 13340 13341 /* 13342 * Each byte is represented by the two ASCII characters in its hex 13343 * representation. 
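/*
 * The other direction of the encoding described above: a hypothetical
 * user-space tool emits one "name=<hex>\n" line per DOF object, two hex
 * digits per byte, for the loader to place in the preloaded "dtrace_dof"
 * data that this function parses.  Sketch only (assumes <stdio.h>; this
 * is not kernel code):
 */
static void
example_emit_dof_line(FILE *fp, const char *name,
    const unsigned char *dof, size_t len)
{
	size_t i;

	fprintf(fp, "%s=", name);
	for (i = 0; i < len; i++)
		fprintf(fp, "%02x", dof[i]);
	fputc('\n', fp);
}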
13344 */ 13345 dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK); 13346 for (i = 0; i < bytes; i++) { 13347 c1 = dtrace_dof_char(data[i * 2]); 13348 c2 = dtrace_dof_char(data[i * 2 + 1]); 13349 if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) { 13350 dtrace_dof_error(NULL, "invalid hex char in DOF"); 13351 goto doferr; 13352 } 13353 dofbuf[i] = c1 * 16 + c2; 13354 } 13355 13356 dof = (dof_hdr_t *)dofbuf; 13357 if (bytes < dof->dofh_loadsz) { 13358 dtrace_dof_error(NULL, "truncated DOF"); 13359 goto doferr; 13360 } 13361 13362 if (dof->dofh_loadsz >= dtrace_dof_maxsize) { 13363 dtrace_dof_error(NULL, "oversized DOF"); 13364 goto doferr; 13365 } 13366 13367 return (dof); 13368 13369 doferr: 13370 free(dof, M_SOLARIS); 13371 return (NULL); 13372 #else /* __FreeBSD__ */ 13373 uchar_t *buf; 13374 uint64_t loadsz; 13375 unsigned int len, i; 13376 dof_hdr_t *dof; 13377 13378 /* 13379 * Unfortunately, array of values in .conf files are always (and 13380 * only) interpreted to be integer arrays. We must read our DOF 13381 * as an integer array, and then squeeze it into a byte array. 13382 */ 13383 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 13384 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 13385 return (NULL); 13386 13387 for (i = 0; i < len; i++) 13388 buf[i] = (uchar_t)(((int *)buf)[i]); 13389 13390 if (len < sizeof (dof_hdr_t)) { 13391 ddi_prop_free(buf); 13392 dtrace_dof_error(NULL, "truncated header"); 13393 return (NULL); 13394 } 13395 13396 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 13397 ddi_prop_free(buf); 13398 dtrace_dof_error(NULL, "truncated DOF"); 13399 return (NULL); 13400 } 13401 13402 if (loadsz >= dtrace_dof_maxsize) { 13403 ddi_prop_free(buf); 13404 dtrace_dof_error(NULL, "oversized DOF"); 13405 return (NULL); 13406 } 13407 13408 dof = kmem_alloc(loadsz, KM_SLEEP); 13409 bcopy(buf, dof, loadsz); 13410 ddi_prop_free(buf); 13411 13412 return (dof); 13413 #endif /* !__FreeBSD__ */ 13414 } 13415 13416 static void 13417 dtrace_dof_destroy(dof_hdr_t *dof) 13418 { 13419 kmem_free(dof, dof->dofh_loadsz); 13420 } 13421 13422 /* 13423 * Return the dof_sec_t pointer corresponding to a given section index. If the 13424 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 13425 * a type other than DOF_SECT_NONE is specified, the header is checked against 13426 * this type and NULL is returned if the types do not match. 
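/*
 * Callers below resolve string references in two steps: dtrace_dof_sect()
 * maps a string-table section index to its dof_sec_t, and each offset is
 * then bounds-checked against dofs_size before the string is used.  A
 * hedged sketch of that second step (the example_* name is hypothetical):
 */
static const char *
example_dof_string(const dof_hdr_t *dof, const dof_sec_t *strtab,
    uint64_t offset)
{
	if (strtab == NULL || offset >= strtab->dofs_size)
		return (NULL);		/* missing table or corrupt reference */

	return ((const char *)((uintptr_t)dof + strtab->dofs_offset + offset));
}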
13427 */ 13428 static dof_sec_t * 13429 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 13430 { 13431 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 13432 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 13433 13434 if (i >= dof->dofh_secnum) { 13435 dtrace_dof_error(dof, "referenced section index is invalid"); 13436 return (NULL); 13437 } 13438 13439 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 13440 dtrace_dof_error(dof, "referenced section is not loadable"); 13441 return (NULL); 13442 } 13443 13444 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 13445 dtrace_dof_error(dof, "referenced section is the wrong type"); 13446 return (NULL); 13447 } 13448 13449 return (sec); 13450 } 13451 13452 static dtrace_probedesc_t * 13453 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 13454 { 13455 dof_probedesc_t *probe; 13456 dof_sec_t *strtab; 13457 uintptr_t daddr = (uintptr_t)dof; 13458 uintptr_t str; 13459 size_t size; 13460 13461 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 13462 dtrace_dof_error(dof, "invalid probe section"); 13463 return (NULL); 13464 } 13465 13466 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13467 dtrace_dof_error(dof, "bad alignment in probe description"); 13468 return (NULL); 13469 } 13470 13471 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 13472 dtrace_dof_error(dof, "truncated probe description"); 13473 return (NULL); 13474 } 13475 13476 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 13477 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 13478 13479 if (strtab == NULL) 13480 return (NULL); 13481 13482 str = daddr + strtab->dofs_offset; 13483 size = strtab->dofs_size; 13484 13485 if (probe->dofp_provider >= strtab->dofs_size) { 13486 dtrace_dof_error(dof, "corrupt probe provider"); 13487 return (NULL); 13488 } 13489 13490 (void) strncpy(desc->dtpd_provider, 13491 (char *)(str + probe->dofp_provider), 13492 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 13493 13494 if (probe->dofp_mod >= strtab->dofs_size) { 13495 dtrace_dof_error(dof, "corrupt probe module"); 13496 return (NULL); 13497 } 13498 13499 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 13500 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 13501 13502 if (probe->dofp_func >= strtab->dofs_size) { 13503 dtrace_dof_error(dof, "corrupt probe function"); 13504 return (NULL); 13505 } 13506 13507 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 13508 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 13509 13510 if (probe->dofp_name >= strtab->dofs_size) { 13511 dtrace_dof_error(dof, "corrupt probe name"); 13512 return (NULL); 13513 } 13514 13515 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 13516 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 13517 13518 return (desc); 13519 } 13520 13521 static dtrace_difo_t * 13522 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13523 cred_t *cr) 13524 { 13525 dtrace_difo_t *dp; 13526 size_t ttl = 0; 13527 dof_difohdr_t *dofd; 13528 uintptr_t daddr = (uintptr_t)dof; 13529 size_t max = dtrace_difo_maxsize; 13530 int i, l, n; 13531 13532 static const struct { 13533 int section; 13534 int bufoffs; 13535 int lenoffs; 13536 int entsize; 13537 int align; 13538 const char *msg; 13539 } difo[] = { 13540 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 13541 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 13542 sizeof (dif_instr_t), "multiple DIF sections" }, 
13543 13544 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 13545 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 13546 sizeof (uint64_t), "multiple integer tables" }, 13547 13548 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 13549 offsetof(dtrace_difo_t, dtdo_strlen), 0, 13550 sizeof (char), "multiple string tables" }, 13551 13552 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 13553 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 13554 sizeof (uint_t), "multiple variable tables" }, 13555 13556 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 13557 }; 13558 13559 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 13560 dtrace_dof_error(dof, "invalid DIFO header section"); 13561 return (NULL); 13562 } 13563 13564 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13565 dtrace_dof_error(dof, "bad alignment in DIFO header"); 13566 return (NULL); 13567 } 13568 13569 if (sec->dofs_size < sizeof (dof_difohdr_t) || 13570 sec->dofs_size % sizeof (dof_secidx_t)) { 13571 dtrace_dof_error(dof, "bad size in DIFO header"); 13572 return (NULL); 13573 } 13574 13575 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13576 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 13577 13578 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 13579 dp->dtdo_rtype = dofd->dofd_rtype; 13580 13581 for (l = 0; l < n; l++) { 13582 dof_sec_t *subsec; 13583 void **bufp; 13584 uint32_t *lenp; 13585 13586 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 13587 dofd->dofd_links[l])) == NULL) 13588 goto err; /* invalid section link */ 13589 13590 if (ttl + subsec->dofs_size > max) { 13591 dtrace_dof_error(dof, "exceeds maximum size"); 13592 goto err; 13593 } 13594 13595 ttl += subsec->dofs_size; 13596 13597 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 13598 if (subsec->dofs_type != difo[i].section) 13599 continue; 13600 13601 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 13602 dtrace_dof_error(dof, "section not loaded"); 13603 goto err; 13604 } 13605 13606 if (subsec->dofs_align != difo[i].align) { 13607 dtrace_dof_error(dof, "bad alignment"); 13608 goto err; 13609 } 13610 13611 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 13612 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 13613 13614 if (*bufp != NULL) { 13615 dtrace_dof_error(dof, difo[i].msg); 13616 goto err; 13617 } 13618 13619 if (difo[i].entsize != subsec->dofs_entsize) { 13620 dtrace_dof_error(dof, "entry size mismatch"); 13621 goto err; 13622 } 13623 13624 if (subsec->dofs_entsize != 0 && 13625 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 13626 dtrace_dof_error(dof, "corrupt entry size"); 13627 goto err; 13628 } 13629 13630 *lenp = subsec->dofs_size; 13631 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 13632 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 13633 *bufp, subsec->dofs_size); 13634 13635 if (subsec->dofs_entsize != 0) 13636 *lenp /= subsec->dofs_entsize; 13637 13638 break; 13639 } 13640 13641 /* 13642 * If we encounter a loadable DIFO sub-section that is not 13643 * known to us, assume this is a broken program and fail. 13644 */ 13645 if (difo[i].section == DOF_SECT_NONE && 13646 (subsec->dofs_flags & DOF_SECF_LOAD)) { 13647 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 13648 goto err; 13649 } 13650 } 13651 13652 if (dp->dtdo_buf == NULL) { 13653 /* 13654 * We can't have a DIF object without DIF text. 
13655 */ 13656 dtrace_dof_error(dof, "missing DIF text"); 13657 goto err; 13658 } 13659 13660 /* 13661 * Before we validate the DIF object, run through the variable table 13662 * looking for the strings -- if any of their size are under, we'll set 13663 * their size to be the system-wide default string size. Note that 13664 * this should _not_ happen if the "strsize" option has been set -- 13665 * in this case, the compiler should have set the size to reflect the 13666 * setting of the option. 13667 */ 13668 for (i = 0; i < dp->dtdo_varlen; i++) { 13669 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 13670 dtrace_diftype_t *t = &v->dtdv_type; 13671 13672 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 13673 continue; 13674 13675 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 13676 t->dtdt_size = dtrace_strsize_default; 13677 } 13678 13679 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 13680 goto err; 13681 13682 dtrace_difo_init(dp, vstate); 13683 return (dp); 13684 13685 err: 13686 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 13687 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 13688 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 13689 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 13690 13691 kmem_free(dp, sizeof (dtrace_difo_t)); 13692 return (NULL); 13693 } 13694 13695 static dtrace_predicate_t * 13696 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13697 cred_t *cr) 13698 { 13699 dtrace_difo_t *dp; 13700 13701 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 13702 return (NULL); 13703 13704 return (dtrace_predicate_create(dp)); 13705 } 13706 13707 static dtrace_actdesc_t * 13708 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13709 cred_t *cr) 13710 { 13711 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 13712 dof_actdesc_t *desc; 13713 dof_sec_t *difosec; 13714 size_t offs; 13715 uintptr_t daddr = (uintptr_t)dof; 13716 uint64_t arg; 13717 dtrace_actkind_t kind; 13718 13719 if (sec->dofs_type != DOF_SECT_ACTDESC) { 13720 dtrace_dof_error(dof, "invalid action section"); 13721 return (NULL); 13722 } 13723 13724 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 13725 dtrace_dof_error(dof, "truncated action description"); 13726 return (NULL); 13727 } 13728 13729 if (sec->dofs_align != sizeof (uint64_t)) { 13730 dtrace_dof_error(dof, "bad alignment in action description"); 13731 return (NULL); 13732 } 13733 13734 if (sec->dofs_size < sec->dofs_entsize) { 13735 dtrace_dof_error(dof, "section entry size exceeds total size"); 13736 return (NULL); 13737 } 13738 13739 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 13740 dtrace_dof_error(dof, "bad entry size in action description"); 13741 return (NULL); 13742 } 13743 13744 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 13745 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 13746 return (NULL); 13747 } 13748 13749 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 13750 desc = (dof_actdesc_t *)(daddr + 13751 (uintptr_t)sec->dofs_offset + offs); 13752 kind = (dtrace_actkind_t)desc->dofa_kind; 13753 13754 if ((DTRACEACT_ISPRINTFLIKE(kind) && 13755 (kind != DTRACEACT_PRINTA || 13756 desc->dofa_strtab != DOF_SECIDX_NONE)) || 13757 (kind == DTRACEACT_DIFEXPR && 13758 desc->dofa_strtab != DOF_SECIDX_NONE)) { 13759 dof_sec_t *strtab; 13760 char *str, *fmt; 13761 uint64_t i; 13762 13763 /* 13764 * The argument to these actions is an index into the 
13765 * DOF string table. For printf()-like actions, this 13766 * is the format string. For print(), this is the 13767 * CTF type of the expression result. 13768 */ 13769 if ((strtab = dtrace_dof_sect(dof, 13770 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 13771 goto err; 13772 13773 str = (char *)((uintptr_t)dof + 13774 (uintptr_t)strtab->dofs_offset); 13775 13776 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 13777 if (str[i] == '\0') 13778 break; 13779 } 13780 13781 if (i >= strtab->dofs_size) { 13782 dtrace_dof_error(dof, "bogus format string"); 13783 goto err; 13784 } 13785 13786 if (i == desc->dofa_arg) { 13787 dtrace_dof_error(dof, "empty format string"); 13788 goto err; 13789 } 13790 13791 i -= desc->dofa_arg; 13792 fmt = kmem_alloc(i + 1, KM_SLEEP); 13793 bcopy(&str[desc->dofa_arg], fmt, i + 1); 13794 arg = (uint64_t)(uintptr_t)fmt; 13795 } else { 13796 if (kind == DTRACEACT_PRINTA) { 13797 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 13798 arg = 0; 13799 } else { 13800 arg = desc->dofa_arg; 13801 } 13802 } 13803 13804 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 13805 desc->dofa_uarg, arg); 13806 13807 if (last != NULL) { 13808 last->dtad_next = act; 13809 } else { 13810 first = act; 13811 } 13812 13813 last = act; 13814 13815 if (desc->dofa_difo == DOF_SECIDX_NONE) 13816 continue; 13817 13818 if ((difosec = dtrace_dof_sect(dof, 13819 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 13820 goto err; 13821 13822 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 13823 13824 if (act->dtad_difo == NULL) 13825 goto err; 13826 } 13827 13828 ASSERT(first != NULL); 13829 return (first); 13830 13831 err: 13832 for (act = first; act != NULL; act = next) { 13833 next = act->dtad_next; 13834 dtrace_actdesc_release(act, vstate); 13835 } 13836 13837 return (NULL); 13838 } 13839 13840 static dtrace_ecbdesc_t * 13841 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13842 cred_t *cr) 13843 { 13844 dtrace_ecbdesc_t *ep; 13845 dof_ecbdesc_t *ecb; 13846 dtrace_probedesc_t *desc; 13847 dtrace_predicate_t *pred = NULL; 13848 13849 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 13850 dtrace_dof_error(dof, "truncated ECB description"); 13851 return (NULL); 13852 } 13853 13854 if (sec->dofs_align != sizeof (uint64_t)) { 13855 dtrace_dof_error(dof, "bad alignment in ECB description"); 13856 return (NULL); 13857 } 13858 13859 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 13860 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 13861 13862 if (sec == NULL) 13863 return (NULL); 13864 13865 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 13866 ep->dted_uarg = ecb->dofe_uarg; 13867 desc = &ep->dted_probe; 13868 13869 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 13870 goto err; 13871 13872 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 13873 if ((sec = dtrace_dof_sect(dof, 13874 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 13875 goto err; 13876 13877 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 13878 goto err; 13879 13880 ep->dted_pred.dtpdd_predicate = pred; 13881 } 13882 13883 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 13884 if ((sec = dtrace_dof_sect(dof, 13885 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 13886 goto err; 13887 13888 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 13889 13890 if (ep->dted_action == NULL) 13891 goto err; 13892 } 13893 13894 return (ep); 13895 13896 err: 13897 if (pred != NULL) 13898 dtrace_predicate_release(pred, vstate); 13899 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 13900 return (NULL); 13901 } 13902 13903 /* 13904 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 13905 * specified DOF. At present, this amounts to simply adding 'ubase' to the 13906 * site of any user SETX relocations to account for load object base address. 13907 * In the future, if we need other relocations, this function can be extended. 13908 */ 13909 static int 13910 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 13911 { 13912 uintptr_t daddr = (uintptr_t)dof; 13913 dof_relohdr_t *dofr = 13914 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13915 dof_sec_t *ss, *rs, *ts; 13916 dof_relodesc_t *r; 13917 uint_t i, n; 13918 13919 if (sec->dofs_size < sizeof (dof_relohdr_t) || 13920 sec->dofs_align != sizeof (dof_secidx_t)) { 13921 dtrace_dof_error(dof, "invalid relocation header"); 13922 return (-1); 13923 } 13924 13925 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 13926 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 13927 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 13928 13929 if (ss == NULL || rs == NULL || ts == NULL) 13930 return (-1); /* dtrace_dof_error() has been called already */ 13931 13932 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 13933 rs->dofs_align != sizeof (uint64_t)) { 13934 dtrace_dof_error(dof, "invalid relocation section"); 13935 return (-1); 13936 } 13937 13938 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 13939 n = rs->dofs_size / rs->dofs_entsize; 13940 13941 for (i = 0; i < n; i++) { 13942 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 13943 13944 switch (r->dofr_type) { 13945 case DOF_RELO_NONE: 13946 break; 13947 case DOF_RELO_SETX: 13948 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 13949 sizeof (uint64_t) > ts->dofs_size) { 13950 dtrace_dof_error(dof, "bad relocation offset"); 13951 return (-1); 13952 } 13953 13954 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 13955 dtrace_dof_error(dof, "misaligned setx relo"); 13956 return (-1); 13957 } 13958 13959 *(uint64_t *)taddr += ubase; 13960 break; 13961 default: 13962 dtrace_dof_error(dof, "invalid relocation type"); 13963 return (-1); 13964 } 13965 13966 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 13967 } 13968 13969 return (0); 13970 } 13971 13972 /* 13973 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 13974 * header: it should be at the front of a memory region that is at least 13975 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 13976 * size. It need not be validated in any other way. 13977 */ 13978 static int 13979 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 13980 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 13981 { 13982 uint64_t len = dof->dofh_loadsz, seclen; 13983 uintptr_t daddr = (uintptr_t)dof; 13984 dtrace_ecbdesc_t *ep; 13985 dtrace_enabling_t *enab; 13986 uint_t i; 13987 13988 ASSERT(MUTEX_HELD(&dtrace_lock)); 13989 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 13990 13991 /* 13992 * Check the DOF header identification bytes. In addition to checking 13993 * valid settings, we also verify that unused bits/bytes are zeroed so 13994 * we can use them later without fear of regressing existing binaries. 
13995 */ 13996 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 13997 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 13998 dtrace_dof_error(dof, "DOF magic string mismatch"); 13999 return (-1); 14000 } 14001 14002 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 14003 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 14004 dtrace_dof_error(dof, "DOF has invalid data model"); 14005 return (-1); 14006 } 14007 14008 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 14009 dtrace_dof_error(dof, "DOF encoding mismatch"); 14010 return (-1); 14011 } 14012 14013 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14014 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 14015 dtrace_dof_error(dof, "DOF version mismatch"); 14016 return (-1); 14017 } 14018 14019 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 14020 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 14021 return (-1); 14022 } 14023 14024 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 14025 dtrace_dof_error(dof, "DOF uses too many integer registers"); 14026 return (-1); 14027 } 14028 14029 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 14030 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 14031 return (-1); 14032 } 14033 14034 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 14035 if (dof->dofh_ident[i] != 0) { 14036 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 14037 return (-1); 14038 } 14039 } 14040 14041 if (dof->dofh_flags & ~DOF_FL_VALID) { 14042 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 14043 return (-1); 14044 } 14045 14046 if (dof->dofh_secsize == 0) { 14047 dtrace_dof_error(dof, "zero section header size"); 14048 return (-1); 14049 } 14050 14051 /* 14052 * Check that the section headers don't exceed the amount of DOF 14053 * data. Note that we cast the section size and number of sections 14054 * to uint64_t's to prevent possible overflow in the multiplication. 14055 */ 14056 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 14057 14058 if (dof->dofh_secoff > len || seclen > len || 14059 dof->dofh_secoff + seclen > len) { 14060 dtrace_dof_error(dof, "truncated section headers"); 14061 return (-1); 14062 } 14063 14064 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 14065 dtrace_dof_error(dof, "misaligned section headers"); 14066 return (-1); 14067 } 14068 14069 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 14070 dtrace_dof_error(dof, "misaligned section size"); 14071 return (-1); 14072 } 14073 14074 /* 14075 * Take an initial pass through the section headers to be sure that 14076 * the headers don't have stray offsets. If the 'noprobes' flag is 14077 * set, do not permit sections relating to providers, probes, or args. 
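/*
 * Each loadable section below is checked for containment by comparing its
 * offset, its size, and their sum against the total load size; because
 * the load size is already bounded by dtrace_dof_maxsize, checking the
 * two fields individually before the sum keeps the sum from wrapping.
 * The same test as a hypothetical helper:
 */
static int
example_sec_contained(uint64_t offset, uint64_t size, uint64_t len)
{
	return (offset <= len && size <= len && offset + size <= len);
}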
14078 */ 14079 for (i = 0; i < dof->dofh_secnum; i++) { 14080 dof_sec_t *sec = (dof_sec_t *)(daddr + 14081 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14082 14083 if (noprobes) { 14084 switch (sec->dofs_type) { 14085 case DOF_SECT_PROVIDER: 14086 case DOF_SECT_PROBES: 14087 case DOF_SECT_PRARGS: 14088 case DOF_SECT_PROFFS: 14089 dtrace_dof_error(dof, "illegal sections " 14090 "for enabling"); 14091 return (-1); 14092 } 14093 } 14094 14095 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 14096 !(sec->dofs_flags & DOF_SECF_LOAD)) { 14097 dtrace_dof_error(dof, "loadable section with load " 14098 "flag unset"); 14099 return (-1); 14100 } 14101 14102 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14103 continue; /* just ignore non-loadable sections */ 14104 14105 if (!ISP2(sec->dofs_align)) { 14106 dtrace_dof_error(dof, "bad section alignment"); 14107 return (-1); 14108 } 14109 14110 if (sec->dofs_offset & (sec->dofs_align - 1)) { 14111 dtrace_dof_error(dof, "misaligned section"); 14112 return (-1); 14113 } 14114 14115 if (sec->dofs_offset > len || sec->dofs_size > len || 14116 sec->dofs_offset + sec->dofs_size > len) { 14117 dtrace_dof_error(dof, "corrupt section header"); 14118 return (-1); 14119 } 14120 14121 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 14122 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 14123 dtrace_dof_error(dof, "non-terminating string table"); 14124 return (-1); 14125 } 14126 } 14127 14128 /* 14129 * Take a second pass through the sections and locate and perform any 14130 * relocations that are present. We do this after the first pass to 14131 * be sure that all sections have had their headers validated. 14132 */ 14133 for (i = 0; i < dof->dofh_secnum; i++) { 14134 dof_sec_t *sec = (dof_sec_t *)(daddr + 14135 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14136 14137 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14138 continue; /* skip sections that are not loadable */ 14139 14140 switch (sec->dofs_type) { 14141 case DOF_SECT_URELHDR: 14142 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 14143 return (-1); 14144 break; 14145 } 14146 } 14147 14148 if ((enab = *enabp) == NULL) 14149 enab = *enabp = dtrace_enabling_create(vstate); 14150 14151 for (i = 0; i < dof->dofh_secnum; i++) { 14152 dof_sec_t *sec = (dof_sec_t *)(daddr + 14153 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14154 14155 if (sec->dofs_type != DOF_SECT_ECBDESC) 14156 continue; 14157 14158 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 14159 dtrace_enabling_destroy(enab); 14160 *enabp = NULL; 14161 return (-1); 14162 } 14163 14164 dtrace_enabling_add(enab, ep); 14165 } 14166 14167 return (0); 14168 } 14169 14170 /* 14171 * Process DOF for any options. This routine assumes that the DOF has been 14172 * at least processed by dtrace_dof_slurp(). 
14173 */ 14174 static int 14175 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 14176 { 14177 int i, rval; 14178 uint32_t entsize; 14179 size_t offs; 14180 dof_optdesc_t *desc; 14181 14182 for (i = 0; i < dof->dofh_secnum; i++) { 14183 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 14184 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14185 14186 if (sec->dofs_type != DOF_SECT_OPTDESC) 14187 continue; 14188 14189 if (sec->dofs_align != sizeof (uint64_t)) { 14190 dtrace_dof_error(dof, "bad alignment in " 14191 "option description"); 14192 return (EINVAL); 14193 } 14194 14195 if ((entsize = sec->dofs_entsize) == 0) { 14196 dtrace_dof_error(dof, "zeroed option entry size"); 14197 return (EINVAL); 14198 } 14199 14200 if (entsize < sizeof (dof_optdesc_t)) { 14201 dtrace_dof_error(dof, "bad option entry size"); 14202 return (EINVAL); 14203 } 14204 14205 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 14206 desc = (dof_optdesc_t *)((uintptr_t)dof + 14207 (uintptr_t)sec->dofs_offset + offs); 14208 14209 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 14210 dtrace_dof_error(dof, "non-zero option string"); 14211 return (EINVAL); 14212 } 14213 14214 if (desc->dofo_value == DTRACEOPT_UNSET) { 14215 dtrace_dof_error(dof, "unset option"); 14216 return (EINVAL); 14217 } 14218 14219 if ((rval = dtrace_state_option(state, 14220 desc->dofo_option, desc->dofo_value)) != 0) { 14221 dtrace_dof_error(dof, "rejected option"); 14222 return (rval); 14223 } 14224 } 14225 } 14226 14227 return (0); 14228 } 14229 14230 /* 14231 * DTrace Consumer State Functions 14232 */ 14233 static int 14234 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 14235 { 14236 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 14237 void *base; 14238 uintptr_t limit; 14239 dtrace_dynvar_t *dvar, *next, *start; 14240 int i; 14241 14242 ASSERT(MUTEX_HELD(&dtrace_lock)); 14243 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 14244 14245 bzero(dstate, sizeof (dtrace_dstate_t)); 14246 14247 if ((dstate->dtds_chunksize = chunksize) == 0) 14248 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 14249 14250 VERIFY(dstate->dtds_chunksize < LONG_MAX); 14251 14252 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 14253 size = min; 14254 14255 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 14256 return (ENOMEM); 14257 14258 dstate->dtds_size = size; 14259 dstate->dtds_base = base; 14260 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 14261 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 14262 14263 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 14264 14265 if (hashsize != 1 && (hashsize & 1)) 14266 hashsize--; 14267 14268 dstate->dtds_hashsize = hashsize; 14269 dstate->dtds_hash = dstate->dtds_base; 14270 14271 /* 14272 * Set all of our hash buckets to point to the single sink, and (if 14273 * it hasn't already been set), set the sink's hash value to be the 14274 * sink sentinel value. The sink is needed for dynamic variable 14275 * lookups to know that they have iterated over an entire, valid hash 14276 * chain. 14277 */ 14278 for (i = 0; i < hashsize; i++) 14279 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 14280 14281 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 14282 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 14283 14284 /* 14285 * Determine number of active CPUs. Divide free list evenly among 14286 * active CPUs. 
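 *
 * As a rough illustration (the numbers are hypothetical): with 8 CPUs
 * and roughly 1MB of space remaining after the hash table, maxper
 * below comes to about 128KB, rounded down to a multiple of
 * dtds_chunksize; each CPU's free list is then carved out of its own
 * slice, with the last CPU absorbing whatever is left over.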
14287 */ 14288 start = (dtrace_dynvar_t *) 14289 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 14290 limit = (uintptr_t)base + size; 14291 14292 VERIFY((uintptr_t)start < limit); 14293 VERIFY((uintptr_t)start >= (uintptr_t)base); 14294 14295 maxper = (limit - (uintptr_t)start) / NCPU; 14296 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 14297 14298 #ifndef illumos 14299 CPU_FOREACH(i) { 14300 #else 14301 for (i = 0; i < NCPU; i++) { 14302 #endif 14303 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 14304 14305 /* 14306 * If we don't even have enough chunks to make it once through 14307 * NCPUs, we're just going to allocate everything to the first 14308 * CPU. And if we're on the last CPU, we're going to allocate 14309 * whatever is left over. In either case, we set the limit to 14310 * be the limit of the dynamic variable space. 14311 */ 14312 if (maxper == 0 || i == NCPU - 1) { 14313 limit = (uintptr_t)base + size; 14314 start = NULL; 14315 } else { 14316 limit = (uintptr_t)start + maxper; 14317 start = (dtrace_dynvar_t *)limit; 14318 } 14319 14320 VERIFY(limit <= (uintptr_t)base + size); 14321 14322 for (;;) { 14323 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 14324 dstate->dtds_chunksize); 14325 14326 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 14327 break; 14328 14329 VERIFY((uintptr_t)dvar >= (uintptr_t)base && 14330 (uintptr_t)dvar <= (uintptr_t)base + size); 14331 dvar->dtdv_next = next; 14332 dvar = next; 14333 } 14334 14335 if (maxper == 0) 14336 break; 14337 } 14338 14339 return (0); 14340 } 14341 14342 static void 14343 dtrace_dstate_fini(dtrace_dstate_t *dstate) 14344 { 14345 ASSERT(MUTEX_HELD(&cpu_lock)); 14346 14347 if (dstate->dtds_base == NULL) 14348 return; 14349 14350 kmem_free(dstate->dtds_base, dstate->dtds_size); 14351 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 14352 } 14353 14354 static void 14355 dtrace_vstate_fini(dtrace_vstate_t *vstate) 14356 { 14357 /* 14358 * Logical XOR, where are you? 14359 */ 14360 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 14361 14362 if (vstate->dtvs_nglobals > 0) { 14363 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 14364 sizeof (dtrace_statvar_t *)); 14365 } 14366 14367 if (vstate->dtvs_ntlocals > 0) { 14368 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 14369 sizeof (dtrace_difv_t)); 14370 } 14371 14372 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 14373 14374 if (vstate->dtvs_nlocals > 0) { 14375 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 14376 sizeof (dtrace_statvar_t *)); 14377 } 14378 } 14379 14380 #ifdef illumos 14381 static void 14382 dtrace_state_clean(dtrace_state_t *state) 14383 { 14384 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14385 return; 14386 14387 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14388 dtrace_speculation_clean(state); 14389 } 14390 14391 static void 14392 dtrace_state_deadman(dtrace_state_t *state) 14393 { 14394 hrtime_t now; 14395 14396 dtrace_sync(); 14397 14398 now = dtrace_gethrtime(); 14399 14400 if (state != dtrace_anon.dta_state && 14401 now - state->dts_laststatus >= dtrace_deadman_user) 14402 return; 14403 14404 /* 14405 * We must be sure that dts_alive never appears to be less than the 14406 * value upon entry to dtrace_state_deadman(), and because we lack a 14407 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14408 * store INT64_MAX to it, followed by a memory barrier, followed by 14409 * the new value. 
This assures that dts_alive never appears to be 14410 * less than its true value, regardless of the order in which the 14411 * stores to the underlying storage are issued. 14412 */ 14413 state->dts_alive = INT64_MAX; 14414 dtrace_membar_producer(); 14415 state->dts_alive = now; 14416 } 14417 #else /* !illumos */ 14418 static void 14419 dtrace_state_clean(void *arg) 14420 { 14421 dtrace_state_t *state = arg; 14422 dtrace_optval_t *opt = state->dts_options; 14423 14424 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14425 return; 14426 14427 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14428 dtrace_speculation_clean(state); 14429 14430 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14431 dtrace_state_clean, state); 14432 } 14433 14434 static void 14435 dtrace_state_deadman(void *arg) 14436 { 14437 dtrace_state_t *state = arg; 14438 hrtime_t now; 14439 14440 dtrace_sync(); 14441 14442 dtrace_debug_output(); 14443 14444 now = dtrace_gethrtime(); 14445 14446 if (state != dtrace_anon.dta_state && 14447 now - state->dts_laststatus >= dtrace_deadman_user) 14448 return; 14449 14450 /* 14451 * We must be sure that dts_alive never appears to be less than the 14452 * value upon entry to dtrace_state_deadman(), and because we lack a 14453 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14454 * store INT64_MAX to it, followed by a memory barrier, followed by 14455 * the new value. This assures that dts_alive never appears to be 14456 * less than its true value, regardless of the order in which the 14457 * stores to the underlying storage are issued. 14458 */ 14459 state->dts_alive = INT64_MAX; 14460 dtrace_membar_producer(); 14461 state->dts_alive = now; 14462 14463 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14464 dtrace_state_deadman, state); 14465 } 14466 #endif /* illumos */ 14467 14468 static dtrace_state_t * 14469 #ifdef illumos 14470 dtrace_state_create(dev_t *devp, cred_t *cr) 14471 #else 14472 dtrace_state_create(struct cdev *dev, struct ucred *cred __unused) 14473 #endif 14474 { 14475 #ifdef illumos 14476 minor_t minor; 14477 major_t major; 14478 #else 14479 cred_t *cr = NULL; 14480 int m = 0; 14481 #endif 14482 char c[30]; 14483 dtrace_state_t *state; 14484 dtrace_optval_t *opt; 14485 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 14486 14487 ASSERT(MUTEX_HELD(&dtrace_lock)); 14488 ASSERT(MUTEX_HELD(&cpu_lock)); 14489 14490 #ifdef illumos 14491 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 14492 VM_BESTFIT | VM_SLEEP); 14493 14494 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 14495 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14496 return (NULL); 14497 } 14498 14499 state = ddi_get_soft_state(dtrace_softstate, minor); 14500 #else 14501 if (dev != NULL) { 14502 cr = dev->si_cred; 14503 m = dev2unit(dev); 14504 } 14505 14506 /* Allocate memory for the state. 
*/ 14507 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 14508 #endif 14509 14510 state->dts_epid = DTRACE_EPIDNONE + 1; 14511 14512 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 14513 #ifdef illumos 14514 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 14515 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14516 14517 if (devp != NULL) { 14518 major = getemajor(*devp); 14519 } else { 14520 major = ddi_driver_major(dtrace_devi); 14521 } 14522 14523 state->dts_dev = makedevice(major, minor); 14524 14525 if (devp != NULL) 14526 *devp = state->dts_dev; 14527 #else 14528 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 14529 state->dts_dev = dev; 14530 #endif 14531 14532 /* 14533 * We allocate NCPU buffers. On the one hand, this can be quite 14534 * a bit of memory per instance (nearly 36K on a Starcat). On the 14535 * other hand, it saves an additional memory reference in the probe 14536 * path. 14537 */ 14538 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 14539 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 14540 14541 #ifdef illumos 14542 state->dts_cleaner = CYCLIC_NONE; 14543 state->dts_deadman = CYCLIC_NONE; 14544 #else 14545 callout_init(&state->dts_cleaner, 1); 14546 callout_init(&state->dts_deadman, 1); 14547 #endif 14548 state->dts_vstate.dtvs_state = state; 14549 14550 for (i = 0; i < DTRACEOPT_MAX; i++) 14551 state->dts_options[i] = DTRACEOPT_UNSET; 14552 14553 /* 14554 * Set the default options. 14555 */ 14556 opt = state->dts_options; 14557 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 14558 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 14559 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 14560 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 14561 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 14562 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 14563 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 14564 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 14565 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 14566 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 14567 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 14568 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 14569 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 14570 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 14571 14572 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 14573 14574 /* 14575 * Depending on the user credentials, we set flag bits which alter probe 14576 * visibility or the amount of destructiveness allowed. In the case of 14577 * actual anonymous tracing, or the possession of all privileges, all of 14578 * the normal checks are bypassed. 14579 */ 14580 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 14581 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 14582 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 14583 } else { 14584 /* 14585 * Set up the credentials for this instantiation. We take a 14586 * hold on the credential to prevent it from disappearing on 14587 * us; this in turn prevents the zone_t referenced by this 14588 * credential from disappearing. This means that we can 14589 * examine the credential and the zone from probe context. 14590 */ 14591 crhold(cr); 14592 state->dts_cred.dcr_cred = cr; 14593 14594 /* 14595 * CRA_PROC means "we have *some* privilege for dtrace" and 14596 * unlocks the use of variables like pid, zonename, etc. 
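 *
 * (For example -- illustrative only -- a consumer holding just
 * dtrace_proc can still evaluate a predicate such as /pid == $target/
 * or record zonename, since those only require DTRACE_CRA_PROC.)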
14597 */ 14598 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 14599 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14600 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 14601 } 14602 14603 /* 14604 * dtrace_user allows use of syscall and profile providers. 14605 * If the user also has proc_owner and/or proc_zone, we 14606 * extend the scope to include additional visibility and 14607 * destructive power. 14608 */ 14609 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 14610 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 14611 state->dts_cred.dcr_visible |= 14612 DTRACE_CRV_ALLPROC; 14613 14614 state->dts_cred.dcr_action |= 14615 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14616 } 14617 14618 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 14619 state->dts_cred.dcr_visible |= 14620 DTRACE_CRV_ALLZONE; 14621 14622 state->dts_cred.dcr_action |= 14623 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14624 } 14625 14626 /* 14627 * If we have all privs in whatever zone this is, 14628 * we can do destructive things to processes which 14629 * have altered credentials. 14630 */ 14631 #ifdef illumos 14632 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14633 cr->cr_zone->zone_privset)) { 14634 state->dts_cred.dcr_action |= 14635 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14636 } 14637 #endif 14638 } 14639 14640 /* 14641 * Holding the dtrace_kernel privilege also implies that 14642 * the user has the dtrace_user privilege from a visibility 14643 * perspective. But without further privileges, some 14644 * destructive actions are not available. 14645 */ 14646 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 14647 /* 14648 * Make all probes in all zones visible. However, 14649 * this doesn't mean that all actions become available 14650 * to all zones. 14651 */ 14652 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 14653 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 14654 14655 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 14656 DTRACE_CRA_PROC; 14657 /* 14658 * Holding proc_owner means that destructive actions 14659 * for *this* zone are allowed. 14660 */ 14661 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14662 state->dts_cred.dcr_action |= 14663 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14664 14665 /* 14666 * Holding proc_zone means that destructive actions 14667 * for this user/group ID in all zones is allowed. 14668 */ 14669 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14670 state->dts_cred.dcr_action |= 14671 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14672 14673 #ifdef illumos 14674 /* 14675 * If we have all privs in whatever zone this is, 14676 * we can do destructive things to processes which 14677 * have altered credentials. 14678 */ 14679 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14680 cr->cr_zone->zone_privset)) { 14681 state->dts_cred.dcr_action |= 14682 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14683 } 14684 #endif 14685 } 14686 14687 /* 14688 * Holding the dtrace_proc privilege gives control over fasttrap 14689 * and pid providers. We need to grant wider destructive 14690 * privileges in the event that the user has proc_owner and/or 14691 * proc_zone. 
14692 */ 14693 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14694 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14695 state->dts_cred.dcr_action |= 14696 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14697 14698 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14699 state->dts_cred.dcr_action |= 14700 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14701 } 14702 } 14703 14704 return (state); 14705 } 14706 14707 static int 14708 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 14709 { 14710 dtrace_optval_t *opt = state->dts_options, size; 14711 processorid_t cpu = 0; 14712 int flags = 0, rval, factor, divisor = 1; 14713 14714 ASSERT(MUTEX_HELD(&dtrace_lock)); 14715 ASSERT(MUTEX_HELD(&cpu_lock)); 14716 ASSERT(which < DTRACEOPT_MAX); 14717 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 14718 (state == dtrace_anon.dta_state && 14719 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 14720 14721 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 14722 return (0); 14723 14724 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 14725 cpu = opt[DTRACEOPT_CPU]; 14726 14727 if (which == DTRACEOPT_SPECSIZE) 14728 flags |= DTRACEBUF_NOSWITCH; 14729 14730 if (which == DTRACEOPT_BUFSIZE) { 14731 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 14732 flags |= DTRACEBUF_RING; 14733 14734 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 14735 flags |= DTRACEBUF_FILL; 14736 14737 if (state != dtrace_anon.dta_state || 14738 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14739 flags |= DTRACEBUF_INACTIVE; 14740 } 14741 14742 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) { 14743 /* 14744 * The size must be 8-byte aligned. If the size is not 8-byte 14745 * aligned, drop it down by the difference. 14746 */ 14747 if (size & (sizeof (uint64_t) - 1)) 14748 size -= size & (sizeof (uint64_t) - 1); 14749 14750 if (size < state->dts_reserve) { 14751 /* 14752 * Buffers always must be large enough to accommodate 14753 * their prereserved space. We return E2BIG instead 14754 * of ENOMEM in this case to allow for user-level 14755 * software to differentiate the cases.
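 *
 * (Illustrative example with hypothetical numbers: under the "fill"
 * policy, dts_reserve holds the space needed by the END probe's ECBs.
 * If a consumer asks for a 32K principal buffer but the END records
 * require 40K, the request is rejected with E2BIG rather than ENOMEM,
 * telling user level that the buffer was too small for its
 * prereserved space rather than that the kernel was out of memory.)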
14756 */ 14757 return (E2BIG); 14758 } 14759 14760 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor); 14761 14762 if (rval != ENOMEM) { 14763 opt[which] = size; 14764 return (rval); 14765 } 14766 14767 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14768 return (rval); 14769 14770 for (divisor = 2; divisor < factor; divisor <<= 1) 14771 continue; 14772 } 14773 14774 return (ENOMEM); 14775 } 14776 14777 static int 14778 dtrace_state_buffers(dtrace_state_t *state) 14779 { 14780 dtrace_speculation_t *spec = state->dts_speculations; 14781 int rval, i; 14782 14783 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 14784 DTRACEOPT_BUFSIZE)) != 0) 14785 return (rval); 14786 14787 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 14788 DTRACEOPT_AGGSIZE)) != 0) 14789 return (rval); 14790 14791 for (i = 0; i < state->dts_nspeculations; i++) { 14792 if ((rval = dtrace_state_buffer(state, 14793 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 14794 return (rval); 14795 } 14796 14797 return (0); 14798 } 14799 14800 static void 14801 dtrace_state_prereserve(dtrace_state_t *state) 14802 { 14803 dtrace_ecb_t *ecb; 14804 dtrace_probe_t *probe; 14805 14806 state->dts_reserve = 0; 14807 14808 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 14809 return; 14810 14811 /* 14812 * If our buffer policy is a "fill" buffer policy, we need to set the 14813 * prereserved space to be the space required by the END probes. 14814 */ 14815 probe = dtrace_probes[dtrace_probeid_end - 1]; 14816 ASSERT(probe != NULL); 14817 14818 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 14819 if (ecb->dte_state != state) 14820 continue; 14821 14822 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 14823 } 14824 } 14825 14826 static int 14827 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 14828 { 14829 dtrace_optval_t *opt = state->dts_options, sz, nspec; 14830 dtrace_speculation_t *spec; 14831 dtrace_buffer_t *buf; 14832 #ifdef illumos 14833 cyc_handler_t hdlr; 14834 cyc_time_t when; 14835 #endif 14836 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 14837 dtrace_icookie_t cookie; 14838 14839 mutex_enter(&cpu_lock); 14840 mutex_enter(&dtrace_lock); 14841 14842 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14843 rval = EBUSY; 14844 goto out; 14845 } 14846 14847 /* 14848 * Before we can perform any checks, we must prime all of the 14849 * retained enablings that correspond to this state. 14850 */ 14851 dtrace_enabling_prime(state); 14852 14853 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 14854 rval = EACCES; 14855 goto out; 14856 } 14857 14858 dtrace_state_prereserve(state); 14859 14860 /* 14861 * Now we want to do is try to allocate our speculations. 14862 * We do not automatically resize the number of speculations; if 14863 * this fails, we will fail the operation. 
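 *
 * For context (illustrative D, not driver code), a clause that calls
 * speculation() and later speculate()/commit()/discard(), e.g.
 *
 *	self->spec = speculation();
 *	speculate(self->spec);
 *	...
 *	commit(self->spec);
 *
 * occupies one of these speculation slots while it is outstanding, so
 * the "nspec" option bounds how many speculations may be in flight at
 * any one time.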
14864 */ 14865 nspec = opt[DTRACEOPT_NSPEC]; 14866 ASSERT(nspec != DTRACEOPT_UNSET); 14867 14868 if (nspec > INT_MAX) { 14869 rval = ENOMEM; 14870 goto out; 14871 } 14872 14873 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 14874 KM_NOSLEEP | KM_NORMALPRI); 14875 14876 if (spec == NULL) { 14877 rval = ENOMEM; 14878 goto out; 14879 } 14880 14881 state->dts_speculations = spec; 14882 state->dts_nspeculations = (int)nspec; 14883 14884 for (i = 0; i < nspec; i++) { 14885 if ((buf = kmem_zalloc(bufsize, 14886 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 14887 rval = ENOMEM; 14888 goto err; 14889 } 14890 14891 spec[i].dtsp_buffer = buf; 14892 } 14893 14894 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 14895 if (dtrace_anon.dta_state == NULL) { 14896 rval = ENOENT; 14897 goto out; 14898 } 14899 14900 if (state->dts_necbs != 0) { 14901 rval = EALREADY; 14902 goto out; 14903 } 14904 14905 state->dts_anon = dtrace_anon_grab(); 14906 ASSERT(state->dts_anon != NULL); 14907 state = state->dts_anon; 14908 14909 /* 14910 * We want "grabanon" to be set in the grabbed state, so we'll 14911 * copy that option value from the grabbing state into the 14912 * grabbed state. 14913 */ 14914 state->dts_options[DTRACEOPT_GRABANON] = 14915 opt[DTRACEOPT_GRABANON]; 14916 14917 *cpu = dtrace_anon.dta_beganon; 14918 14919 /* 14920 * If the anonymous state is active (as it almost certainly 14921 * is if the anonymous enabling ultimately matched anything), 14922 * we don't allow any further option processing -- but we 14923 * don't return failure. 14924 */ 14925 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 14926 goto out; 14927 } 14928 14929 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 14930 opt[DTRACEOPT_AGGSIZE] != 0) { 14931 if (state->dts_aggregations == NULL) { 14932 /* 14933 * We're not going to create an aggregation buffer 14934 * because we don't have any ECBs that contain 14935 * aggregations -- set this option to 0. 14936 */ 14937 opt[DTRACEOPT_AGGSIZE] = 0; 14938 } else { 14939 /* 14940 * If we have an aggregation buffer, we must also have 14941 * a buffer to use as scratch. 14942 */ 14943 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 14944 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 14945 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 14946 } 14947 } 14948 } 14949 14950 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 14951 opt[DTRACEOPT_SPECSIZE] != 0) { 14952 if (!state->dts_speculates) { 14953 /* 14954 * We're not going to create speculation buffers 14955 * because we don't have any ECBs that actually 14956 * speculate -- set the speculation size to 0. 14957 */ 14958 opt[DTRACEOPT_SPECSIZE] = 0; 14959 } 14960 } 14961 14962 /* 14963 * The bare minimum size for any buffer that we're actually going to 14964 * do anything to is sizeof (uint64_t). 14965 */ 14966 sz = sizeof (uint64_t); 14967 14968 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 14969 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 14970 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 14971 /* 14972 * A buffer size has been explicitly set to 0 (or to a size 14973 * that will be adjusted to 0) and we need the space -- we 14974 * need to return failure. We return ENOSPC to differentiate 14975 * it from failing to allocate a buffer due to failure to meet 14976 * the reserve (for which we return E2BIG). 
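 *
 * To summarize the buffer-sizing error codes used here: ENOSPC means
 * a needed buffer was explicitly sized to zero, E2BIG means the size
 * could not cover the prereserved ("fill" policy) space, and ENOMEM
 * means the kernel simply could not allocate the buffer.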
14977 */ 14978 rval = ENOSPC; 14979 goto out; 14980 } 14981 14982 if ((rval = dtrace_state_buffers(state)) != 0) 14983 goto err; 14984 14985 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 14986 sz = dtrace_dstate_defsize; 14987 14988 do { 14989 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 14990 14991 if (rval == 0) 14992 break; 14993 14994 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14995 goto err; 14996 } while (sz >>= 1); 14997 14998 opt[DTRACEOPT_DYNVARSIZE] = sz; 14999 15000 if (rval != 0) 15001 goto err; 15002 15003 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 15004 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 15005 15006 if (opt[DTRACEOPT_CLEANRATE] == 0) 15007 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 15008 15009 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 15010 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 15011 15012 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 15013 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 15014 15015 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 15016 #ifdef illumos 15017 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 15018 hdlr.cyh_arg = state; 15019 hdlr.cyh_level = CY_LOW_LEVEL; 15020 15021 when.cyt_when = 0; 15022 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 15023 15024 state->dts_cleaner = cyclic_add(&hdlr, &when); 15025 15026 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 15027 hdlr.cyh_arg = state; 15028 hdlr.cyh_level = CY_LOW_LEVEL; 15029 15030 when.cyt_when = 0; 15031 when.cyt_interval = dtrace_deadman_interval; 15032 15033 state->dts_deadman = cyclic_add(&hdlr, &when); 15034 #else 15035 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 15036 dtrace_state_clean, state); 15037 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 15038 dtrace_state_deadman, state); 15039 #endif 15040 15041 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 15042 15043 #ifdef illumos 15044 if (state->dts_getf != 0 && 15045 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 15046 /* 15047 * We don't have kernel privs but we have at least one call 15048 * to getf(); we need to bump our zone's count, and (if 15049 * this is the first enabling to have an unprivileged call 15050 * to getf()) we need to hook into closef(). 15051 */ 15052 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 15053 15054 if (dtrace_getf++ == 0) { 15055 ASSERT(dtrace_closef == NULL); 15056 dtrace_closef = dtrace_getf_barrier; 15057 } 15058 } 15059 #endif 15060 15061 /* 15062 * Now it's time to actually fire the BEGIN probe. We need to disable 15063 * interrupts here both to record the CPU on which we fired the BEGIN 15064 * probe (the data from this CPU will be processed first at user 15065 * level) and to manually activate the buffer for this CPU. 15066 */ 15067 cookie = dtrace_interrupt_disable(); 15068 *cpu = curcpu; 15069 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 15070 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 15071 15072 dtrace_probe(dtrace_probeid_begin, 15073 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 15074 dtrace_interrupt_enable(cookie); 15075 /* 15076 * We may have had an exit action from a BEGIN probe; only change our 15077 * state to ACTIVE if we're still in WARMUP. 
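 *
 * (As an illustration, an enabling as simple as "BEGIN { exit(0); }"
 * takes an exit action inside the BEGIN probe itself; by the time
 * dtrace_probe() returns above, the state has already advanced to
 * DRAINING and must not be pulled back to ACTIVE.)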
15078 */ 15079 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 15080 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 15081 15082 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 15083 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 15084 15085 #ifdef __FreeBSD__ 15086 /* 15087 * We enable anonymous tracing before APs are started, so we must 15088 * activate buffers using the current CPU. 15089 */ 15090 if (state == dtrace_anon.dta_state) 15091 for (int i = 0; i < NCPU; i++) 15092 dtrace_buffer_activate_cpu(state, i); 15093 else 15094 dtrace_xcall(DTRACE_CPUALL, 15095 (dtrace_xcall_t)dtrace_buffer_activate, state); 15096 #else 15097 /* 15098 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 15099 * want each CPU to transition its principal buffer out of the 15100 * INACTIVE state. Doing this assures that no CPU will suddenly begin 15101 * processing an ECB halfway down a probe's ECB chain; all CPUs will 15102 * atomically transition from processing none of a state's ECBs to 15103 * processing all of them. 15104 */ 15105 dtrace_xcall(DTRACE_CPUALL, 15106 (dtrace_xcall_t)dtrace_buffer_activate, state); 15107 #endif 15108 goto out; 15109 15110 err: 15111 dtrace_buffer_free(state->dts_buffer); 15112 dtrace_buffer_free(state->dts_aggbuffer); 15113 15114 if ((nspec = state->dts_nspeculations) == 0) { 15115 ASSERT(state->dts_speculations == NULL); 15116 goto out; 15117 } 15118 15119 spec = state->dts_speculations; 15120 ASSERT(spec != NULL); 15121 15122 for (i = 0; i < state->dts_nspeculations; i++) { 15123 if ((buf = spec[i].dtsp_buffer) == NULL) 15124 break; 15125 15126 dtrace_buffer_free(buf); 15127 kmem_free(buf, bufsize); 15128 } 15129 15130 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15131 state->dts_nspeculations = 0; 15132 state->dts_speculations = NULL; 15133 15134 out: 15135 mutex_exit(&dtrace_lock); 15136 mutex_exit(&cpu_lock); 15137 15138 return (rval); 15139 } 15140 15141 static int 15142 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 15143 { 15144 dtrace_icookie_t cookie; 15145 15146 ASSERT(MUTEX_HELD(&dtrace_lock)); 15147 15148 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 15149 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 15150 return (EINVAL); 15151 15152 /* 15153 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 15154 * to be sure that every CPU has seen it. See below for the details 15155 * on why this is done. 15156 */ 15157 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 15158 dtrace_sync(); 15159 15160 /* 15161 * By this point, it is impossible for any CPU to be still processing 15162 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 15163 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 15164 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 15165 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 15166 * iff we're in the END probe. 15167 */ 15168 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 15169 dtrace_sync(); 15170 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 15171 15172 /* 15173 * Finally, we can release the reserve and call the END probe. We 15174 * disable interrupts across calling the END probe to allow us to 15175 * return the CPU on which we actually called the END probe. This 15176 * allows user-land to be sure that this CPU's principal buffer is 15177 * processed last. 
15178 */ 15179 state->dts_reserve = 0; 15180 15181 cookie = dtrace_interrupt_disable(); 15182 *cpu = curcpu; 15183 dtrace_probe(dtrace_probeid_end, 15184 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 15185 dtrace_interrupt_enable(cookie); 15186 15187 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 15188 dtrace_sync(); 15189 15190 #ifdef illumos 15191 if (state->dts_getf != 0 && 15192 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 15193 /* 15194 * We don't have kernel privs but we have at least one call 15195 * to getf(); we need to lower our zone's count, and (if 15196 * this is the last enabling to have an unprivileged call 15197 * to getf()) we need to clear the closef() hook. 15198 */ 15199 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0); 15200 ASSERT(dtrace_closef == dtrace_getf_barrier); 15201 ASSERT(dtrace_getf > 0); 15202 15203 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--; 15204 15205 if (--dtrace_getf == 0) 15206 dtrace_closef = NULL; 15207 } 15208 #endif 15209 15210 return (0); 15211 } 15212 15213 static int 15214 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 15215 dtrace_optval_t val) 15216 { 15217 ASSERT(MUTEX_HELD(&dtrace_lock)); 15218 15219 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 15220 return (EBUSY); 15221 15222 if (option >= DTRACEOPT_MAX) 15223 return (EINVAL); 15224 15225 if (option != DTRACEOPT_CPU && val < 0) 15226 return (EINVAL); 15227 15228 switch (option) { 15229 case DTRACEOPT_DESTRUCTIVE: 15230 if (dtrace_destructive_disallow) 15231 return (EACCES); 15232 15233 state->dts_cred.dcr_destructive = 1; 15234 break; 15235 15236 case DTRACEOPT_BUFSIZE: 15237 case DTRACEOPT_DYNVARSIZE: 15238 case DTRACEOPT_AGGSIZE: 15239 case DTRACEOPT_SPECSIZE: 15240 case DTRACEOPT_STRSIZE: 15241 if (val < 0) 15242 return (EINVAL); 15243 15244 if (val >= LONG_MAX) { 15245 /* 15246 * If this is an otherwise negative value, set it to 15247 * the highest multiple of 128m less than LONG_MAX. 15248 * Technically, we're adjusting the size without 15249 * regard to the buffer resizing policy, but in fact, 15250 * this has no effect -- if we set the buffer size to 15251 * ~LONG_MAX and the buffer policy is ultimately set to 15252 * be "manual", the buffer allocation is guaranteed to 15253 * fail, if only because the allocation requires two 15254 * buffers. (We set the the size to the highest 15255 * multiple of 128m because it ensures that the size 15256 * will remain a multiple of a megabyte when 15257 * repeatedly halved -- all the way down to 15m.) 15258 */ 15259 val = LONG_MAX - (1 << 27) + 1; 15260 } 15261 } 15262 15263 state->dts_options[option] = val; 15264 15265 return (0); 15266 } 15267 15268 static void 15269 dtrace_state_destroy(dtrace_state_t *state) 15270 { 15271 dtrace_ecb_t *ecb; 15272 dtrace_vstate_t *vstate = &state->dts_vstate; 15273 #ifdef illumos 15274 minor_t minor = getminor(state->dts_dev); 15275 #endif 15276 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 15277 dtrace_speculation_t *spec = state->dts_speculations; 15278 int nspec = state->dts_nspeculations; 15279 uint32_t match; 15280 15281 ASSERT(MUTEX_HELD(&dtrace_lock)); 15282 ASSERT(MUTEX_HELD(&cpu_lock)); 15283 15284 /* 15285 * First, retract any retained enablings for this state. 
15286 */ 15287 dtrace_enabling_retract(state); 15288 ASSERT(state->dts_nretained == 0); 15289 15290 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 15291 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 15292 /* 15293 * We have managed to come into dtrace_state_destroy() on a 15294 * hot enabling -- almost certainly because of a disorderly 15295 * shutdown of a consumer. (That is, a consumer that is 15296 * exiting without having called dtrace_stop().) In this case, 15297 * we're going to set our activity to be KILLED, and then 15298 * issue a sync to be sure that everyone is out of probe 15299 * context before we start blowing away ECBs. 15300 */ 15301 state->dts_activity = DTRACE_ACTIVITY_KILLED; 15302 dtrace_sync(); 15303 } 15304 15305 /* 15306 * Release the credential hold we took in dtrace_state_create(). 15307 */ 15308 if (state->dts_cred.dcr_cred != NULL) 15309 crfree(state->dts_cred.dcr_cred); 15310 15311 /* 15312 * Now we can safely disable and destroy any enabled probes. Because 15313 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 15314 * (especially if they're all enabled), we take two passes through the 15315 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 15316 * in the second we disable whatever is left over. 15317 */ 15318 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 15319 for (i = 0; i < state->dts_necbs; i++) { 15320 if ((ecb = state->dts_ecbs[i]) == NULL) 15321 continue; 15322 15323 if (match && ecb->dte_probe != NULL) { 15324 dtrace_probe_t *probe = ecb->dte_probe; 15325 dtrace_provider_t *prov = probe->dtpr_provider; 15326 15327 if (!(prov->dtpv_priv.dtpp_flags & match)) 15328 continue; 15329 } 15330 15331 dtrace_ecb_disable(ecb); 15332 dtrace_ecb_destroy(ecb); 15333 } 15334 15335 if (!match) 15336 break; 15337 } 15338 15339 /* 15340 * Before we free the buffers, perform one more sync to assure that 15341 * every CPU is out of probe context. 
15342 */ 15343 dtrace_sync(); 15344 15345 dtrace_buffer_free(state->dts_buffer); 15346 dtrace_buffer_free(state->dts_aggbuffer); 15347 15348 for (i = 0; i < nspec; i++) 15349 dtrace_buffer_free(spec[i].dtsp_buffer); 15350 15351 #ifdef illumos 15352 if (state->dts_cleaner != CYCLIC_NONE) 15353 cyclic_remove(state->dts_cleaner); 15354 15355 if (state->dts_deadman != CYCLIC_NONE) 15356 cyclic_remove(state->dts_deadman); 15357 #else 15358 callout_stop(&state->dts_cleaner); 15359 callout_drain(&state->dts_cleaner); 15360 callout_stop(&state->dts_deadman); 15361 callout_drain(&state->dts_deadman); 15362 #endif 15363 15364 dtrace_dstate_fini(&vstate->dtvs_dynvars); 15365 dtrace_vstate_fini(vstate); 15366 if (state->dts_ecbs != NULL) 15367 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 15368 15369 if (state->dts_aggregations != NULL) { 15370 #ifdef DEBUG 15371 for (i = 0; i < state->dts_naggregations; i++) 15372 ASSERT(state->dts_aggregations[i] == NULL); 15373 #endif 15374 ASSERT(state->dts_naggregations > 0); 15375 kmem_free(state->dts_aggregations, 15376 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 15377 } 15378 15379 kmem_free(state->dts_buffer, bufsize); 15380 kmem_free(state->dts_aggbuffer, bufsize); 15381 15382 for (i = 0; i < nspec; i++) 15383 kmem_free(spec[i].dtsp_buffer, bufsize); 15384 15385 if (spec != NULL) 15386 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15387 15388 dtrace_format_destroy(state); 15389 15390 if (state->dts_aggid_arena != NULL) { 15391 #ifdef illumos 15392 vmem_destroy(state->dts_aggid_arena); 15393 #else 15394 delete_unrhdr(state->dts_aggid_arena); 15395 #endif 15396 state->dts_aggid_arena = NULL; 15397 } 15398 #ifdef illumos 15399 ddi_soft_state_free(dtrace_softstate, minor); 15400 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 15401 #endif 15402 } 15403 15404 /* 15405 * DTrace Anonymous Enabling Functions 15406 */ 15407 static dtrace_state_t * 15408 dtrace_anon_grab(void) 15409 { 15410 dtrace_state_t *state; 15411 15412 ASSERT(MUTEX_HELD(&dtrace_lock)); 15413 15414 if ((state = dtrace_anon.dta_state) == NULL) { 15415 ASSERT(dtrace_anon.dta_enabling == NULL); 15416 return (NULL); 15417 } 15418 15419 ASSERT(dtrace_anon.dta_enabling != NULL); 15420 ASSERT(dtrace_retained != NULL); 15421 15422 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 15423 dtrace_anon.dta_enabling = NULL; 15424 dtrace_anon.dta_state = NULL; 15425 15426 return (state); 15427 } 15428 15429 static void 15430 dtrace_anon_property(void) 15431 { 15432 int i, rv; 15433 dtrace_state_t *state; 15434 dof_hdr_t *dof; 15435 char c[32]; /* enough for "dof-data-" + digits */ 15436 15437 ASSERT(MUTEX_HELD(&dtrace_lock)); 15438 ASSERT(MUTEX_HELD(&cpu_lock)); 15439 15440 for (i = 0; ; i++) { 15441 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 15442 15443 dtrace_err_verbose = 1; 15444 15445 if ((dof = dtrace_dof_property(c)) == NULL) { 15446 dtrace_err_verbose = 0; 15447 break; 15448 } 15449 15450 #ifdef illumos 15451 /* 15452 * We want to create anonymous state, so we need to transition 15453 * the kernel debugger to indicate that DTrace is active. If 15454 * this fails (e.g. because the debugger has modified text in 15455 * some way), we won't continue with the processing. 
15456 */ 15457 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15458 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 15459 "enabling ignored."); 15460 dtrace_dof_destroy(dof); 15461 break; 15462 } 15463 #endif 15464 15465 /* 15466 * If we haven't allocated an anonymous state, we'll do so now. 15467 */ 15468 if ((state = dtrace_anon.dta_state) == NULL) { 15469 state = dtrace_state_create(NULL, NULL); 15470 dtrace_anon.dta_state = state; 15471 15472 if (state == NULL) { 15473 /* 15474 * This basically shouldn't happen: the only 15475 * failure mode from dtrace_state_create() is a 15476 * failure of ddi_soft_state_zalloc() that 15477 * itself should never happen. Still, the 15478 * interface allows for a failure mode, and 15479 * we want to fail as gracefully as possible: 15480 * we'll emit an error message and cease 15481 * processing anonymous state in this case. 15482 */ 15483 cmn_err(CE_WARN, "failed to create " 15484 "anonymous state"); 15485 dtrace_dof_destroy(dof); 15486 break; 15487 } 15488 } 15489 15490 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 15491 &dtrace_anon.dta_enabling, 0, B_TRUE); 15492 15493 if (rv == 0) 15494 rv = dtrace_dof_options(dof, state); 15495 15496 dtrace_err_verbose = 0; 15497 dtrace_dof_destroy(dof); 15498 15499 if (rv != 0) { 15500 /* 15501 * This is malformed DOF; chuck any anonymous state 15502 * that we created. 15503 */ 15504 ASSERT(dtrace_anon.dta_enabling == NULL); 15505 dtrace_state_destroy(state); 15506 dtrace_anon.dta_state = NULL; 15507 break; 15508 } 15509 15510 ASSERT(dtrace_anon.dta_enabling != NULL); 15511 } 15512 15513 if (dtrace_anon.dta_enabling != NULL) { 15514 int rval; 15515 15516 /* 15517 * dtrace_enabling_retain() can only fail because we are 15518 * trying to retain more enablings than are allowed -- but 15519 * we only have one anonymous enabling, and we are guaranteed 15520 * to be allowed at least one retained enabling; we assert 15521 * that dtrace_enabling_retain() returns success. 15522 */ 15523 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 15524 ASSERT(rval == 0); 15525 15526 dtrace_enabling_dump(dtrace_anon.dta_enabling); 15527 } 15528 } 15529 15530 /* 15531 * DTrace Helper Functions 15532 */ 15533 static void 15534 dtrace_helper_trace(dtrace_helper_action_t *helper, 15535 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 15536 { 15537 uint32_t size, next, nnext, i; 15538 dtrace_helptrace_t *ent, *buffer; 15539 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 15540 15541 if ((buffer = dtrace_helptrace_buffer) == NULL) 15542 return; 15543 15544 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 15545 15546 /* 15547 * What would a tracing framework be without its own tracing 15548 * framework? (Well, a hell of a lot simpler, for starters...) 15549 */ 15550 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 15551 sizeof (uint64_t) - sizeof (uint64_t); 15552 15553 /* 15554 * Iterate until we can allocate a slot in the trace buffer. 15555 */ 15556 do { 15557 next = dtrace_helptrace_next; 15558 15559 if (next + size < dtrace_helptrace_bufsize) { 15560 nnext = next + size; 15561 } else { 15562 nnext = size; 15563 } 15564 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 15565 15566 /* 15567 * We have our slot; fill it in. 
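 *
 * (The reservation above is a lock-free compare-and-swap bump
 * allocator: each CPU races to advance dtrace_helptrace_next by
 * 'size', and a winning nnext equal to 'size' means the allocation
 * wrapped to the front of the buffer -- which is why the wrap case
 * below compares nnext against size rather than against zero.)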
15568 */ 15569 if (nnext == size) { 15570 dtrace_helptrace_wrapped++; 15571 next = 0; 15572 } 15573 15574 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next); 15575 ent->dtht_helper = helper; 15576 ent->dtht_where = where; 15577 ent->dtht_nlocals = vstate->dtvs_nlocals; 15578 15579 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 15580 mstate->dtms_fltoffs : -1; 15581 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 15582 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 15583 15584 for (i = 0; i < vstate->dtvs_nlocals; i++) { 15585 dtrace_statvar_t *svar; 15586 15587 if ((svar = vstate->dtvs_locals[i]) == NULL) 15588 continue; 15589 15590 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 15591 ent->dtht_locals[i] = 15592 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 15593 } 15594 } 15595 15596 static uint64_t 15597 dtrace_helper(int which, dtrace_mstate_t *mstate, 15598 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 15599 { 15600 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 15601 uint64_t sarg0 = mstate->dtms_arg[0]; 15602 uint64_t sarg1 = mstate->dtms_arg[1]; 15603 uint64_t rval = 0; 15604 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 15605 dtrace_helper_action_t *helper; 15606 dtrace_vstate_t *vstate; 15607 dtrace_difo_t *pred; 15608 int i, trace = dtrace_helptrace_buffer != NULL; 15609 15610 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 15611 15612 if (helpers == NULL) 15613 return (0); 15614 15615 if ((helper = helpers->dthps_actions[which]) == NULL) 15616 return (0); 15617 15618 vstate = &helpers->dthps_vstate; 15619 mstate->dtms_arg[0] = arg0; 15620 mstate->dtms_arg[1] = arg1; 15621 15622 /* 15623 * Now iterate over each helper. If its predicate evaluates to 'true', 15624 * we'll call the corresponding actions. Note that the below calls 15625 * to dtrace_dif_emulate() may set faults in machine state. This is 15626 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 15627 * the stored DIF offset with its own (which is the desired behavior). 15628 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 15629 * from machine state; this is okay, too. 15630 */ 15631 for (; helper != NULL; helper = helper->dtha_next) { 15632 if ((pred = helper->dtha_predicate) != NULL) { 15633 if (trace) 15634 dtrace_helper_trace(helper, mstate, vstate, 0); 15635 15636 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 15637 goto next; 15638 15639 if (*flags & CPU_DTRACE_FAULT) 15640 goto err; 15641 } 15642 15643 for (i = 0; i < helper->dtha_nactions; i++) { 15644 if (trace) 15645 dtrace_helper_trace(helper, 15646 mstate, vstate, i + 1); 15647 15648 rval = dtrace_dif_emulate(helper->dtha_actions[i], 15649 mstate, vstate, state); 15650 15651 if (*flags & CPU_DTRACE_FAULT) 15652 goto err; 15653 } 15654 15655 next: 15656 if (trace) 15657 dtrace_helper_trace(helper, mstate, vstate, 15658 DTRACE_HELPTRACE_NEXT); 15659 } 15660 15661 if (trace) 15662 dtrace_helper_trace(helper, mstate, vstate, 15663 DTRACE_HELPTRACE_DONE); 15664 15665 /* 15666 * Restore the arg0 that we saved upon entry. 15667 */ 15668 mstate->dtms_arg[0] = sarg0; 15669 mstate->dtms_arg[1] = sarg1; 15670 15671 return (rval); 15672 15673 err: 15674 if (trace) 15675 dtrace_helper_trace(helper, mstate, vstate, 15676 DTRACE_HELPTRACE_ERR); 15677 15678 /* 15679 * Restore the arg0 that we saved upon entry. 
15680 */ 15681 mstate->dtms_arg[0] = sarg0; 15682 mstate->dtms_arg[1] = sarg1; 15683 15684 return (0); 15685 } 15686 15687 static void 15688 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 15689 dtrace_vstate_t *vstate) 15690 { 15691 int i; 15692 15693 if (helper->dtha_predicate != NULL) 15694 dtrace_difo_release(helper->dtha_predicate, vstate); 15695 15696 for (i = 0; i < helper->dtha_nactions; i++) { 15697 ASSERT(helper->dtha_actions[i] != NULL); 15698 dtrace_difo_release(helper->dtha_actions[i], vstate); 15699 } 15700 15701 kmem_free(helper->dtha_actions, 15702 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 15703 kmem_free(helper, sizeof (dtrace_helper_action_t)); 15704 } 15705 15706 static int 15707 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen) 15708 { 15709 proc_t *p = curproc; 15710 dtrace_vstate_t *vstate; 15711 int i; 15712 15713 if (help == NULL) 15714 help = p->p_dtrace_helpers; 15715 15716 ASSERT(MUTEX_HELD(&dtrace_lock)); 15717 15718 if (help == NULL || gen > help->dthps_generation) 15719 return (EINVAL); 15720 15721 vstate = &help->dthps_vstate; 15722 15723 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15724 dtrace_helper_action_t *last = NULL, *h, *next; 15725 15726 for (h = help->dthps_actions[i]; h != NULL; h = next) { 15727 next = h->dtha_next; 15728 15729 if (h->dtha_generation == gen) { 15730 if (last != NULL) { 15731 last->dtha_next = next; 15732 } else { 15733 help->dthps_actions[i] = next; 15734 } 15735 15736 dtrace_helper_action_destroy(h, vstate); 15737 } else { 15738 last = h; 15739 } 15740 } 15741 } 15742 15743 /* 15744 * Interate until we've cleared out all helper providers with the 15745 * given generation number. 15746 */ 15747 for (;;) { 15748 dtrace_helper_provider_t *prov; 15749 15750 /* 15751 * Look for a helper provider with the right generation. We 15752 * have to start back at the beginning of the list each time 15753 * because we drop dtrace_lock. It's unlikely that we'll make 15754 * more than two passes. 15755 */ 15756 for (i = 0; i < help->dthps_nprovs; i++) { 15757 prov = help->dthps_provs[i]; 15758 15759 if (prov->dthp_generation == gen) 15760 break; 15761 } 15762 15763 /* 15764 * If there were no matches, we're done. 15765 */ 15766 if (i == help->dthps_nprovs) 15767 break; 15768 15769 /* 15770 * Move the last helper provider into this slot. 15771 */ 15772 help->dthps_nprovs--; 15773 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 15774 help->dthps_provs[help->dthps_nprovs] = NULL; 15775 15776 mutex_exit(&dtrace_lock); 15777 15778 /* 15779 * If we have a meta provider, remove this helper provider. 
15780 */ 15781 mutex_enter(&dtrace_meta_lock); 15782 if (dtrace_meta_pid != NULL) { 15783 ASSERT(dtrace_deferred_pid == NULL); 15784 dtrace_helper_provider_remove(&prov->dthp_prov, 15785 p->p_pid); 15786 } 15787 mutex_exit(&dtrace_meta_lock); 15788 15789 dtrace_helper_provider_destroy(prov); 15790 15791 mutex_enter(&dtrace_lock); 15792 } 15793 15794 return (0); 15795 } 15796 15797 static int 15798 dtrace_helper_validate(dtrace_helper_action_t *helper) 15799 { 15800 int err = 0, i; 15801 dtrace_difo_t *dp; 15802 15803 if ((dp = helper->dtha_predicate) != NULL) 15804 err += dtrace_difo_validate_helper(dp); 15805 15806 for (i = 0; i < helper->dtha_nactions; i++) 15807 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 15808 15809 return (err == 0); 15810 } 15811 15812 static int 15813 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep, 15814 dtrace_helpers_t *help) 15815 { 15816 dtrace_helper_action_t *helper, *last; 15817 dtrace_actdesc_t *act; 15818 dtrace_vstate_t *vstate; 15819 dtrace_predicate_t *pred; 15820 int count = 0, nactions = 0, i; 15821 15822 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 15823 return (EINVAL); 15824 15825 last = help->dthps_actions[which]; 15826 vstate = &help->dthps_vstate; 15827 15828 for (count = 0; last != NULL; last = last->dtha_next) { 15829 count++; 15830 if (last->dtha_next == NULL) 15831 break; 15832 } 15833 15834 /* 15835 * If we already have dtrace_helper_actions_max helper actions for this 15836 * helper action type, we'll refuse to add a new one. 15837 */ 15838 if (count >= dtrace_helper_actions_max) 15839 return (ENOSPC); 15840 15841 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 15842 helper->dtha_generation = help->dthps_generation; 15843 15844 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 15845 ASSERT(pred->dtp_difo != NULL); 15846 dtrace_difo_hold(pred->dtp_difo); 15847 helper->dtha_predicate = pred->dtp_difo; 15848 } 15849 15850 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 15851 if (act->dtad_kind != DTRACEACT_DIFEXPR) 15852 goto err; 15853 15854 if (act->dtad_difo == NULL) 15855 goto err; 15856 15857 nactions++; 15858 } 15859 15860 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 15861 (helper->dtha_nactions = nactions), KM_SLEEP); 15862 15863 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 15864 dtrace_difo_hold(act->dtad_difo); 15865 helper->dtha_actions[i++] = act->dtad_difo; 15866 } 15867 15868 if (!dtrace_helper_validate(helper)) 15869 goto err; 15870 15871 if (last == NULL) { 15872 help->dthps_actions[which] = helper; 15873 } else { 15874 last->dtha_next = helper; 15875 } 15876 15877 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 15878 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 15879 dtrace_helptrace_next = 0; 15880 } 15881 15882 return (0); 15883 err: 15884 dtrace_helper_action_destroy(helper, vstate); 15885 return (EINVAL); 15886 } 15887 15888 static void 15889 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 15890 dof_helper_t *dofhp) 15891 { 15892 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 15893 15894 mutex_enter(&dtrace_meta_lock); 15895 mutex_enter(&dtrace_lock); 15896 15897 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 15898 /* 15899 * If the dtrace module is loaded but not attached, or if 15900 * there aren't isn't a meta provider registered to deal with 15901 * these provider descriptions, we need to postpone creating 15902 * the actual providers until later. 
15903 */ 15904 15905 if (help->dthps_next == NULL && help->dthps_prev == NULL && 15906 dtrace_deferred_pid != help) { 15907 help->dthps_deferred = 1; 15908 help->dthps_pid = p->p_pid; 15909 help->dthps_next = dtrace_deferred_pid; 15910 help->dthps_prev = NULL; 15911 if (dtrace_deferred_pid != NULL) 15912 dtrace_deferred_pid->dthps_prev = help; 15913 dtrace_deferred_pid = help; 15914 } 15915 15916 mutex_exit(&dtrace_lock); 15917 15918 } else if (dofhp != NULL) { 15919 /* 15920 * If the dtrace module is loaded and we have a particular 15921 * helper provider description, pass that off to the 15922 * meta provider. 15923 */ 15924 15925 mutex_exit(&dtrace_lock); 15926 15927 dtrace_helper_provide(dofhp, p->p_pid); 15928 15929 } else { 15930 /* 15931 * Otherwise, just pass all the helper provider descriptions 15932 * off to the meta provider. 15933 */ 15934 15935 int i; 15936 mutex_exit(&dtrace_lock); 15937 15938 for (i = 0; i < help->dthps_nprovs; i++) { 15939 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 15940 p->p_pid); 15941 } 15942 } 15943 15944 mutex_exit(&dtrace_meta_lock); 15945 } 15946 15947 static int 15948 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen) 15949 { 15950 dtrace_helper_provider_t *hprov, **tmp_provs; 15951 uint_t tmp_maxprovs, i; 15952 15953 ASSERT(MUTEX_HELD(&dtrace_lock)); 15954 ASSERT(help != NULL); 15955 15956 /* 15957 * If we already have dtrace_helper_providers_max helper providers, 15958 * we're refuse to add a new one. 15959 */ 15960 if (help->dthps_nprovs >= dtrace_helper_providers_max) 15961 return (ENOSPC); 15962 15963 /* 15964 * Check to make sure this isn't a duplicate. 15965 */ 15966 for (i = 0; i < help->dthps_nprovs; i++) { 15967 if (dofhp->dofhp_addr == 15968 help->dthps_provs[i]->dthp_prov.dofhp_addr) 15969 return (EALREADY); 15970 } 15971 15972 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 15973 hprov->dthp_prov = *dofhp; 15974 hprov->dthp_ref = 1; 15975 hprov->dthp_generation = gen; 15976 15977 /* 15978 * Allocate a bigger table for helper providers if it's already full. 
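 *
 * Growth is geometric: the table starts at two slots and doubles on
 * each subsequent overflow (2, 4, 8, ...), clamped to
 * dtrace_helper_providers_max; the old array, if any, is copied into
 * the new one and then freed.  For example, adding a fifth provider
 * to a process triggers a 4 -> 8 resize.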
15979 */ 15980 if (help->dthps_maxprovs == help->dthps_nprovs) { 15981 tmp_maxprovs = help->dthps_maxprovs; 15982 tmp_provs = help->dthps_provs; 15983 15984 if (help->dthps_maxprovs == 0) 15985 help->dthps_maxprovs = 2; 15986 else 15987 help->dthps_maxprovs *= 2; 15988 if (help->dthps_maxprovs > dtrace_helper_providers_max) 15989 help->dthps_maxprovs = dtrace_helper_providers_max; 15990 15991 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 15992 15993 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 15994 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15995 15996 if (tmp_provs != NULL) { 15997 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 15998 sizeof (dtrace_helper_provider_t *)); 15999 kmem_free(tmp_provs, tmp_maxprovs * 16000 sizeof (dtrace_helper_provider_t *)); 16001 } 16002 } 16003 16004 help->dthps_provs[help->dthps_nprovs] = hprov; 16005 help->dthps_nprovs++; 16006 16007 return (0); 16008 } 16009 16010 static void 16011 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 16012 { 16013 mutex_enter(&dtrace_lock); 16014 16015 if (--hprov->dthp_ref == 0) { 16016 dof_hdr_t *dof; 16017 mutex_exit(&dtrace_lock); 16018 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 16019 dtrace_dof_destroy(dof); 16020 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 16021 } else { 16022 mutex_exit(&dtrace_lock); 16023 } 16024 } 16025 16026 static int 16027 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 16028 { 16029 uintptr_t daddr = (uintptr_t)dof; 16030 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 16031 dof_provider_t *provider; 16032 dof_probe_t *probe; 16033 uint8_t *arg; 16034 char *strtab, *typestr; 16035 dof_stridx_t typeidx; 16036 size_t typesz; 16037 uint_t nprobes, j, k; 16038 16039 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 16040 16041 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 16042 dtrace_dof_error(dof, "misaligned section offset"); 16043 return (-1); 16044 } 16045 16046 /* 16047 * The section needs to be large enough to contain the DOF provider 16048 * structure appropriate for the given version. 16049 */ 16050 if (sec->dofs_size < 16051 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
16052 offsetof(dof_provider_t, dofpv_prenoffs) : 16053 sizeof (dof_provider_t))) { 16054 dtrace_dof_error(dof, "provider section too small"); 16055 return (-1); 16056 } 16057 16058 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 16059 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 16060 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 16061 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 16062 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 16063 16064 if (str_sec == NULL || prb_sec == NULL || 16065 arg_sec == NULL || off_sec == NULL) 16066 return (-1); 16067 16068 enoff_sec = NULL; 16069 16070 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 16071 provider->dofpv_prenoffs != DOF_SECT_NONE && 16072 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 16073 provider->dofpv_prenoffs)) == NULL) 16074 return (-1); 16075 16076 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 16077 16078 if (provider->dofpv_name >= str_sec->dofs_size || 16079 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 16080 dtrace_dof_error(dof, "invalid provider name"); 16081 return (-1); 16082 } 16083 16084 if (prb_sec->dofs_entsize == 0 || 16085 prb_sec->dofs_entsize > prb_sec->dofs_size) { 16086 dtrace_dof_error(dof, "invalid entry size"); 16087 return (-1); 16088 } 16089 16090 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 16091 dtrace_dof_error(dof, "misaligned entry size"); 16092 return (-1); 16093 } 16094 16095 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 16096 dtrace_dof_error(dof, "invalid entry size"); 16097 return (-1); 16098 } 16099 16100 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 16101 dtrace_dof_error(dof, "misaligned section offset"); 16102 return (-1); 16103 } 16104 16105 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 16106 dtrace_dof_error(dof, "invalid entry size"); 16107 return (-1); 16108 } 16109 16110 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 16111 16112 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 16113 16114 /* 16115 * Take a pass through the probes to check for errors. 16116 */ 16117 for (j = 0; j < nprobes; j++) { 16118 probe = (dof_probe_t *)(uintptr_t)(daddr + 16119 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 16120 16121 if (probe->dofpr_func >= str_sec->dofs_size) { 16122 dtrace_dof_error(dof, "invalid function name"); 16123 return (-1); 16124 } 16125 16126 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 16127 dtrace_dof_error(dof, "function name too long"); 16128 /* 16129 * Keep going if the function name is too long. 16130 * Unlike provider and probe names, we cannot reasonably 16131 * impose restrictions on function names, since they're 16132 * a property of the code being instrumented. We will 16133 * skip this probe in dtrace_helper_provide_one(). 16134 */ 16135 } 16136 16137 if (probe->dofpr_name >= str_sec->dofs_size || 16138 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 16139 dtrace_dof_error(dof, "invalid probe name"); 16140 return (-1); 16141 } 16142 16143 /* 16144 * The offset count must not wrap the index, and the offsets 16145 * must also not overflow the section's data. 
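 *
 * To make the wraparound concern concrete (hypothetical values): a
 * dofpr_offidx of 0xfffffffe with a dofpr_noffs of 4 sums to 2 in
 * 32-bit arithmetic, so a naive "sum times entsize fits in the
 * section" test would pass even though the probe claims offsets far
 * beyond the section's end; the first clause of the check below
 * catches exactly that case.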
16146 */ 16147 if (probe->dofpr_offidx + probe->dofpr_noffs < 16148 probe->dofpr_offidx || 16149 (probe->dofpr_offidx + probe->dofpr_noffs) * 16150 off_sec->dofs_entsize > off_sec->dofs_size) { 16151 dtrace_dof_error(dof, "invalid probe offset"); 16152 return (-1); 16153 } 16154 16155 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 16156 /* 16157 * If there's no is-enabled offset section, make sure 16158 * there aren't any is-enabled offsets. Otherwise 16159 * perform the same checks as for probe offsets 16160 * (immediately above). 16161 */ 16162 if (enoff_sec == NULL) { 16163 if (probe->dofpr_enoffidx != 0 || 16164 probe->dofpr_nenoffs != 0) { 16165 dtrace_dof_error(dof, "is-enabled " 16166 "offsets with null section"); 16167 return (-1); 16168 } 16169 } else if (probe->dofpr_enoffidx + 16170 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 16171 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 16172 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 16173 dtrace_dof_error(dof, "invalid is-enabled " 16174 "offset"); 16175 return (-1); 16176 } 16177 16178 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 16179 dtrace_dof_error(dof, "zero probe and " 16180 "is-enabled offsets"); 16181 return (-1); 16182 } 16183 } else if (probe->dofpr_noffs == 0) { 16184 dtrace_dof_error(dof, "zero probe offsets"); 16185 return (-1); 16186 } 16187 16188 if (probe->dofpr_argidx + probe->dofpr_xargc < 16189 probe->dofpr_argidx || 16190 (probe->dofpr_argidx + probe->dofpr_xargc) * 16191 arg_sec->dofs_entsize > arg_sec->dofs_size) { 16192 dtrace_dof_error(dof, "invalid args"); 16193 return (-1); 16194 } 16195 16196 typeidx = probe->dofpr_nargv; 16197 typestr = strtab + probe->dofpr_nargv; 16198 for (k = 0; k < probe->dofpr_nargc; k++) { 16199 if (typeidx >= str_sec->dofs_size) { 16200 dtrace_dof_error(dof, "bad " 16201 "native argument type"); 16202 return (-1); 16203 } 16204 16205 typesz = strlen(typestr) + 1; 16206 if (typesz > DTRACE_ARGTYPELEN) { 16207 dtrace_dof_error(dof, "native " 16208 "argument type too long"); 16209 return (-1); 16210 } 16211 typeidx += typesz; 16212 typestr += typesz; 16213 } 16214 16215 typeidx = probe->dofpr_xargv; 16216 typestr = strtab + probe->dofpr_xargv; 16217 for (k = 0; k < probe->dofpr_xargc; k++) { 16218 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 16219 dtrace_dof_error(dof, "bad " 16220 "native argument index"); 16221 return (-1); 16222 } 16223 16224 if (typeidx >= str_sec->dofs_size) { 16225 dtrace_dof_error(dof, "bad " 16226 "translated argument type"); 16227 return (-1); 16228 } 16229 16230 typesz = strlen(typestr) + 1; 16231 if (typesz > DTRACE_ARGTYPELEN) { 16232 dtrace_dof_error(dof, "translated argument " 16233 "type too long"); 16234 return (-1); 16235 } 16236 16237 typeidx += typesz; 16238 typestr += typesz; 16239 } 16240 } 16241 16242 return (0); 16243 } 16244 16245 static int 16246 #ifdef __FreeBSD__ 16247 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p) 16248 #else 16249 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 16250 #endif 16251 { 16252 dtrace_helpers_t *help; 16253 dtrace_vstate_t *vstate; 16254 dtrace_enabling_t *enab = NULL; 16255 #ifndef __FreeBSD__ 16256 proc_t *p = curproc; 16257 #endif 16258 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 16259 uintptr_t daddr = (uintptr_t)dof; 16260 16261 ASSERT(MUTEX_HELD(&dtrace_lock)); 16262 16263 if ((help = p->p_dtrace_helpers) == NULL) 16264 help = dtrace_helpers_create(p); 16265 16266 vstate = &help->dthps_vstate; 16267 16268 if ((rv = 
dtrace_dof_slurp(dof, vstate, NULL, &enab, 16269 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 16270 dtrace_dof_destroy(dof); 16271 return (rv); 16272 } 16273 16274 /* 16275 * Look for helper providers and validate their descriptions. 16276 */ 16277 if (dhp != NULL) { 16278 for (i = 0; i < dof->dofh_secnum; i++) { 16279 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 16280 dof->dofh_secoff + i * dof->dofh_secsize); 16281 16282 if (sec->dofs_type != DOF_SECT_PROVIDER) 16283 continue; 16284 16285 if (dtrace_helper_provider_validate(dof, sec) != 0) { 16286 dtrace_enabling_destroy(enab); 16287 dtrace_dof_destroy(dof); 16288 return (-1); 16289 } 16290 16291 nprovs++; 16292 } 16293 } 16294 16295 /* 16296 * Now we need to walk through the ECB descriptions in the enabling. 16297 */ 16298 for (i = 0; i < enab->dten_ndesc; i++) { 16299 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 16300 dtrace_probedesc_t *desc = &ep->dted_probe; 16301 16302 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 16303 continue; 16304 16305 if (strcmp(desc->dtpd_mod, "helper") != 0) 16306 continue; 16307 16308 if (strcmp(desc->dtpd_func, "ustack") != 0) 16309 continue; 16310 16311 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 16312 ep, help)) != 0) { 16313 /* 16314 * Adding this helper action failed -- we are now going 16315 * to rip out the entire generation and return failure. 16316 */ 16317 (void) dtrace_helper_destroygen(help, 16318 help->dthps_generation); 16319 dtrace_enabling_destroy(enab); 16320 dtrace_dof_destroy(dof); 16321 return (-1); 16322 } 16323 16324 nhelpers++; 16325 } 16326 16327 if (nhelpers < enab->dten_ndesc) 16328 dtrace_dof_error(dof, "unmatched helpers"); 16329 16330 gen = help->dthps_generation++; 16331 dtrace_enabling_destroy(enab); 16332 16333 if (dhp != NULL && nprovs > 0) { 16334 /* 16335 * Now that this is in-kernel, we change the sense of the 16336 * members: dofhp_dof denotes the in-kernel copy of the DOF 16337 * and dofhp_addr denotes the address at user-level. 16338 */ 16339 dhp->dofhp_addr = dhp->dofhp_dof; 16340 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 16341 16342 if (dtrace_helper_provider_add(dhp, help, gen) == 0) { 16343 mutex_exit(&dtrace_lock); 16344 dtrace_helper_provider_register(p, help, dhp); 16345 mutex_enter(&dtrace_lock); 16346 16347 destroy = 0; 16348 } 16349 } 16350 16351 if (destroy) 16352 dtrace_dof_destroy(dof); 16353 16354 return (gen); 16355 } 16356 16357 static dtrace_helpers_t * 16358 dtrace_helpers_create(proc_t *p) 16359 { 16360 dtrace_helpers_t *help; 16361 16362 ASSERT(MUTEX_HELD(&dtrace_lock)); 16363 ASSERT(p->p_dtrace_helpers == NULL); 16364 16365 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 16366 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 16367 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 16368 16369 p->p_dtrace_helpers = help; 16370 dtrace_helpers++; 16371 16372 return (help); 16373 } 16374 16375 #ifdef illumos 16376 static 16377 #endif 16378 void 16379 dtrace_helpers_destroy(proc_t *p) 16380 { 16381 dtrace_helpers_t *help; 16382 dtrace_vstate_t *vstate; 16383 #ifdef illumos 16384 proc_t *p = curproc; 16385 #endif 16386 int i; 16387 16388 mutex_enter(&dtrace_lock); 16389 16390 ASSERT(p->p_dtrace_helpers != NULL); 16391 ASSERT(dtrace_helpers > 0); 16392 16393 help = p->p_dtrace_helpers; 16394 vstate = &help->dthps_vstate; 16395 16396 /* 16397 * We're now going to lose the help from this process. 
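 *
 * This follows the usual DTrace teardown idiom: first make the
 * helpers unreachable, then dtrace_sync() so that any CPU still in
 * probe context using the old pointer drains out, and only then tear
 * down the underlying state.  Roughly:
 *
 *     p->p_dtrace_helpers = NULL;     (no new lookups can find it)
 *     dtrace_sync();                  (wait out probe-context users)
 *     ... destroy the actions, providers and vstate ...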
16398 */ 16399 p->p_dtrace_helpers = NULL; 16400 dtrace_sync(); 16401 16402 /* 16403 * Destory the helper actions. 16404 */ 16405 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16406 dtrace_helper_action_t *h, *next; 16407 16408 for (h = help->dthps_actions[i]; h != NULL; h = next) { 16409 next = h->dtha_next; 16410 dtrace_helper_action_destroy(h, vstate); 16411 h = next; 16412 } 16413 } 16414 16415 mutex_exit(&dtrace_lock); 16416 16417 /* 16418 * Destroy the helper providers. 16419 */ 16420 if (help->dthps_maxprovs > 0) { 16421 mutex_enter(&dtrace_meta_lock); 16422 if (dtrace_meta_pid != NULL) { 16423 ASSERT(dtrace_deferred_pid == NULL); 16424 16425 for (i = 0; i < help->dthps_nprovs; i++) { 16426 dtrace_helper_provider_remove( 16427 &help->dthps_provs[i]->dthp_prov, p->p_pid); 16428 } 16429 } else { 16430 mutex_enter(&dtrace_lock); 16431 ASSERT(help->dthps_deferred == 0 || 16432 help->dthps_next != NULL || 16433 help->dthps_prev != NULL || 16434 help == dtrace_deferred_pid); 16435 16436 /* 16437 * Remove the helper from the deferred list. 16438 */ 16439 if (help->dthps_next != NULL) 16440 help->dthps_next->dthps_prev = help->dthps_prev; 16441 if (help->dthps_prev != NULL) 16442 help->dthps_prev->dthps_next = help->dthps_next; 16443 if (dtrace_deferred_pid == help) { 16444 dtrace_deferred_pid = help->dthps_next; 16445 ASSERT(help->dthps_prev == NULL); 16446 } 16447 16448 mutex_exit(&dtrace_lock); 16449 } 16450 16451 mutex_exit(&dtrace_meta_lock); 16452 16453 for (i = 0; i < help->dthps_nprovs; i++) { 16454 dtrace_helper_provider_destroy(help->dthps_provs[i]); 16455 } 16456 16457 kmem_free(help->dthps_provs, help->dthps_maxprovs * 16458 sizeof (dtrace_helper_provider_t *)); 16459 } 16460 16461 mutex_enter(&dtrace_lock); 16462 16463 dtrace_vstate_fini(&help->dthps_vstate); 16464 kmem_free(help->dthps_actions, 16465 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 16466 kmem_free(help, sizeof (dtrace_helpers_t)); 16467 16468 --dtrace_helpers; 16469 mutex_exit(&dtrace_lock); 16470 } 16471 16472 #ifdef illumos 16473 static 16474 #endif 16475 void 16476 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 16477 { 16478 dtrace_helpers_t *help, *newhelp; 16479 dtrace_helper_action_t *helper, *new, *last; 16480 dtrace_difo_t *dp; 16481 dtrace_vstate_t *vstate; 16482 int i, j, sz, hasprovs = 0; 16483 16484 mutex_enter(&dtrace_lock); 16485 ASSERT(from->p_dtrace_helpers != NULL); 16486 ASSERT(dtrace_helpers > 0); 16487 16488 help = from->p_dtrace_helpers; 16489 newhelp = dtrace_helpers_create(to); 16490 ASSERT(to->p_dtrace_helpers != NULL); 16491 16492 newhelp->dthps_generation = help->dthps_generation; 16493 vstate = &newhelp->dthps_vstate; 16494 16495 /* 16496 * Duplicate the helper actions. 
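 *
 * Note the asymmetry in this duplication: the helper actions and
 * their predicates are copied deeply -- every DIFO is re-created in
 * the child's vstate via dtrace_difo_duplicate() -- whereas the
 * helper providers further below are shared between parent and child
 * by simply bumping dthp_ref on each one.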
16497 */ 16498 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16499 if ((helper = help->dthps_actions[i]) == NULL) 16500 continue; 16501 16502 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 16503 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 16504 KM_SLEEP); 16505 new->dtha_generation = helper->dtha_generation; 16506 16507 if ((dp = helper->dtha_predicate) != NULL) { 16508 dp = dtrace_difo_duplicate(dp, vstate); 16509 new->dtha_predicate = dp; 16510 } 16511 16512 new->dtha_nactions = helper->dtha_nactions; 16513 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 16514 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 16515 16516 for (j = 0; j < new->dtha_nactions; j++) { 16517 dtrace_difo_t *dp = helper->dtha_actions[j]; 16518 16519 ASSERT(dp != NULL); 16520 dp = dtrace_difo_duplicate(dp, vstate); 16521 new->dtha_actions[j] = dp; 16522 } 16523 16524 if (last != NULL) { 16525 last->dtha_next = new; 16526 } else { 16527 newhelp->dthps_actions[i] = new; 16528 } 16529 16530 last = new; 16531 } 16532 } 16533 16534 /* 16535 * Duplicate the helper providers and register them with the 16536 * DTrace framework. 16537 */ 16538 if (help->dthps_nprovs > 0) { 16539 newhelp->dthps_nprovs = help->dthps_nprovs; 16540 newhelp->dthps_maxprovs = help->dthps_nprovs; 16541 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 16542 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 16543 for (i = 0; i < newhelp->dthps_nprovs; i++) { 16544 newhelp->dthps_provs[i] = help->dthps_provs[i]; 16545 newhelp->dthps_provs[i]->dthp_ref++; 16546 } 16547 16548 hasprovs = 1; 16549 } 16550 16551 mutex_exit(&dtrace_lock); 16552 16553 if (hasprovs) 16554 dtrace_helper_provider_register(to, newhelp, NULL); 16555 } 16556 16557 /* 16558 * DTrace Hook Functions 16559 */ 16560 static void 16561 dtrace_module_loaded(modctl_t *ctl) 16562 { 16563 dtrace_provider_t *prv; 16564 16565 mutex_enter(&dtrace_provider_lock); 16566 #ifdef illumos 16567 mutex_enter(&mod_lock); 16568 #endif 16569 16570 #ifdef illumos 16571 ASSERT(ctl->mod_busy); 16572 #endif 16573 16574 /* 16575 * We're going to call each providers per-module provide operation 16576 * specifying only this module. 16577 */ 16578 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 16579 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 16580 16581 #ifdef illumos 16582 mutex_exit(&mod_lock); 16583 #endif 16584 mutex_exit(&dtrace_provider_lock); 16585 16586 /* 16587 * If we have any retained enablings, we need to match against them. 16588 * Enabling probes requires that cpu_lock be held, and we cannot hold 16589 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 16590 * module. (In particular, this happens when loading scheduling 16591 * classes.) So if we have any retained enablings, we need to dispatch 16592 * our task queue to do the match for us. 16593 */ 16594 mutex_enter(&dtrace_lock); 16595 16596 if (dtrace_retained == NULL) { 16597 mutex_exit(&dtrace_lock); 16598 return; 16599 } 16600 16601 (void) taskq_dispatch(dtrace_taskq, 16602 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 16603 16604 mutex_exit(&dtrace_lock); 16605 16606 /* 16607 * And now, for a little heuristic sleaze: in general, we want to 16608 * match modules as soon as they load. However, we cannot guarantee 16609 * this, because it would lead us to the lock ordering violation 16610 * outlined above. 
The common case, of course, is that cpu_lock is 16611 * _not_ held -- so we delay here for a clock tick, hoping that that's 16612 * long enough for the task queue to do its work. If it's not, it's 16613 * not a serious problem -- it just means that the module that we 16614 * just loaded may not be immediately instrumentable. 16615 */ 16616 delay(1); 16617 } 16618 16619 static void 16620 #ifdef illumos 16621 dtrace_module_unloaded(modctl_t *ctl) 16622 #else 16623 dtrace_module_unloaded(modctl_t *ctl, int *error) 16624 #endif 16625 { 16626 dtrace_probe_t template, *probe, *first, *next; 16627 dtrace_provider_t *prov; 16628 #ifndef illumos 16629 char modname[DTRACE_MODNAMELEN]; 16630 size_t len; 16631 #endif 16632 16633 #ifdef illumos 16634 template.dtpr_mod = ctl->mod_modname; 16635 #else 16636 /* Handle the fact that ctl->filename may end in ".ko". */ 16637 strlcpy(modname, ctl->filename, sizeof(modname)); 16638 len = strlen(ctl->filename); 16639 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 16640 modname[len - 3] = '\0'; 16641 template.dtpr_mod = modname; 16642 #endif 16643 16644 mutex_enter(&dtrace_provider_lock); 16645 #ifdef illumos 16646 mutex_enter(&mod_lock); 16647 #endif 16648 mutex_enter(&dtrace_lock); 16649 16650 #ifndef illumos 16651 if (ctl->nenabled > 0) { 16652 /* Don't allow unloads if a probe is enabled. */ 16653 mutex_exit(&dtrace_provider_lock); 16654 mutex_exit(&dtrace_lock); 16655 *error = -1; 16656 printf( 16657 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 16658 return; 16659 } 16660 #endif 16661 16662 if (dtrace_bymod == NULL) { 16663 /* 16664 * The DTrace module is loaded (obviously) but not attached; 16665 * we don't have any work to do. 16666 */ 16667 mutex_exit(&dtrace_provider_lock); 16668 #ifdef illumos 16669 mutex_exit(&mod_lock); 16670 #endif 16671 mutex_exit(&dtrace_lock); 16672 return; 16673 } 16674 16675 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 16676 probe != NULL; probe = probe->dtpr_nextmod) { 16677 if (probe->dtpr_ecb != NULL) { 16678 mutex_exit(&dtrace_provider_lock); 16679 #ifdef illumos 16680 mutex_exit(&mod_lock); 16681 #endif 16682 mutex_exit(&dtrace_lock); 16683 16684 /* 16685 * This shouldn't _actually_ be possible -- we're 16686 * unloading a module that has an enabled probe in it. 16687 * (It's normally up to the provider to make sure that 16688 * this can't happen.) However, because dtps_enable() 16689 * doesn't have a failure mode, there can be an 16690 * enable/unload race. Upshot: we don't want to 16691 * assert, but we're not going to disable the 16692 * probe, either. 
16693 */ 16694 if (dtrace_err_verbose) { 16695 #ifdef illumos 16696 cmn_err(CE_WARN, "unloaded module '%s' had " 16697 "enabled probes", ctl->mod_modname); 16698 #else 16699 cmn_err(CE_WARN, "unloaded module '%s' had " 16700 "enabled probes", modname); 16701 #endif 16702 } 16703 16704 return; 16705 } 16706 } 16707 16708 probe = first; 16709 16710 for (first = NULL; probe != NULL; probe = next) { 16711 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 16712 16713 dtrace_probes[probe->dtpr_id - 1] = NULL; 16714 16715 next = probe->dtpr_nextmod; 16716 dtrace_hash_remove(dtrace_bymod, probe); 16717 dtrace_hash_remove(dtrace_byfunc, probe); 16718 dtrace_hash_remove(dtrace_byname, probe); 16719 16720 if (first == NULL) { 16721 first = probe; 16722 probe->dtpr_nextmod = NULL; 16723 } else { 16724 probe->dtpr_nextmod = first; 16725 first = probe; 16726 } 16727 } 16728 16729 /* 16730 * We've removed all of the module's probes from the hash chains and 16731 * from the probe array. Now issue a dtrace_sync() to be sure that 16732 * everyone has cleared out from any probe array processing. 16733 */ 16734 dtrace_sync(); 16735 16736 for (probe = first; probe != NULL; probe = first) { 16737 first = probe->dtpr_nextmod; 16738 prov = probe->dtpr_provider; 16739 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 16740 probe->dtpr_arg); 16741 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 16742 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 16743 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 16744 #ifdef illumos 16745 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 16746 #else 16747 free_unr(dtrace_arena, probe->dtpr_id); 16748 #endif 16749 kmem_free(probe, sizeof (dtrace_probe_t)); 16750 } 16751 16752 mutex_exit(&dtrace_lock); 16753 #ifdef illumos 16754 mutex_exit(&mod_lock); 16755 #endif 16756 mutex_exit(&dtrace_provider_lock); 16757 } 16758 16759 #ifndef illumos 16760 static void 16761 dtrace_kld_load(void *arg __unused, linker_file_t lf) 16762 { 16763 16764 dtrace_module_loaded(lf); 16765 } 16766 16767 static void 16768 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 16769 { 16770 16771 if (*error != 0) 16772 /* We already have an error, so don't do anything. */ 16773 return; 16774 dtrace_module_unloaded(lf, error); 16775 } 16776 #endif 16777 16778 #ifdef illumos 16779 static void 16780 dtrace_suspend(void) 16781 { 16782 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 16783 } 16784 16785 static void 16786 dtrace_resume(void) 16787 { 16788 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 16789 } 16790 #endif 16791 16792 static int 16793 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 16794 { 16795 ASSERT(MUTEX_HELD(&cpu_lock)); 16796 mutex_enter(&dtrace_lock); 16797 16798 switch (what) { 16799 case CPU_CONFIG: { 16800 dtrace_state_t *state; 16801 dtrace_optval_t *opt, rs, c; 16802 16803 /* 16804 * For now, we only allocate a new buffer for anonymous state. 16805 */ 16806 if ((state = dtrace_anon.dta_state) == NULL) 16807 break; 16808 16809 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 16810 break; 16811 16812 opt = state->dts_options; 16813 c = opt[DTRACEOPT_CPU]; 16814 16815 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 16816 break; 16817 16818 /* 16819 * Regardless of what the actual policy is, we're going to 16820 * temporarily set our resize policy to be manual. We're 16821 * also going to temporarily set our CPU option to denote 16822 * the newly configured CPU. 
16823 */ 16824 rs = opt[DTRACEOPT_BUFRESIZE]; 16825 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 16826 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 16827 16828 (void) dtrace_state_buffers(state); 16829 16830 opt[DTRACEOPT_BUFRESIZE] = rs; 16831 opt[DTRACEOPT_CPU] = c; 16832 16833 break; 16834 } 16835 16836 case CPU_UNCONFIG: 16837 /* 16838 * We don't free the buffer in the CPU_UNCONFIG case. (The 16839 * buffer will be freed when the consumer exits.) 16840 */ 16841 break; 16842 16843 default: 16844 break; 16845 } 16846 16847 mutex_exit(&dtrace_lock); 16848 return (0); 16849 } 16850 16851 #ifdef illumos 16852 static void 16853 dtrace_cpu_setup_initial(processorid_t cpu) 16854 { 16855 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 16856 } 16857 #endif 16858 16859 static void 16860 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 16861 { 16862 if (dtrace_toxranges >= dtrace_toxranges_max) { 16863 int osize, nsize; 16864 dtrace_toxrange_t *range; 16865 16866 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16867 16868 if (osize == 0) { 16869 ASSERT(dtrace_toxrange == NULL); 16870 ASSERT(dtrace_toxranges_max == 0); 16871 dtrace_toxranges_max = 1; 16872 } else { 16873 dtrace_toxranges_max <<= 1; 16874 } 16875 16876 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16877 range = kmem_zalloc(nsize, KM_SLEEP); 16878 16879 if (dtrace_toxrange != NULL) { 16880 ASSERT(osize != 0); 16881 bcopy(dtrace_toxrange, range, osize); 16882 kmem_free(dtrace_toxrange, osize); 16883 } 16884 16885 dtrace_toxrange = range; 16886 } 16887 16888 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 16889 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 16890 16891 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 16892 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 16893 dtrace_toxranges++; 16894 } 16895 16896 static void 16897 dtrace_getf_barrier() 16898 { 16899 #ifdef illumos 16900 /* 16901 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 16902 * that contain calls to getf(), this routine will be called on every 16903 * closef() before either the underlying vnode is released or the 16904 * file_t itself is freed. By the time we are here, it is essential 16905 * that the file_t can no longer be accessed from a call to getf() 16906 * in probe context -- that assures that a dtrace_sync() can be used 16907 * to clear out any enablings referring to the old structures. 
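 *
 * The per-zone zone_dtrace_getf counts are what keep this cheap in
 * the common case: when no zone has a getf() enabling outstanding,
 * closef() pays only for the two loads below.  Both the closing
 * thread's zone and the global zone (via kcred) are consulted before
 * concluding that no dtrace_sync() is required.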
16908 */ 16909 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 16910 kcred->cr_zone->zone_dtrace_getf != 0) 16911 dtrace_sync(); 16912 #endif 16913 } 16914 16915 /* 16916 * DTrace Driver Cookbook Functions 16917 */ 16918 #ifdef illumos 16919 /*ARGSUSED*/ 16920 static int 16921 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 16922 { 16923 dtrace_provider_id_t id; 16924 dtrace_state_t *state = NULL; 16925 dtrace_enabling_t *enab; 16926 16927 mutex_enter(&cpu_lock); 16928 mutex_enter(&dtrace_provider_lock); 16929 mutex_enter(&dtrace_lock); 16930 16931 if (ddi_soft_state_init(&dtrace_softstate, 16932 sizeof (dtrace_state_t), 0) != 0) { 16933 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 16934 mutex_exit(&cpu_lock); 16935 mutex_exit(&dtrace_provider_lock); 16936 mutex_exit(&dtrace_lock); 16937 return (DDI_FAILURE); 16938 } 16939 16940 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 16941 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 16942 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 16943 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 16944 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 16945 ddi_remove_minor_node(devi, NULL); 16946 ddi_soft_state_fini(&dtrace_softstate); 16947 mutex_exit(&cpu_lock); 16948 mutex_exit(&dtrace_provider_lock); 16949 mutex_exit(&dtrace_lock); 16950 return (DDI_FAILURE); 16951 } 16952 16953 ddi_report_dev(devi); 16954 dtrace_devi = devi; 16955 16956 dtrace_modload = dtrace_module_loaded; 16957 dtrace_modunload = dtrace_module_unloaded; 16958 dtrace_cpu_init = dtrace_cpu_setup_initial; 16959 dtrace_helpers_cleanup = dtrace_helpers_destroy; 16960 dtrace_helpers_fork = dtrace_helpers_duplicate; 16961 dtrace_cpustart_init = dtrace_suspend; 16962 dtrace_cpustart_fini = dtrace_resume; 16963 dtrace_debugger_init = dtrace_suspend; 16964 dtrace_debugger_fini = dtrace_resume; 16965 16966 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16967 16968 ASSERT(MUTEX_HELD(&cpu_lock)); 16969 16970 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 16971 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 16972 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 16973 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 16974 VM_SLEEP | VMC_IDENTIFIER); 16975 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 16976 1, INT_MAX, 0); 16977 16978 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 16979 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 16980 NULL, NULL, NULL, NULL, NULL, 0); 16981 16982 ASSERT(MUTEX_HELD(&cpu_lock)); 16983 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 16984 offsetof(dtrace_probe_t, dtpr_nextmod), 16985 offsetof(dtrace_probe_t, dtpr_prevmod)); 16986 16987 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 16988 offsetof(dtrace_probe_t, dtpr_nextfunc), 16989 offsetof(dtrace_probe_t, dtpr_prevfunc)); 16990 16991 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 16992 offsetof(dtrace_probe_t, dtpr_nextname), 16993 offsetof(dtrace_probe_t, dtpr_prevname)); 16994 16995 if (dtrace_retain_max < 1) { 16996 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 16997 "setting to 1", dtrace_retain_max); 16998 dtrace_retain_max = 1; 16999 } 17000 17001 /* 17002 * Now discover our toxic ranges. 
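 *
 * dtrace_toxic_ranges() is the platform's hook for reporting address
 * ranges that must never be dereferenced from probe context (for
 * example, ranges backed by memory-mapped hardware registers); it
 * simply invokes the callback registered here --
 *
 *     dtrace_toxrange_add(base, limit);
 *
 * -- once for each such range, and dtrace_toxrange_add() grows its
 * table as needed using the doubling scheme seen above.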
17003 */ 17004 dtrace_toxic_ranges(dtrace_toxrange_add); 17005 17006 /* 17007 * Before we register ourselves as a provider to our own framework, 17008 * we would like to assert that dtrace_provider is NULL -- but that's 17009 * not true if we were loaded as a dependency of a DTrace provider. 17010 * Once we've registered, we can assert that dtrace_provider is our 17011 * pseudo provider. 17012 */ 17013 (void) dtrace_register("dtrace", &dtrace_provider_attr, 17014 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 17015 17016 ASSERT(dtrace_provider != NULL); 17017 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 17018 17019 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 17020 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 17021 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 17022 dtrace_provider, NULL, NULL, "END", 0, NULL); 17023 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 17024 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 17025 17026 dtrace_anon_property(); 17027 mutex_exit(&cpu_lock); 17028 17029 /* 17030 * If there are already providers, we must ask them to provide their 17031 * probes, and then match any anonymous enabling against them. Note 17032 * that there should be no other retained enablings at this time: 17033 * the only retained enablings at this time should be the anonymous 17034 * enabling. 17035 */ 17036 if (dtrace_anon.dta_enabling != NULL) { 17037 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 17038 17039 dtrace_enabling_provide(NULL); 17040 state = dtrace_anon.dta_state; 17041 17042 /* 17043 * We couldn't hold cpu_lock across the above call to 17044 * dtrace_enabling_provide(), but we must hold it to actually 17045 * enable the probes. We have to drop all of our locks, pick 17046 * up cpu_lock, and regain our locks before matching the 17047 * retained anonymous enabling. 17048 */ 17049 mutex_exit(&dtrace_lock); 17050 mutex_exit(&dtrace_provider_lock); 17051 17052 mutex_enter(&cpu_lock); 17053 mutex_enter(&dtrace_provider_lock); 17054 mutex_enter(&dtrace_lock); 17055 17056 if ((enab = dtrace_anon.dta_enabling) != NULL) 17057 (void) dtrace_enabling_match(enab, NULL); 17058 17059 mutex_exit(&cpu_lock); 17060 } 17061 17062 mutex_exit(&dtrace_lock); 17063 mutex_exit(&dtrace_provider_lock); 17064 17065 if (state != NULL) { 17066 /* 17067 * If we created any anonymous state, set it going now. 17068 */ 17069 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 17070 } 17071 17072 return (DDI_SUCCESS); 17073 } 17074 #endif /* illumos */ 17075 17076 #ifndef illumos 17077 static void dtrace_dtr(void *); 17078 #endif 17079 17080 /*ARGSUSED*/ 17081 static int 17082 #ifdef illumos 17083 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 17084 #else 17085 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 17086 #endif 17087 { 17088 dtrace_state_t *state; 17089 uint32_t priv; 17090 uid_t uid; 17091 zoneid_t zoneid; 17092 17093 #ifdef illumos 17094 if (getminor(*devp) == DTRACEMNRN_HELPER) 17095 return (0); 17096 17097 /* 17098 * If this wasn't an open with the "helper" minor, then it must be 17099 * the "dtrace" minor. 17100 */ 17101 if (getminor(*devp) == DTRACEMNRN_DTRACE) 17102 return (ENXIO); 17103 #else 17104 cred_t *cred_p = NULL; 17105 cred_p = dev->si_cred; 17106 17107 /* 17108 * If no DTRACE_PRIV_* bits are set in the credential, then the 17109 * caller lacks sufficient permission to do anything with DTrace. 
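 *
 * dtrace_cred2priv() distills the credential into a DTRACE_PRIV_*
 * bit mask (DTRACE_PRIV_KERNEL, DTRACE_PRIV_USER, DTRACE_PRIV_PROC,
 * DTRACE_PRIV_OWNER and so on) along with the uid and zoneid that are
 * later used for per-probe visibility checks; a mask of
 * DTRACE_PRIV_NONE means the open is rejected outright.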
17110 */ 17111 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 17112 if (priv == DTRACE_PRIV_NONE) { 17113 #endif 17114 17115 return (EACCES); 17116 } 17117 17118 /* 17119 * Ask all providers to provide all their probes. 17120 */ 17121 mutex_enter(&dtrace_provider_lock); 17122 dtrace_probe_provide(NULL, NULL); 17123 mutex_exit(&dtrace_provider_lock); 17124 17125 mutex_enter(&cpu_lock); 17126 mutex_enter(&dtrace_lock); 17127 dtrace_opens++; 17128 dtrace_membar_producer(); 17129 17130 #ifdef illumos 17131 /* 17132 * If the kernel debugger is active (that is, if the kernel debugger 17133 * modified text in some way), we won't allow the open. 17134 */ 17135 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 17136 dtrace_opens--; 17137 mutex_exit(&cpu_lock); 17138 mutex_exit(&dtrace_lock); 17139 return (EBUSY); 17140 } 17141 17142 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) { 17143 /* 17144 * If DTrace helper tracing is enabled, we need to allocate the 17145 * trace buffer and initialize the values. 17146 */ 17147 dtrace_helptrace_buffer = 17148 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 17149 dtrace_helptrace_next = 0; 17150 dtrace_helptrace_wrapped = 0; 17151 dtrace_helptrace_enable = 0; 17152 } 17153 17154 state = dtrace_state_create(devp, cred_p); 17155 #else 17156 state = dtrace_state_create(dev, NULL); 17157 devfs_set_cdevpriv(state, dtrace_dtr); 17158 #endif 17159 17160 mutex_exit(&cpu_lock); 17161 17162 if (state == NULL) { 17163 #ifdef illumos 17164 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17165 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17166 #else 17167 --dtrace_opens; 17168 #endif 17169 mutex_exit(&dtrace_lock); 17170 return (EAGAIN); 17171 } 17172 17173 mutex_exit(&dtrace_lock); 17174 17175 return (0); 17176 } 17177 17178 /*ARGSUSED*/ 17179 #ifdef illumos 17180 static int 17181 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 17182 #else 17183 static void 17184 dtrace_dtr(void *data) 17185 #endif 17186 { 17187 #ifdef illumos 17188 minor_t minor = getminor(dev); 17189 dtrace_state_t *state; 17190 #endif 17191 dtrace_helptrace_t *buf = NULL; 17192 17193 #ifdef illumos 17194 if (minor == DTRACEMNRN_HELPER) 17195 return (0); 17196 17197 state = ddi_get_soft_state(dtrace_softstate, minor); 17198 #else 17199 dtrace_state_t *state = data; 17200 #endif 17201 17202 mutex_enter(&cpu_lock); 17203 mutex_enter(&dtrace_lock); 17204 17205 #ifdef illumos 17206 if (state->dts_anon) 17207 #else 17208 if (state != NULL && state->dts_anon) 17209 #endif 17210 { 17211 /* 17212 * There is anonymous state. Destroy that first. 17213 */ 17214 ASSERT(dtrace_anon.dta_state == NULL); 17215 dtrace_state_destroy(state->dts_anon); 17216 } 17217 17218 if (dtrace_helptrace_disable) { 17219 /* 17220 * If we have been told to disable helper tracing, set the 17221 * buffer to NULL before calling into dtrace_state_destroy(); 17222 * we take advantage of its dtrace_sync() to know that no 17223 * CPU is in probe context with enabled helper tracing 17224 * after it returns. 17225 */ 17226 buf = dtrace_helptrace_buffer; 17227 dtrace_helptrace_buffer = NULL; 17228 } 17229 17230 #ifdef illumos 17231 dtrace_state_destroy(state); 17232 #else 17233 if (state != NULL) { 17234 dtrace_state_destroy(state); 17235 kmem_free(state, 0); 17236 } 17237 #endif 17238 ASSERT(dtrace_opens > 0); 17239 17240 #ifdef illumos 17241 /* 17242 * Only relinquish control of the kernel debugger interface when there 17243 * are no consumers and no anonymous enablings. 
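 *
 * This is the mirror image of the KDI_DTSET_DTRACE_ACTIVATE call in
 * dtrace_open(): dtrace_opens serves as the reference count, and an
 * outstanding anonymous enabling keeps the interface claimed even
 * after the last consumer closes, so deactivation happens only when
 * both conditions below hold.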
17244 */ 17245 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17246 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17247 #else 17248 --dtrace_opens; 17249 #endif 17250 17251 if (buf != NULL) { 17252 kmem_free(buf, dtrace_helptrace_bufsize); 17253 dtrace_helptrace_disable = 0; 17254 } 17255 17256 mutex_exit(&dtrace_lock); 17257 mutex_exit(&cpu_lock); 17258 17259 #ifdef illumos 17260 return (0); 17261 #endif 17262 } 17263 17264 #ifdef illumos 17265 /*ARGSUSED*/ 17266 static int 17267 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 17268 { 17269 int rval; 17270 dof_helper_t help, *dhp = NULL; 17271 17272 switch (cmd) { 17273 case DTRACEHIOC_ADDDOF: 17274 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 17275 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 17276 return (EFAULT); 17277 } 17278 17279 dhp = &help; 17280 arg = (intptr_t)help.dofhp_dof; 17281 /*FALLTHROUGH*/ 17282 17283 case DTRACEHIOC_ADD: { 17284 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 17285 17286 if (dof == NULL) 17287 return (rval); 17288 17289 mutex_enter(&dtrace_lock); 17290 17291 /* 17292 * dtrace_helper_slurp() takes responsibility for the dof -- 17293 * it may free it now or it may save it and free it later. 17294 */ 17295 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 17296 *rv = rval; 17297 rval = 0; 17298 } else { 17299 rval = EINVAL; 17300 } 17301 17302 mutex_exit(&dtrace_lock); 17303 return (rval); 17304 } 17305 17306 case DTRACEHIOC_REMOVE: { 17307 mutex_enter(&dtrace_lock); 17308 rval = dtrace_helper_destroygen(NULL, arg); 17309 mutex_exit(&dtrace_lock); 17310 17311 return (rval); 17312 } 17313 17314 default: 17315 break; 17316 } 17317 17318 return (ENOTTY); 17319 } 17320 17321 /*ARGSUSED*/ 17322 static int 17323 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 17324 { 17325 minor_t minor = getminor(dev); 17326 dtrace_state_t *state; 17327 int rval; 17328 17329 if (minor == DTRACEMNRN_HELPER) 17330 return (dtrace_ioctl_helper(cmd, arg, rv)); 17331 17332 state = ddi_get_soft_state(dtrace_softstate, minor); 17333 17334 if (state->dts_anon) { 17335 ASSERT(dtrace_anon.dta_state == NULL); 17336 state = state->dts_anon; 17337 } 17338 17339 switch (cmd) { 17340 case DTRACEIOC_PROVIDER: { 17341 dtrace_providerdesc_t pvd; 17342 dtrace_provider_t *pvp; 17343 17344 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 17345 return (EFAULT); 17346 17347 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 17348 mutex_enter(&dtrace_provider_lock); 17349 17350 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 17351 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 17352 break; 17353 } 17354 17355 mutex_exit(&dtrace_provider_lock); 17356 17357 if (pvp == NULL) 17358 return (ESRCH); 17359 17360 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 17361 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 17362 17363 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 17364 return (EFAULT); 17365 17366 return (0); 17367 } 17368 17369 case DTRACEIOC_EPROBE: { 17370 dtrace_eprobedesc_t epdesc; 17371 dtrace_ecb_t *ecb; 17372 dtrace_action_t *act; 17373 void *buf; 17374 size_t size; 17375 uintptr_t dest; 17376 int nrecs; 17377 17378 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 17379 return (EFAULT); 17380 17381 mutex_enter(&dtrace_lock); 17382 17383 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 17384 mutex_exit(&dtrace_lock); 17385 return (EINVAL); 17386 } 17387 17388 if (ecb->dte_probe == NULL) { 17389 
mutex_exit(&dtrace_lock); 17390 return (EINVAL); 17391 } 17392 17393 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 17394 epdesc.dtepd_uarg = ecb->dte_uarg; 17395 epdesc.dtepd_size = ecb->dte_size; 17396 17397 nrecs = epdesc.dtepd_nrecs; 17398 epdesc.dtepd_nrecs = 0; 17399 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17400 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17401 continue; 17402 17403 epdesc.dtepd_nrecs++; 17404 } 17405 17406 /* 17407 * Now that we have the size, we need to allocate a temporary 17408 * buffer in which to store the complete description. We need 17409 * the temporary buffer to be able to drop dtrace_lock() 17410 * across the copyout(), below. 17411 */ 17412 size = sizeof (dtrace_eprobedesc_t) + 17413 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 17414 17415 buf = kmem_alloc(size, KM_SLEEP); 17416 dest = (uintptr_t)buf; 17417 17418 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 17419 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 17420 17421 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17422 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17423 continue; 17424 17425 if (nrecs-- == 0) 17426 break; 17427 17428 bcopy(&act->dta_rec, (void *)dest, 17429 sizeof (dtrace_recdesc_t)); 17430 dest += sizeof (dtrace_recdesc_t); 17431 } 17432 17433 mutex_exit(&dtrace_lock); 17434 17435 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17436 kmem_free(buf, size); 17437 return (EFAULT); 17438 } 17439 17440 kmem_free(buf, size); 17441 return (0); 17442 } 17443 17444 case DTRACEIOC_AGGDESC: { 17445 dtrace_aggdesc_t aggdesc; 17446 dtrace_action_t *act; 17447 dtrace_aggregation_t *agg; 17448 int nrecs; 17449 uint32_t offs; 17450 dtrace_recdesc_t *lrec; 17451 void *buf; 17452 size_t size; 17453 uintptr_t dest; 17454 17455 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 17456 return (EFAULT); 17457 17458 mutex_enter(&dtrace_lock); 17459 17460 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 17461 mutex_exit(&dtrace_lock); 17462 return (EINVAL); 17463 } 17464 17465 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 17466 17467 nrecs = aggdesc.dtagd_nrecs; 17468 aggdesc.dtagd_nrecs = 0; 17469 17470 offs = agg->dtag_base; 17471 lrec = &agg->dtag_action.dta_rec; 17472 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 17473 17474 for (act = agg->dtag_first; ; act = act->dta_next) { 17475 ASSERT(act->dta_intuple || 17476 DTRACEACT_ISAGG(act->dta_kind)); 17477 17478 /* 17479 * If this action has a record size of zero, it 17480 * denotes an argument to the aggregating action. 17481 * Because the presence of this record doesn't (or 17482 * shouldn't) affect the way the data is interpreted, 17483 * we don't copy it out to save user-level the 17484 * confusion of dealing with a zero-length record. 17485 */ 17486 if (act->dta_rec.dtrd_size == 0) { 17487 ASSERT(agg->dtag_hasarg); 17488 continue; 17489 } 17490 17491 aggdesc.dtagd_nrecs++; 17492 17493 if (act == &agg->dtag_action) 17494 break; 17495 } 17496 17497 /* 17498 * Now that we have the size, we need to allocate a temporary 17499 * buffer in which to store the complete description. We need 17500 * the temporary buffer to be able to drop dtrace_lock() 17501 * across the copyout(), below. 
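 *
 * (Presumably the concern is that copyout() may have to fault the
 * user buffer in and can therefore block for paging I/O; staging the
 * description in a kernel buffer keeps that wait from happening with
 * the global dtrace_lock held.)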
17502 */ 17503 size = sizeof (dtrace_aggdesc_t) + 17504 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 17505 17506 buf = kmem_alloc(size, KM_SLEEP); 17507 dest = (uintptr_t)buf; 17508 17509 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 17510 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 17511 17512 for (act = agg->dtag_first; ; act = act->dta_next) { 17513 dtrace_recdesc_t rec = act->dta_rec; 17514 17515 /* 17516 * See the comment in the above loop for why we pass 17517 * over zero-length records. 17518 */ 17519 if (rec.dtrd_size == 0) { 17520 ASSERT(agg->dtag_hasarg); 17521 continue; 17522 } 17523 17524 if (nrecs-- == 0) 17525 break; 17526 17527 rec.dtrd_offset -= offs; 17528 bcopy(&rec, (void *)dest, sizeof (rec)); 17529 dest += sizeof (dtrace_recdesc_t); 17530 17531 if (act == &agg->dtag_action) 17532 break; 17533 } 17534 17535 mutex_exit(&dtrace_lock); 17536 17537 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17538 kmem_free(buf, size); 17539 return (EFAULT); 17540 } 17541 17542 kmem_free(buf, size); 17543 return (0); 17544 } 17545 17546 case DTRACEIOC_ENABLE: { 17547 dof_hdr_t *dof; 17548 dtrace_enabling_t *enab = NULL; 17549 dtrace_vstate_t *vstate; 17550 int err = 0; 17551 17552 *rv = 0; 17553 17554 /* 17555 * If a NULL argument has been passed, we take this as our 17556 * cue to reevaluate our enablings. 17557 */ 17558 if (arg == NULL) { 17559 dtrace_enabling_matchall(); 17560 17561 return (0); 17562 } 17563 17564 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 17565 return (rval); 17566 17567 mutex_enter(&cpu_lock); 17568 mutex_enter(&dtrace_lock); 17569 vstate = &state->dts_vstate; 17570 17571 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 17572 mutex_exit(&dtrace_lock); 17573 mutex_exit(&cpu_lock); 17574 dtrace_dof_destroy(dof); 17575 return (EBUSY); 17576 } 17577 17578 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 17579 mutex_exit(&dtrace_lock); 17580 mutex_exit(&cpu_lock); 17581 dtrace_dof_destroy(dof); 17582 return (EINVAL); 17583 } 17584 17585 if ((rval = dtrace_dof_options(dof, state)) != 0) { 17586 dtrace_enabling_destroy(enab); 17587 mutex_exit(&dtrace_lock); 17588 mutex_exit(&cpu_lock); 17589 dtrace_dof_destroy(dof); 17590 return (rval); 17591 } 17592 17593 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 17594 err = dtrace_enabling_retain(enab); 17595 } else { 17596 dtrace_enabling_destroy(enab); 17597 } 17598 17599 mutex_exit(&cpu_lock); 17600 mutex_exit(&dtrace_lock); 17601 dtrace_dof_destroy(dof); 17602 17603 return (err); 17604 } 17605 17606 case DTRACEIOC_REPLICATE: { 17607 dtrace_repldesc_t desc; 17608 dtrace_probedesc_t *match = &desc.dtrpd_match; 17609 dtrace_probedesc_t *create = &desc.dtrpd_create; 17610 int err; 17611 17612 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17613 return (EFAULT); 17614 17615 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17616 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17617 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17618 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17619 17620 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17621 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17622 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17623 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17624 17625 mutex_enter(&dtrace_lock); 17626 err = dtrace_enabling_replicate(state, match, create); 17627 mutex_exit(&dtrace_lock); 17628 17629 return (err); 17630 } 17631 17632 case DTRACEIOC_PROBEMATCH: 17633 case DTRACEIOC_PROBES: { 17634 dtrace_probe_t *probe = 
NULL; 17635 dtrace_probedesc_t desc; 17636 dtrace_probekey_t pkey; 17637 dtrace_id_t i; 17638 int m = 0; 17639 uint32_t priv; 17640 uid_t uid; 17641 zoneid_t zoneid; 17642 17643 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17644 return (EFAULT); 17645 17646 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17647 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17648 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17649 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17650 17651 /* 17652 * Before we attempt to match this probe, we want to give 17653 * all providers the opportunity to provide it. 17654 */ 17655 if (desc.dtpd_id == DTRACE_IDNONE) { 17656 mutex_enter(&dtrace_provider_lock); 17657 dtrace_probe_provide(&desc, NULL); 17658 mutex_exit(&dtrace_provider_lock); 17659 desc.dtpd_id++; 17660 } 17661 17662 if (cmd == DTRACEIOC_PROBEMATCH) { 17663 dtrace_probekey(&desc, &pkey); 17664 pkey.dtpk_id = DTRACE_IDNONE; 17665 } 17666 17667 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 17668 17669 mutex_enter(&dtrace_lock); 17670 17671 if (cmd == DTRACEIOC_PROBEMATCH) { 17672 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17673 if ((probe = dtrace_probes[i - 1]) != NULL && 17674 (m = dtrace_match_probe(probe, &pkey, 17675 priv, uid, zoneid)) != 0) 17676 break; 17677 } 17678 17679 if (m < 0) { 17680 mutex_exit(&dtrace_lock); 17681 return (EINVAL); 17682 } 17683 17684 } else { 17685 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17686 if ((probe = dtrace_probes[i - 1]) != NULL && 17687 dtrace_match_priv(probe, priv, uid, zoneid)) 17688 break; 17689 } 17690 } 17691 17692 if (probe == NULL) { 17693 mutex_exit(&dtrace_lock); 17694 return (ESRCH); 17695 } 17696 17697 dtrace_probe_description(probe, &desc); 17698 mutex_exit(&dtrace_lock); 17699 17700 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17701 return (EFAULT); 17702 17703 return (0); 17704 } 17705 17706 case DTRACEIOC_PROBEARG: { 17707 dtrace_argdesc_t desc; 17708 dtrace_probe_t *probe; 17709 dtrace_provider_t *prov; 17710 17711 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17712 return (EFAULT); 17713 17714 if (desc.dtargd_id == DTRACE_IDNONE) 17715 return (EINVAL); 17716 17717 if (desc.dtargd_ndx == DTRACE_ARGNONE) 17718 return (EINVAL); 17719 17720 mutex_enter(&dtrace_provider_lock); 17721 mutex_enter(&mod_lock); 17722 mutex_enter(&dtrace_lock); 17723 17724 if (desc.dtargd_id > dtrace_nprobes) { 17725 mutex_exit(&dtrace_lock); 17726 mutex_exit(&mod_lock); 17727 mutex_exit(&dtrace_provider_lock); 17728 return (EINVAL); 17729 } 17730 17731 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 17732 mutex_exit(&dtrace_lock); 17733 mutex_exit(&mod_lock); 17734 mutex_exit(&dtrace_provider_lock); 17735 return (EINVAL); 17736 } 17737 17738 mutex_exit(&dtrace_lock); 17739 17740 prov = probe->dtpr_provider; 17741 17742 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 17743 /* 17744 * There isn't any typed information for this probe. 17745 * Set the argument number to DTRACE_ARGNONE. 
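 *
 * A returned dtargd_ndx of DTRACE_ARGNONE is the consumer's cue that
 * no type information exists for this argument; libdtrace would then
 * typically fall back to a default integer type for args[n] rather
 * than failing the compilation outright.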
17746 */ 17747 desc.dtargd_ndx = DTRACE_ARGNONE; 17748 } else { 17749 desc.dtargd_native[0] = '\0'; 17750 desc.dtargd_xlate[0] = '\0'; 17751 desc.dtargd_mapping = desc.dtargd_ndx; 17752 17753 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 17754 probe->dtpr_id, probe->dtpr_arg, &desc); 17755 } 17756 17757 mutex_exit(&mod_lock); 17758 mutex_exit(&dtrace_provider_lock); 17759 17760 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17761 return (EFAULT); 17762 17763 return (0); 17764 } 17765 17766 case DTRACEIOC_GO: { 17767 processorid_t cpuid; 17768 rval = dtrace_state_go(state, &cpuid); 17769 17770 if (rval != 0) 17771 return (rval); 17772 17773 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17774 return (EFAULT); 17775 17776 return (0); 17777 } 17778 17779 case DTRACEIOC_STOP: { 17780 processorid_t cpuid; 17781 17782 mutex_enter(&dtrace_lock); 17783 rval = dtrace_state_stop(state, &cpuid); 17784 mutex_exit(&dtrace_lock); 17785 17786 if (rval != 0) 17787 return (rval); 17788 17789 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17790 return (EFAULT); 17791 17792 return (0); 17793 } 17794 17795 case DTRACEIOC_DOFGET: { 17796 dof_hdr_t hdr, *dof; 17797 uint64_t len; 17798 17799 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 17800 return (EFAULT); 17801 17802 mutex_enter(&dtrace_lock); 17803 dof = dtrace_dof_create(state); 17804 mutex_exit(&dtrace_lock); 17805 17806 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 17807 rval = copyout(dof, (void *)arg, len); 17808 dtrace_dof_destroy(dof); 17809 17810 return (rval == 0 ? 0 : EFAULT); 17811 } 17812 17813 case DTRACEIOC_AGGSNAP: 17814 case DTRACEIOC_BUFSNAP: { 17815 dtrace_bufdesc_t desc; 17816 caddr_t cached; 17817 dtrace_buffer_t *buf; 17818 17819 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17820 return (EFAULT); 17821 17822 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 17823 return (EINVAL); 17824 17825 mutex_enter(&dtrace_lock); 17826 17827 if (cmd == DTRACEIOC_BUFSNAP) { 17828 buf = &state->dts_buffer[desc.dtbd_cpu]; 17829 } else { 17830 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 17831 } 17832 17833 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 17834 size_t sz = buf->dtb_offset; 17835 17836 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 17837 mutex_exit(&dtrace_lock); 17838 return (EBUSY); 17839 } 17840 17841 /* 17842 * If this buffer has already been consumed, we're 17843 * going to indicate that there's nothing left here 17844 * to consume. 17845 */ 17846 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 17847 mutex_exit(&dtrace_lock); 17848 17849 desc.dtbd_size = 0; 17850 desc.dtbd_drops = 0; 17851 desc.dtbd_errors = 0; 17852 desc.dtbd_oldest = 0; 17853 sz = sizeof (desc); 17854 17855 if (copyout(&desc, (void *)arg, sz) != 0) 17856 return (EFAULT); 17857 17858 return (0); 17859 } 17860 17861 /* 17862 * If this is a ring buffer that has wrapped, we want 17863 * to copy the whole thing out. 
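 *
 * dtrace_buffer_polish() tidies the seam left by the wrap so that the
 * buffer reads as a well-formed sequence of records, and the
 * dtbd_oldest field (set from dtb_xamot_offset further down) tells
 * user level at which offset the logically oldest data begins, which
 * is what allows the consumer to re-linearize the ring.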
17864 */ 17865 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 17866 dtrace_buffer_polish(buf); 17867 sz = buf->dtb_size; 17868 } 17869 17870 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 17871 mutex_exit(&dtrace_lock); 17872 return (EFAULT); 17873 } 17874 17875 desc.dtbd_size = sz; 17876 desc.dtbd_drops = buf->dtb_drops; 17877 desc.dtbd_errors = buf->dtb_errors; 17878 desc.dtbd_oldest = buf->dtb_xamot_offset; 17879 desc.dtbd_timestamp = dtrace_gethrtime(); 17880 17881 mutex_exit(&dtrace_lock); 17882 17883 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17884 return (EFAULT); 17885 17886 buf->dtb_flags |= DTRACEBUF_CONSUMED; 17887 17888 return (0); 17889 } 17890 17891 if (buf->dtb_tomax == NULL) { 17892 ASSERT(buf->dtb_xamot == NULL); 17893 mutex_exit(&dtrace_lock); 17894 return (ENOENT); 17895 } 17896 17897 cached = buf->dtb_tomax; 17898 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 17899 17900 dtrace_xcall(desc.dtbd_cpu, 17901 (dtrace_xcall_t)dtrace_buffer_switch, buf); 17902 17903 state->dts_errors += buf->dtb_xamot_errors; 17904 17905 /* 17906 * If the buffers did not actually switch, then the cross call 17907 * did not take place -- presumably because the given CPU is 17908 * not in the ready set. If this is the case, we'll return 17909 * ENOENT. 17910 */ 17911 if (buf->dtb_tomax == cached) { 17912 ASSERT(buf->dtb_xamot != cached); 17913 mutex_exit(&dtrace_lock); 17914 return (ENOENT); 17915 } 17916 17917 ASSERT(cached == buf->dtb_xamot); 17918 17919 /* 17920 * We have our snapshot; now copy it out. 17921 */ 17922 if (copyout(buf->dtb_xamot, desc.dtbd_data, 17923 buf->dtb_xamot_offset) != 0) { 17924 mutex_exit(&dtrace_lock); 17925 return (EFAULT); 17926 } 17927 17928 desc.dtbd_size = buf->dtb_xamot_offset; 17929 desc.dtbd_drops = buf->dtb_xamot_drops; 17930 desc.dtbd_errors = buf->dtb_xamot_errors; 17931 desc.dtbd_oldest = 0; 17932 desc.dtbd_timestamp = buf->dtb_switched; 17933 17934 mutex_exit(&dtrace_lock); 17935 17936 /* 17937 * Finally, copy out the buffer description. 17938 */ 17939 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17940 return (EFAULT); 17941 17942 return (0); 17943 } 17944 17945 case DTRACEIOC_CONF: { 17946 dtrace_conf_t conf; 17947 17948 bzero(&conf, sizeof (conf)); 17949 conf.dtc_difversion = DIF_VERSION; 17950 conf.dtc_difintregs = DIF_DIR_NREGS; 17951 conf.dtc_diftupregs = DIF_DTR_NREGS; 17952 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 17953 17954 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 17955 return (EFAULT); 17956 17957 return (0); 17958 } 17959 17960 case DTRACEIOC_STATUS: { 17961 dtrace_status_t stat; 17962 dtrace_dstate_t *dstate; 17963 int i, j; 17964 uint64_t nerrs; 17965 17966 /* 17967 * See the comment in dtrace_state_deadman() for the reason 17968 * for setting dts_laststatus to INT64_MAX before setting 17969 * it to the correct value. 
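 *
 * In short: the deadman compares dtrace_gethrtime() against
 * dts_laststatus, so publishing INT64_MAX first (with the producer
 * barrier in between) ensures that a deadman firing mid-update sees a
 * timestamp that cannot look stale, rather than the old value, and so
 * never concludes that the consumer has stopped checking in.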
17970 */ 17971 state->dts_laststatus = INT64_MAX; 17972 dtrace_membar_producer(); 17973 state->dts_laststatus = dtrace_gethrtime(); 17974 17975 bzero(&stat, sizeof (stat)); 17976 17977 mutex_enter(&dtrace_lock); 17978 17979 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 17980 mutex_exit(&dtrace_lock); 17981 return (ENOENT); 17982 } 17983 17984 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 17985 stat.dtst_exiting = 1; 17986 17987 nerrs = state->dts_errors; 17988 dstate = &state->dts_vstate.dtvs_dynvars; 17989 17990 for (i = 0; i < NCPU; i++) { 17991 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 17992 17993 stat.dtst_dyndrops += dcpu->dtdsc_drops; 17994 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 17995 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 17996 17997 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 17998 stat.dtst_filled++; 17999 18000 nerrs += state->dts_buffer[i].dtb_errors; 18001 18002 for (j = 0; j < state->dts_nspeculations; j++) { 18003 dtrace_speculation_t *spec; 18004 dtrace_buffer_t *buf; 18005 18006 spec = &state->dts_speculations[j]; 18007 buf = &spec->dtsp_buffer[i]; 18008 stat.dtst_specdrops += buf->dtb_xamot_drops; 18009 } 18010 } 18011 18012 stat.dtst_specdrops_busy = state->dts_speculations_busy; 18013 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 18014 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 18015 stat.dtst_dblerrors = state->dts_dblerrors; 18016 stat.dtst_killed = 18017 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 18018 stat.dtst_errors = nerrs; 18019 18020 mutex_exit(&dtrace_lock); 18021 18022 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 18023 return (EFAULT); 18024 18025 return (0); 18026 } 18027 18028 case DTRACEIOC_FORMAT: { 18029 dtrace_fmtdesc_t fmt; 18030 char *str; 18031 int len; 18032 18033 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 18034 return (EFAULT); 18035 18036 mutex_enter(&dtrace_lock); 18037 18038 if (fmt.dtfd_format == 0 || 18039 fmt.dtfd_format > state->dts_nformats) { 18040 mutex_exit(&dtrace_lock); 18041 return (EINVAL); 18042 } 18043 18044 /* 18045 * Format strings are allocated contiguously and they are 18046 * never freed; if a format index is less than the number 18047 * of formats, we can assert that the format map is non-NULL 18048 * and that the format for the specified index is non-NULL. 
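 *
 * The resulting user-level protocol is a two-step sizing exchange; a
 * minimal sketch of the consumer side (the fd, buf, bufsize and id
 * variables are hypothetical):
 *
 *     fmt.dtfd_format = id;
 *     fmt.dtfd_string = buf;
 *     fmt.dtfd_length = bufsize;
 *     if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) == 0 &&
 *         fmt.dtfd_length > bufsize) {
 *             ... grow buf to fmt.dtfd_length and retry ...
 *     }
 *
 * If the supplied buffer was large enough, the format string is
 * copied out directly; otherwise only the required length comes back.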
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should not
		 * have been allowed to detach; assert that there are none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	ASSERT(dtrace_getf == 0);
	ASSERT(dtrace_closef == NULL);

	mutex_exit(&cpu_lock);

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#ifdef illumos
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#ifdef illumos
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

/*
 * FreeBSD: forward declarations and character device definitions for the
 * DTrace and helper devices; the remaining FreeBSD-specific glue is pulled
 * in from the dtrace_*.c files included below.
 */
static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif