1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * $FreeBSD$ 22 */ 23 24 /* 25 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 26 * Copyright (c) 2016, Joyent, Inc. All rights reserved. 27 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 28 */ 29 30 /* 31 * DTrace - Dynamic Tracing for Solaris 32 * 33 * This is the implementation of the Solaris Dynamic Tracing framework 34 * (DTrace). The user-visible interface to DTrace is described at length in 35 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 36 * library, the in-kernel DTrace framework, and the DTrace providers are 37 * described in the block comments in the <sys/dtrace.h> header file. The 38 * internal architecture of DTrace is described in the block comments in the 39 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 40 * implementation very much assume mastery of all of these sources; if one has 41 * an unanswered question about the implementation, one should consult them 42 * first. 43 * 44 * The functions here are ordered roughly as follows: 45 * 46 * - Probe context functions 47 * - Probe hashing functions 48 * - Non-probe context utility functions 49 * - Matching functions 50 * - Provider-to-Framework API functions 51 * - Probe management functions 52 * - DIF object functions 53 * - Format functions 54 * - Predicate functions 55 * - ECB functions 56 * - Buffer functions 57 * - Enabling functions 58 * - DOF functions 59 * - Anonymous enabling functions 60 * - Consumer state functions 61 * - Helper functions 62 * - Hook functions 63 * - Driver cookbook functions 64 * 65 * Each group of functions begins with a block comment labelled the "DTrace 66 * [Group] Functions", allowing one to find each block by searching forward 67 * on capital-f functions. 68 */ 69 #include <sys/errno.h> 70 #ifndef illumos 71 #include <sys/time.h> 72 #endif 73 #include <sys/stat.h> 74 #include <sys/modctl.h> 75 #include <sys/conf.h> 76 #include <sys/systm.h> 77 #ifdef illumos 78 #include <sys/ddi.h> 79 #include <sys/sunddi.h> 80 #endif 81 #include <sys/cpuvar.h> 82 #include <sys/kmem.h> 83 #ifdef illumos 84 #include <sys/strsubr.h> 85 #endif 86 #include <sys/sysmacros.h> 87 #include <sys/dtrace_impl.h> 88 #include <sys/atomic.h> 89 #include <sys/cmn_err.h> 90 #ifdef illumos 91 #include <sys/mutex_impl.h> 92 #include <sys/rwlock_impl.h> 93 #endif 94 #include <sys/ctf_api.h> 95 #ifdef illumos 96 #include <sys/panic.h> 97 #include <sys/priv_impl.h> 98 #endif 99 #include <sys/policy.h> 100 #ifdef illumos 101 #include <sys/cred_impl.h> 102 #include <sys/procfs_isa.h> 103 #endif 104 #include <sys/taskq.h> 105 #ifdef illumos 106 #include <sys/mkdev.h> 107 #include <sys/kdi.h> 108 #endif 109 #include <sys/zone.h> 110 #include <sys/socket.h> 111 #include <netinet/in.h> 112 #include "strtolctype.h" 113 114 /* FreeBSD includes: */ 115 #ifndef illumos 116 #include <sys/callout.h> 117 #include <sys/ctype.h> 118 #include <sys/eventhandler.h> 119 #include <sys/limits.h> 120 #include <sys/linker.h> 121 #include <sys/kdb.h> 122 #include <sys/kernel.h> 123 #include <sys/malloc.h> 124 #include <sys/lock.h> 125 #include <sys/mutex.h> 126 #include <sys/ptrace.h> 127 #include <sys/random.h> 128 #include <sys/rwlock.h> 129 #include <sys/sx.h> 130 #include <sys/sysctl.h> 131 132 #include <sys/dtrace_bsd.h> 133 134 #include <netinet/in.h> 135 136 #include "dtrace_cddl.h" 137 #include "dtrace_debug.c" 138 #endif 139 140 #include "dtrace_xoroshiro128_plus.h" 141 142 /* 143 * DTrace Tunable Variables 144 * 145 * The following variables may be tuned by adding a line to /etc/system that 146 * includes both the name of the DTrace module ("dtrace") and the name of the 147 * variable. For example: 148 * 149 * set dtrace:dtrace_destructive_disallow = 1 150 * 151 * In general, the only variables that one should be tuning this way are those 152 * that affect system-wide DTrace behavior, and for which the default behavior 153 * is undesirable. Most of these variables are tunable on a per-consumer 154 * basis using DTrace options, and need not be tuned on a system-wide basis. 155 * When tuning these variables, avoid pathological values; while some attempt 156 * is made to verify the integrity of these variables, they are not considered 157 * part of the supported interface to DTrace, and they are therefore not 158 * checked comprehensively. Further, these variables should not be tuned 159 * dynamically via "mdb -kw" or other means; they should only be tuned via 160 * /etc/system. 161 */ 162 int dtrace_destructive_disallow = 0; 163 #ifndef illumos 164 /* Positive logic version of dtrace_destructive_disallow for loader tunable */ 165 int dtrace_allow_destructive = 1; 166 #endif 167 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 168 size_t dtrace_difo_maxsize = (256 * 1024); 169 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024); 170 size_t dtrace_statvar_maxsize = (16 * 1024); 171 size_t dtrace_actions_max = (16 * 1024); 172 size_t dtrace_retain_max = 1024; 173 dtrace_optval_t dtrace_helper_actions_max = 128; 174 dtrace_optval_t dtrace_helper_providers_max = 32; 175 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 176 size_t dtrace_strsize_default = 256; 177 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 178 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 179 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 180 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 181 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 182 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 183 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 184 dtrace_optval_t dtrace_nspec_default = 1; 185 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 186 dtrace_optval_t dtrace_stackframes_default = 20; 187 dtrace_optval_t dtrace_ustackframes_default = 20; 188 dtrace_optval_t dtrace_jstackframes_default = 50; 189 dtrace_optval_t dtrace_jstackstrsize_default = 512; 190 int dtrace_msgdsize_max = 128; 191 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */ 192 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 193 int dtrace_devdepth_max = 32; 194 int dtrace_err_verbose; 195 hrtime_t dtrace_deadman_interval = NANOSEC; 196 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 197 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 198 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC; 199 #ifndef illumos 200 int dtrace_memstr_max = 4096; 201 #endif 202 203 /* 204 * DTrace External Variables 205 * 206 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 207 * available to DTrace consumers via the backtick (`) syntax. One of these, 208 * dtrace_zero, is made deliberately so: it is provided as a source of 209 * well-known, zero-filled memory. While this variable is not documented, 210 * it is used by some translators as an implementation detail. 211 */ 212 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 213 214 /* 215 * DTrace Internal Variables 216 */ 217 #ifdef illumos 218 static dev_info_t *dtrace_devi; /* device info */ 219 #endif 220 #ifdef illumos 221 static vmem_t *dtrace_arena; /* probe ID arena */ 222 static vmem_t *dtrace_minor; /* minor number arena */ 223 #else 224 static taskq_t *dtrace_taskq; /* task queue */ 225 static struct unrhdr *dtrace_arena; /* Probe ID number. */ 226 #endif 227 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 228 static int dtrace_nprobes; /* number of probes */ 229 static dtrace_provider_t *dtrace_provider; /* provider list */ 230 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 231 static int dtrace_opens; /* number of opens */ 232 static int dtrace_helpers; /* number of helpers */ 233 static int dtrace_getf; /* number of unpriv getf()s */ 234 #ifdef illumos 235 static void *dtrace_softstate; /* softstate pointer */ 236 #endif 237 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 238 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 239 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 240 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 241 static int dtrace_toxranges; /* number of toxic ranges */ 242 static int dtrace_toxranges_max; /* size of toxic range array */ 243 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 244 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 245 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 246 static kthread_t *dtrace_panicked; /* panicking thread */ 247 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 248 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 249 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 250 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 251 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ 252 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 253 static int dtrace_dynvar_failclean; /* dynvars failed to clean */ 254 #ifndef illumos 255 static struct mtx dtrace_unr_mtx; 256 MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF); 257 static eventhandler_tag dtrace_kld_load_tag; 258 static eventhandler_tag dtrace_kld_unload_try_tag; 259 #endif 260 261 /* 262 * DTrace Locking 263 * DTrace is protected by three (relatively coarse-grained) locks: 264 * 265 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 266 * including enabling state, probes, ECBs, consumer state, helper state, 267 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 268 * probe context is lock-free -- synchronization is handled via the 269 * dtrace_sync() cross call mechanism. 270 * 271 * (2) dtrace_provider_lock is required when manipulating provider state, or 272 * when provider state must be held constant. 273 * 274 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 275 * when meta provider state must be held constant. 276 * 277 * The lock ordering between these three locks is dtrace_meta_lock before 278 * dtrace_provider_lock before dtrace_lock. (In particular, there are 279 * several places where dtrace_provider_lock is held by the framework as it 280 * calls into the providers -- which then call back into the framework, 281 * grabbing dtrace_lock.) 282 * 283 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 284 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 285 * role as a coarse-grained lock; it is acquired before both of these locks. 286 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 287 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 288 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 289 * acquired _between_ dtrace_provider_lock and dtrace_lock. 290 */ 291 static kmutex_t dtrace_lock; /* probe state lock */ 292 static kmutex_t dtrace_provider_lock; /* provider state lock */ 293 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ 294 295 #ifndef illumos 296 /* XXX FreeBSD hacks. */ 297 #define cr_suid cr_svuid 298 #define cr_sgid cr_svgid 299 #define ipaddr_t in_addr_t 300 #define mod_modname pathname 301 #define vuprintf vprintf 302 #define ttoproc(_a) ((_a)->td_proc) 303 #define crgetzoneid(_a) 0 304 #define SNOCD 0 305 #define CPU_ON_INTR(_a) 0 306 307 #define PRIV_EFFECTIVE (1 << 0) 308 #define PRIV_DTRACE_KERNEL (1 << 1) 309 #define PRIV_DTRACE_PROC (1 << 2) 310 #define PRIV_DTRACE_USER (1 << 3) 311 #define PRIV_PROC_OWNER (1 << 4) 312 #define PRIV_PROC_ZONE (1 << 5) 313 #define PRIV_ALL ~0 314 315 SYSCTL_DECL(_debug_dtrace); 316 SYSCTL_DECL(_kern_dtrace); 317 #endif 318 319 #ifdef illumos 320 #define curcpu CPU->cpu_id 321 #endif 322 323 324 /* 325 * DTrace Provider Variables 326 * 327 * These are the variables relating to DTrace as a provider (that is, the 328 * provider of the BEGIN, END, and ERROR probes). 329 */ 330 static dtrace_pattr_t dtrace_provider_attr = { 331 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 332 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 333 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 334 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 335 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 336 }; 337 338 static void 339 dtrace_nullop(void) 340 {} 341 342 static dtrace_pops_t dtrace_provider_ops = { 343 .dtps_provide = (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop, 344 .dtps_provide_module = (void (*)(void *, modctl_t *))dtrace_nullop, 345 .dtps_enable = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 346 .dtps_disable = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 347 .dtps_suspend = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 348 .dtps_resume = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 349 .dtps_getargdesc = NULL, 350 .dtps_getargval = NULL, 351 .dtps_usermode = NULL, 352 .dtps_destroy = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 353 }; 354 355 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ 356 static dtrace_id_t dtrace_probeid_end; /* special END probe */ 357 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ 358 359 /* 360 * DTrace Helper Tracing Variables 361 * 362 * These variables should be set dynamically to enable helper tracing. The 363 * only variables that should be set are dtrace_helptrace_enable (which should 364 * be set to a non-zero value to allocate helper tracing buffers on the next 365 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a 366 * non-zero value to deallocate helper tracing buffers on the next close of 367 * /dev/dtrace). When (and only when) helper tracing is disabled, the 368 * buffer size may also be set via dtrace_helptrace_bufsize. 369 */ 370 int dtrace_helptrace_enable = 0; 371 int dtrace_helptrace_disable = 0; 372 int dtrace_helptrace_bufsize = 16 * 1024 * 1024; 373 uint32_t dtrace_helptrace_nlocals; 374 static dtrace_helptrace_t *dtrace_helptrace_buffer; 375 static uint32_t dtrace_helptrace_next = 0; 376 static int dtrace_helptrace_wrapped = 0; 377 378 /* 379 * DTrace Error Hashing 380 * 381 * On DEBUG kernels, DTrace will track the errors that has seen in a hash 382 * table. This is very useful for checking coverage of tests that are 383 * expected to induce DIF or DOF processing errors, and may be useful for 384 * debugging problems in the DIF code generator or in DOF generation . The 385 * error hash may be examined with the ::dtrace_errhash MDB dcmd. 386 */ 387 #ifdef DEBUG 388 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 389 static const char *dtrace_errlast; 390 static kthread_t *dtrace_errthread; 391 static kmutex_t dtrace_errlock; 392 #endif 393 394 /* 395 * DTrace Macros and Constants 396 * 397 * These are various macros that are useful in various spots in the 398 * implementation, along with a few random constants that have no meaning 399 * outside of the implementation. There is no real structure to this cpp 400 * mishmash -- but is there ever? 401 */ 402 #define DTRACE_HASHSTR(hash, probe) \ 403 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 404 405 #define DTRACE_HASHNEXT(hash, probe) \ 406 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 407 408 #define DTRACE_HASHPREV(hash, probe) \ 409 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 410 411 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 412 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 413 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 414 415 #define DTRACE_AGGHASHSIZE_SLEW 17 416 417 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 418 419 /* 420 * The key for a thread-local variable consists of the lower 61 bits of the 421 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 422 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 423 * equal to a variable identifier. This is necessary (but not sufficient) to 424 * assure that global associative arrays never collide with thread-local 425 * variables. To guarantee that they cannot collide, we must also define the 426 * order for keying dynamic variables. That order is: 427 * 428 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 429 * 430 * Because the variable-key and the tls-key are in orthogonal spaces, there is 431 * no way for a global variable key signature to match a thread-local key 432 * signature. 433 */ 434 #ifdef illumos 435 #define DTRACE_TLS_THRKEY(where) { \ 436 uint_t intr = 0; \ 437 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 438 for (; actv; actv >>= 1) \ 439 intr++; \ 440 ASSERT(intr < (1 << 3)); \ 441 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 442 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 443 } 444 #else 445 #define DTRACE_TLS_THRKEY(where) { \ 446 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \ 447 uint_t intr = 0; \ 448 uint_t actv = _c->cpu_intr_actv; \ 449 for (; actv; actv >>= 1) \ 450 intr++; \ 451 ASSERT(intr < (1 << 3)); \ 452 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \ 453 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 454 } 455 #endif 456 457 #define DT_BSWAP_8(x) ((x) & 0xff) 458 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 459 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 460 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 461 462 #define DT_MASK_LO 0x00000000FFFFFFFFULL 463 464 #define DTRACE_STORE(type, tomax, offset, what) \ 465 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 466 467 #ifndef __x86 468 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 469 if (addr & (size - 1)) { \ 470 *flags |= CPU_DTRACE_BADALIGN; \ 471 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 472 return (0); \ 473 } 474 #else 475 #define DTRACE_ALIGNCHECK(addr, size, flags) 476 #endif 477 478 /* 479 * Test whether a range of memory starting at testaddr of size testsz falls 480 * within the range of memory described by addr, sz. We take care to avoid 481 * problems with overflow and underflow of the unsigned quantities, and 482 * disallow all negative sizes. Ranges of size 0 are allowed. 483 */ 484 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 485 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \ 486 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \ 487 (testaddr) + (testsz) >= (testaddr)) 488 489 #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \ 490 do { \ 491 if ((remp) != NULL) { \ 492 *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \ 493 } \ 494 _NOTE(CONSTCOND) } while (0) 495 496 497 /* 498 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 499 * alloc_sz on the righthand side of the comparison in order to avoid overflow 500 * or underflow in the comparison with it. This is simpler than the INRANGE 501 * check above, because we know that the dtms_scratch_ptr is valid in the 502 * range. Allocations of size zero are allowed. 503 */ 504 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 505 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 506 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 507 508 #define DTRACE_LOADFUNC(bits) \ 509 /*CSTYLED*/ \ 510 uint##bits##_t \ 511 dtrace_load##bits(uintptr_t addr) \ 512 { \ 513 size_t size = bits / NBBY; \ 514 /*CSTYLED*/ \ 515 uint##bits##_t rval; \ 516 int i; \ 517 volatile uint16_t *flags = (volatile uint16_t *) \ 518 &cpu_core[curcpu].cpuc_dtrace_flags; \ 519 \ 520 DTRACE_ALIGNCHECK(addr, size, flags); \ 521 \ 522 for (i = 0; i < dtrace_toxranges; i++) { \ 523 if (addr >= dtrace_toxrange[i].dtt_limit) \ 524 continue; \ 525 \ 526 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 527 continue; \ 528 \ 529 /* \ 530 * This address falls within a toxic region; return 0. \ 531 */ \ 532 *flags |= CPU_DTRACE_BADADDR; \ 533 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 534 return (0); \ 535 } \ 536 \ 537 *flags |= CPU_DTRACE_NOFAULT; \ 538 /*CSTYLED*/ \ 539 rval = *((volatile uint##bits##_t *)addr); \ 540 *flags &= ~CPU_DTRACE_NOFAULT; \ 541 \ 542 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 543 } 544 545 #ifdef _LP64 546 #define dtrace_loadptr dtrace_load64 547 #else 548 #define dtrace_loadptr dtrace_load32 549 #endif 550 551 #define DTRACE_DYNHASH_FREE 0 552 #define DTRACE_DYNHASH_SINK 1 553 #define DTRACE_DYNHASH_VALID 2 554 555 #define DTRACE_MATCH_NEXT 0 556 #define DTRACE_MATCH_DONE 1 557 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 558 #define DTRACE_STATE_ALIGN 64 559 560 #define DTRACE_FLAGS2FLT(flags) \ 561 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 562 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 563 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 564 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 565 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 566 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 567 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 568 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 569 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 570 DTRACEFLT_UNKNOWN) 571 572 #define DTRACEACT_ISSTRING(act) \ 573 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 574 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 575 576 /* Function prototype definitions: */ 577 static size_t dtrace_strlen(const char *, size_t); 578 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 579 static void dtrace_enabling_provide(dtrace_provider_t *); 580 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 581 static void dtrace_enabling_matchall(void); 582 static void dtrace_enabling_reap(void); 583 static dtrace_state_t *dtrace_anon_grab(void); 584 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 585 dtrace_state_t *, uint64_t, uint64_t); 586 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 587 static void dtrace_buffer_drop(dtrace_buffer_t *); 588 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when); 589 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 590 dtrace_state_t *, dtrace_mstate_t *); 591 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 592 dtrace_optval_t); 593 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 594 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 595 uint16_t dtrace_load16(uintptr_t); 596 uint32_t dtrace_load32(uintptr_t); 597 uint64_t dtrace_load64(uintptr_t); 598 uint8_t dtrace_load8(uintptr_t); 599 void dtrace_dynvar_clean(dtrace_dstate_t *); 600 dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *, 601 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *); 602 uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *); 603 static int dtrace_priv_proc(dtrace_state_t *); 604 static void dtrace_getf_barrier(void); 605 static int dtrace_canload_remains(uint64_t, size_t, size_t *, 606 dtrace_mstate_t *, dtrace_vstate_t *); 607 static int dtrace_canstore_remains(uint64_t, size_t, size_t *, 608 dtrace_mstate_t *, dtrace_vstate_t *); 609 610 /* 611 * DTrace Probe Context Functions 612 * 613 * These functions are called from probe context. Because probe context is 614 * any context in which C may be called, arbitrarily locks may be held, 615 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 616 * As a result, functions called from probe context may only call other DTrace 617 * support functions -- they may not interact at all with the system at large. 618 * (Note that the ASSERT macro is made probe-context safe by redefining it in 619 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 620 * loads are to be performed from probe context, they _must_ be in terms of 621 * the safe dtrace_load*() variants. 622 * 623 * Some functions in this block are not actually called from probe context; 624 * for these functions, there will be a comment above the function reading 625 * "Note: not called from probe context." 626 */ 627 void 628 dtrace_panic(const char *format, ...) 629 { 630 va_list alist; 631 632 va_start(alist, format); 633 #ifdef __FreeBSD__ 634 vpanic(format, alist); 635 #else 636 dtrace_vpanic(format, alist); 637 #endif 638 va_end(alist); 639 } 640 641 int 642 dtrace_assfail(const char *a, const char *f, int l) 643 { 644 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 645 646 /* 647 * We just need something here that even the most clever compiler 648 * cannot optimize away. 649 */ 650 return (a[(uintptr_t)f]); 651 } 652 653 /* 654 * Atomically increment a specified error counter from probe context. 655 */ 656 static void 657 dtrace_error(uint32_t *counter) 658 { 659 /* 660 * Most counters stored to in probe context are per-CPU counters. 661 * However, there are some error conditions that are sufficiently 662 * arcane that they don't merit per-CPU storage. If these counters 663 * are incremented concurrently on different CPUs, scalability will be 664 * adversely affected -- but we don't expect them to be white-hot in a 665 * correctly constructed enabling... 666 */ 667 uint32_t oval, nval; 668 669 do { 670 oval = *counter; 671 672 if ((nval = oval + 1) == 0) { 673 /* 674 * If the counter would wrap, set it to 1 -- assuring 675 * that the counter is never zero when we have seen 676 * errors. (The counter must be 32-bits because we 677 * aren't guaranteed a 64-bit compare&swap operation.) 678 * To save this code both the infamy of being fingered 679 * by a priggish news story and the indignity of being 680 * the target of a neo-puritan witch trial, we're 681 * carefully avoiding any colorful description of the 682 * likelihood of this condition -- but suffice it to 683 * say that it is only slightly more likely than the 684 * overflow of predicate cache IDs, as discussed in 685 * dtrace_predicate_create(). 686 */ 687 nval = 1; 688 } 689 } while (dtrace_cas32(counter, oval, nval) != oval); 690 } 691 692 /* 693 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 694 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 695 */ 696 /* BEGIN CSTYLED */ 697 DTRACE_LOADFUNC(8) 698 DTRACE_LOADFUNC(16) 699 DTRACE_LOADFUNC(32) 700 DTRACE_LOADFUNC(64) 701 /* END CSTYLED */ 702 703 static int 704 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 705 { 706 if (dest < mstate->dtms_scratch_base) 707 return (0); 708 709 if (dest + size < dest) 710 return (0); 711 712 if (dest + size > mstate->dtms_scratch_ptr) 713 return (0); 714 715 return (1); 716 } 717 718 static int 719 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain, 720 dtrace_statvar_t **svars, int nsvars) 721 { 722 int i; 723 size_t maxglobalsize, maxlocalsize; 724 725 if (nsvars == 0) 726 return (0); 727 728 maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t); 729 maxlocalsize = maxglobalsize * NCPU; 730 731 for (i = 0; i < nsvars; i++) { 732 dtrace_statvar_t *svar = svars[i]; 733 uint8_t scope; 734 size_t size; 735 736 if (svar == NULL || (size = svar->dtsv_size) == 0) 737 continue; 738 739 scope = svar->dtsv_var.dtdv_scope; 740 741 /* 742 * We verify that our size is valid in the spirit of providing 743 * defense in depth: we want to prevent attackers from using 744 * DTrace to escalate an orthogonal kernel heap corruption bug 745 * into the ability to store to arbitrary locations in memory. 746 */ 747 VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) || 748 (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize)); 749 750 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, 751 svar->dtsv_size)) { 752 DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data, 753 svar->dtsv_size); 754 return (1); 755 } 756 } 757 758 return (0); 759 } 760 761 /* 762 * Check to see if the address is within a memory region to which a store may 763 * be issued. This includes the DTrace scratch areas, and any DTrace variable 764 * region. The caller of dtrace_canstore() is responsible for performing any 765 * alignment checks that are needed before stores are actually executed. 766 */ 767 static int 768 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 769 dtrace_vstate_t *vstate) 770 { 771 return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate)); 772 } 773 774 /* 775 * Implementation of dtrace_canstore which communicates the upper bound of the 776 * allowed memory region. 777 */ 778 static int 779 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain, 780 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 781 { 782 /* 783 * First, check to see if the address is in scratch space... 784 */ 785 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 786 mstate->dtms_scratch_size)) { 787 DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base, 788 mstate->dtms_scratch_size); 789 return (1); 790 } 791 792 /* 793 * Now check to see if it's a dynamic variable. This check will pick 794 * up both thread-local variables and any global dynamically-allocated 795 * variables. 796 */ 797 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base, 798 vstate->dtvs_dynvars.dtds_size)) { 799 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 800 uintptr_t base = (uintptr_t)dstate->dtds_base + 801 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 802 uintptr_t chunkoffs; 803 dtrace_dynvar_t *dvar; 804 805 /* 806 * Before we assume that we can store here, we need to make 807 * sure that it isn't in our metadata -- storing to our 808 * dynamic variable metadata would corrupt our state. For 809 * the range to not include any dynamic variable metadata, 810 * it must: 811 * 812 * (1) Start above the hash table that is at the base of 813 * the dynamic variable space 814 * 815 * (2) Have a starting chunk offset that is beyond the 816 * dtrace_dynvar_t that is at the base of every chunk 817 * 818 * (3) Not span a chunk boundary 819 * 820 * (4) Not be in the tuple space of a dynamic variable 821 * 822 */ 823 if (addr < base) 824 return (0); 825 826 chunkoffs = (addr - base) % dstate->dtds_chunksize; 827 828 if (chunkoffs < sizeof (dtrace_dynvar_t)) 829 return (0); 830 831 if (chunkoffs + sz > dstate->dtds_chunksize) 832 return (0); 833 834 dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs); 835 836 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) 837 return (0); 838 839 if (chunkoffs < sizeof (dtrace_dynvar_t) + 840 ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t))) 841 return (0); 842 843 DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize); 844 return (1); 845 } 846 847 /* 848 * Finally, check the static local and global variables. These checks 849 * take the longest, so we perform them last. 850 */ 851 if (dtrace_canstore_statvar(addr, sz, remain, 852 vstate->dtvs_locals, vstate->dtvs_nlocals)) 853 return (1); 854 855 if (dtrace_canstore_statvar(addr, sz, remain, 856 vstate->dtvs_globals, vstate->dtvs_nglobals)) 857 return (1); 858 859 return (0); 860 } 861 862 863 /* 864 * Convenience routine to check to see if the address is within a memory 865 * region in which a load may be issued given the user's privilege level; 866 * if not, it sets the appropriate error flags and loads 'addr' into the 867 * illegal value slot. 868 * 869 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 870 * appropriate memory access protection. 871 */ 872 static int 873 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 874 dtrace_vstate_t *vstate) 875 { 876 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate)); 877 } 878 879 /* 880 * Implementation of dtrace_canload which communicates the uppoer bound of the 881 * allowed memory region. 882 */ 883 static int 884 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain, 885 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 886 { 887 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 888 file_t *fp; 889 890 /* 891 * If we hold the privilege to read from kernel memory, then 892 * everything is readable. 893 */ 894 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { 895 DTRACE_RANGE_REMAIN(remain, addr, addr, sz); 896 return (1); 897 } 898 899 /* 900 * You can obviously read that which you can store. 901 */ 902 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate)) 903 return (1); 904 905 /* 906 * We're allowed to read from our own string table. 907 */ 908 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab, 909 mstate->dtms_difo->dtdo_strlen)) { 910 DTRACE_RANGE_REMAIN(remain, addr, 911 mstate->dtms_difo->dtdo_strtab, 912 mstate->dtms_difo->dtdo_strlen); 913 return (1); 914 } 915 916 if (vstate->dtvs_state != NULL && 917 dtrace_priv_proc(vstate->dtvs_state)) { 918 proc_t *p; 919 920 /* 921 * When we have privileges to the current process, there are 922 * several context-related kernel structures that are safe to 923 * read, even absent the privilege to read from kernel memory. 924 * These reads are safe because these structures contain only 925 * state that (1) we're permitted to read, (2) is harmless or 926 * (3) contains pointers to additional kernel state that we're 927 * not permitted to read (and as such, do not present an 928 * opportunity for privilege escalation). Finally (and 929 * critically), because of the nature of their relation with 930 * the current thread context, the memory associated with these 931 * structures cannot change over the duration of probe context, 932 * and it is therefore impossible for this memory to be 933 * deallocated and reallocated as something else while it's 934 * being operated upon. 935 */ 936 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) { 937 DTRACE_RANGE_REMAIN(remain, addr, curthread, 938 sizeof (kthread_t)); 939 return (1); 940 } 941 942 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr, 943 sz, curthread->t_procp, sizeof (proc_t))) { 944 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp, 945 sizeof (proc_t)); 946 return (1); 947 } 948 949 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz, 950 curthread->t_cred, sizeof (cred_t))) { 951 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred, 952 sizeof (cred_t)); 953 return (1); 954 } 955 956 #ifdef illumos 957 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz, 958 &(p->p_pidp->pid_id), sizeof (pid_t))) { 959 DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id), 960 sizeof (pid_t)); 961 return (1); 962 } 963 964 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz, 965 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) { 966 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu, 967 offsetof(cpu_t, cpu_pause_thread)); 968 return (1); 969 } 970 #endif 971 } 972 973 if ((fp = mstate->dtms_getf) != NULL) { 974 uintptr_t psz = sizeof (void *); 975 vnode_t *vp; 976 vnodeops_t *op; 977 978 /* 979 * When getf() returns a file_t, the enabling is implicitly 980 * granted the (transient) right to read the returned file_t 981 * as well as the v_path and v_op->vnop_name of the underlying 982 * vnode. These accesses are allowed after a successful 983 * getf() because the members that they refer to cannot change 984 * once set -- and the barrier logic in the kernel's closef() 985 * path assures that the file_t and its referenced vode_t 986 * cannot themselves be stale (that is, it impossible for 987 * either dtms_getf itself or its f_vnode member to reference 988 * freed memory). 989 */ 990 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) { 991 DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t)); 992 return (1); 993 } 994 995 if ((vp = fp->f_vnode) != NULL) { 996 size_t slen; 997 #ifdef illumos 998 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) { 999 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path, 1000 psz); 1001 return (1); 1002 } 1003 slen = strlen(vp->v_path) + 1; 1004 if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) { 1005 DTRACE_RANGE_REMAIN(remain, addr, vp->v_path, 1006 slen); 1007 return (1); 1008 } 1009 #endif 1010 1011 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) { 1012 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op, 1013 psz); 1014 return (1); 1015 } 1016 1017 #ifdef illumos 1018 if ((op = vp->v_op) != NULL && 1019 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) { 1020 DTRACE_RANGE_REMAIN(remain, addr, 1021 &op->vnop_name, psz); 1022 return (1); 1023 } 1024 1025 if (op != NULL && op->vnop_name != NULL && 1026 DTRACE_INRANGE(addr, sz, op->vnop_name, 1027 (slen = strlen(op->vnop_name) + 1))) { 1028 DTRACE_RANGE_REMAIN(remain, addr, 1029 op->vnop_name, slen); 1030 return (1); 1031 } 1032 #endif 1033 } 1034 } 1035 1036 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 1037 *illval = addr; 1038 return (0); 1039 } 1040 1041 /* 1042 * Convenience routine to check to see if a given string is within a memory 1043 * region in which a load may be issued given the user's privilege level; 1044 * this exists so that we don't need to issue unnecessary dtrace_strlen() 1045 * calls in the event that the user has all privileges. 1046 */ 1047 static int 1048 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain, 1049 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1050 { 1051 size_t rsize; 1052 1053 /* 1054 * If we hold the privilege to read from kernel memory, then 1055 * everything is readable. 1056 */ 1057 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { 1058 DTRACE_RANGE_REMAIN(remain, addr, addr, sz); 1059 return (1); 1060 } 1061 1062 /* 1063 * Even if the caller is uninterested in querying the remaining valid 1064 * range, it is required to ensure that the access is allowed. 1065 */ 1066 if (remain == NULL) { 1067 remain = &rsize; 1068 } 1069 if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) { 1070 size_t strsz; 1071 /* 1072 * Perform the strlen after determining the length of the 1073 * memory region which is accessible. This prevents timing 1074 * information from being used to find NULs in memory which is 1075 * not accessible to the caller. 1076 */ 1077 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, 1078 MIN(sz, *remain)); 1079 if (strsz <= *remain) { 1080 return (1); 1081 } 1082 } 1083 1084 return (0); 1085 } 1086 1087 /* 1088 * Convenience routine to check to see if a given variable is within a memory 1089 * region in which a load may be issued given the user's privilege level. 1090 */ 1091 static int 1092 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain, 1093 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1094 { 1095 size_t sz; 1096 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1097 1098 /* 1099 * Calculate the max size before performing any checks since even 1100 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function 1101 * return the max length via 'remain'. 1102 */ 1103 if (type->dtdt_kind == DIF_TYPE_STRING) { 1104 dtrace_state_t *state = vstate->dtvs_state; 1105 1106 if (state != NULL) { 1107 sz = state->dts_options[DTRACEOPT_STRSIZE]; 1108 } else { 1109 /* 1110 * In helper context, we have a NULL state; fall back 1111 * to using the system-wide default for the string size 1112 * in this case. 1113 */ 1114 sz = dtrace_strsize_default; 1115 } 1116 } else { 1117 sz = type->dtdt_size; 1118 } 1119 1120 /* 1121 * If we hold the privilege to read from kernel memory, then 1122 * everything is readable. 1123 */ 1124 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { 1125 DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz); 1126 return (1); 1127 } 1128 1129 if (type->dtdt_kind == DIF_TYPE_STRING) { 1130 return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate, 1131 vstate)); 1132 } 1133 return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate, 1134 vstate)); 1135 } 1136 1137 /* 1138 * Convert a string to a signed integer using safe loads. 1139 * 1140 * NOTE: This function uses various macros from strtolctype.h to manipulate 1141 * digit values, etc -- these have all been checked to ensure they make 1142 * no additional function calls. 1143 */ 1144 static int64_t 1145 dtrace_strtoll(char *input, int base, size_t limit) 1146 { 1147 uintptr_t pos = (uintptr_t)input; 1148 int64_t val = 0; 1149 int x; 1150 boolean_t neg = B_FALSE; 1151 char c, cc, ccc; 1152 uintptr_t end = pos + limit; 1153 1154 /* 1155 * Consume any whitespace preceding digits. 1156 */ 1157 while ((c = dtrace_load8(pos)) == ' ' || c == '\t') 1158 pos++; 1159 1160 /* 1161 * Handle an explicit sign if one is present. 1162 */ 1163 if (c == '-' || c == '+') { 1164 if (c == '-') 1165 neg = B_TRUE; 1166 c = dtrace_load8(++pos); 1167 } 1168 1169 /* 1170 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it 1171 * if present. 1172 */ 1173 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' || 1174 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) { 1175 pos += 2; 1176 c = ccc; 1177 } 1178 1179 /* 1180 * Read in contiguous digits until the first non-digit character. 1181 */ 1182 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base; 1183 c = dtrace_load8(++pos)) 1184 val = val * base + x; 1185 1186 return (neg ? -val : val); 1187 } 1188 1189 /* 1190 * Compare two strings using safe loads. 1191 */ 1192 static int 1193 dtrace_strncmp(char *s1, char *s2, size_t limit) 1194 { 1195 uint8_t c1, c2; 1196 volatile uint16_t *flags; 1197 1198 if (s1 == s2 || limit == 0) 1199 return (0); 1200 1201 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1202 1203 do { 1204 if (s1 == NULL) { 1205 c1 = '\0'; 1206 } else { 1207 c1 = dtrace_load8((uintptr_t)s1++); 1208 } 1209 1210 if (s2 == NULL) { 1211 c2 = '\0'; 1212 } else { 1213 c2 = dtrace_load8((uintptr_t)s2++); 1214 } 1215 1216 if (c1 != c2) 1217 return (c1 - c2); 1218 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 1219 1220 return (0); 1221 } 1222 1223 /* 1224 * Compute strlen(s) for a string using safe memory accesses. The additional 1225 * len parameter is used to specify a maximum length to ensure completion. 1226 */ 1227 static size_t 1228 dtrace_strlen(const char *s, size_t lim) 1229 { 1230 uint_t len; 1231 1232 for (len = 0; len != lim; len++) { 1233 if (dtrace_load8((uintptr_t)s++) == '\0') 1234 break; 1235 } 1236 1237 return (len); 1238 } 1239 1240 /* 1241 * Check if an address falls within a toxic region. 1242 */ 1243 static int 1244 dtrace_istoxic(uintptr_t kaddr, size_t size) 1245 { 1246 uintptr_t taddr, tsize; 1247 int i; 1248 1249 for (i = 0; i < dtrace_toxranges; i++) { 1250 taddr = dtrace_toxrange[i].dtt_base; 1251 tsize = dtrace_toxrange[i].dtt_limit - taddr; 1252 1253 if (kaddr - taddr < tsize) { 1254 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1255 cpu_core[curcpu].cpuc_dtrace_illval = kaddr; 1256 return (1); 1257 } 1258 1259 if (taddr - kaddr < size) { 1260 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 1261 cpu_core[curcpu].cpuc_dtrace_illval = taddr; 1262 return (1); 1263 } 1264 } 1265 1266 return (0); 1267 } 1268 1269 /* 1270 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 1271 * memory specified by the DIF program. The dst is assumed to be safe memory 1272 * that we can store to directly because it is managed by DTrace. As with 1273 * standard bcopy, overlapping copies are handled properly. 1274 */ 1275 static void 1276 dtrace_bcopy(const void *src, void *dst, size_t len) 1277 { 1278 if (len != 0) { 1279 uint8_t *s1 = dst; 1280 const uint8_t *s2 = src; 1281 1282 if (s1 <= s2) { 1283 do { 1284 *s1++ = dtrace_load8((uintptr_t)s2++); 1285 } while (--len != 0); 1286 } else { 1287 s2 += len; 1288 s1 += len; 1289 1290 do { 1291 *--s1 = dtrace_load8((uintptr_t)--s2); 1292 } while (--len != 0); 1293 } 1294 } 1295 } 1296 1297 /* 1298 * Copy src to dst using safe memory accesses, up to either the specified 1299 * length, or the point that a nul byte is encountered. The src is assumed to 1300 * be unsafe memory specified by the DIF program. The dst is assumed to be 1301 * safe memory that we can store to directly because it is managed by DTrace. 1302 * Unlike dtrace_bcopy(), overlapping regions are not handled. 1303 */ 1304 static void 1305 dtrace_strcpy(const void *src, void *dst, size_t len) 1306 { 1307 if (len != 0) { 1308 uint8_t *s1 = dst, c; 1309 const uint8_t *s2 = src; 1310 1311 do { 1312 *s1++ = c = dtrace_load8((uintptr_t)s2++); 1313 } while (--len != 0 && c != '\0'); 1314 } 1315 } 1316 1317 /* 1318 * Copy src to dst, deriving the size and type from the specified (BYREF) 1319 * variable type. The src is assumed to be unsafe memory specified by the DIF 1320 * program. The dst is assumed to be DTrace variable memory that is of the 1321 * specified type; we assume that we can store to directly. 1322 */ 1323 static void 1324 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit) 1325 { 1326 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1327 1328 if (type->dtdt_kind == DIF_TYPE_STRING) { 1329 dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit)); 1330 } else { 1331 dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit)); 1332 } 1333 } 1334 1335 /* 1336 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1337 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1338 * safe memory that we can access directly because it is managed by DTrace. 1339 */ 1340 static int 1341 dtrace_bcmp(const void *s1, const void *s2, size_t len) 1342 { 1343 volatile uint16_t *flags; 1344 1345 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1346 1347 if (s1 == s2) 1348 return (0); 1349 1350 if (s1 == NULL || s2 == NULL) 1351 return (1); 1352 1353 if (s1 != s2 && len != 0) { 1354 const uint8_t *ps1 = s1; 1355 const uint8_t *ps2 = s2; 1356 1357 do { 1358 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1359 return (1); 1360 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1361 } 1362 return (0); 1363 } 1364 1365 /* 1366 * Zero the specified region using a simple byte-by-byte loop. Note that this 1367 * is for safe DTrace-managed memory only. 1368 */ 1369 static void 1370 dtrace_bzero(void *dst, size_t len) 1371 { 1372 uchar_t *cp; 1373 1374 for (cp = dst; len != 0; len--) 1375 *cp++ = 0; 1376 } 1377 1378 static void 1379 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1380 { 1381 uint64_t result[2]; 1382 1383 result[0] = addend1[0] + addend2[0]; 1384 result[1] = addend1[1] + addend2[1] + 1385 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1386 1387 sum[0] = result[0]; 1388 sum[1] = result[1]; 1389 } 1390 1391 /* 1392 * Shift the 128-bit value in a by b. If b is positive, shift left. 1393 * If b is negative, shift right. 1394 */ 1395 static void 1396 dtrace_shift_128(uint64_t *a, int b) 1397 { 1398 uint64_t mask; 1399 1400 if (b == 0) 1401 return; 1402 1403 if (b < 0) { 1404 b = -b; 1405 if (b >= 64) { 1406 a[0] = a[1] >> (b - 64); 1407 a[1] = 0; 1408 } else { 1409 a[0] >>= b; 1410 mask = 1LL << (64 - b); 1411 mask -= 1; 1412 a[0] |= ((a[1] & mask) << (64 - b)); 1413 a[1] >>= b; 1414 } 1415 } else { 1416 if (b >= 64) { 1417 a[1] = a[0] << (b - 64); 1418 a[0] = 0; 1419 } else { 1420 a[1] <<= b; 1421 mask = a[0] >> (64 - b); 1422 a[1] |= mask; 1423 a[0] <<= b; 1424 } 1425 } 1426 } 1427 1428 /* 1429 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1430 * use native multiplication on those, and then re-combine into the 1431 * resulting 128-bit value. 1432 * 1433 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1434 * hi1 * hi2 << 64 + 1435 * hi1 * lo2 << 32 + 1436 * hi2 * lo1 << 32 + 1437 * lo1 * lo2 1438 */ 1439 static void 1440 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1441 { 1442 uint64_t hi1, hi2, lo1, lo2; 1443 uint64_t tmp[2]; 1444 1445 hi1 = factor1 >> 32; 1446 hi2 = factor2 >> 32; 1447 1448 lo1 = factor1 & DT_MASK_LO; 1449 lo2 = factor2 & DT_MASK_LO; 1450 1451 product[0] = lo1 * lo2; 1452 product[1] = hi1 * hi2; 1453 1454 tmp[0] = hi1 * lo2; 1455 tmp[1] = 0; 1456 dtrace_shift_128(tmp, 32); 1457 dtrace_add_128(product, tmp, product); 1458 1459 tmp[0] = hi2 * lo1; 1460 tmp[1] = 0; 1461 dtrace_shift_128(tmp, 32); 1462 dtrace_add_128(product, tmp, product); 1463 } 1464 1465 /* 1466 * This privilege check should be used by actions and subroutines to 1467 * verify that the user credentials of the process that enabled the 1468 * invoking ECB match the target credentials 1469 */ 1470 static int 1471 dtrace_priv_proc_common_user(dtrace_state_t *state) 1472 { 1473 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1474 1475 /* 1476 * We should always have a non-NULL state cred here, since if cred 1477 * is null (anonymous tracing), we fast-path bypass this routine. 1478 */ 1479 ASSERT(s_cr != NULL); 1480 1481 if ((cr = CRED()) != NULL && 1482 s_cr->cr_uid == cr->cr_uid && 1483 s_cr->cr_uid == cr->cr_ruid && 1484 s_cr->cr_uid == cr->cr_suid && 1485 s_cr->cr_gid == cr->cr_gid && 1486 s_cr->cr_gid == cr->cr_rgid && 1487 s_cr->cr_gid == cr->cr_sgid) 1488 return (1); 1489 1490 return (0); 1491 } 1492 1493 /* 1494 * This privilege check should be used by actions and subroutines to 1495 * verify that the zone of the process that enabled the invoking ECB 1496 * matches the target credentials 1497 */ 1498 static int 1499 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1500 { 1501 #ifdef illumos 1502 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1503 1504 /* 1505 * We should always have a non-NULL state cred here, since if cred 1506 * is null (anonymous tracing), we fast-path bypass this routine. 1507 */ 1508 ASSERT(s_cr != NULL); 1509 1510 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone) 1511 return (1); 1512 1513 return (0); 1514 #else 1515 return (1); 1516 #endif 1517 } 1518 1519 /* 1520 * This privilege check should be used by actions and subroutines to 1521 * verify that the process has not setuid or changed credentials. 1522 */ 1523 static int 1524 dtrace_priv_proc_common_nocd(void) 1525 { 1526 proc_t *proc; 1527 1528 if ((proc = ttoproc(curthread)) != NULL && 1529 !(proc->p_flag & SNOCD)) 1530 return (1); 1531 1532 return (0); 1533 } 1534 1535 static int 1536 dtrace_priv_proc_destructive(dtrace_state_t *state) 1537 { 1538 int action = state->dts_cred.dcr_action; 1539 1540 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1541 dtrace_priv_proc_common_zone(state) == 0) 1542 goto bad; 1543 1544 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1545 dtrace_priv_proc_common_user(state) == 0) 1546 goto bad; 1547 1548 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1549 dtrace_priv_proc_common_nocd() == 0) 1550 goto bad; 1551 1552 return (1); 1553 1554 bad: 1555 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1556 1557 return (0); 1558 } 1559 1560 static int 1561 dtrace_priv_proc_control(dtrace_state_t *state) 1562 { 1563 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1564 return (1); 1565 1566 if (dtrace_priv_proc_common_zone(state) && 1567 dtrace_priv_proc_common_user(state) && 1568 dtrace_priv_proc_common_nocd()) 1569 return (1); 1570 1571 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1572 1573 return (0); 1574 } 1575 1576 static int 1577 dtrace_priv_proc(dtrace_state_t *state) 1578 { 1579 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1580 return (1); 1581 1582 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1583 1584 return (0); 1585 } 1586 1587 static int 1588 dtrace_priv_kernel(dtrace_state_t *state) 1589 { 1590 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1591 return (1); 1592 1593 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1594 1595 return (0); 1596 } 1597 1598 static int 1599 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1600 { 1601 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1602 return (1); 1603 1604 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1605 1606 return (0); 1607 } 1608 1609 /* 1610 * Determine if the dte_cond of the specified ECB allows for processing of 1611 * the current probe to continue. Note that this routine may allow continued 1612 * processing, but with access(es) stripped from the mstate's dtms_access 1613 * field. 1614 */ 1615 static int 1616 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate, 1617 dtrace_ecb_t *ecb) 1618 { 1619 dtrace_probe_t *probe = ecb->dte_probe; 1620 dtrace_provider_t *prov = probe->dtpr_provider; 1621 dtrace_pops_t *pops = &prov->dtpv_pops; 1622 int mode = DTRACE_MODE_NOPRIV_DROP; 1623 1624 ASSERT(ecb->dte_cond); 1625 1626 #ifdef illumos 1627 if (pops->dtps_mode != NULL) { 1628 mode = pops->dtps_mode(prov->dtpv_arg, 1629 probe->dtpr_id, probe->dtpr_arg); 1630 1631 ASSERT((mode & DTRACE_MODE_USER) || 1632 (mode & DTRACE_MODE_KERNEL)); 1633 ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) || 1634 (mode & DTRACE_MODE_NOPRIV_DROP)); 1635 } 1636 1637 /* 1638 * If the dte_cond bits indicate that this consumer is only allowed to 1639 * see user-mode firings of this probe, call the provider's dtps_mode() 1640 * entry point to check that the probe was fired while in a user 1641 * context. If that's not the case, use the policy specified by the 1642 * provider to determine if we drop the probe or merely restrict 1643 * operation. 1644 */ 1645 if (ecb->dte_cond & DTRACE_COND_USERMODE) { 1646 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP); 1647 1648 if (!(mode & DTRACE_MODE_USER)) { 1649 if (mode & DTRACE_MODE_NOPRIV_DROP) 1650 return (0); 1651 1652 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS; 1653 } 1654 } 1655 #endif 1656 1657 /* 1658 * This is more subtle than it looks. We have to be absolutely certain 1659 * that CRED() isn't going to change out from under us so it's only 1660 * legit to examine that structure if we're in constrained situations. 1661 * Currently, the only times we'll this check is if a non-super-user 1662 * has enabled the profile or syscall providers -- providers that 1663 * allow visibility of all processes. For the profile case, the check 1664 * above will ensure that we're examining a user context. 1665 */ 1666 if (ecb->dte_cond & DTRACE_COND_OWNER) { 1667 cred_t *cr; 1668 cred_t *s_cr = state->dts_cred.dcr_cred; 1669 proc_t *proc; 1670 1671 ASSERT(s_cr != NULL); 1672 1673 if ((cr = CRED()) == NULL || 1674 s_cr->cr_uid != cr->cr_uid || 1675 s_cr->cr_uid != cr->cr_ruid || 1676 s_cr->cr_uid != cr->cr_suid || 1677 s_cr->cr_gid != cr->cr_gid || 1678 s_cr->cr_gid != cr->cr_rgid || 1679 s_cr->cr_gid != cr->cr_sgid || 1680 (proc = ttoproc(curthread)) == NULL || 1681 (proc->p_flag & SNOCD)) { 1682 if (mode & DTRACE_MODE_NOPRIV_DROP) 1683 return (0); 1684 1685 #ifdef illumos 1686 mstate->dtms_access &= ~DTRACE_ACCESS_PROC; 1687 #endif 1688 } 1689 } 1690 1691 #ifdef illumos 1692 /* 1693 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not 1694 * in our zone, check to see if our mode policy is to restrict rather 1695 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC 1696 * and DTRACE_ACCESS_ARGS 1697 */ 1698 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 1699 cred_t *cr; 1700 cred_t *s_cr = state->dts_cred.dcr_cred; 1701 1702 ASSERT(s_cr != NULL); 1703 1704 if ((cr = CRED()) == NULL || 1705 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) { 1706 if (mode & DTRACE_MODE_NOPRIV_DROP) 1707 return (0); 1708 1709 mstate->dtms_access &= 1710 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS); 1711 } 1712 } 1713 #endif 1714 1715 return (1); 1716 } 1717 1718 /* 1719 * Note: not called from probe context. This function is called 1720 * asynchronously (and at a regular interval) from outside of probe context to 1721 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1722 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1723 */ 1724 void 1725 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1726 { 1727 dtrace_dynvar_t *dirty; 1728 dtrace_dstate_percpu_t *dcpu; 1729 dtrace_dynvar_t **rinsep; 1730 int i, j, work = 0; 1731 1732 for (i = 0; i < NCPU; i++) { 1733 dcpu = &dstate->dtds_percpu[i]; 1734 rinsep = &dcpu->dtdsc_rinsing; 1735 1736 /* 1737 * If the dirty list is NULL, there is no dirty work to do. 1738 */ 1739 if (dcpu->dtdsc_dirty == NULL) 1740 continue; 1741 1742 if (dcpu->dtdsc_rinsing != NULL) { 1743 /* 1744 * If the rinsing list is non-NULL, then it is because 1745 * this CPU was selected to accept another CPU's 1746 * dirty list -- and since that time, dirty buffers 1747 * have accumulated. This is a highly unlikely 1748 * condition, but we choose to ignore the dirty 1749 * buffers -- they'll be picked up a future cleanse. 1750 */ 1751 continue; 1752 } 1753 1754 if (dcpu->dtdsc_clean != NULL) { 1755 /* 1756 * If the clean list is non-NULL, then we're in a 1757 * situation where a CPU has done deallocations (we 1758 * have a non-NULL dirty list) but no allocations (we 1759 * also have a non-NULL clean list). We can't simply 1760 * move the dirty list into the clean list on this 1761 * CPU, yet we also don't want to allow this condition 1762 * to persist, lest a short clean list prevent a 1763 * massive dirty list from being cleaned (which in 1764 * turn could lead to otherwise avoidable dynamic 1765 * drops). To deal with this, we look for some CPU 1766 * with a NULL clean list, NULL dirty list, and NULL 1767 * rinsing list -- and then we borrow this CPU to 1768 * rinse our dirty list. 1769 */ 1770 for (j = 0; j < NCPU; j++) { 1771 dtrace_dstate_percpu_t *rinser; 1772 1773 rinser = &dstate->dtds_percpu[j]; 1774 1775 if (rinser->dtdsc_rinsing != NULL) 1776 continue; 1777 1778 if (rinser->dtdsc_dirty != NULL) 1779 continue; 1780 1781 if (rinser->dtdsc_clean != NULL) 1782 continue; 1783 1784 rinsep = &rinser->dtdsc_rinsing; 1785 break; 1786 } 1787 1788 if (j == NCPU) { 1789 /* 1790 * We were unable to find another CPU that 1791 * could accept this dirty list -- we are 1792 * therefore unable to clean it now. 1793 */ 1794 dtrace_dynvar_failclean++; 1795 continue; 1796 } 1797 } 1798 1799 work = 1; 1800 1801 /* 1802 * Atomically move the dirty list aside. 1803 */ 1804 do { 1805 dirty = dcpu->dtdsc_dirty; 1806 1807 /* 1808 * Before we zap the dirty list, set the rinsing list. 1809 * (This allows for a potential assertion in 1810 * dtrace_dynvar(): if a free dynamic variable appears 1811 * on a hash chain, either the dirty list or the 1812 * rinsing list for some CPU must be non-NULL.) 1813 */ 1814 *rinsep = dirty; 1815 dtrace_membar_producer(); 1816 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1817 dirty, NULL) != dirty); 1818 } 1819 1820 if (!work) { 1821 /* 1822 * We have no work to do; we can simply return. 1823 */ 1824 return; 1825 } 1826 1827 dtrace_sync(); 1828 1829 for (i = 0; i < NCPU; i++) { 1830 dcpu = &dstate->dtds_percpu[i]; 1831 1832 if (dcpu->dtdsc_rinsing == NULL) 1833 continue; 1834 1835 /* 1836 * We are now guaranteed that no hash chain contains a pointer 1837 * into this dirty list; we can make it clean. 1838 */ 1839 ASSERT(dcpu->dtdsc_clean == NULL); 1840 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1841 dcpu->dtdsc_rinsing = NULL; 1842 } 1843 1844 /* 1845 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1846 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1847 * This prevents a race whereby a CPU incorrectly decides that 1848 * the state should be something other than DTRACE_DSTATE_CLEAN 1849 * after dtrace_dynvar_clean() has completed. 1850 */ 1851 dtrace_sync(); 1852 1853 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1854 } 1855 1856 /* 1857 * Depending on the value of the op parameter, this function looks-up, 1858 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1859 * allocation is requested, this function will return a pointer to a 1860 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1861 * variable can be allocated. If NULL is returned, the appropriate counter 1862 * will be incremented. 1863 */ 1864 dtrace_dynvar_t * 1865 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1866 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1867 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1868 { 1869 uint64_t hashval = DTRACE_DYNHASH_VALID; 1870 dtrace_dynhash_t *hash = dstate->dtds_hash; 1871 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1872 processorid_t me = curcpu, cpu = me; 1873 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1874 size_t bucket, ksize; 1875 size_t chunksize = dstate->dtds_chunksize; 1876 uintptr_t kdata, lock, nstate; 1877 uint_t i; 1878 1879 ASSERT(nkeys != 0); 1880 1881 /* 1882 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1883 * algorithm. For the by-value portions, we perform the algorithm in 1884 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1885 * bit, and seems to have only a minute effect on distribution. For 1886 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1887 * over each referenced byte. It's painful to do this, but it's much 1888 * better than pathological hash distribution. The efficacy of the 1889 * hashing algorithm (and a comparison with other algorithms) may be 1890 * found by running the ::dtrace_dynstat MDB dcmd. 1891 */ 1892 for (i = 0; i < nkeys; i++) { 1893 if (key[i].dttk_size == 0) { 1894 uint64_t val = key[i].dttk_value; 1895 1896 hashval += (val >> 48) & 0xffff; 1897 hashval += (hashval << 10); 1898 hashval ^= (hashval >> 6); 1899 1900 hashval += (val >> 32) & 0xffff; 1901 hashval += (hashval << 10); 1902 hashval ^= (hashval >> 6); 1903 1904 hashval += (val >> 16) & 0xffff; 1905 hashval += (hashval << 10); 1906 hashval ^= (hashval >> 6); 1907 1908 hashval += val & 0xffff; 1909 hashval += (hashval << 10); 1910 hashval ^= (hashval >> 6); 1911 } else { 1912 /* 1913 * This is incredibly painful, but it beats the hell 1914 * out of the alternative. 1915 */ 1916 uint64_t j, size = key[i].dttk_size; 1917 uintptr_t base = (uintptr_t)key[i].dttk_value; 1918 1919 if (!dtrace_canload(base, size, mstate, vstate)) 1920 break; 1921 1922 for (j = 0; j < size; j++) { 1923 hashval += dtrace_load8(base + j); 1924 hashval += (hashval << 10); 1925 hashval ^= (hashval >> 6); 1926 } 1927 } 1928 } 1929 1930 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1931 return (NULL); 1932 1933 hashval += (hashval << 3); 1934 hashval ^= (hashval >> 11); 1935 hashval += (hashval << 15); 1936 1937 /* 1938 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1939 * comes out to be one of our two sentinel hash values. If this 1940 * actually happens, we set the hashval to be a value known to be a 1941 * non-sentinel value. 1942 */ 1943 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1944 hashval = DTRACE_DYNHASH_VALID; 1945 1946 /* 1947 * Yes, it's painful to do a divide here. If the cycle count becomes 1948 * important here, tricks can be pulled to reduce it. (However, it's 1949 * critical that hash collisions be kept to an absolute minimum; 1950 * they're much more painful than a divide.) It's better to have a 1951 * solution that generates few collisions and still keeps things 1952 * relatively simple. 1953 */ 1954 bucket = hashval % dstate->dtds_hashsize; 1955 1956 if (op == DTRACE_DYNVAR_DEALLOC) { 1957 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1958 1959 for (;;) { 1960 while ((lock = *lockp) & 1) 1961 continue; 1962 1963 if (dtrace_casptr((volatile void *)lockp, 1964 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1965 break; 1966 } 1967 1968 dtrace_membar_producer(); 1969 } 1970 1971 top: 1972 prev = NULL; 1973 lock = hash[bucket].dtdh_lock; 1974 1975 dtrace_membar_consumer(); 1976 1977 start = hash[bucket].dtdh_chain; 1978 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1979 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1980 op != DTRACE_DYNVAR_DEALLOC)); 1981 1982 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1983 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1984 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1985 1986 if (dvar->dtdv_hashval != hashval) { 1987 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1988 /* 1989 * We've reached the sink, and therefore the 1990 * end of the hash chain; we can kick out of 1991 * the loop knowing that we have seen a valid 1992 * snapshot of state. 1993 */ 1994 ASSERT(dvar->dtdv_next == NULL); 1995 ASSERT(dvar == &dtrace_dynhash_sink); 1996 break; 1997 } 1998 1999 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 2000 /* 2001 * We've gone off the rails: somewhere along 2002 * the line, one of the members of this hash 2003 * chain was deleted. Note that we could also 2004 * detect this by simply letting this loop run 2005 * to completion, as we would eventually hit 2006 * the end of the dirty list. However, we 2007 * want to avoid running the length of the 2008 * dirty list unnecessarily (it might be quite 2009 * long), so we catch this as early as 2010 * possible by detecting the hash marker. In 2011 * this case, we simply set dvar to NULL and 2012 * break; the conditional after the loop will 2013 * send us back to top. 2014 */ 2015 dvar = NULL; 2016 break; 2017 } 2018 2019 goto next; 2020 } 2021 2022 if (dtuple->dtt_nkeys != nkeys) 2023 goto next; 2024 2025 for (i = 0; i < nkeys; i++, dkey++) { 2026 if (dkey->dttk_size != key[i].dttk_size) 2027 goto next; /* size or type mismatch */ 2028 2029 if (dkey->dttk_size != 0) { 2030 if (dtrace_bcmp( 2031 (void *)(uintptr_t)key[i].dttk_value, 2032 (void *)(uintptr_t)dkey->dttk_value, 2033 dkey->dttk_size)) 2034 goto next; 2035 } else { 2036 if (dkey->dttk_value != key[i].dttk_value) 2037 goto next; 2038 } 2039 } 2040 2041 if (op != DTRACE_DYNVAR_DEALLOC) 2042 return (dvar); 2043 2044 ASSERT(dvar->dtdv_next == NULL || 2045 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 2046 2047 if (prev != NULL) { 2048 ASSERT(hash[bucket].dtdh_chain != dvar); 2049 ASSERT(start != dvar); 2050 ASSERT(prev->dtdv_next == dvar); 2051 prev->dtdv_next = dvar->dtdv_next; 2052 } else { 2053 if (dtrace_casptr(&hash[bucket].dtdh_chain, 2054 start, dvar->dtdv_next) != start) { 2055 /* 2056 * We have failed to atomically swing the 2057 * hash table head pointer, presumably because 2058 * of a conflicting allocation on another CPU. 2059 * We need to reread the hash chain and try 2060 * again. 2061 */ 2062 goto top; 2063 } 2064 } 2065 2066 dtrace_membar_producer(); 2067 2068 /* 2069 * Now set the hash value to indicate that it's free. 2070 */ 2071 ASSERT(hash[bucket].dtdh_chain != dvar); 2072 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2073 2074 dtrace_membar_producer(); 2075 2076 /* 2077 * Set the next pointer to point at the dirty list, and 2078 * atomically swing the dirty pointer to the newly freed dvar. 2079 */ 2080 do { 2081 next = dcpu->dtdsc_dirty; 2082 dvar->dtdv_next = next; 2083 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 2084 2085 /* 2086 * Finally, unlock this hash bucket. 2087 */ 2088 ASSERT(hash[bucket].dtdh_lock == lock); 2089 ASSERT(lock & 1); 2090 hash[bucket].dtdh_lock++; 2091 2092 return (NULL); 2093 next: 2094 prev = dvar; 2095 continue; 2096 } 2097 2098 if (dvar == NULL) { 2099 /* 2100 * If dvar is NULL, it is because we went off the rails: 2101 * one of the elements that we traversed in the hash chain 2102 * was deleted while we were traversing it. In this case, 2103 * we assert that we aren't doing a dealloc (deallocs lock 2104 * the hash bucket to prevent themselves from racing with 2105 * one another), and retry the hash chain traversal. 2106 */ 2107 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 2108 goto top; 2109 } 2110 2111 if (op != DTRACE_DYNVAR_ALLOC) { 2112 /* 2113 * If we are not to allocate a new variable, we want to 2114 * return NULL now. Before we return, check that the value 2115 * of the lock word hasn't changed. If it has, we may have 2116 * seen an inconsistent snapshot. 2117 */ 2118 if (op == DTRACE_DYNVAR_NOALLOC) { 2119 if (hash[bucket].dtdh_lock != lock) 2120 goto top; 2121 } else { 2122 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 2123 ASSERT(hash[bucket].dtdh_lock == lock); 2124 ASSERT(lock & 1); 2125 hash[bucket].dtdh_lock++; 2126 } 2127 2128 return (NULL); 2129 } 2130 2131 /* 2132 * We need to allocate a new dynamic variable. The size we need is the 2133 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 2134 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 2135 * the size of any referred-to data (dsize). We then round the final 2136 * size up to the chunksize for allocation. 2137 */ 2138 for (ksize = 0, i = 0; i < nkeys; i++) 2139 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 2140 2141 /* 2142 * This should be pretty much impossible, but could happen if, say, 2143 * strange DIF specified the tuple. Ideally, this should be an 2144 * assertion and not an error condition -- but that requires that the 2145 * chunksize calculation in dtrace_difo_chunksize() be absolutely 2146 * bullet-proof. (That is, it must not be able to be fooled by 2147 * malicious DIF.) Given the lack of backwards branches in DIF, 2148 * solving this would presumably not amount to solving the Halting 2149 * Problem -- but it still seems awfully hard. 2150 */ 2151 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 2152 ksize + dsize > chunksize) { 2153 dcpu->dtdsc_drops++; 2154 return (NULL); 2155 } 2156 2157 nstate = DTRACE_DSTATE_EMPTY; 2158 2159 do { 2160 retry: 2161 free = dcpu->dtdsc_free; 2162 2163 if (free == NULL) { 2164 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 2165 void *rval; 2166 2167 if (clean == NULL) { 2168 /* 2169 * We're out of dynamic variable space on 2170 * this CPU. Unless we have tried all CPUs, 2171 * we'll try to allocate from a different 2172 * CPU. 2173 */ 2174 switch (dstate->dtds_state) { 2175 case DTRACE_DSTATE_CLEAN: { 2176 void *sp = &dstate->dtds_state; 2177 2178 if (++cpu >= NCPU) 2179 cpu = 0; 2180 2181 if (dcpu->dtdsc_dirty != NULL && 2182 nstate == DTRACE_DSTATE_EMPTY) 2183 nstate = DTRACE_DSTATE_DIRTY; 2184 2185 if (dcpu->dtdsc_rinsing != NULL) 2186 nstate = DTRACE_DSTATE_RINSING; 2187 2188 dcpu = &dstate->dtds_percpu[cpu]; 2189 2190 if (cpu != me) 2191 goto retry; 2192 2193 (void) dtrace_cas32(sp, 2194 DTRACE_DSTATE_CLEAN, nstate); 2195 2196 /* 2197 * To increment the correct bean 2198 * counter, take another lap. 2199 */ 2200 goto retry; 2201 } 2202 2203 case DTRACE_DSTATE_DIRTY: 2204 dcpu->dtdsc_dirty_drops++; 2205 break; 2206 2207 case DTRACE_DSTATE_RINSING: 2208 dcpu->dtdsc_rinsing_drops++; 2209 break; 2210 2211 case DTRACE_DSTATE_EMPTY: 2212 dcpu->dtdsc_drops++; 2213 break; 2214 } 2215 2216 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 2217 return (NULL); 2218 } 2219 2220 /* 2221 * The clean list appears to be non-empty. We want to 2222 * move the clean list to the free list; we start by 2223 * moving the clean pointer aside. 2224 */ 2225 if (dtrace_casptr(&dcpu->dtdsc_clean, 2226 clean, NULL) != clean) { 2227 /* 2228 * We are in one of two situations: 2229 * 2230 * (a) The clean list was switched to the 2231 * free list by another CPU. 2232 * 2233 * (b) The clean list was added to by the 2234 * cleansing cyclic. 2235 * 2236 * In either of these situations, we can 2237 * just reattempt the free list allocation. 2238 */ 2239 goto retry; 2240 } 2241 2242 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 2243 2244 /* 2245 * Now we'll move the clean list to our free list. 2246 * It's impossible for this to fail: the only way 2247 * the free list can be updated is through this 2248 * code path, and only one CPU can own the clean list. 2249 * Thus, it would only be possible for this to fail if 2250 * this code were racing with dtrace_dynvar_clean(). 2251 * (That is, if dtrace_dynvar_clean() updated the clean 2252 * list, and we ended up racing to update the free 2253 * list.) This race is prevented by the dtrace_sync() 2254 * in dtrace_dynvar_clean() -- which flushes the 2255 * owners of the clean lists out before resetting 2256 * the clean lists. 2257 */ 2258 dcpu = &dstate->dtds_percpu[me]; 2259 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 2260 ASSERT(rval == NULL); 2261 goto retry; 2262 } 2263 2264 dvar = free; 2265 new_free = dvar->dtdv_next; 2266 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 2267 2268 /* 2269 * We have now allocated a new chunk. We copy the tuple keys into the 2270 * tuple array and copy any referenced key data into the data space 2271 * following the tuple array. As we do this, we relocate dttk_value 2272 * in the final tuple to point to the key data address in the chunk. 2273 */ 2274 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 2275 dvar->dtdv_data = (void *)(kdata + ksize); 2276 dvar->dtdv_tuple.dtt_nkeys = nkeys; 2277 2278 for (i = 0; i < nkeys; i++) { 2279 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 2280 size_t kesize = key[i].dttk_size; 2281 2282 if (kesize != 0) { 2283 dtrace_bcopy( 2284 (const void *)(uintptr_t)key[i].dttk_value, 2285 (void *)kdata, kesize); 2286 dkey->dttk_value = kdata; 2287 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 2288 } else { 2289 dkey->dttk_value = key[i].dttk_value; 2290 } 2291 2292 dkey->dttk_size = kesize; 2293 } 2294 2295 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 2296 dvar->dtdv_hashval = hashval; 2297 dvar->dtdv_next = start; 2298 2299 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 2300 return (dvar); 2301 2302 /* 2303 * The cas has failed. Either another CPU is adding an element to 2304 * this hash chain, or another CPU is deleting an element from this 2305 * hash chain. The simplest way to deal with both of these cases 2306 * (though not necessarily the most efficient) is to free our 2307 * allocated block and re-attempt it all. Note that the free is 2308 * to the dirty list and _not_ to the free list. This is to prevent 2309 * races with allocators, above. 2310 */ 2311 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2312 2313 dtrace_membar_producer(); 2314 2315 do { 2316 free = dcpu->dtdsc_dirty; 2317 dvar->dtdv_next = free; 2318 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 2319 2320 goto top; 2321 } 2322 2323 /*ARGSUSED*/ 2324 static void 2325 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2326 { 2327 if ((int64_t)nval < (int64_t)*oval) 2328 *oval = nval; 2329 } 2330 2331 /*ARGSUSED*/ 2332 static void 2333 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2334 { 2335 if ((int64_t)nval > (int64_t)*oval) 2336 *oval = nval; 2337 } 2338 2339 static void 2340 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2341 { 2342 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2343 int64_t val = (int64_t)nval; 2344 2345 if (val < 0) { 2346 for (i = 0; i < zero; i++) { 2347 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2348 quanta[i] += incr; 2349 return; 2350 } 2351 } 2352 } else { 2353 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2354 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2355 quanta[i - 1] += incr; 2356 return; 2357 } 2358 } 2359 2360 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2361 return; 2362 } 2363 2364 ASSERT(0); 2365 } 2366 2367 static void 2368 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2369 { 2370 uint64_t arg = *lquanta++; 2371 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2372 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2373 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2374 int32_t val = (int32_t)nval, level; 2375 2376 ASSERT(step != 0); 2377 ASSERT(levels != 0); 2378 2379 if (val < base) { 2380 /* 2381 * This is an underflow. 2382 */ 2383 lquanta[0] += incr; 2384 return; 2385 } 2386 2387 level = (val - base) / step; 2388 2389 if (level < levels) { 2390 lquanta[level + 1] += incr; 2391 return; 2392 } 2393 2394 /* 2395 * This is an overflow. 2396 */ 2397 lquanta[levels + 1] += incr; 2398 } 2399 2400 static int 2401 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2402 uint16_t high, uint16_t nsteps, int64_t value) 2403 { 2404 int64_t this = 1, last, next; 2405 int base = 1, order; 2406 2407 ASSERT(factor <= nsteps); 2408 ASSERT(nsteps % factor == 0); 2409 2410 for (order = 0; order < low; order++) 2411 this *= factor; 2412 2413 /* 2414 * If our value is less than our factor taken to the power of the 2415 * low order of magnitude, it goes into the zeroth bucket. 2416 */ 2417 if (value < (last = this)) 2418 return (0); 2419 2420 for (this *= factor; order <= high; order++) { 2421 int nbuckets = this > nsteps ? nsteps : this; 2422 2423 if ((next = this * factor) < this) { 2424 /* 2425 * We should not generally get log/linear quantizations 2426 * with a high magnitude that allows 64-bits to 2427 * overflow, but we nonetheless protect against this 2428 * by explicitly checking for overflow, and clamping 2429 * our value accordingly. 2430 */ 2431 value = this - 1; 2432 } 2433 2434 if (value < this) { 2435 /* 2436 * If our value lies within this order of magnitude, 2437 * determine its position by taking the offset within 2438 * the order of magnitude, dividing by the bucket 2439 * width, and adding to our (accumulated) base. 2440 */ 2441 return (base + (value - last) / (this / nbuckets)); 2442 } 2443 2444 base += nbuckets - (nbuckets / factor); 2445 last = this; 2446 this = next; 2447 } 2448 2449 /* 2450 * Our value is greater than or equal to our factor taken to the 2451 * power of one plus the high magnitude -- return the top bucket. 2452 */ 2453 return (base); 2454 } 2455 2456 static void 2457 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2458 { 2459 uint64_t arg = *llquanta++; 2460 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2461 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2462 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2463 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2464 2465 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2466 low, high, nsteps, nval)] += incr; 2467 } 2468 2469 /*ARGSUSED*/ 2470 static void 2471 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2472 { 2473 data[0]++; 2474 data[1] += nval; 2475 } 2476 2477 /*ARGSUSED*/ 2478 static void 2479 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2480 { 2481 int64_t snval = (int64_t)nval; 2482 uint64_t tmp[2]; 2483 2484 data[0]++; 2485 data[1] += nval; 2486 2487 /* 2488 * What we want to say here is: 2489 * 2490 * data[2] += nval * nval; 2491 * 2492 * But given that nval is 64-bit, we could easily overflow, so 2493 * we do this as 128-bit arithmetic. 2494 */ 2495 if (snval < 0) 2496 snval = -snval; 2497 2498 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2499 dtrace_add_128(data + 2, tmp, data + 2); 2500 } 2501 2502 /*ARGSUSED*/ 2503 static void 2504 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2505 { 2506 *oval = *oval + 1; 2507 } 2508 2509 /*ARGSUSED*/ 2510 static void 2511 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2512 { 2513 *oval += nval; 2514 } 2515 2516 /* 2517 * Aggregate given the tuple in the principal data buffer, and the aggregating 2518 * action denoted by the specified dtrace_aggregation_t. The aggregation 2519 * buffer is specified as the buf parameter. This routine does not return 2520 * failure; if there is no space in the aggregation buffer, the data will be 2521 * dropped, and a corresponding counter incremented. 2522 */ 2523 static void 2524 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2525 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2526 { 2527 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2528 uint32_t i, ndx, size, fsize; 2529 uint32_t align = sizeof (uint64_t) - 1; 2530 dtrace_aggbuffer_t *agb; 2531 dtrace_aggkey_t *key; 2532 uint32_t hashval = 0, limit, isstr; 2533 caddr_t tomax, data, kdata; 2534 dtrace_actkind_t action; 2535 dtrace_action_t *act; 2536 uintptr_t offs; 2537 2538 if (buf == NULL) 2539 return; 2540 2541 if (!agg->dtag_hasarg) { 2542 /* 2543 * Currently, only quantize() and lquantize() take additional 2544 * arguments, and they have the same semantics: an increment 2545 * value that defaults to 1 when not present. If additional 2546 * aggregating actions take arguments, the setting of the 2547 * default argument value will presumably have to become more 2548 * sophisticated... 2549 */ 2550 arg = 1; 2551 } 2552 2553 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2554 size = rec->dtrd_offset - agg->dtag_base; 2555 fsize = size + rec->dtrd_size; 2556 2557 ASSERT(dbuf->dtb_tomax != NULL); 2558 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2559 2560 if ((tomax = buf->dtb_tomax) == NULL) { 2561 dtrace_buffer_drop(buf); 2562 return; 2563 } 2564 2565 /* 2566 * The metastructure is always at the bottom of the buffer. 2567 */ 2568 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2569 sizeof (dtrace_aggbuffer_t)); 2570 2571 if (buf->dtb_offset == 0) { 2572 /* 2573 * We just kludge up approximately 1/8th of the size to be 2574 * buckets. If this guess ends up being routinely 2575 * off-the-mark, we may need to dynamically readjust this 2576 * based on past performance. 2577 */ 2578 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2579 2580 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2581 (uintptr_t)tomax || hashsize == 0) { 2582 /* 2583 * We've been given a ludicrously small buffer; 2584 * increment our drop count and leave. 2585 */ 2586 dtrace_buffer_drop(buf); 2587 return; 2588 } 2589 2590 /* 2591 * And now, a pathetic attempt to try to get a an odd (or 2592 * perchance, a prime) hash size for better hash distribution. 2593 */ 2594 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2595 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2596 2597 agb->dtagb_hashsize = hashsize; 2598 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2599 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2600 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2601 2602 for (i = 0; i < agb->dtagb_hashsize; i++) 2603 agb->dtagb_hash[i] = NULL; 2604 } 2605 2606 ASSERT(agg->dtag_first != NULL); 2607 ASSERT(agg->dtag_first->dta_intuple); 2608 2609 /* 2610 * Calculate the hash value based on the key. Note that we _don't_ 2611 * include the aggid in the hashing (but we will store it as part of 2612 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2613 * algorithm: a simple, quick algorithm that has no known funnels, and 2614 * gets good distribution in practice. The efficacy of the hashing 2615 * algorithm (and a comparison with other algorithms) may be found by 2616 * running the ::dtrace_aggstat MDB dcmd. 2617 */ 2618 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2619 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2620 limit = i + act->dta_rec.dtrd_size; 2621 ASSERT(limit <= size); 2622 isstr = DTRACEACT_ISSTRING(act); 2623 2624 for (; i < limit; i++) { 2625 hashval += data[i]; 2626 hashval += (hashval << 10); 2627 hashval ^= (hashval >> 6); 2628 2629 if (isstr && data[i] == '\0') 2630 break; 2631 } 2632 } 2633 2634 hashval += (hashval << 3); 2635 hashval ^= (hashval >> 11); 2636 hashval += (hashval << 15); 2637 2638 /* 2639 * Yes, the divide here is expensive -- but it's generally the least 2640 * of the performance issues given the amount of data that we iterate 2641 * over to compute hash values, compare data, etc. 2642 */ 2643 ndx = hashval % agb->dtagb_hashsize; 2644 2645 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2646 ASSERT((caddr_t)key >= tomax); 2647 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2648 2649 if (hashval != key->dtak_hashval || key->dtak_size != size) 2650 continue; 2651 2652 kdata = key->dtak_data; 2653 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2654 2655 for (act = agg->dtag_first; act->dta_intuple; 2656 act = act->dta_next) { 2657 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2658 limit = i + act->dta_rec.dtrd_size; 2659 ASSERT(limit <= size); 2660 isstr = DTRACEACT_ISSTRING(act); 2661 2662 for (; i < limit; i++) { 2663 if (kdata[i] != data[i]) 2664 goto next; 2665 2666 if (isstr && data[i] == '\0') 2667 break; 2668 } 2669 } 2670 2671 if (action != key->dtak_action) { 2672 /* 2673 * We are aggregating on the same value in the same 2674 * aggregation with two different aggregating actions. 2675 * (This should have been picked up in the compiler, 2676 * so we may be dealing with errant or devious DIF.) 2677 * This is an error condition; we indicate as much, 2678 * and return. 2679 */ 2680 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2681 return; 2682 } 2683 2684 /* 2685 * This is a hit: we need to apply the aggregator to 2686 * the value at this key. 2687 */ 2688 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2689 return; 2690 next: 2691 continue; 2692 } 2693 2694 /* 2695 * We didn't find it. We need to allocate some zero-filled space, 2696 * link it into the hash table appropriately, and apply the aggregator 2697 * to the (zero-filled) value. 2698 */ 2699 offs = buf->dtb_offset; 2700 while (offs & (align - 1)) 2701 offs += sizeof (uint32_t); 2702 2703 /* 2704 * If we don't have enough room to both allocate a new key _and_ 2705 * its associated data, increment the drop count and return. 2706 */ 2707 if ((uintptr_t)tomax + offs + fsize > 2708 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2709 dtrace_buffer_drop(buf); 2710 return; 2711 } 2712 2713 /*CONSTCOND*/ 2714 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2715 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2716 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2717 2718 key->dtak_data = kdata = tomax + offs; 2719 buf->dtb_offset = offs + fsize; 2720 2721 /* 2722 * Now copy the data across. 2723 */ 2724 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2725 2726 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2727 kdata[i] = data[i]; 2728 2729 /* 2730 * Because strings are not zeroed out by default, we need to iterate 2731 * looking for actions that store strings, and we need to explicitly 2732 * pad these strings out with zeroes. 2733 */ 2734 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2735 int nul; 2736 2737 if (!DTRACEACT_ISSTRING(act)) 2738 continue; 2739 2740 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2741 limit = i + act->dta_rec.dtrd_size; 2742 ASSERT(limit <= size); 2743 2744 for (nul = 0; i < limit; i++) { 2745 if (nul) { 2746 kdata[i] = '\0'; 2747 continue; 2748 } 2749 2750 if (data[i] != '\0') 2751 continue; 2752 2753 nul = 1; 2754 } 2755 } 2756 2757 for (i = size; i < fsize; i++) 2758 kdata[i] = 0; 2759 2760 key->dtak_hashval = hashval; 2761 key->dtak_size = size; 2762 key->dtak_action = action; 2763 key->dtak_next = agb->dtagb_hash[ndx]; 2764 agb->dtagb_hash[ndx] = key; 2765 2766 /* 2767 * Finally, apply the aggregator. 2768 */ 2769 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2770 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2771 } 2772 2773 /* 2774 * Given consumer state, this routine finds a speculation in the INACTIVE 2775 * state and transitions it into the ACTIVE state. If there is no speculation 2776 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2777 * incremented -- it is up to the caller to take appropriate action. 2778 */ 2779 static int 2780 dtrace_speculation(dtrace_state_t *state) 2781 { 2782 int i = 0; 2783 dtrace_speculation_state_t current; 2784 uint32_t *stat = &state->dts_speculations_unavail, count; 2785 2786 while (i < state->dts_nspeculations) { 2787 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2788 2789 current = spec->dtsp_state; 2790 2791 if (current != DTRACESPEC_INACTIVE) { 2792 if (current == DTRACESPEC_COMMITTINGMANY || 2793 current == DTRACESPEC_COMMITTING || 2794 current == DTRACESPEC_DISCARDING) 2795 stat = &state->dts_speculations_busy; 2796 i++; 2797 continue; 2798 } 2799 2800 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2801 current, DTRACESPEC_ACTIVE) == current) 2802 return (i + 1); 2803 } 2804 2805 /* 2806 * We couldn't find a speculation. If we found as much as a single 2807 * busy speculation buffer, we'll attribute this failure as "busy" 2808 * instead of "unavail". 2809 */ 2810 do { 2811 count = *stat; 2812 } while (dtrace_cas32(stat, count, count + 1) != count); 2813 2814 return (0); 2815 } 2816 2817 /* 2818 * This routine commits an active speculation. If the specified speculation 2819 * is not in a valid state to perform a commit(), this routine will silently do 2820 * nothing. The state of the specified speculation is transitioned according 2821 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2822 */ 2823 static void 2824 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2825 dtrace_specid_t which) 2826 { 2827 dtrace_speculation_t *spec; 2828 dtrace_buffer_t *src, *dest; 2829 uintptr_t daddr, saddr, dlimit, slimit; 2830 dtrace_speculation_state_t current, new = 0; 2831 intptr_t offs; 2832 uint64_t timestamp; 2833 2834 if (which == 0) 2835 return; 2836 2837 if (which > state->dts_nspeculations) { 2838 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2839 return; 2840 } 2841 2842 spec = &state->dts_speculations[which - 1]; 2843 src = &spec->dtsp_buffer[cpu]; 2844 dest = &state->dts_buffer[cpu]; 2845 2846 do { 2847 current = spec->dtsp_state; 2848 2849 if (current == DTRACESPEC_COMMITTINGMANY) 2850 break; 2851 2852 switch (current) { 2853 case DTRACESPEC_INACTIVE: 2854 case DTRACESPEC_DISCARDING: 2855 return; 2856 2857 case DTRACESPEC_COMMITTING: 2858 /* 2859 * This is only possible if we are (a) commit()'ing 2860 * without having done a prior speculate() on this CPU 2861 * and (b) racing with another commit() on a different 2862 * CPU. There's nothing to do -- we just assert that 2863 * our offset is 0. 2864 */ 2865 ASSERT(src->dtb_offset == 0); 2866 return; 2867 2868 case DTRACESPEC_ACTIVE: 2869 new = DTRACESPEC_COMMITTING; 2870 break; 2871 2872 case DTRACESPEC_ACTIVEONE: 2873 /* 2874 * This speculation is active on one CPU. If our 2875 * buffer offset is non-zero, we know that the one CPU 2876 * must be us. Otherwise, we are committing on a 2877 * different CPU from the speculate(), and we must 2878 * rely on being asynchronously cleaned. 2879 */ 2880 if (src->dtb_offset != 0) { 2881 new = DTRACESPEC_COMMITTING; 2882 break; 2883 } 2884 /*FALLTHROUGH*/ 2885 2886 case DTRACESPEC_ACTIVEMANY: 2887 new = DTRACESPEC_COMMITTINGMANY; 2888 break; 2889 2890 default: 2891 ASSERT(0); 2892 } 2893 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2894 current, new) != current); 2895 2896 /* 2897 * We have set the state to indicate that we are committing this 2898 * speculation. Now reserve the necessary space in the destination 2899 * buffer. 2900 */ 2901 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2902 sizeof (uint64_t), state, NULL)) < 0) { 2903 dtrace_buffer_drop(dest); 2904 goto out; 2905 } 2906 2907 /* 2908 * We have sufficient space to copy the speculative buffer into the 2909 * primary buffer. First, modify the speculative buffer, filling 2910 * in the timestamp of all entries with the current time. The data 2911 * must have the commit() time rather than the time it was traced, 2912 * so that all entries in the primary buffer are in timestamp order. 2913 */ 2914 timestamp = dtrace_gethrtime(); 2915 saddr = (uintptr_t)src->dtb_tomax; 2916 slimit = saddr + src->dtb_offset; 2917 while (saddr < slimit) { 2918 size_t size; 2919 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2920 2921 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2922 saddr += sizeof (dtrace_epid_t); 2923 continue; 2924 } 2925 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2926 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2927 2928 ASSERT3U(saddr + size, <=, slimit); 2929 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2930 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2931 2932 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2933 2934 saddr += size; 2935 } 2936 2937 /* 2938 * Copy the buffer across. (Note that this is a 2939 * highly subobtimal bcopy(); in the unlikely event that this becomes 2940 * a serious performance issue, a high-performance DTrace-specific 2941 * bcopy() should obviously be invented.) 2942 */ 2943 daddr = (uintptr_t)dest->dtb_tomax + offs; 2944 dlimit = daddr + src->dtb_offset; 2945 saddr = (uintptr_t)src->dtb_tomax; 2946 2947 /* 2948 * First, the aligned portion. 2949 */ 2950 while (dlimit - daddr >= sizeof (uint64_t)) { 2951 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2952 2953 daddr += sizeof (uint64_t); 2954 saddr += sizeof (uint64_t); 2955 } 2956 2957 /* 2958 * Now any left-over bit... 2959 */ 2960 while (dlimit - daddr) 2961 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2962 2963 /* 2964 * Finally, commit the reserved space in the destination buffer. 2965 */ 2966 dest->dtb_offset = offs + src->dtb_offset; 2967 2968 out: 2969 /* 2970 * If we're lucky enough to be the only active CPU on this speculation 2971 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2972 */ 2973 if (current == DTRACESPEC_ACTIVE || 2974 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2975 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2976 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2977 2978 ASSERT(rval == DTRACESPEC_COMMITTING); 2979 } 2980 2981 src->dtb_offset = 0; 2982 src->dtb_xamot_drops += src->dtb_drops; 2983 src->dtb_drops = 0; 2984 } 2985 2986 /* 2987 * This routine discards an active speculation. If the specified speculation 2988 * is not in a valid state to perform a discard(), this routine will silently 2989 * do nothing. The state of the specified speculation is transitioned 2990 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2991 */ 2992 static void 2993 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2994 dtrace_specid_t which) 2995 { 2996 dtrace_speculation_t *spec; 2997 dtrace_speculation_state_t current, new = 0; 2998 dtrace_buffer_t *buf; 2999 3000 if (which == 0) 3001 return; 3002 3003 if (which > state->dts_nspeculations) { 3004 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3005 return; 3006 } 3007 3008 spec = &state->dts_speculations[which - 1]; 3009 buf = &spec->dtsp_buffer[cpu]; 3010 3011 do { 3012 current = spec->dtsp_state; 3013 3014 switch (current) { 3015 case DTRACESPEC_INACTIVE: 3016 case DTRACESPEC_COMMITTINGMANY: 3017 case DTRACESPEC_COMMITTING: 3018 case DTRACESPEC_DISCARDING: 3019 return; 3020 3021 case DTRACESPEC_ACTIVE: 3022 case DTRACESPEC_ACTIVEMANY: 3023 new = DTRACESPEC_DISCARDING; 3024 break; 3025 3026 case DTRACESPEC_ACTIVEONE: 3027 if (buf->dtb_offset != 0) { 3028 new = DTRACESPEC_INACTIVE; 3029 } else { 3030 new = DTRACESPEC_DISCARDING; 3031 } 3032 break; 3033 3034 default: 3035 ASSERT(0); 3036 } 3037 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3038 current, new) != current); 3039 3040 buf->dtb_offset = 0; 3041 buf->dtb_drops = 0; 3042 } 3043 3044 /* 3045 * Note: not called from probe context. This function is called 3046 * asynchronously from cross call context to clean any speculations that are 3047 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 3048 * transitioned back to the INACTIVE state until all CPUs have cleaned the 3049 * speculation. 3050 */ 3051 static void 3052 dtrace_speculation_clean_here(dtrace_state_t *state) 3053 { 3054 dtrace_icookie_t cookie; 3055 processorid_t cpu = curcpu; 3056 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 3057 dtrace_specid_t i; 3058 3059 cookie = dtrace_interrupt_disable(); 3060 3061 if (dest->dtb_tomax == NULL) { 3062 dtrace_interrupt_enable(cookie); 3063 return; 3064 } 3065 3066 for (i = 0; i < state->dts_nspeculations; i++) { 3067 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3068 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 3069 3070 if (src->dtb_tomax == NULL) 3071 continue; 3072 3073 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 3074 src->dtb_offset = 0; 3075 continue; 3076 } 3077 3078 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 3079 continue; 3080 3081 if (src->dtb_offset == 0) 3082 continue; 3083 3084 dtrace_speculation_commit(state, cpu, i + 1); 3085 } 3086 3087 dtrace_interrupt_enable(cookie); 3088 } 3089 3090 /* 3091 * Note: not called from probe context. This function is called 3092 * asynchronously (and at a regular interval) to clean any speculations that 3093 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 3094 * is work to be done, it cross calls all CPUs to perform that work; 3095 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 3096 * INACTIVE state until they have been cleaned by all CPUs. 3097 */ 3098 static void 3099 dtrace_speculation_clean(dtrace_state_t *state) 3100 { 3101 int work = 0, rv; 3102 dtrace_specid_t i; 3103 3104 for (i = 0; i < state->dts_nspeculations; i++) { 3105 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3106 3107 ASSERT(!spec->dtsp_cleaning); 3108 3109 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 3110 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 3111 continue; 3112 3113 work++; 3114 spec->dtsp_cleaning = 1; 3115 } 3116 3117 if (!work) 3118 return; 3119 3120 dtrace_xcall(DTRACE_CPUALL, 3121 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 3122 3123 /* 3124 * We now know that all CPUs have committed or discarded their 3125 * speculation buffers, as appropriate. We can now set the state 3126 * to inactive. 3127 */ 3128 for (i = 0; i < state->dts_nspeculations; i++) { 3129 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3130 dtrace_speculation_state_t current, new; 3131 3132 if (!spec->dtsp_cleaning) 3133 continue; 3134 3135 current = spec->dtsp_state; 3136 ASSERT(current == DTRACESPEC_DISCARDING || 3137 current == DTRACESPEC_COMMITTINGMANY); 3138 3139 new = DTRACESPEC_INACTIVE; 3140 3141 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 3142 ASSERT(rv == current); 3143 spec->dtsp_cleaning = 0; 3144 } 3145 } 3146 3147 /* 3148 * Called as part of a speculate() to get the speculative buffer associated 3149 * with a given speculation. Returns NULL if the specified speculation is not 3150 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 3151 * the active CPU is not the specified CPU -- the speculation will be 3152 * atomically transitioned into the ACTIVEMANY state. 3153 */ 3154 static dtrace_buffer_t * 3155 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 3156 dtrace_specid_t which) 3157 { 3158 dtrace_speculation_t *spec; 3159 dtrace_speculation_state_t current, new = 0; 3160 dtrace_buffer_t *buf; 3161 3162 if (which == 0) 3163 return (NULL); 3164 3165 if (which > state->dts_nspeculations) { 3166 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3167 return (NULL); 3168 } 3169 3170 spec = &state->dts_speculations[which - 1]; 3171 buf = &spec->dtsp_buffer[cpuid]; 3172 3173 do { 3174 current = spec->dtsp_state; 3175 3176 switch (current) { 3177 case DTRACESPEC_INACTIVE: 3178 case DTRACESPEC_COMMITTINGMANY: 3179 case DTRACESPEC_DISCARDING: 3180 return (NULL); 3181 3182 case DTRACESPEC_COMMITTING: 3183 ASSERT(buf->dtb_offset == 0); 3184 return (NULL); 3185 3186 case DTRACESPEC_ACTIVEONE: 3187 /* 3188 * This speculation is currently active on one CPU. 3189 * Check the offset in the buffer; if it's non-zero, 3190 * that CPU must be us (and we leave the state alone). 3191 * If it's zero, assume that we're starting on a new 3192 * CPU -- and change the state to indicate that the 3193 * speculation is active on more than one CPU. 3194 */ 3195 if (buf->dtb_offset != 0) 3196 return (buf); 3197 3198 new = DTRACESPEC_ACTIVEMANY; 3199 break; 3200 3201 case DTRACESPEC_ACTIVEMANY: 3202 return (buf); 3203 3204 case DTRACESPEC_ACTIVE: 3205 new = DTRACESPEC_ACTIVEONE; 3206 break; 3207 3208 default: 3209 ASSERT(0); 3210 } 3211 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3212 current, new) != current); 3213 3214 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 3215 return (buf); 3216 } 3217 3218 /* 3219 * Return a string. In the event that the user lacks the privilege to access 3220 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3221 * don't fail access checking. 3222 * 3223 * dtrace_dif_variable() uses this routine as a helper for various 3224 * builtin values such as 'execname' and 'probefunc.' 3225 */ 3226 uintptr_t 3227 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 3228 dtrace_mstate_t *mstate) 3229 { 3230 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3231 uintptr_t ret; 3232 size_t strsz; 3233 3234 /* 3235 * The easy case: this probe is allowed to read all of memory, so 3236 * we can just return this as a vanilla pointer. 3237 */ 3238 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 3239 return (addr); 3240 3241 /* 3242 * This is the tougher case: we copy the string in question from 3243 * kernel memory into scratch memory and return it that way: this 3244 * ensures that we won't trip up when access checking tests the 3245 * BYREF return value. 3246 */ 3247 strsz = dtrace_strlen((char *)addr, size) + 1; 3248 3249 if (mstate->dtms_scratch_ptr + strsz > 3250 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3251 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3252 return (0); 3253 } 3254 3255 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3256 strsz); 3257 ret = mstate->dtms_scratch_ptr; 3258 mstate->dtms_scratch_ptr += strsz; 3259 return (ret); 3260 } 3261 3262 /* 3263 * Return a string from a memoy address which is known to have one or 3264 * more concatenated, individually zero terminated, sub-strings. 3265 * In the event that the user lacks the privilege to access 3266 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3267 * don't fail access checking. 3268 * 3269 * dtrace_dif_variable() uses this routine as a helper for various 3270 * builtin values such as 'execargs'. 3271 */ 3272 static uintptr_t 3273 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 3274 dtrace_mstate_t *mstate) 3275 { 3276 char *p; 3277 size_t i; 3278 uintptr_t ret; 3279 3280 if (mstate->dtms_scratch_ptr + strsz > 3281 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3282 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3283 return (0); 3284 } 3285 3286 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3287 strsz); 3288 3289 /* Replace sub-string termination characters with a space. */ 3290 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 3291 p++, i++) 3292 if (*p == '\0') 3293 *p = ' '; 3294 3295 ret = mstate->dtms_scratch_ptr; 3296 mstate->dtms_scratch_ptr += strsz; 3297 return (ret); 3298 } 3299 3300 /* 3301 * This function implements the DIF emulator's variable lookups. The emulator 3302 * passes a reserved variable identifier and optional built-in array index. 3303 */ 3304 static uint64_t 3305 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 3306 uint64_t ndx) 3307 { 3308 /* 3309 * If we're accessing one of the uncached arguments, we'll turn this 3310 * into a reference in the args array. 3311 */ 3312 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 3313 ndx = v - DIF_VAR_ARG0; 3314 v = DIF_VAR_ARGS; 3315 } 3316 3317 switch (v) { 3318 case DIF_VAR_ARGS: 3319 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 3320 if (ndx >= sizeof (mstate->dtms_arg) / 3321 sizeof (mstate->dtms_arg[0])) { 3322 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3323 dtrace_provider_t *pv; 3324 uint64_t val; 3325 3326 pv = mstate->dtms_probe->dtpr_provider; 3327 if (pv->dtpv_pops.dtps_getargval != NULL) 3328 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 3329 mstate->dtms_probe->dtpr_id, 3330 mstate->dtms_probe->dtpr_arg, ndx, aframes); 3331 else 3332 val = dtrace_getarg(ndx, aframes); 3333 3334 /* 3335 * This is regrettably required to keep the compiler 3336 * from tail-optimizing the call to dtrace_getarg(). 3337 * The condition always evaluates to true, but the 3338 * compiler has no way of figuring that out a priori. 3339 * (None of this would be necessary if the compiler 3340 * could be relied upon to _always_ tail-optimize 3341 * the call to dtrace_getarg() -- but it can't.) 3342 */ 3343 if (mstate->dtms_probe != NULL) 3344 return (val); 3345 3346 ASSERT(0); 3347 } 3348 3349 return (mstate->dtms_arg[ndx]); 3350 3351 #ifdef illumos 3352 case DIF_VAR_UREGS: { 3353 klwp_t *lwp; 3354 3355 if (!dtrace_priv_proc(state)) 3356 return (0); 3357 3358 if ((lwp = curthread->t_lwp) == NULL) { 3359 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3360 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 3361 return (0); 3362 } 3363 3364 return (dtrace_getreg(lwp->lwp_regs, ndx)); 3365 return (0); 3366 } 3367 #else 3368 case DIF_VAR_UREGS: { 3369 struct trapframe *tframe; 3370 3371 if (!dtrace_priv_proc(state)) 3372 return (0); 3373 3374 if ((tframe = curthread->td_frame) == NULL) { 3375 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3376 cpu_core[curcpu].cpuc_dtrace_illval = 0; 3377 return (0); 3378 } 3379 3380 return (dtrace_getreg(tframe, ndx)); 3381 } 3382 #endif 3383 3384 case DIF_VAR_CURTHREAD: 3385 if (!dtrace_priv_proc(state)) 3386 return (0); 3387 return ((uint64_t)(uintptr_t)curthread); 3388 3389 case DIF_VAR_TIMESTAMP: 3390 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 3391 mstate->dtms_timestamp = dtrace_gethrtime(); 3392 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 3393 } 3394 return (mstate->dtms_timestamp); 3395 3396 case DIF_VAR_VTIMESTAMP: 3397 ASSERT(dtrace_vtime_references != 0); 3398 return (curthread->t_dtrace_vtime); 3399 3400 case DIF_VAR_WALLTIMESTAMP: 3401 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 3402 mstate->dtms_walltimestamp = dtrace_gethrestime(); 3403 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 3404 } 3405 return (mstate->dtms_walltimestamp); 3406 3407 #ifdef illumos 3408 case DIF_VAR_IPL: 3409 if (!dtrace_priv_kernel(state)) 3410 return (0); 3411 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 3412 mstate->dtms_ipl = dtrace_getipl(); 3413 mstate->dtms_present |= DTRACE_MSTATE_IPL; 3414 } 3415 return (mstate->dtms_ipl); 3416 #endif 3417 3418 case DIF_VAR_EPID: 3419 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 3420 return (mstate->dtms_epid); 3421 3422 case DIF_VAR_ID: 3423 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3424 return (mstate->dtms_probe->dtpr_id); 3425 3426 case DIF_VAR_STACKDEPTH: 3427 if (!dtrace_priv_kernel(state)) 3428 return (0); 3429 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 3430 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3431 3432 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 3433 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3434 } 3435 return (mstate->dtms_stackdepth); 3436 3437 case DIF_VAR_USTACKDEPTH: 3438 if (!dtrace_priv_proc(state)) 3439 return (0); 3440 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3441 /* 3442 * See comment in DIF_VAR_PID. 3443 */ 3444 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3445 CPU_ON_INTR(CPU)) { 3446 mstate->dtms_ustackdepth = 0; 3447 } else { 3448 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3449 mstate->dtms_ustackdepth = 3450 dtrace_getustackdepth(); 3451 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3452 } 3453 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3454 } 3455 return (mstate->dtms_ustackdepth); 3456 3457 case DIF_VAR_CALLER: 3458 if (!dtrace_priv_kernel(state)) 3459 return (0); 3460 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3461 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3462 3463 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3464 /* 3465 * If this is an unanchored probe, we are 3466 * required to go through the slow path: 3467 * dtrace_caller() only guarantees correct 3468 * results for anchored probes. 3469 */ 3470 pc_t caller[2] = {0, 0}; 3471 3472 dtrace_getpcstack(caller, 2, aframes, 3473 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3474 mstate->dtms_caller = caller[1]; 3475 } else if ((mstate->dtms_caller = 3476 dtrace_caller(aframes)) == -1) { 3477 /* 3478 * We have failed to do this the quick way; 3479 * we must resort to the slower approach of 3480 * calling dtrace_getpcstack(). 3481 */ 3482 pc_t caller = 0; 3483 3484 dtrace_getpcstack(&caller, 1, aframes, NULL); 3485 mstate->dtms_caller = caller; 3486 } 3487 3488 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3489 } 3490 return (mstate->dtms_caller); 3491 3492 case DIF_VAR_UCALLER: 3493 if (!dtrace_priv_proc(state)) 3494 return (0); 3495 3496 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3497 uint64_t ustack[3]; 3498 3499 /* 3500 * dtrace_getupcstack() fills in the first uint64_t 3501 * with the current PID. The second uint64_t will 3502 * be the program counter at user-level. The third 3503 * uint64_t will contain the caller, which is what 3504 * we're after. 3505 */ 3506 ustack[2] = 0; 3507 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3508 dtrace_getupcstack(ustack, 3); 3509 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3510 mstate->dtms_ucaller = ustack[2]; 3511 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3512 } 3513 3514 return (mstate->dtms_ucaller); 3515 3516 case DIF_VAR_PROBEPROV: 3517 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3518 return (dtrace_dif_varstr( 3519 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3520 state, mstate)); 3521 3522 case DIF_VAR_PROBEMOD: 3523 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3524 return (dtrace_dif_varstr( 3525 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3526 state, mstate)); 3527 3528 case DIF_VAR_PROBEFUNC: 3529 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3530 return (dtrace_dif_varstr( 3531 (uintptr_t)mstate->dtms_probe->dtpr_func, 3532 state, mstate)); 3533 3534 case DIF_VAR_PROBENAME: 3535 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3536 return (dtrace_dif_varstr( 3537 (uintptr_t)mstate->dtms_probe->dtpr_name, 3538 state, mstate)); 3539 3540 case DIF_VAR_PID: 3541 if (!dtrace_priv_proc(state)) 3542 return (0); 3543 3544 #ifdef illumos 3545 /* 3546 * Note that we are assuming that an unanchored probe is 3547 * always due to a high-level interrupt. (And we're assuming 3548 * that there is only a single high level interrupt.) 3549 */ 3550 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3551 return (pid0.pid_id); 3552 3553 /* 3554 * It is always safe to dereference one's own t_procp pointer: 3555 * it always points to a valid, allocated proc structure. 3556 * Further, it is always safe to dereference the p_pidp member 3557 * of one's own proc structure. (These are truisms becuase 3558 * threads and processes don't clean up their own state -- 3559 * they leave that task to whomever reaps them.) 3560 */ 3561 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3562 #else 3563 return ((uint64_t)curproc->p_pid); 3564 #endif 3565 3566 case DIF_VAR_PPID: 3567 if (!dtrace_priv_proc(state)) 3568 return (0); 3569 3570 #ifdef illumos 3571 /* 3572 * See comment in DIF_VAR_PID. 3573 */ 3574 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3575 return (pid0.pid_id); 3576 3577 /* 3578 * It is always safe to dereference one's own t_procp pointer: 3579 * it always points to a valid, allocated proc structure. 3580 * (This is true because threads don't clean up their own 3581 * state -- they leave that task to whomever reaps them.) 3582 */ 3583 return ((uint64_t)curthread->t_procp->p_ppid); 3584 #else 3585 if (curproc->p_pid == proc0.p_pid) 3586 return (curproc->p_pid); 3587 else 3588 return (curproc->p_pptr->p_pid); 3589 #endif 3590 3591 case DIF_VAR_TID: 3592 #ifdef illumos 3593 /* 3594 * See comment in DIF_VAR_PID. 3595 */ 3596 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3597 return (0); 3598 #endif 3599 3600 return ((uint64_t)curthread->t_tid); 3601 3602 case DIF_VAR_EXECARGS: { 3603 struct pargs *p_args = curthread->td_proc->p_args; 3604 3605 if (p_args == NULL) 3606 return(0); 3607 3608 return (dtrace_dif_varstrz( 3609 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3610 } 3611 3612 case DIF_VAR_EXECNAME: 3613 #ifdef illumos 3614 if (!dtrace_priv_proc(state)) 3615 return (0); 3616 3617 /* 3618 * See comment in DIF_VAR_PID. 3619 */ 3620 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3621 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3622 3623 /* 3624 * It is always safe to dereference one's own t_procp pointer: 3625 * it always points to a valid, allocated proc structure. 3626 * (This is true because threads don't clean up their own 3627 * state -- they leave that task to whomever reaps them.) 3628 */ 3629 return (dtrace_dif_varstr( 3630 (uintptr_t)curthread->t_procp->p_user.u_comm, 3631 state, mstate)); 3632 #else 3633 return (dtrace_dif_varstr( 3634 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3635 #endif 3636 3637 case DIF_VAR_ZONENAME: 3638 #ifdef illumos 3639 if (!dtrace_priv_proc(state)) 3640 return (0); 3641 3642 /* 3643 * See comment in DIF_VAR_PID. 3644 */ 3645 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3646 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3647 3648 /* 3649 * It is always safe to dereference one's own t_procp pointer: 3650 * it always points to a valid, allocated proc structure. 3651 * (This is true because threads don't clean up their own 3652 * state -- they leave that task to whomever reaps them.) 3653 */ 3654 return (dtrace_dif_varstr( 3655 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3656 state, mstate)); 3657 #elif defined(__FreeBSD__) 3658 /* 3659 * On FreeBSD, we introduce compatibility to zonename by falling through 3660 * into jailname. 3661 */ 3662 case DIF_VAR_JAILNAME: 3663 if (!dtrace_priv_kernel(state)) 3664 return (0); 3665 3666 return (dtrace_dif_varstr( 3667 (uintptr_t)curthread->td_ucred->cr_prison->pr_name, 3668 state, mstate)); 3669 3670 case DIF_VAR_JID: 3671 if (!dtrace_priv_kernel(state)) 3672 return (0); 3673 3674 return ((uint64_t)curthread->td_ucred->cr_prison->pr_id); 3675 #else 3676 return (0); 3677 #endif 3678 3679 case DIF_VAR_UID: 3680 if (!dtrace_priv_proc(state)) 3681 return (0); 3682 3683 #ifdef illumos 3684 /* 3685 * See comment in DIF_VAR_PID. 3686 */ 3687 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3688 return ((uint64_t)p0.p_cred->cr_uid); 3689 3690 /* 3691 * It is always safe to dereference one's own t_procp pointer: 3692 * it always points to a valid, allocated proc structure. 3693 * (This is true because threads don't clean up their own 3694 * state -- they leave that task to whomever reaps them.) 3695 * 3696 * Additionally, it is safe to dereference one's own process 3697 * credential, since this is never NULL after process birth. 3698 */ 3699 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3700 #else 3701 return ((uint64_t)curthread->td_ucred->cr_uid); 3702 #endif 3703 3704 case DIF_VAR_GID: 3705 if (!dtrace_priv_proc(state)) 3706 return (0); 3707 3708 #ifdef illumos 3709 /* 3710 * See comment in DIF_VAR_PID. 3711 */ 3712 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3713 return ((uint64_t)p0.p_cred->cr_gid); 3714 3715 /* 3716 * It is always safe to dereference one's own t_procp pointer: 3717 * it always points to a valid, allocated proc structure. 3718 * (This is true because threads don't clean up their own 3719 * state -- they leave that task to whomever reaps them.) 3720 * 3721 * Additionally, it is safe to dereference one's own process 3722 * credential, since this is never NULL after process birth. 3723 */ 3724 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3725 #else 3726 return ((uint64_t)curthread->td_ucred->cr_gid); 3727 #endif 3728 3729 case DIF_VAR_ERRNO: { 3730 #ifdef illumos 3731 klwp_t *lwp; 3732 if (!dtrace_priv_proc(state)) 3733 return (0); 3734 3735 /* 3736 * See comment in DIF_VAR_PID. 3737 */ 3738 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3739 return (0); 3740 3741 /* 3742 * It is always safe to dereference one's own t_lwp pointer in 3743 * the event that this pointer is non-NULL. (This is true 3744 * because threads and lwps don't clean up their own state -- 3745 * they leave that task to whomever reaps them.) 3746 */ 3747 if ((lwp = curthread->t_lwp) == NULL) 3748 return (0); 3749 3750 return ((uint64_t)lwp->lwp_errno); 3751 #else 3752 return (curthread->td_errno); 3753 #endif 3754 } 3755 #ifndef illumos 3756 case DIF_VAR_CPU: { 3757 return curcpu; 3758 } 3759 #endif 3760 default: 3761 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3762 return (0); 3763 } 3764 } 3765 3766 3767 typedef enum dtrace_json_state { 3768 DTRACE_JSON_REST = 1, 3769 DTRACE_JSON_OBJECT, 3770 DTRACE_JSON_STRING, 3771 DTRACE_JSON_STRING_ESCAPE, 3772 DTRACE_JSON_STRING_ESCAPE_UNICODE, 3773 DTRACE_JSON_COLON, 3774 DTRACE_JSON_COMMA, 3775 DTRACE_JSON_VALUE, 3776 DTRACE_JSON_IDENTIFIER, 3777 DTRACE_JSON_NUMBER, 3778 DTRACE_JSON_NUMBER_FRAC, 3779 DTRACE_JSON_NUMBER_EXP, 3780 DTRACE_JSON_COLLECT_OBJECT 3781 } dtrace_json_state_t; 3782 3783 /* 3784 * This function possesses just enough knowledge about JSON to extract a single 3785 * value from a JSON string and store it in the scratch buffer. It is able 3786 * to extract nested object values, and members of arrays by index. 3787 * 3788 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to 3789 * be looked up as we descend into the object tree. e.g. 3790 * 3791 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL 3792 * with nelems = 5. 3793 * 3794 * The run time of this function must be bounded above by strsize to limit the 3795 * amount of work done in probe context. As such, it is implemented as a 3796 * simple state machine, reading one character at a time using safe loads 3797 * until we find the requested element, hit a parsing error or run off the 3798 * end of the object or string. 3799 * 3800 * As there is no way for a subroutine to return an error without interrupting 3801 * clause execution, we simply return NULL in the event of a missing key or any 3802 * other error condition. Each NULL return in this function is commented with 3803 * the error condition it represents -- parsing or otherwise. 3804 * 3805 * The set of states for the state machine closely matches the JSON 3806 * specification (http://json.org/). Briefly: 3807 * 3808 * DTRACE_JSON_REST: 3809 * Skip whitespace until we find either a top-level Object, moving 3810 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. 3811 * 3812 * DTRACE_JSON_OBJECT: 3813 * Locate the next key String in an Object. Sets a flag to denote 3814 * the next String as a key string and moves to DTRACE_JSON_STRING. 3815 * 3816 * DTRACE_JSON_COLON: 3817 * Skip whitespace until we find the colon that separates key Strings 3818 * from their values. Once found, move to DTRACE_JSON_VALUE. 3819 * 3820 * DTRACE_JSON_VALUE: 3821 * Detects the type of the next value (String, Number, Identifier, Object 3822 * or Array) and routes to the states that process that type. Here we also 3823 * deal with the element selector list if we are requested to traverse down 3824 * into the object tree. 3825 * 3826 * DTRACE_JSON_COMMA: 3827 * Skip whitespace until we find the comma that separates key-value pairs 3828 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays 3829 * (similarly DTRACE_JSON_VALUE). All following literal value processing 3830 * states return to this state at the end of their value, unless otherwise 3831 * noted. 3832 * 3833 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: 3834 * Processes a Number literal from the JSON, including any exponent 3835 * component that may be present. Numbers are returned as strings, which 3836 * may be passed to strtoll() if an integer is required. 3837 * 3838 * DTRACE_JSON_IDENTIFIER: 3839 * Processes a "true", "false" or "null" literal in the JSON. 3840 * 3841 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, 3842 * DTRACE_JSON_STRING_ESCAPE_UNICODE: 3843 * Processes a String literal from the JSON, whether the String denotes 3844 * a key, a value or part of a larger Object. Handles all escape sequences 3845 * present in the specification, including four-digit unicode characters, 3846 * but merely includes the escape sequence without converting it to the 3847 * actual escaped character. If the String is flagged as a key, we 3848 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. 3849 * 3850 * DTRACE_JSON_COLLECT_OBJECT: 3851 * This state collects an entire Object (or Array), correctly handling 3852 * embedded strings. If the full element selector list matches this nested 3853 * object, we return the Object in full as a string. If not, we use this 3854 * state to skip to the next value at this level and continue processing. 3855 * 3856 * NOTE: This function uses various macros from strtolctype.h to manipulate 3857 * digit values, etc -- these have all been checked to ensure they make 3858 * no additional function calls. 3859 */ 3860 static char * 3861 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, 3862 char *dest) 3863 { 3864 dtrace_json_state_t state = DTRACE_JSON_REST; 3865 int64_t array_elem = INT64_MIN; 3866 int64_t array_pos = 0; 3867 uint8_t escape_unicount = 0; 3868 boolean_t string_is_key = B_FALSE; 3869 boolean_t collect_object = B_FALSE; 3870 boolean_t found_key = B_FALSE; 3871 boolean_t in_array = B_FALSE; 3872 uint32_t braces = 0, brackets = 0; 3873 char *elem = elemlist; 3874 char *dd = dest; 3875 uintptr_t cur; 3876 3877 for (cur = json; cur < json + size; cur++) { 3878 char cc = dtrace_load8(cur); 3879 if (cc == '\0') 3880 return (NULL); 3881 3882 switch (state) { 3883 case DTRACE_JSON_REST: 3884 if (isspace(cc)) 3885 break; 3886 3887 if (cc == '{') { 3888 state = DTRACE_JSON_OBJECT; 3889 break; 3890 } 3891 3892 if (cc == '[') { 3893 in_array = B_TRUE; 3894 array_pos = 0; 3895 array_elem = dtrace_strtoll(elem, 10, size); 3896 found_key = array_elem == 0 ? B_TRUE : B_FALSE; 3897 state = DTRACE_JSON_VALUE; 3898 break; 3899 } 3900 3901 /* 3902 * ERROR: expected to find a top-level object or array. 3903 */ 3904 return (NULL); 3905 case DTRACE_JSON_OBJECT: 3906 if (isspace(cc)) 3907 break; 3908 3909 if (cc == '"') { 3910 state = DTRACE_JSON_STRING; 3911 string_is_key = B_TRUE; 3912 break; 3913 } 3914 3915 /* 3916 * ERROR: either the object did not start with a key 3917 * string, or we've run off the end of the object 3918 * without finding the requested key. 3919 */ 3920 return (NULL); 3921 case DTRACE_JSON_STRING: 3922 if (cc == '\\') { 3923 *dd++ = '\\'; 3924 state = DTRACE_JSON_STRING_ESCAPE; 3925 break; 3926 } 3927 3928 if (cc == '"') { 3929 if (collect_object) { 3930 /* 3931 * We don't reset the dest here, as 3932 * the string is part of a larger 3933 * object being collected. 3934 */ 3935 *dd++ = cc; 3936 collect_object = B_FALSE; 3937 state = DTRACE_JSON_COLLECT_OBJECT; 3938 break; 3939 } 3940 *dd = '\0'; 3941 dd = dest; /* reset string buffer */ 3942 if (string_is_key) { 3943 if (dtrace_strncmp(dest, elem, 3944 size) == 0) 3945 found_key = B_TRUE; 3946 } else if (found_key) { 3947 if (nelems > 1) { 3948 /* 3949 * We expected an object, not 3950 * this string. 3951 */ 3952 return (NULL); 3953 } 3954 return (dest); 3955 } 3956 state = string_is_key ? DTRACE_JSON_COLON : 3957 DTRACE_JSON_COMMA; 3958 string_is_key = B_FALSE; 3959 break; 3960 } 3961 3962 *dd++ = cc; 3963 break; 3964 case DTRACE_JSON_STRING_ESCAPE: 3965 *dd++ = cc; 3966 if (cc == 'u') { 3967 escape_unicount = 0; 3968 state = DTRACE_JSON_STRING_ESCAPE_UNICODE; 3969 } else { 3970 state = DTRACE_JSON_STRING; 3971 } 3972 break; 3973 case DTRACE_JSON_STRING_ESCAPE_UNICODE: 3974 if (!isxdigit(cc)) { 3975 /* 3976 * ERROR: invalid unicode escape, expected 3977 * four valid hexidecimal digits. 3978 */ 3979 return (NULL); 3980 } 3981 3982 *dd++ = cc; 3983 if (++escape_unicount == 4) 3984 state = DTRACE_JSON_STRING; 3985 break; 3986 case DTRACE_JSON_COLON: 3987 if (isspace(cc)) 3988 break; 3989 3990 if (cc == ':') { 3991 state = DTRACE_JSON_VALUE; 3992 break; 3993 } 3994 3995 /* 3996 * ERROR: expected a colon. 3997 */ 3998 return (NULL); 3999 case DTRACE_JSON_COMMA: 4000 if (isspace(cc)) 4001 break; 4002 4003 if (cc == ',') { 4004 if (in_array) { 4005 state = DTRACE_JSON_VALUE; 4006 if (++array_pos == array_elem) 4007 found_key = B_TRUE; 4008 } else { 4009 state = DTRACE_JSON_OBJECT; 4010 } 4011 break; 4012 } 4013 4014 /* 4015 * ERROR: either we hit an unexpected character, or 4016 * we reached the end of the object or array without 4017 * finding the requested key. 4018 */ 4019 return (NULL); 4020 case DTRACE_JSON_IDENTIFIER: 4021 if (islower(cc)) { 4022 *dd++ = cc; 4023 break; 4024 } 4025 4026 *dd = '\0'; 4027 dd = dest; /* reset string buffer */ 4028 4029 if (dtrace_strncmp(dest, "true", 5) == 0 || 4030 dtrace_strncmp(dest, "false", 6) == 0 || 4031 dtrace_strncmp(dest, "null", 5) == 0) { 4032 if (found_key) { 4033 if (nelems > 1) { 4034 /* 4035 * ERROR: We expected an object, 4036 * not this identifier. 4037 */ 4038 return (NULL); 4039 } 4040 return (dest); 4041 } else { 4042 cur--; 4043 state = DTRACE_JSON_COMMA; 4044 break; 4045 } 4046 } 4047 4048 /* 4049 * ERROR: we did not recognise the identifier as one 4050 * of those in the JSON specification. 4051 */ 4052 return (NULL); 4053 case DTRACE_JSON_NUMBER: 4054 if (cc == '.') { 4055 *dd++ = cc; 4056 state = DTRACE_JSON_NUMBER_FRAC; 4057 break; 4058 } 4059 4060 if (cc == 'x' || cc == 'X') { 4061 /* 4062 * ERROR: specification explicitly excludes 4063 * hexidecimal or octal numbers. 4064 */ 4065 return (NULL); 4066 } 4067 4068 /* FALLTHRU */ 4069 case DTRACE_JSON_NUMBER_FRAC: 4070 if (cc == 'e' || cc == 'E') { 4071 *dd++ = cc; 4072 state = DTRACE_JSON_NUMBER_EXP; 4073 break; 4074 } 4075 4076 if (cc == '+' || cc == '-') { 4077 /* 4078 * ERROR: expect sign as part of exponent only. 4079 */ 4080 return (NULL); 4081 } 4082 /* FALLTHRU */ 4083 case DTRACE_JSON_NUMBER_EXP: 4084 if (isdigit(cc) || cc == '+' || cc == '-') { 4085 *dd++ = cc; 4086 break; 4087 } 4088 4089 *dd = '\0'; 4090 dd = dest; /* reset string buffer */ 4091 if (found_key) { 4092 if (nelems > 1) { 4093 /* 4094 * ERROR: We expected an object, not 4095 * this number. 4096 */ 4097 return (NULL); 4098 } 4099 return (dest); 4100 } 4101 4102 cur--; 4103 state = DTRACE_JSON_COMMA; 4104 break; 4105 case DTRACE_JSON_VALUE: 4106 if (isspace(cc)) 4107 break; 4108 4109 if (cc == '{' || cc == '[') { 4110 if (nelems > 1 && found_key) { 4111 in_array = cc == '[' ? B_TRUE : B_FALSE; 4112 /* 4113 * If our element selector directs us 4114 * to descend into this nested object, 4115 * then move to the next selector 4116 * element in the list and restart the 4117 * state machine. 4118 */ 4119 while (*elem != '\0') 4120 elem++; 4121 elem++; /* skip the inter-element NUL */ 4122 nelems--; 4123 dd = dest; 4124 if (in_array) { 4125 state = DTRACE_JSON_VALUE; 4126 array_pos = 0; 4127 array_elem = dtrace_strtoll( 4128 elem, 10, size); 4129 found_key = array_elem == 0 ? 4130 B_TRUE : B_FALSE; 4131 } else { 4132 found_key = B_FALSE; 4133 state = DTRACE_JSON_OBJECT; 4134 } 4135 break; 4136 } 4137 4138 /* 4139 * Otherwise, we wish to either skip this 4140 * nested object or return it in full. 4141 */ 4142 if (cc == '[') 4143 brackets = 1; 4144 else 4145 braces = 1; 4146 *dd++ = cc; 4147 state = DTRACE_JSON_COLLECT_OBJECT; 4148 break; 4149 } 4150 4151 if (cc == '"') { 4152 state = DTRACE_JSON_STRING; 4153 break; 4154 } 4155 4156 if (islower(cc)) { 4157 /* 4158 * Here we deal with true, false and null. 4159 */ 4160 *dd++ = cc; 4161 state = DTRACE_JSON_IDENTIFIER; 4162 break; 4163 } 4164 4165 if (cc == '-' || isdigit(cc)) { 4166 *dd++ = cc; 4167 state = DTRACE_JSON_NUMBER; 4168 break; 4169 } 4170 4171 /* 4172 * ERROR: unexpected character at start of value. 4173 */ 4174 return (NULL); 4175 case DTRACE_JSON_COLLECT_OBJECT: 4176 if (cc == '\0') 4177 /* 4178 * ERROR: unexpected end of input. 4179 */ 4180 return (NULL); 4181 4182 *dd++ = cc; 4183 if (cc == '"') { 4184 collect_object = B_TRUE; 4185 state = DTRACE_JSON_STRING; 4186 break; 4187 } 4188 4189 if (cc == ']') { 4190 if (brackets-- == 0) { 4191 /* 4192 * ERROR: unbalanced brackets. 4193 */ 4194 return (NULL); 4195 } 4196 } else if (cc == '}') { 4197 if (braces-- == 0) { 4198 /* 4199 * ERROR: unbalanced braces. 4200 */ 4201 return (NULL); 4202 } 4203 } else if (cc == '{') { 4204 braces++; 4205 } else if (cc == '[') { 4206 brackets++; 4207 } 4208 4209 if (brackets == 0 && braces == 0) { 4210 if (found_key) { 4211 *dd = '\0'; 4212 return (dest); 4213 } 4214 dd = dest; /* reset string buffer */ 4215 state = DTRACE_JSON_COMMA; 4216 } 4217 break; 4218 } 4219 } 4220 return (NULL); 4221 } 4222 4223 /* 4224 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 4225 * Notice that we don't bother validating the proper number of arguments or 4226 * their types in the tuple stack. This isn't needed because all argument 4227 * interpretation is safe because of our load safety -- the worst that can 4228 * happen is that a bogus program can obtain bogus results. 4229 */ 4230 static void 4231 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 4232 dtrace_key_t *tupregs, int nargs, 4233 dtrace_mstate_t *mstate, dtrace_state_t *state) 4234 { 4235 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4236 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4237 dtrace_vstate_t *vstate = &state->dts_vstate; 4238 4239 #ifdef illumos 4240 union { 4241 mutex_impl_t mi; 4242 uint64_t mx; 4243 } m; 4244 4245 union { 4246 krwlock_t ri; 4247 uintptr_t rw; 4248 } r; 4249 #else 4250 struct thread *lowner; 4251 union { 4252 struct lock_object *li; 4253 uintptr_t lx; 4254 } l; 4255 #endif 4256 4257 switch (subr) { 4258 case DIF_SUBR_RAND: 4259 regs[rd] = dtrace_xoroshiro128_plus_next( 4260 state->dts_rstate[curcpu]); 4261 break; 4262 4263 #ifdef illumos 4264 case DIF_SUBR_MUTEX_OWNED: 4265 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4266 mstate, vstate)) { 4267 regs[rd] = 0; 4268 break; 4269 } 4270 4271 m.mx = dtrace_load64(tupregs[0].dttk_value); 4272 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 4273 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 4274 else 4275 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 4276 break; 4277 4278 case DIF_SUBR_MUTEX_OWNER: 4279 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4280 mstate, vstate)) { 4281 regs[rd] = 0; 4282 break; 4283 } 4284 4285 m.mx = dtrace_load64(tupregs[0].dttk_value); 4286 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 4287 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 4288 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 4289 else 4290 regs[rd] = 0; 4291 break; 4292 4293 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4294 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4295 mstate, vstate)) { 4296 regs[rd] = 0; 4297 break; 4298 } 4299 4300 m.mx = dtrace_load64(tupregs[0].dttk_value); 4301 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 4302 break; 4303 4304 case DIF_SUBR_MUTEX_TYPE_SPIN: 4305 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4306 mstate, vstate)) { 4307 regs[rd] = 0; 4308 break; 4309 } 4310 4311 m.mx = dtrace_load64(tupregs[0].dttk_value); 4312 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 4313 break; 4314 4315 case DIF_SUBR_RW_READ_HELD: { 4316 uintptr_t tmp; 4317 4318 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4319 mstate, vstate)) { 4320 regs[rd] = 0; 4321 break; 4322 } 4323 4324 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4325 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 4326 break; 4327 } 4328 4329 case DIF_SUBR_RW_WRITE_HELD: 4330 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4331 mstate, vstate)) { 4332 regs[rd] = 0; 4333 break; 4334 } 4335 4336 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4337 regs[rd] = _RW_WRITE_HELD(&r.ri); 4338 break; 4339 4340 case DIF_SUBR_RW_ISWRITER: 4341 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4342 mstate, vstate)) { 4343 regs[rd] = 0; 4344 break; 4345 } 4346 4347 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4348 regs[rd] = _RW_ISWRITER(&r.ri); 4349 break; 4350 4351 #else /* !illumos */ 4352 case DIF_SUBR_MUTEX_OWNED: 4353 if (!dtrace_canload(tupregs[0].dttk_value, 4354 sizeof (struct lock_object), mstate, vstate)) { 4355 regs[rd] = 0; 4356 break; 4357 } 4358 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4359 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4360 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4361 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4362 break; 4363 4364 case DIF_SUBR_MUTEX_OWNER: 4365 if (!dtrace_canload(tupregs[0].dttk_value, 4366 sizeof (struct lock_object), mstate, vstate)) { 4367 regs[rd] = 0; 4368 break; 4369 } 4370 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4371 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4372 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4373 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4374 regs[rd] = (uintptr_t)lowner; 4375 break; 4376 4377 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4378 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4379 mstate, vstate)) { 4380 regs[rd] = 0; 4381 break; 4382 } 4383 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4384 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4385 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SLEEPLOCK) != 0; 4386 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4387 break; 4388 4389 case DIF_SUBR_MUTEX_TYPE_SPIN: 4390 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4391 mstate, vstate)) { 4392 regs[rd] = 0; 4393 break; 4394 } 4395 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4396 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4397 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 4398 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4399 break; 4400 4401 case DIF_SUBR_RW_READ_HELD: 4402 case DIF_SUBR_SX_SHARED_HELD: 4403 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4404 mstate, vstate)) { 4405 regs[rd] = 0; 4406 break; 4407 } 4408 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4409 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4410 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4411 lowner == NULL; 4412 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4413 break; 4414 4415 case DIF_SUBR_RW_WRITE_HELD: 4416 case DIF_SUBR_SX_EXCLUSIVE_HELD: 4417 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4418 mstate, vstate)) { 4419 regs[rd] = 0; 4420 break; 4421 } 4422 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4423 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4424 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4425 lowner != NULL; 4426 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4427 break; 4428 4429 case DIF_SUBR_RW_ISWRITER: 4430 case DIF_SUBR_SX_ISEXCLUSIVE: 4431 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4432 mstate, vstate)) { 4433 regs[rd] = 0; 4434 break; 4435 } 4436 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4437 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4438 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4439 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4440 regs[rd] = (lowner == curthread); 4441 break; 4442 #endif /* illumos */ 4443 4444 case DIF_SUBR_BCOPY: { 4445 /* 4446 * We need to be sure that the destination is in the scratch 4447 * region -- no other region is allowed. 4448 */ 4449 uintptr_t src = tupregs[0].dttk_value; 4450 uintptr_t dest = tupregs[1].dttk_value; 4451 size_t size = tupregs[2].dttk_value; 4452 4453 if (!dtrace_inscratch(dest, size, mstate)) { 4454 *flags |= CPU_DTRACE_BADADDR; 4455 *illval = regs[rd]; 4456 break; 4457 } 4458 4459 if (!dtrace_canload(src, size, mstate, vstate)) { 4460 regs[rd] = 0; 4461 break; 4462 } 4463 4464 dtrace_bcopy((void *)src, (void *)dest, size); 4465 break; 4466 } 4467 4468 case DIF_SUBR_ALLOCA: 4469 case DIF_SUBR_COPYIN: { 4470 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4471 uint64_t size = 4472 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 4473 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 4474 4475 /* 4476 * This action doesn't require any credential checks since 4477 * probes will not activate in user contexts to which the 4478 * enabling user does not have permissions. 4479 */ 4480 4481 /* 4482 * Rounding up the user allocation size could have overflowed 4483 * a large, bogus allocation (like -1ULL) to 0. 4484 */ 4485 if (scratch_size < size || 4486 !DTRACE_INSCRATCH(mstate, scratch_size)) { 4487 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4488 regs[rd] = 0; 4489 break; 4490 } 4491 4492 if (subr == DIF_SUBR_COPYIN) { 4493 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4494 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4495 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4496 } 4497 4498 mstate->dtms_scratch_ptr += scratch_size; 4499 regs[rd] = dest; 4500 break; 4501 } 4502 4503 case DIF_SUBR_COPYINTO: { 4504 uint64_t size = tupregs[1].dttk_value; 4505 uintptr_t dest = tupregs[2].dttk_value; 4506 4507 /* 4508 * This action doesn't require any credential checks since 4509 * probes will not activate in user contexts to which the 4510 * enabling user does not have permissions. 4511 */ 4512 if (!dtrace_inscratch(dest, size, mstate)) { 4513 *flags |= CPU_DTRACE_BADADDR; 4514 *illval = regs[rd]; 4515 break; 4516 } 4517 4518 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4519 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4520 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4521 break; 4522 } 4523 4524 case DIF_SUBR_COPYINSTR: { 4525 uintptr_t dest = mstate->dtms_scratch_ptr; 4526 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4527 4528 if (nargs > 1 && tupregs[1].dttk_value < size) 4529 size = tupregs[1].dttk_value + 1; 4530 4531 /* 4532 * This action doesn't require any credential checks since 4533 * probes will not activate in user contexts to which the 4534 * enabling user does not have permissions. 4535 */ 4536 if (!DTRACE_INSCRATCH(mstate, size)) { 4537 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4538 regs[rd] = 0; 4539 break; 4540 } 4541 4542 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4543 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 4544 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4545 4546 ((char *)dest)[size - 1] = '\0'; 4547 mstate->dtms_scratch_ptr += size; 4548 regs[rd] = dest; 4549 break; 4550 } 4551 4552 #ifdef illumos 4553 case DIF_SUBR_MSGSIZE: 4554 case DIF_SUBR_MSGDSIZE: { 4555 uintptr_t baddr = tupregs[0].dttk_value, daddr; 4556 uintptr_t wptr, rptr; 4557 size_t count = 0; 4558 int cont = 0; 4559 4560 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 4561 4562 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 4563 vstate)) { 4564 regs[rd] = 0; 4565 break; 4566 } 4567 4568 wptr = dtrace_loadptr(baddr + 4569 offsetof(mblk_t, b_wptr)); 4570 4571 rptr = dtrace_loadptr(baddr + 4572 offsetof(mblk_t, b_rptr)); 4573 4574 if (wptr < rptr) { 4575 *flags |= CPU_DTRACE_BADADDR; 4576 *illval = tupregs[0].dttk_value; 4577 break; 4578 } 4579 4580 daddr = dtrace_loadptr(baddr + 4581 offsetof(mblk_t, b_datap)); 4582 4583 baddr = dtrace_loadptr(baddr + 4584 offsetof(mblk_t, b_cont)); 4585 4586 /* 4587 * We want to prevent against denial-of-service here, 4588 * so we're only going to search the list for 4589 * dtrace_msgdsize_max mblks. 4590 */ 4591 if (cont++ > dtrace_msgdsize_max) { 4592 *flags |= CPU_DTRACE_ILLOP; 4593 break; 4594 } 4595 4596 if (subr == DIF_SUBR_MSGDSIZE) { 4597 if (dtrace_load8(daddr + 4598 offsetof(dblk_t, db_type)) != M_DATA) 4599 continue; 4600 } 4601 4602 count += wptr - rptr; 4603 } 4604 4605 if (!(*flags & CPU_DTRACE_FAULT)) 4606 regs[rd] = count; 4607 4608 break; 4609 } 4610 #endif 4611 4612 case DIF_SUBR_PROGENYOF: { 4613 pid_t pid = tupregs[0].dttk_value; 4614 proc_t *p; 4615 int rval = 0; 4616 4617 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4618 4619 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 4620 #ifdef illumos 4621 if (p->p_pidp->pid_id == pid) { 4622 #else 4623 if (p->p_pid == pid) { 4624 #endif 4625 rval = 1; 4626 break; 4627 } 4628 } 4629 4630 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4631 4632 regs[rd] = rval; 4633 break; 4634 } 4635 4636 case DIF_SUBR_SPECULATION: 4637 regs[rd] = dtrace_speculation(state); 4638 break; 4639 4640 case DIF_SUBR_COPYOUT: { 4641 uintptr_t kaddr = tupregs[0].dttk_value; 4642 uintptr_t uaddr = tupregs[1].dttk_value; 4643 uint64_t size = tupregs[2].dttk_value; 4644 4645 if (!dtrace_destructive_disallow && 4646 dtrace_priv_proc_control(state) && 4647 !dtrace_istoxic(kaddr, size) && 4648 dtrace_canload(kaddr, size, mstate, vstate)) { 4649 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4650 dtrace_copyout(kaddr, uaddr, size, flags); 4651 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4652 } 4653 break; 4654 } 4655 4656 case DIF_SUBR_COPYOUTSTR: { 4657 uintptr_t kaddr = tupregs[0].dttk_value; 4658 uintptr_t uaddr = tupregs[1].dttk_value; 4659 uint64_t size = tupregs[2].dttk_value; 4660 size_t lim; 4661 4662 if (!dtrace_destructive_disallow && 4663 dtrace_priv_proc_control(state) && 4664 !dtrace_istoxic(kaddr, size) && 4665 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) { 4666 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4667 dtrace_copyoutstr(kaddr, uaddr, lim, flags); 4668 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4669 } 4670 break; 4671 } 4672 4673 case DIF_SUBR_STRLEN: { 4674 size_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4675 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 4676 size_t lim; 4677 4678 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { 4679 regs[rd] = 0; 4680 break; 4681 } 4682 4683 regs[rd] = dtrace_strlen((char *)addr, lim); 4684 break; 4685 } 4686 4687 case DIF_SUBR_STRCHR: 4688 case DIF_SUBR_STRRCHR: { 4689 /* 4690 * We're going to iterate over the string looking for the 4691 * specified character. We will iterate until we have reached 4692 * the string length or we have found the character. If this 4693 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 4694 * of the specified character instead of the first. 4695 */ 4696 uintptr_t addr = tupregs[0].dttk_value; 4697 uintptr_t addr_limit; 4698 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4699 size_t lim; 4700 char c, target = (char)tupregs[1].dttk_value; 4701 4702 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { 4703 regs[rd] = 0; 4704 break; 4705 } 4706 addr_limit = addr + lim; 4707 4708 for (regs[rd] = 0; addr < addr_limit; addr++) { 4709 if ((c = dtrace_load8(addr)) == target) { 4710 regs[rd] = addr; 4711 4712 if (subr == DIF_SUBR_STRCHR) 4713 break; 4714 } 4715 4716 if (c == '\0') 4717 break; 4718 } 4719 break; 4720 } 4721 4722 case DIF_SUBR_STRSTR: 4723 case DIF_SUBR_INDEX: 4724 case DIF_SUBR_RINDEX: { 4725 /* 4726 * We're going to iterate over the string looking for the 4727 * specified string. We will iterate until we have reached 4728 * the string length or we have found the string. (Yes, this 4729 * is done in the most naive way possible -- but considering 4730 * that the string we're searching for is likely to be 4731 * relatively short, the complexity of Rabin-Karp or similar 4732 * hardly seems merited.) 4733 */ 4734 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 4735 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 4736 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4737 size_t len = dtrace_strlen(addr, size); 4738 size_t sublen = dtrace_strlen(substr, size); 4739 char *limit = addr + len, *orig = addr; 4740 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 4741 int inc = 1; 4742 4743 regs[rd] = notfound; 4744 4745 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 4746 regs[rd] = 0; 4747 break; 4748 } 4749 4750 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 4751 vstate)) { 4752 regs[rd] = 0; 4753 break; 4754 } 4755 4756 /* 4757 * strstr() and index()/rindex() have similar semantics if 4758 * both strings are the empty string: strstr() returns a 4759 * pointer to the (empty) string, and index() and rindex() 4760 * both return index 0 (regardless of any position argument). 4761 */ 4762 if (sublen == 0 && len == 0) { 4763 if (subr == DIF_SUBR_STRSTR) 4764 regs[rd] = (uintptr_t)addr; 4765 else 4766 regs[rd] = 0; 4767 break; 4768 } 4769 4770 if (subr != DIF_SUBR_STRSTR) { 4771 if (subr == DIF_SUBR_RINDEX) { 4772 limit = orig - 1; 4773 addr += len; 4774 inc = -1; 4775 } 4776 4777 /* 4778 * Both index() and rindex() take an optional position 4779 * argument that denotes the starting position. 4780 */ 4781 if (nargs == 3) { 4782 int64_t pos = (int64_t)tupregs[2].dttk_value; 4783 4784 /* 4785 * If the position argument to index() is 4786 * negative, Perl implicitly clamps it at 4787 * zero. This semantic is a little surprising 4788 * given the special meaning of negative 4789 * positions to similar Perl functions like 4790 * substr(), but it appears to reflect a 4791 * notion that index() can start from a 4792 * negative index and increment its way up to 4793 * the string. Given this notion, Perl's 4794 * rindex() is at least self-consistent in 4795 * that it implicitly clamps positions greater 4796 * than the string length to be the string 4797 * length. Where Perl completely loses 4798 * coherence, however, is when the specified 4799 * substring is the empty string (""). In 4800 * this case, even if the position is 4801 * negative, rindex() returns 0 -- and even if 4802 * the position is greater than the length, 4803 * index() returns the string length. These 4804 * semantics violate the notion that index() 4805 * should never return a value less than the 4806 * specified position and that rindex() should 4807 * never return a value greater than the 4808 * specified position. (One assumes that 4809 * these semantics are artifacts of Perl's 4810 * implementation and not the results of 4811 * deliberate design -- it beggars belief that 4812 * even Larry Wall could desire such oddness.) 4813 * While in the abstract one would wish for 4814 * consistent position semantics across 4815 * substr(), index() and rindex() -- or at the 4816 * very least self-consistent position 4817 * semantics for index() and rindex() -- we 4818 * instead opt to keep with the extant Perl 4819 * semantics, in all their broken glory. (Do 4820 * we have more desire to maintain Perl's 4821 * semantics than Perl does? Probably.) 4822 */ 4823 if (subr == DIF_SUBR_RINDEX) { 4824 if (pos < 0) { 4825 if (sublen == 0) 4826 regs[rd] = 0; 4827 break; 4828 } 4829 4830 if (pos > len) 4831 pos = len; 4832 } else { 4833 if (pos < 0) 4834 pos = 0; 4835 4836 if (pos >= len) { 4837 if (sublen == 0) 4838 regs[rd] = len; 4839 break; 4840 } 4841 } 4842 4843 addr = orig + pos; 4844 } 4845 } 4846 4847 for (regs[rd] = notfound; addr != limit; addr += inc) { 4848 if (dtrace_strncmp(addr, substr, sublen) == 0) { 4849 if (subr != DIF_SUBR_STRSTR) { 4850 /* 4851 * As D index() and rindex() are 4852 * modeled on Perl (and not on awk), 4853 * we return a zero-based (and not a 4854 * one-based) index. (For you Perl 4855 * weenies: no, we're not going to add 4856 * $[ -- and shouldn't you be at a con 4857 * or something?) 4858 */ 4859 regs[rd] = (uintptr_t)(addr - orig); 4860 break; 4861 } 4862 4863 ASSERT(subr == DIF_SUBR_STRSTR); 4864 regs[rd] = (uintptr_t)addr; 4865 break; 4866 } 4867 } 4868 4869 break; 4870 } 4871 4872 case DIF_SUBR_STRTOK: { 4873 uintptr_t addr = tupregs[0].dttk_value; 4874 uintptr_t tokaddr = tupregs[1].dttk_value; 4875 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4876 uintptr_t limit, toklimit; 4877 size_t clim; 4878 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 4879 char *dest = (char *)mstate->dtms_scratch_ptr; 4880 int i; 4881 4882 /* 4883 * Check both the token buffer and (later) the input buffer, 4884 * since both could be non-scratch addresses. 4885 */ 4886 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) { 4887 regs[rd] = 0; 4888 break; 4889 } 4890 toklimit = tokaddr + clim; 4891 4892 if (!DTRACE_INSCRATCH(mstate, size)) { 4893 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4894 regs[rd] = 0; 4895 break; 4896 } 4897 4898 if (addr == 0) { 4899 /* 4900 * If the address specified is NULL, we use our saved 4901 * strtok pointer from the mstate. Note that this 4902 * means that the saved strtok pointer is _only_ 4903 * valid within multiple enablings of the same probe -- 4904 * it behaves like an implicit clause-local variable. 4905 */ 4906 addr = mstate->dtms_strtok; 4907 limit = mstate->dtms_strtok_limit; 4908 } else { 4909 /* 4910 * If the user-specified address is non-NULL we must 4911 * access check it. This is the only time we have 4912 * a chance to do so, since this address may reside 4913 * in the string table of this clause-- future calls 4914 * (when we fetch addr from mstate->dtms_strtok) 4915 * would fail this access check. 4916 */ 4917 if (!dtrace_strcanload(addr, size, &clim, mstate, 4918 vstate)) { 4919 regs[rd] = 0; 4920 break; 4921 } 4922 limit = addr + clim; 4923 } 4924 4925 /* 4926 * First, zero the token map, and then process the token 4927 * string -- setting a bit in the map for every character 4928 * found in the token string. 4929 */ 4930 for (i = 0; i < sizeof (tokmap); i++) 4931 tokmap[i] = 0; 4932 4933 for (; tokaddr < toklimit; tokaddr++) { 4934 if ((c = dtrace_load8(tokaddr)) == '\0') 4935 break; 4936 4937 ASSERT((c >> 3) < sizeof (tokmap)); 4938 tokmap[c >> 3] |= (1 << (c & 0x7)); 4939 } 4940 4941 for (; addr < limit; addr++) { 4942 /* 4943 * We're looking for a character that is _not_ 4944 * contained in the token string. 4945 */ 4946 if ((c = dtrace_load8(addr)) == '\0') 4947 break; 4948 4949 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 4950 break; 4951 } 4952 4953 if (c == '\0') { 4954 /* 4955 * We reached the end of the string without finding 4956 * any character that was not in the token string. 4957 * We return NULL in this case, and we set the saved 4958 * address to NULL as well. 4959 */ 4960 regs[rd] = 0; 4961 mstate->dtms_strtok = 0; 4962 mstate->dtms_strtok_limit = 0; 4963 break; 4964 } 4965 4966 /* 4967 * From here on, we're copying into the destination string. 4968 */ 4969 for (i = 0; addr < limit && i < size - 1; addr++) { 4970 if ((c = dtrace_load8(addr)) == '\0') 4971 break; 4972 4973 if (tokmap[c >> 3] & (1 << (c & 0x7))) 4974 break; 4975 4976 ASSERT(i < size); 4977 dest[i++] = c; 4978 } 4979 4980 ASSERT(i < size); 4981 dest[i] = '\0'; 4982 regs[rd] = (uintptr_t)dest; 4983 mstate->dtms_scratch_ptr += size; 4984 mstate->dtms_strtok = addr; 4985 mstate->dtms_strtok_limit = limit; 4986 break; 4987 } 4988 4989 case DIF_SUBR_SUBSTR: { 4990 uintptr_t s = tupregs[0].dttk_value; 4991 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4992 char *d = (char *)mstate->dtms_scratch_ptr; 4993 int64_t index = (int64_t)tupregs[1].dttk_value; 4994 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4995 size_t len = dtrace_strlen((char *)s, size); 4996 int64_t i; 4997 4998 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4999 regs[rd] = 0; 5000 break; 5001 } 5002 5003 if (!DTRACE_INSCRATCH(mstate, size)) { 5004 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5005 regs[rd] = 0; 5006 break; 5007 } 5008 5009 if (nargs <= 2) 5010 remaining = (int64_t)size; 5011 5012 if (index < 0) { 5013 index += len; 5014 5015 if (index < 0 && index + remaining > 0) { 5016 remaining += index; 5017 index = 0; 5018 } 5019 } 5020 5021 if (index >= len || index < 0) { 5022 remaining = 0; 5023 } else if (remaining < 0) { 5024 remaining += len - index; 5025 } else if (index + remaining > size) { 5026 remaining = size - index; 5027 } 5028 5029 for (i = 0; i < remaining; i++) { 5030 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 5031 break; 5032 } 5033 5034 d[i] = '\0'; 5035 5036 mstate->dtms_scratch_ptr += size; 5037 regs[rd] = (uintptr_t)d; 5038 break; 5039 } 5040 5041 case DIF_SUBR_JSON: { 5042 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5043 uintptr_t json = tupregs[0].dttk_value; 5044 size_t jsonlen = dtrace_strlen((char *)json, size); 5045 uintptr_t elem = tupregs[1].dttk_value; 5046 size_t elemlen = dtrace_strlen((char *)elem, size); 5047 5048 char *dest = (char *)mstate->dtms_scratch_ptr; 5049 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; 5050 char *ee = elemlist; 5051 int nelems = 1; 5052 uintptr_t cur; 5053 5054 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || 5055 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { 5056 regs[rd] = 0; 5057 break; 5058 } 5059 5060 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { 5061 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5062 regs[rd] = 0; 5063 break; 5064 } 5065 5066 /* 5067 * Read the element selector and split it up into a packed list 5068 * of strings. 5069 */ 5070 for (cur = elem; cur < elem + elemlen; cur++) { 5071 char cc = dtrace_load8(cur); 5072 5073 if (cur == elem && cc == '[') { 5074 /* 5075 * If the first element selector key is 5076 * actually an array index then ignore the 5077 * bracket. 5078 */ 5079 continue; 5080 } 5081 5082 if (cc == ']') 5083 continue; 5084 5085 if (cc == '.' || cc == '[') { 5086 nelems++; 5087 cc = '\0'; 5088 } 5089 5090 *ee++ = cc; 5091 } 5092 *ee++ = '\0'; 5093 5094 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, 5095 nelems, dest)) != 0) 5096 mstate->dtms_scratch_ptr += jsonlen + 1; 5097 break; 5098 } 5099 5100 case DIF_SUBR_TOUPPER: 5101 case DIF_SUBR_TOLOWER: { 5102 uintptr_t s = tupregs[0].dttk_value; 5103 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5104 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5105 size_t len = dtrace_strlen((char *)s, size); 5106 char lower, upper, convert; 5107 int64_t i; 5108 5109 if (subr == DIF_SUBR_TOUPPER) { 5110 lower = 'a'; 5111 upper = 'z'; 5112 convert = 'A'; 5113 } else { 5114 lower = 'A'; 5115 upper = 'Z'; 5116 convert = 'a'; 5117 } 5118 5119 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 5120 regs[rd] = 0; 5121 break; 5122 } 5123 5124 if (!DTRACE_INSCRATCH(mstate, size)) { 5125 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5126 regs[rd] = 0; 5127 break; 5128 } 5129 5130 for (i = 0; i < size - 1; i++) { 5131 if ((c = dtrace_load8(s + i)) == '\0') 5132 break; 5133 5134 if (c >= lower && c <= upper) 5135 c = convert + (c - lower); 5136 5137 dest[i] = c; 5138 } 5139 5140 ASSERT(i < size); 5141 dest[i] = '\0'; 5142 regs[rd] = (uintptr_t)dest; 5143 mstate->dtms_scratch_ptr += size; 5144 break; 5145 } 5146 5147 #ifdef illumos 5148 case DIF_SUBR_GETMAJOR: 5149 #ifdef _LP64 5150 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 5151 #else 5152 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 5153 #endif 5154 break; 5155 5156 case DIF_SUBR_GETMINOR: 5157 #ifdef _LP64 5158 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 5159 #else 5160 regs[rd] = tupregs[0].dttk_value & MAXMIN; 5161 #endif 5162 break; 5163 5164 case DIF_SUBR_DDI_PATHNAME: { 5165 /* 5166 * This one is a galactic mess. We are going to roughly 5167 * emulate ddi_pathname(), but it's made more complicated 5168 * by the fact that we (a) want to include the minor name and 5169 * (b) must proceed iteratively instead of recursively. 5170 */ 5171 uintptr_t dest = mstate->dtms_scratch_ptr; 5172 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5173 char *start = (char *)dest, *end = start + size - 1; 5174 uintptr_t daddr = tupregs[0].dttk_value; 5175 int64_t minor = (int64_t)tupregs[1].dttk_value; 5176 char *s; 5177 int i, len, depth = 0; 5178 5179 /* 5180 * Due to all the pointer jumping we do and context we must 5181 * rely upon, we just mandate that the user must have kernel 5182 * read privileges to use this routine. 5183 */ 5184 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 5185 *flags |= CPU_DTRACE_KPRIV; 5186 *illval = daddr; 5187 regs[rd] = 0; 5188 } 5189 5190 if (!DTRACE_INSCRATCH(mstate, size)) { 5191 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5192 regs[rd] = 0; 5193 break; 5194 } 5195 5196 *end = '\0'; 5197 5198 /* 5199 * We want to have a name for the minor. In order to do this, 5200 * we need to walk the minor list from the devinfo. We want 5201 * to be sure that we don't infinitely walk a circular list, 5202 * so we check for circularity by sending a scout pointer 5203 * ahead two elements for every element that we iterate over; 5204 * if the list is circular, these will ultimately point to the 5205 * same element. You may recognize this little trick as the 5206 * answer to a stupid interview question -- one that always 5207 * seems to be asked by those who had to have it laboriously 5208 * explained to them, and who can't even concisely describe 5209 * the conditions under which one would be forced to resort to 5210 * this technique. Needless to say, those conditions are 5211 * found here -- and probably only here. Is this the only use 5212 * of this infamous trick in shipping, production code? If it 5213 * isn't, it probably should be... 5214 */ 5215 if (minor != -1) { 5216 uintptr_t maddr = dtrace_loadptr(daddr + 5217 offsetof(struct dev_info, devi_minor)); 5218 5219 uintptr_t next = offsetof(struct ddi_minor_data, next); 5220 uintptr_t name = offsetof(struct ddi_minor_data, 5221 d_minor) + offsetof(struct ddi_minor, name); 5222 uintptr_t dev = offsetof(struct ddi_minor_data, 5223 d_minor) + offsetof(struct ddi_minor, dev); 5224 uintptr_t scout; 5225 5226 if (maddr != NULL) 5227 scout = dtrace_loadptr(maddr + next); 5228 5229 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5230 uint64_t m; 5231 #ifdef _LP64 5232 m = dtrace_load64(maddr + dev) & MAXMIN64; 5233 #else 5234 m = dtrace_load32(maddr + dev) & MAXMIN; 5235 #endif 5236 if (m != minor) { 5237 maddr = dtrace_loadptr(maddr + next); 5238 5239 if (scout == NULL) 5240 continue; 5241 5242 scout = dtrace_loadptr(scout + next); 5243 5244 if (scout == NULL) 5245 continue; 5246 5247 scout = dtrace_loadptr(scout + next); 5248 5249 if (scout == NULL) 5250 continue; 5251 5252 if (scout == maddr) { 5253 *flags |= CPU_DTRACE_ILLOP; 5254 break; 5255 } 5256 5257 continue; 5258 } 5259 5260 /* 5261 * We have the minor data. Now we need to 5262 * copy the minor's name into the end of the 5263 * pathname. 5264 */ 5265 s = (char *)dtrace_loadptr(maddr + name); 5266 len = dtrace_strlen(s, size); 5267 5268 if (*flags & CPU_DTRACE_FAULT) 5269 break; 5270 5271 if (len != 0) { 5272 if ((end -= (len + 1)) < start) 5273 break; 5274 5275 *end = ':'; 5276 } 5277 5278 for (i = 1; i <= len; i++) 5279 end[i] = dtrace_load8((uintptr_t)s++); 5280 break; 5281 } 5282 } 5283 5284 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5285 ddi_node_state_t devi_state; 5286 5287 devi_state = dtrace_load32(daddr + 5288 offsetof(struct dev_info, devi_node_state)); 5289 5290 if (*flags & CPU_DTRACE_FAULT) 5291 break; 5292 5293 if (devi_state >= DS_INITIALIZED) { 5294 s = (char *)dtrace_loadptr(daddr + 5295 offsetof(struct dev_info, devi_addr)); 5296 len = dtrace_strlen(s, size); 5297 5298 if (*flags & CPU_DTRACE_FAULT) 5299 break; 5300 5301 if (len != 0) { 5302 if ((end -= (len + 1)) < start) 5303 break; 5304 5305 *end = '@'; 5306 } 5307 5308 for (i = 1; i <= len; i++) 5309 end[i] = dtrace_load8((uintptr_t)s++); 5310 } 5311 5312 /* 5313 * Now for the node name... 5314 */ 5315 s = (char *)dtrace_loadptr(daddr + 5316 offsetof(struct dev_info, devi_node_name)); 5317 5318 daddr = dtrace_loadptr(daddr + 5319 offsetof(struct dev_info, devi_parent)); 5320 5321 /* 5322 * If our parent is NULL (that is, if we're the root 5323 * node), we're going to use the special path 5324 * "devices". 5325 */ 5326 if (daddr == 0) 5327 s = "devices"; 5328 5329 len = dtrace_strlen(s, size); 5330 if (*flags & CPU_DTRACE_FAULT) 5331 break; 5332 5333 if ((end -= (len + 1)) < start) 5334 break; 5335 5336 for (i = 1; i <= len; i++) 5337 end[i] = dtrace_load8((uintptr_t)s++); 5338 *end = '/'; 5339 5340 if (depth++ > dtrace_devdepth_max) { 5341 *flags |= CPU_DTRACE_ILLOP; 5342 break; 5343 } 5344 } 5345 5346 if (end < start) 5347 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5348 5349 if (daddr == 0) { 5350 regs[rd] = (uintptr_t)end; 5351 mstate->dtms_scratch_ptr += size; 5352 } 5353 5354 break; 5355 } 5356 #endif 5357 5358 case DIF_SUBR_STRJOIN: { 5359 char *d = (char *)mstate->dtms_scratch_ptr; 5360 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5361 uintptr_t s1 = tupregs[0].dttk_value; 5362 uintptr_t s2 = tupregs[1].dttk_value; 5363 int i = 0, j = 0; 5364 size_t lim1, lim2; 5365 char c; 5366 5367 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) || 5368 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) { 5369 regs[rd] = 0; 5370 break; 5371 } 5372 5373 if (!DTRACE_INSCRATCH(mstate, size)) { 5374 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5375 regs[rd] = 0; 5376 break; 5377 } 5378 5379 for (;;) { 5380 if (i >= size) { 5381 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5382 regs[rd] = 0; 5383 break; 5384 } 5385 c = (i >= lim1) ? '\0' : dtrace_load8(s1++); 5386 if ((d[i++] = c) == '\0') { 5387 i--; 5388 break; 5389 } 5390 } 5391 5392 for (;;) { 5393 if (i >= size) { 5394 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5395 regs[rd] = 0; 5396 break; 5397 } 5398 5399 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++); 5400 if ((d[i++] = c) == '\0') 5401 break; 5402 } 5403 5404 if (i < size) { 5405 mstate->dtms_scratch_ptr += i; 5406 regs[rd] = (uintptr_t)d; 5407 } 5408 5409 break; 5410 } 5411 5412 case DIF_SUBR_STRTOLL: { 5413 uintptr_t s = tupregs[0].dttk_value; 5414 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5415 size_t lim; 5416 int base = 10; 5417 5418 if (nargs > 1) { 5419 if ((base = tupregs[1].dttk_value) <= 1 || 5420 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5421 *flags |= CPU_DTRACE_ILLOP; 5422 break; 5423 } 5424 } 5425 5426 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) { 5427 regs[rd] = INT64_MIN; 5428 break; 5429 } 5430 5431 regs[rd] = dtrace_strtoll((char *)s, base, lim); 5432 break; 5433 } 5434 5435 case DIF_SUBR_LLTOSTR: { 5436 int64_t i = (int64_t)tupregs[0].dttk_value; 5437 uint64_t val, digit; 5438 uint64_t size = 65; /* enough room for 2^64 in binary */ 5439 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 5440 int base = 10; 5441 5442 if (nargs > 1) { 5443 if ((base = tupregs[1].dttk_value) <= 1 || 5444 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5445 *flags |= CPU_DTRACE_ILLOP; 5446 break; 5447 } 5448 } 5449 5450 val = (base == 10 && i < 0) ? i * -1 : i; 5451 5452 if (!DTRACE_INSCRATCH(mstate, size)) { 5453 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5454 regs[rd] = 0; 5455 break; 5456 } 5457 5458 for (*end-- = '\0'; val; val /= base) { 5459 if ((digit = val % base) <= '9' - '0') { 5460 *end-- = '0' + digit; 5461 } else { 5462 *end-- = 'a' + (digit - ('9' - '0') - 1); 5463 } 5464 } 5465 5466 if (i == 0 && base == 16) 5467 *end-- = '0'; 5468 5469 if (base == 16) 5470 *end-- = 'x'; 5471 5472 if (i == 0 || base == 8 || base == 16) 5473 *end-- = '0'; 5474 5475 if (i < 0 && base == 10) 5476 *end-- = '-'; 5477 5478 regs[rd] = (uintptr_t)end + 1; 5479 mstate->dtms_scratch_ptr += size; 5480 break; 5481 } 5482 5483 case DIF_SUBR_HTONS: 5484 case DIF_SUBR_NTOHS: 5485 #if BYTE_ORDER == BIG_ENDIAN 5486 regs[rd] = (uint16_t)tupregs[0].dttk_value; 5487 #else 5488 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 5489 #endif 5490 break; 5491 5492 5493 case DIF_SUBR_HTONL: 5494 case DIF_SUBR_NTOHL: 5495 #if BYTE_ORDER == BIG_ENDIAN 5496 regs[rd] = (uint32_t)tupregs[0].dttk_value; 5497 #else 5498 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 5499 #endif 5500 break; 5501 5502 5503 case DIF_SUBR_HTONLL: 5504 case DIF_SUBR_NTOHLL: 5505 #if BYTE_ORDER == BIG_ENDIAN 5506 regs[rd] = (uint64_t)tupregs[0].dttk_value; 5507 #else 5508 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 5509 #endif 5510 break; 5511 5512 5513 case DIF_SUBR_DIRNAME: 5514 case DIF_SUBR_BASENAME: { 5515 char *dest = (char *)mstate->dtms_scratch_ptr; 5516 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5517 uintptr_t src = tupregs[0].dttk_value; 5518 int i, j, len = dtrace_strlen((char *)src, size); 5519 int lastbase = -1, firstbase = -1, lastdir = -1; 5520 int start, end; 5521 5522 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 5523 regs[rd] = 0; 5524 break; 5525 } 5526 5527 if (!DTRACE_INSCRATCH(mstate, size)) { 5528 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5529 regs[rd] = 0; 5530 break; 5531 } 5532 5533 /* 5534 * The basename and dirname for a zero-length string is 5535 * defined to be "." 5536 */ 5537 if (len == 0) { 5538 len = 1; 5539 src = (uintptr_t)"."; 5540 } 5541 5542 /* 5543 * Start from the back of the string, moving back toward the 5544 * front until we see a character that isn't a slash. That 5545 * character is the last character in the basename. 5546 */ 5547 for (i = len - 1; i >= 0; i--) { 5548 if (dtrace_load8(src + i) != '/') 5549 break; 5550 } 5551 5552 if (i >= 0) 5553 lastbase = i; 5554 5555 /* 5556 * Starting from the last character in the basename, move 5557 * towards the front until we find a slash. The character 5558 * that we processed immediately before that is the first 5559 * character in the basename. 5560 */ 5561 for (; i >= 0; i--) { 5562 if (dtrace_load8(src + i) == '/') 5563 break; 5564 } 5565 5566 if (i >= 0) 5567 firstbase = i + 1; 5568 5569 /* 5570 * Now keep going until we find a non-slash character. That 5571 * character is the last character in the dirname. 5572 */ 5573 for (; i >= 0; i--) { 5574 if (dtrace_load8(src + i) != '/') 5575 break; 5576 } 5577 5578 if (i >= 0) 5579 lastdir = i; 5580 5581 ASSERT(!(lastbase == -1 && firstbase != -1)); 5582 ASSERT(!(firstbase == -1 && lastdir != -1)); 5583 5584 if (lastbase == -1) { 5585 /* 5586 * We didn't find a non-slash character. We know that 5587 * the length is non-zero, so the whole string must be 5588 * slashes. In either the dirname or the basename 5589 * case, we return '/'. 5590 */ 5591 ASSERT(firstbase == -1); 5592 firstbase = lastbase = lastdir = 0; 5593 } 5594 5595 if (firstbase == -1) { 5596 /* 5597 * The entire string consists only of a basename 5598 * component. If we're looking for dirname, we need 5599 * to change our string to be just "."; if we're 5600 * looking for a basename, we'll just set the first 5601 * character of the basename to be 0. 5602 */ 5603 if (subr == DIF_SUBR_DIRNAME) { 5604 ASSERT(lastdir == -1); 5605 src = (uintptr_t)"."; 5606 lastdir = 0; 5607 } else { 5608 firstbase = 0; 5609 } 5610 } 5611 5612 if (subr == DIF_SUBR_DIRNAME) { 5613 if (lastdir == -1) { 5614 /* 5615 * We know that we have a slash in the name -- 5616 * or lastdir would be set to 0, above. And 5617 * because lastdir is -1, we know that this 5618 * slash must be the first character. (That 5619 * is, the full string must be of the form 5620 * "/basename".) In this case, the last 5621 * character of the directory name is 0. 5622 */ 5623 lastdir = 0; 5624 } 5625 5626 start = 0; 5627 end = lastdir; 5628 } else { 5629 ASSERT(subr == DIF_SUBR_BASENAME); 5630 ASSERT(firstbase != -1 && lastbase != -1); 5631 start = firstbase; 5632 end = lastbase; 5633 } 5634 5635 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 5636 dest[j] = dtrace_load8(src + i); 5637 5638 dest[j] = '\0'; 5639 regs[rd] = (uintptr_t)dest; 5640 mstate->dtms_scratch_ptr += size; 5641 break; 5642 } 5643 5644 case DIF_SUBR_GETF: { 5645 uintptr_t fd = tupregs[0].dttk_value; 5646 struct filedesc *fdp; 5647 file_t *fp; 5648 5649 if (!dtrace_priv_proc(state)) { 5650 regs[rd] = 0; 5651 break; 5652 } 5653 fdp = curproc->p_fd; 5654 FILEDESC_SLOCK(fdp); 5655 fp = fget_locked(fdp, fd); 5656 mstate->dtms_getf = fp; 5657 regs[rd] = (uintptr_t)fp; 5658 FILEDESC_SUNLOCK(fdp); 5659 break; 5660 } 5661 5662 case DIF_SUBR_CLEANPATH: { 5663 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5664 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5665 uintptr_t src = tupregs[0].dttk_value; 5666 size_t lim; 5667 int i = 0, j = 0; 5668 #ifdef illumos 5669 zone_t *z; 5670 #endif 5671 5672 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) { 5673 regs[rd] = 0; 5674 break; 5675 } 5676 5677 if (!DTRACE_INSCRATCH(mstate, size)) { 5678 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5679 regs[rd] = 0; 5680 break; 5681 } 5682 5683 /* 5684 * Move forward, loading each character. 5685 */ 5686 do { 5687 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5688 next: 5689 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 5690 break; 5691 5692 if (c != '/') { 5693 dest[j++] = c; 5694 continue; 5695 } 5696 5697 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5698 5699 if (c == '/') { 5700 /* 5701 * We have two slashes -- we can just advance 5702 * to the next character. 5703 */ 5704 goto next; 5705 } 5706 5707 if (c != '.') { 5708 /* 5709 * This is not "." and it's not ".." -- we can 5710 * just store the "/" and this character and 5711 * drive on. 5712 */ 5713 dest[j++] = '/'; 5714 dest[j++] = c; 5715 continue; 5716 } 5717 5718 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5719 5720 if (c == '/') { 5721 /* 5722 * This is a "/./" component. We're not going 5723 * to store anything in the destination buffer; 5724 * we're just going to go to the next component. 5725 */ 5726 goto next; 5727 } 5728 5729 if (c != '.') { 5730 /* 5731 * This is not ".." -- we can just store the 5732 * "/." and this character and continue 5733 * processing. 5734 */ 5735 dest[j++] = '/'; 5736 dest[j++] = '.'; 5737 dest[j++] = c; 5738 continue; 5739 } 5740 5741 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5742 5743 if (c != '/' && c != '\0') { 5744 /* 5745 * This is not ".." -- it's "..[mumble]". 5746 * We'll store the "/.." and this character 5747 * and continue processing. 5748 */ 5749 dest[j++] = '/'; 5750 dest[j++] = '.'; 5751 dest[j++] = '.'; 5752 dest[j++] = c; 5753 continue; 5754 } 5755 5756 /* 5757 * This is "/../" or "/..\0". We need to back up 5758 * our destination pointer until we find a "/". 5759 */ 5760 i--; 5761 while (j != 0 && dest[--j] != '/') 5762 continue; 5763 5764 if (c == '\0') 5765 dest[++j] = '/'; 5766 } while (c != '\0'); 5767 5768 dest[j] = '\0'; 5769 5770 #ifdef illumos 5771 if (mstate->dtms_getf != NULL && 5772 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 5773 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 5774 /* 5775 * If we've done a getf() as a part of this ECB and we 5776 * don't have kernel access (and we're not in the global 5777 * zone), check if the path we cleaned up begins with 5778 * the zone's root path, and trim it off if so. Note 5779 * that this is an output cleanliness issue, not a 5780 * security issue: knowing one's zone root path does 5781 * not enable privilege escalation. 5782 */ 5783 if (strstr(dest, z->zone_rootpath) == dest) 5784 dest += strlen(z->zone_rootpath) - 1; 5785 } 5786 #endif 5787 5788 regs[rd] = (uintptr_t)dest; 5789 mstate->dtms_scratch_ptr += size; 5790 break; 5791 } 5792 5793 case DIF_SUBR_INET_NTOA: 5794 case DIF_SUBR_INET_NTOA6: 5795 case DIF_SUBR_INET_NTOP: { 5796 size_t size; 5797 int af, argi, i; 5798 char *base, *end; 5799 5800 if (subr == DIF_SUBR_INET_NTOP) { 5801 af = (int)tupregs[0].dttk_value; 5802 argi = 1; 5803 } else { 5804 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 5805 argi = 0; 5806 } 5807 5808 if (af == AF_INET) { 5809 ipaddr_t ip4; 5810 uint8_t *ptr8, val; 5811 5812 if (!dtrace_canload(tupregs[argi].dttk_value, 5813 sizeof (ipaddr_t), mstate, vstate)) { 5814 regs[rd] = 0; 5815 break; 5816 } 5817 5818 /* 5819 * Safely load the IPv4 address. 5820 */ 5821 ip4 = dtrace_load32(tupregs[argi].dttk_value); 5822 5823 /* 5824 * Check an IPv4 string will fit in scratch. 5825 */ 5826 size = INET_ADDRSTRLEN; 5827 if (!DTRACE_INSCRATCH(mstate, size)) { 5828 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5829 regs[rd] = 0; 5830 break; 5831 } 5832 base = (char *)mstate->dtms_scratch_ptr; 5833 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5834 5835 /* 5836 * Stringify as a dotted decimal quad. 5837 */ 5838 *end-- = '\0'; 5839 ptr8 = (uint8_t *)&ip4; 5840 for (i = 3; i >= 0; i--) { 5841 val = ptr8[i]; 5842 5843 if (val == 0) { 5844 *end-- = '0'; 5845 } else { 5846 for (; val; val /= 10) { 5847 *end-- = '0' + (val % 10); 5848 } 5849 } 5850 5851 if (i > 0) 5852 *end-- = '.'; 5853 } 5854 ASSERT(end + 1 >= base); 5855 5856 } else if (af == AF_INET6) { 5857 struct in6_addr ip6; 5858 int firstzero, tryzero, numzero, v6end; 5859 uint16_t val; 5860 const char digits[] = "0123456789abcdef"; 5861 5862 /* 5863 * Stringify using RFC 1884 convention 2 - 16 bit 5864 * hexadecimal values with a zero-run compression. 5865 * Lower case hexadecimal digits are used. 5866 * eg, fe80::214:4fff:fe0b:76c8. 5867 * The IPv4 embedded form is returned for inet_ntop, 5868 * just the IPv4 string is returned for inet_ntoa6. 5869 */ 5870 5871 if (!dtrace_canload(tupregs[argi].dttk_value, 5872 sizeof (struct in6_addr), mstate, vstate)) { 5873 regs[rd] = 0; 5874 break; 5875 } 5876 5877 /* 5878 * Safely load the IPv6 address. 5879 */ 5880 dtrace_bcopy( 5881 (void *)(uintptr_t)tupregs[argi].dttk_value, 5882 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 5883 5884 /* 5885 * Check an IPv6 string will fit in scratch. 5886 */ 5887 size = INET6_ADDRSTRLEN; 5888 if (!DTRACE_INSCRATCH(mstate, size)) { 5889 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5890 regs[rd] = 0; 5891 break; 5892 } 5893 base = (char *)mstate->dtms_scratch_ptr; 5894 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5895 *end-- = '\0'; 5896 5897 /* 5898 * Find the longest run of 16 bit zero values 5899 * for the single allowed zero compression - "::". 5900 */ 5901 firstzero = -1; 5902 tryzero = -1; 5903 numzero = 1; 5904 for (i = 0; i < sizeof (struct in6_addr); i++) { 5905 #ifdef illumos 5906 if (ip6._S6_un._S6_u8[i] == 0 && 5907 #else 5908 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5909 #endif 5910 tryzero == -1 && i % 2 == 0) { 5911 tryzero = i; 5912 continue; 5913 } 5914 5915 if (tryzero != -1 && 5916 #ifdef illumos 5917 (ip6._S6_un._S6_u8[i] != 0 || 5918 #else 5919 (ip6.__u6_addr.__u6_addr8[i] != 0 || 5920 #endif 5921 i == sizeof (struct in6_addr) - 1)) { 5922 5923 if (i - tryzero <= numzero) { 5924 tryzero = -1; 5925 continue; 5926 } 5927 5928 firstzero = tryzero; 5929 numzero = i - i % 2 - tryzero; 5930 tryzero = -1; 5931 5932 #ifdef illumos 5933 if (ip6._S6_un._S6_u8[i] == 0 && 5934 #else 5935 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5936 #endif 5937 i == sizeof (struct in6_addr) - 1) 5938 numzero += 2; 5939 } 5940 } 5941 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 5942 5943 /* 5944 * Check for an IPv4 embedded address. 5945 */ 5946 v6end = sizeof (struct in6_addr) - 2; 5947 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 5948 IN6_IS_ADDR_V4COMPAT(&ip6)) { 5949 for (i = sizeof (struct in6_addr) - 1; 5950 i >= DTRACE_V4MAPPED_OFFSET; i--) { 5951 ASSERT(end >= base); 5952 5953 #ifdef illumos 5954 val = ip6._S6_un._S6_u8[i]; 5955 #else 5956 val = ip6.__u6_addr.__u6_addr8[i]; 5957 #endif 5958 5959 if (val == 0) { 5960 *end-- = '0'; 5961 } else { 5962 for (; val; val /= 10) { 5963 *end-- = '0' + val % 10; 5964 } 5965 } 5966 5967 if (i > DTRACE_V4MAPPED_OFFSET) 5968 *end-- = '.'; 5969 } 5970 5971 if (subr == DIF_SUBR_INET_NTOA6) 5972 goto inetout; 5973 5974 /* 5975 * Set v6end to skip the IPv4 address that 5976 * we have already stringified. 5977 */ 5978 v6end = 10; 5979 } 5980 5981 /* 5982 * Build the IPv6 string by working through the 5983 * address in reverse. 5984 */ 5985 for (i = v6end; i >= 0; i -= 2) { 5986 ASSERT(end >= base); 5987 5988 if (i == firstzero + numzero - 2) { 5989 *end-- = ':'; 5990 *end-- = ':'; 5991 i -= numzero - 2; 5992 continue; 5993 } 5994 5995 if (i < 14 && i != firstzero - 2) 5996 *end-- = ':'; 5997 5998 #ifdef illumos 5999 val = (ip6._S6_un._S6_u8[i] << 8) + 6000 ip6._S6_un._S6_u8[i + 1]; 6001 #else 6002 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 6003 ip6.__u6_addr.__u6_addr8[i + 1]; 6004 #endif 6005 6006 if (val == 0) { 6007 *end-- = '0'; 6008 } else { 6009 for (; val; val /= 16) { 6010 *end-- = digits[val % 16]; 6011 } 6012 } 6013 } 6014 ASSERT(end + 1 >= base); 6015 6016 } else { 6017 /* 6018 * The user didn't use AH_INET or AH_INET6. 6019 */ 6020 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6021 regs[rd] = 0; 6022 break; 6023 } 6024 6025 inetout: regs[rd] = (uintptr_t)end + 1; 6026 mstate->dtms_scratch_ptr += size; 6027 break; 6028 } 6029 6030 case DIF_SUBR_MEMREF: { 6031 uintptr_t size = 2 * sizeof(uintptr_t); 6032 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 6033 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 6034 6035 /* address and length */ 6036 memref[0] = tupregs[0].dttk_value; 6037 memref[1] = tupregs[1].dttk_value; 6038 6039 regs[rd] = (uintptr_t) memref; 6040 mstate->dtms_scratch_ptr += scratch_size; 6041 break; 6042 } 6043 6044 #ifndef illumos 6045 case DIF_SUBR_MEMSTR: { 6046 char *str = (char *)mstate->dtms_scratch_ptr; 6047 uintptr_t mem = tupregs[0].dttk_value; 6048 char c = tupregs[1].dttk_value; 6049 size_t size = tupregs[2].dttk_value; 6050 uint8_t n; 6051 int i; 6052 6053 regs[rd] = 0; 6054 6055 if (size == 0) 6056 break; 6057 6058 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 6059 break; 6060 6061 if (!DTRACE_INSCRATCH(mstate, size)) { 6062 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6063 break; 6064 } 6065 6066 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 6067 *flags |= CPU_DTRACE_ILLOP; 6068 break; 6069 } 6070 6071 for (i = 0; i < size - 1; i++) { 6072 n = dtrace_load8(mem++); 6073 str[i] = (n == 0) ? c : n; 6074 } 6075 str[size - 1] = 0; 6076 6077 regs[rd] = (uintptr_t)str; 6078 mstate->dtms_scratch_ptr += size; 6079 break; 6080 } 6081 #endif 6082 } 6083 } 6084 6085 /* 6086 * Emulate the execution of DTrace IR instructions specified by the given 6087 * DIF object. This function is deliberately void of assertions as all of 6088 * the necessary checks are handled by a call to dtrace_difo_validate(). 6089 */ 6090 static uint64_t 6091 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 6092 dtrace_vstate_t *vstate, dtrace_state_t *state) 6093 { 6094 const dif_instr_t *text = difo->dtdo_buf; 6095 const uint_t textlen = difo->dtdo_len; 6096 const char *strtab = difo->dtdo_strtab; 6097 const uint64_t *inttab = difo->dtdo_inttab; 6098 6099 uint64_t rval = 0; 6100 dtrace_statvar_t *svar; 6101 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 6102 dtrace_difv_t *v; 6103 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 6104 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 6105 6106 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 6107 uint64_t regs[DIF_DIR_NREGS]; 6108 uint64_t *tmp; 6109 6110 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 6111 int64_t cc_r; 6112 uint_t pc = 0, id, opc = 0; 6113 uint8_t ttop = 0; 6114 dif_instr_t instr; 6115 uint_t r1, r2, rd; 6116 6117 /* 6118 * We stash the current DIF object into the machine state: we need it 6119 * for subsequent access checking. 6120 */ 6121 mstate->dtms_difo = difo; 6122 6123 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 6124 6125 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 6126 opc = pc; 6127 6128 instr = text[pc++]; 6129 r1 = DIF_INSTR_R1(instr); 6130 r2 = DIF_INSTR_R2(instr); 6131 rd = DIF_INSTR_RD(instr); 6132 6133 switch (DIF_INSTR_OP(instr)) { 6134 case DIF_OP_OR: 6135 regs[rd] = regs[r1] | regs[r2]; 6136 break; 6137 case DIF_OP_XOR: 6138 regs[rd] = regs[r1] ^ regs[r2]; 6139 break; 6140 case DIF_OP_AND: 6141 regs[rd] = regs[r1] & regs[r2]; 6142 break; 6143 case DIF_OP_SLL: 6144 regs[rd] = regs[r1] << regs[r2]; 6145 break; 6146 case DIF_OP_SRL: 6147 regs[rd] = regs[r1] >> regs[r2]; 6148 break; 6149 case DIF_OP_SUB: 6150 regs[rd] = regs[r1] - regs[r2]; 6151 break; 6152 case DIF_OP_ADD: 6153 regs[rd] = regs[r1] + regs[r2]; 6154 break; 6155 case DIF_OP_MUL: 6156 regs[rd] = regs[r1] * regs[r2]; 6157 break; 6158 case DIF_OP_SDIV: 6159 if (regs[r2] == 0) { 6160 regs[rd] = 0; 6161 *flags |= CPU_DTRACE_DIVZERO; 6162 } else { 6163 regs[rd] = (int64_t)regs[r1] / 6164 (int64_t)regs[r2]; 6165 } 6166 break; 6167 6168 case DIF_OP_UDIV: 6169 if (regs[r2] == 0) { 6170 regs[rd] = 0; 6171 *flags |= CPU_DTRACE_DIVZERO; 6172 } else { 6173 regs[rd] = regs[r1] / regs[r2]; 6174 } 6175 break; 6176 6177 case DIF_OP_SREM: 6178 if (regs[r2] == 0) { 6179 regs[rd] = 0; 6180 *flags |= CPU_DTRACE_DIVZERO; 6181 } else { 6182 regs[rd] = (int64_t)regs[r1] % 6183 (int64_t)regs[r2]; 6184 } 6185 break; 6186 6187 case DIF_OP_UREM: 6188 if (regs[r2] == 0) { 6189 regs[rd] = 0; 6190 *flags |= CPU_DTRACE_DIVZERO; 6191 } else { 6192 regs[rd] = regs[r1] % regs[r2]; 6193 } 6194 break; 6195 6196 case DIF_OP_NOT: 6197 regs[rd] = ~regs[r1]; 6198 break; 6199 case DIF_OP_MOV: 6200 regs[rd] = regs[r1]; 6201 break; 6202 case DIF_OP_CMP: 6203 cc_r = regs[r1] - regs[r2]; 6204 cc_n = cc_r < 0; 6205 cc_z = cc_r == 0; 6206 cc_v = 0; 6207 cc_c = regs[r1] < regs[r2]; 6208 break; 6209 case DIF_OP_TST: 6210 cc_n = cc_v = cc_c = 0; 6211 cc_z = regs[r1] == 0; 6212 break; 6213 case DIF_OP_BA: 6214 pc = DIF_INSTR_LABEL(instr); 6215 break; 6216 case DIF_OP_BE: 6217 if (cc_z) 6218 pc = DIF_INSTR_LABEL(instr); 6219 break; 6220 case DIF_OP_BNE: 6221 if (cc_z == 0) 6222 pc = DIF_INSTR_LABEL(instr); 6223 break; 6224 case DIF_OP_BG: 6225 if ((cc_z | (cc_n ^ cc_v)) == 0) 6226 pc = DIF_INSTR_LABEL(instr); 6227 break; 6228 case DIF_OP_BGU: 6229 if ((cc_c | cc_z) == 0) 6230 pc = DIF_INSTR_LABEL(instr); 6231 break; 6232 case DIF_OP_BGE: 6233 if ((cc_n ^ cc_v) == 0) 6234 pc = DIF_INSTR_LABEL(instr); 6235 break; 6236 case DIF_OP_BGEU: 6237 if (cc_c == 0) 6238 pc = DIF_INSTR_LABEL(instr); 6239 break; 6240 case DIF_OP_BL: 6241 if (cc_n ^ cc_v) 6242 pc = DIF_INSTR_LABEL(instr); 6243 break; 6244 case DIF_OP_BLU: 6245 if (cc_c) 6246 pc = DIF_INSTR_LABEL(instr); 6247 break; 6248 case DIF_OP_BLE: 6249 if (cc_z | (cc_n ^ cc_v)) 6250 pc = DIF_INSTR_LABEL(instr); 6251 break; 6252 case DIF_OP_BLEU: 6253 if (cc_c | cc_z) 6254 pc = DIF_INSTR_LABEL(instr); 6255 break; 6256 case DIF_OP_RLDSB: 6257 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6258 break; 6259 /*FALLTHROUGH*/ 6260 case DIF_OP_LDSB: 6261 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 6262 break; 6263 case DIF_OP_RLDSH: 6264 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6265 break; 6266 /*FALLTHROUGH*/ 6267 case DIF_OP_LDSH: 6268 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 6269 break; 6270 case DIF_OP_RLDSW: 6271 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6272 break; 6273 /*FALLTHROUGH*/ 6274 case DIF_OP_LDSW: 6275 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 6276 break; 6277 case DIF_OP_RLDUB: 6278 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6279 break; 6280 /*FALLTHROUGH*/ 6281 case DIF_OP_LDUB: 6282 regs[rd] = dtrace_load8(regs[r1]); 6283 break; 6284 case DIF_OP_RLDUH: 6285 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6286 break; 6287 /*FALLTHROUGH*/ 6288 case DIF_OP_LDUH: 6289 regs[rd] = dtrace_load16(regs[r1]); 6290 break; 6291 case DIF_OP_RLDUW: 6292 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6293 break; 6294 /*FALLTHROUGH*/ 6295 case DIF_OP_LDUW: 6296 regs[rd] = dtrace_load32(regs[r1]); 6297 break; 6298 case DIF_OP_RLDX: 6299 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 6300 break; 6301 /*FALLTHROUGH*/ 6302 case DIF_OP_LDX: 6303 regs[rd] = dtrace_load64(regs[r1]); 6304 break; 6305 case DIF_OP_ULDSB: 6306 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6307 regs[rd] = (int8_t) 6308 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6309 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6310 break; 6311 case DIF_OP_ULDSH: 6312 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6313 regs[rd] = (int16_t) 6314 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6315 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6316 break; 6317 case DIF_OP_ULDSW: 6318 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6319 regs[rd] = (int32_t) 6320 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6321 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6322 break; 6323 case DIF_OP_ULDUB: 6324 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6325 regs[rd] = 6326 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6327 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6328 break; 6329 case DIF_OP_ULDUH: 6330 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6331 regs[rd] = 6332 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6333 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6334 break; 6335 case DIF_OP_ULDUW: 6336 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6337 regs[rd] = 6338 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6339 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6340 break; 6341 case DIF_OP_ULDX: 6342 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6343 regs[rd] = 6344 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 6345 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6346 break; 6347 case DIF_OP_RET: 6348 rval = regs[rd]; 6349 pc = textlen; 6350 break; 6351 case DIF_OP_NOP: 6352 break; 6353 case DIF_OP_SETX: 6354 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 6355 break; 6356 case DIF_OP_SETS: 6357 regs[rd] = (uint64_t)(uintptr_t) 6358 (strtab + DIF_INSTR_STRING(instr)); 6359 break; 6360 case DIF_OP_SCMP: { 6361 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 6362 uintptr_t s1 = regs[r1]; 6363 uintptr_t s2 = regs[r2]; 6364 size_t lim1, lim2; 6365 6366 if (s1 != 0 && 6367 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate)) 6368 break; 6369 if (s2 != 0 && 6370 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate)) 6371 break; 6372 6373 cc_r = dtrace_strncmp((char *)s1, (char *)s2, 6374 MIN(lim1, lim2)); 6375 6376 cc_n = cc_r < 0; 6377 cc_z = cc_r == 0; 6378 cc_v = cc_c = 0; 6379 break; 6380 } 6381 case DIF_OP_LDGA: 6382 regs[rd] = dtrace_dif_variable(mstate, state, 6383 r1, regs[r2]); 6384 break; 6385 case DIF_OP_LDGS: 6386 id = DIF_INSTR_VAR(instr); 6387 6388 if (id >= DIF_VAR_OTHER_UBASE) { 6389 uintptr_t a; 6390 6391 id -= DIF_VAR_OTHER_UBASE; 6392 svar = vstate->dtvs_globals[id]; 6393 ASSERT(svar != NULL); 6394 v = &svar->dtsv_var; 6395 6396 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 6397 regs[rd] = svar->dtsv_data; 6398 break; 6399 } 6400 6401 a = (uintptr_t)svar->dtsv_data; 6402 6403 if (*(uint8_t *)a == UINT8_MAX) { 6404 /* 6405 * If the 0th byte is set to UINT8_MAX 6406 * then this is to be treated as a 6407 * reference to a NULL variable. 6408 */ 6409 regs[rd] = 0; 6410 } else { 6411 regs[rd] = a + sizeof (uint64_t); 6412 } 6413 6414 break; 6415 } 6416 6417 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 6418 break; 6419 6420 case DIF_OP_STGS: 6421 id = DIF_INSTR_VAR(instr); 6422 6423 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6424 id -= DIF_VAR_OTHER_UBASE; 6425 6426 VERIFY(id < vstate->dtvs_nglobals); 6427 svar = vstate->dtvs_globals[id]; 6428 ASSERT(svar != NULL); 6429 v = &svar->dtsv_var; 6430 6431 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6432 uintptr_t a = (uintptr_t)svar->dtsv_data; 6433 size_t lim; 6434 6435 ASSERT(a != 0); 6436 ASSERT(svar->dtsv_size != 0); 6437 6438 if (regs[rd] == 0) { 6439 *(uint8_t *)a = UINT8_MAX; 6440 break; 6441 } else { 6442 *(uint8_t *)a = 0; 6443 a += sizeof (uint64_t); 6444 } 6445 if (!dtrace_vcanload( 6446 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6447 &lim, mstate, vstate)) 6448 break; 6449 6450 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6451 (void *)a, &v->dtdv_type, lim); 6452 break; 6453 } 6454 6455 svar->dtsv_data = regs[rd]; 6456 break; 6457 6458 case DIF_OP_LDTA: 6459 /* 6460 * There are no DTrace built-in thread-local arrays at 6461 * present. This opcode is saved for future work. 6462 */ 6463 *flags |= CPU_DTRACE_ILLOP; 6464 regs[rd] = 0; 6465 break; 6466 6467 case DIF_OP_LDLS: 6468 id = DIF_INSTR_VAR(instr); 6469 6470 if (id < DIF_VAR_OTHER_UBASE) { 6471 /* 6472 * For now, this has no meaning. 6473 */ 6474 regs[rd] = 0; 6475 break; 6476 } 6477 6478 id -= DIF_VAR_OTHER_UBASE; 6479 6480 ASSERT(id < vstate->dtvs_nlocals); 6481 ASSERT(vstate->dtvs_locals != NULL); 6482 6483 svar = vstate->dtvs_locals[id]; 6484 ASSERT(svar != NULL); 6485 v = &svar->dtsv_var; 6486 6487 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6488 uintptr_t a = (uintptr_t)svar->dtsv_data; 6489 size_t sz = v->dtdv_type.dtdt_size; 6490 size_t lim; 6491 6492 sz += sizeof (uint64_t); 6493 ASSERT(svar->dtsv_size == NCPU * sz); 6494 a += curcpu * sz; 6495 6496 if (*(uint8_t *)a == UINT8_MAX) { 6497 /* 6498 * If the 0th byte is set to UINT8_MAX 6499 * then this is to be treated as a 6500 * reference to a NULL variable. 6501 */ 6502 regs[rd] = 0; 6503 } else { 6504 regs[rd] = a + sizeof (uint64_t); 6505 } 6506 6507 break; 6508 } 6509 6510 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6511 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6512 regs[rd] = tmp[curcpu]; 6513 break; 6514 6515 case DIF_OP_STLS: 6516 id = DIF_INSTR_VAR(instr); 6517 6518 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6519 id -= DIF_VAR_OTHER_UBASE; 6520 VERIFY(id < vstate->dtvs_nlocals); 6521 6522 ASSERT(vstate->dtvs_locals != NULL); 6523 svar = vstate->dtvs_locals[id]; 6524 ASSERT(svar != NULL); 6525 v = &svar->dtsv_var; 6526 6527 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6528 uintptr_t a = (uintptr_t)svar->dtsv_data; 6529 size_t sz = v->dtdv_type.dtdt_size; 6530 size_t lim; 6531 6532 sz += sizeof (uint64_t); 6533 ASSERT(svar->dtsv_size == NCPU * sz); 6534 a += curcpu * sz; 6535 6536 if (regs[rd] == 0) { 6537 *(uint8_t *)a = UINT8_MAX; 6538 break; 6539 } else { 6540 *(uint8_t *)a = 0; 6541 a += sizeof (uint64_t); 6542 } 6543 6544 if (!dtrace_vcanload( 6545 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6546 &lim, mstate, vstate)) 6547 break; 6548 6549 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6550 (void *)a, &v->dtdv_type, lim); 6551 break; 6552 } 6553 6554 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6555 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6556 tmp[curcpu] = regs[rd]; 6557 break; 6558 6559 case DIF_OP_LDTS: { 6560 dtrace_dynvar_t *dvar; 6561 dtrace_key_t *key; 6562 6563 id = DIF_INSTR_VAR(instr); 6564 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6565 id -= DIF_VAR_OTHER_UBASE; 6566 v = &vstate->dtvs_tlocals[id]; 6567 6568 key = &tupregs[DIF_DTR_NREGS]; 6569 key[0].dttk_value = (uint64_t)id; 6570 key[0].dttk_size = 0; 6571 DTRACE_TLS_THRKEY(key[1].dttk_value); 6572 key[1].dttk_size = 0; 6573 6574 dvar = dtrace_dynvar(dstate, 2, key, 6575 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 6576 mstate, vstate); 6577 6578 if (dvar == NULL) { 6579 regs[rd] = 0; 6580 break; 6581 } 6582 6583 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6584 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6585 } else { 6586 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6587 } 6588 6589 break; 6590 } 6591 6592 case DIF_OP_STTS: { 6593 dtrace_dynvar_t *dvar; 6594 dtrace_key_t *key; 6595 6596 id = DIF_INSTR_VAR(instr); 6597 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6598 id -= DIF_VAR_OTHER_UBASE; 6599 VERIFY(id < vstate->dtvs_ntlocals); 6600 6601 key = &tupregs[DIF_DTR_NREGS]; 6602 key[0].dttk_value = (uint64_t)id; 6603 key[0].dttk_size = 0; 6604 DTRACE_TLS_THRKEY(key[1].dttk_value); 6605 key[1].dttk_size = 0; 6606 v = &vstate->dtvs_tlocals[id]; 6607 6608 dvar = dtrace_dynvar(dstate, 2, key, 6609 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6610 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6611 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6612 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6613 6614 /* 6615 * Given that we're storing to thread-local data, 6616 * we need to flush our predicate cache. 6617 */ 6618 curthread->t_predcache = 0; 6619 6620 if (dvar == NULL) 6621 break; 6622 6623 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6624 size_t lim; 6625 6626 if (!dtrace_vcanload( 6627 (void *)(uintptr_t)regs[rd], 6628 &v->dtdv_type, &lim, mstate, vstate)) 6629 break; 6630 6631 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6632 dvar->dtdv_data, &v->dtdv_type, lim); 6633 } else { 6634 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6635 } 6636 6637 break; 6638 } 6639 6640 case DIF_OP_SRA: 6641 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 6642 break; 6643 6644 case DIF_OP_CALL: 6645 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 6646 regs, tupregs, ttop, mstate, state); 6647 break; 6648 6649 case DIF_OP_PUSHTR: 6650 if (ttop == DIF_DTR_NREGS) { 6651 *flags |= CPU_DTRACE_TUPOFLOW; 6652 break; 6653 } 6654 6655 if (r1 == DIF_TYPE_STRING) { 6656 /* 6657 * If this is a string type and the size is 0, 6658 * we'll use the system-wide default string 6659 * size. Note that we are _not_ looking at 6660 * the value of the DTRACEOPT_STRSIZE option; 6661 * had this been set, we would expect to have 6662 * a non-zero size value in the "pushtr". 6663 */ 6664 tupregs[ttop].dttk_size = 6665 dtrace_strlen((char *)(uintptr_t)regs[rd], 6666 regs[r2] ? regs[r2] : 6667 dtrace_strsize_default) + 1; 6668 } else { 6669 if (regs[r2] > LONG_MAX) { 6670 *flags |= CPU_DTRACE_ILLOP; 6671 break; 6672 } 6673 6674 tupregs[ttop].dttk_size = regs[r2]; 6675 } 6676 6677 tupregs[ttop++].dttk_value = regs[rd]; 6678 break; 6679 6680 case DIF_OP_PUSHTV: 6681 if (ttop == DIF_DTR_NREGS) { 6682 *flags |= CPU_DTRACE_TUPOFLOW; 6683 break; 6684 } 6685 6686 tupregs[ttop].dttk_value = regs[rd]; 6687 tupregs[ttop++].dttk_size = 0; 6688 break; 6689 6690 case DIF_OP_POPTS: 6691 if (ttop != 0) 6692 ttop--; 6693 break; 6694 6695 case DIF_OP_FLUSHTS: 6696 ttop = 0; 6697 break; 6698 6699 case DIF_OP_LDGAA: 6700 case DIF_OP_LDTAA: { 6701 dtrace_dynvar_t *dvar; 6702 dtrace_key_t *key = tupregs; 6703 uint_t nkeys = ttop; 6704 6705 id = DIF_INSTR_VAR(instr); 6706 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6707 id -= DIF_VAR_OTHER_UBASE; 6708 6709 key[nkeys].dttk_value = (uint64_t)id; 6710 key[nkeys++].dttk_size = 0; 6711 6712 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 6713 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6714 key[nkeys++].dttk_size = 0; 6715 VERIFY(id < vstate->dtvs_ntlocals); 6716 v = &vstate->dtvs_tlocals[id]; 6717 } else { 6718 VERIFY(id < vstate->dtvs_nglobals); 6719 v = &vstate->dtvs_globals[id]->dtsv_var; 6720 } 6721 6722 dvar = dtrace_dynvar(dstate, nkeys, key, 6723 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6724 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6725 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 6726 6727 if (dvar == NULL) { 6728 regs[rd] = 0; 6729 break; 6730 } 6731 6732 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6733 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6734 } else { 6735 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6736 } 6737 6738 break; 6739 } 6740 6741 case DIF_OP_STGAA: 6742 case DIF_OP_STTAA: { 6743 dtrace_dynvar_t *dvar; 6744 dtrace_key_t *key = tupregs; 6745 uint_t nkeys = ttop; 6746 6747 id = DIF_INSTR_VAR(instr); 6748 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6749 id -= DIF_VAR_OTHER_UBASE; 6750 6751 key[nkeys].dttk_value = (uint64_t)id; 6752 key[nkeys++].dttk_size = 0; 6753 6754 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 6755 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6756 key[nkeys++].dttk_size = 0; 6757 VERIFY(id < vstate->dtvs_ntlocals); 6758 v = &vstate->dtvs_tlocals[id]; 6759 } else { 6760 VERIFY(id < vstate->dtvs_nglobals); 6761 v = &vstate->dtvs_globals[id]->dtsv_var; 6762 } 6763 6764 dvar = dtrace_dynvar(dstate, nkeys, key, 6765 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6766 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6767 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6768 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6769 6770 if (dvar == NULL) 6771 break; 6772 6773 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6774 size_t lim; 6775 6776 if (!dtrace_vcanload( 6777 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6778 &lim, mstate, vstate)) 6779 break; 6780 6781 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6782 dvar->dtdv_data, &v->dtdv_type, lim); 6783 } else { 6784 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6785 } 6786 6787 break; 6788 } 6789 6790 case DIF_OP_ALLOCS: { 6791 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6792 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 6793 6794 /* 6795 * Rounding up the user allocation size could have 6796 * overflowed large, bogus allocations (like -1ULL) to 6797 * 0. 6798 */ 6799 if (size < regs[r1] || 6800 !DTRACE_INSCRATCH(mstate, size)) { 6801 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6802 regs[rd] = 0; 6803 break; 6804 } 6805 6806 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 6807 mstate->dtms_scratch_ptr += size; 6808 regs[rd] = ptr; 6809 break; 6810 } 6811 6812 case DIF_OP_COPYS: 6813 if (!dtrace_canstore(regs[rd], regs[r2], 6814 mstate, vstate)) { 6815 *flags |= CPU_DTRACE_BADADDR; 6816 *illval = regs[rd]; 6817 break; 6818 } 6819 6820 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 6821 break; 6822 6823 dtrace_bcopy((void *)(uintptr_t)regs[r1], 6824 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 6825 break; 6826 6827 case DIF_OP_STB: 6828 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 6829 *flags |= CPU_DTRACE_BADADDR; 6830 *illval = regs[rd]; 6831 break; 6832 } 6833 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 6834 break; 6835 6836 case DIF_OP_STH: 6837 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 6838 *flags |= CPU_DTRACE_BADADDR; 6839 *illval = regs[rd]; 6840 break; 6841 } 6842 if (regs[rd] & 1) { 6843 *flags |= CPU_DTRACE_BADALIGN; 6844 *illval = regs[rd]; 6845 break; 6846 } 6847 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 6848 break; 6849 6850 case DIF_OP_STW: 6851 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 6852 *flags |= CPU_DTRACE_BADADDR; 6853 *illval = regs[rd]; 6854 break; 6855 } 6856 if (regs[rd] & 3) { 6857 *flags |= CPU_DTRACE_BADALIGN; 6858 *illval = regs[rd]; 6859 break; 6860 } 6861 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 6862 break; 6863 6864 case DIF_OP_STX: 6865 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 6866 *flags |= CPU_DTRACE_BADADDR; 6867 *illval = regs[rd]; 6868 break; 6869 } 6870 if (regs[rd] & 7) { 6871 *flags |= CPU_DTRACE_BADALIGN; 6872 *illval = regs[rd]; 6873 break; 6874 } 6875 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 6876 break; 6877 } 6878 } 6879 6880 if (!(*flags & CPU_DTRACE_FAULT)) 6881 return (rval); 6882 6883 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 6884 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 6885 6886 return (0); 6887 } 6888 6889 static void 6890 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 6891 { 6892 dtrace_probe_t *probe = ecb->dte_probe; 6893 dtrace_provider_t *prov = probe->dtpr_provider; 6894 char c[DTRACE_FULLNAMELEN + 80], *str; 6895 char *msg = "dtrace: breakpoint action at probe "; 6896 char *ecbmsg = " (ecb "; 6897 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 6898 uintptr_t val = (uintptr_t)ecb; 6899 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 6900 6901 if (dtrace_destructive_disallow) 6902 return; 6903 6904 /* 6905 * It's impossible to be taking action on the NULL probe. 6906 */ 6907 ASSERT(probe != NULL); 6908 6909 /* 6910 * This is a poor man's (destitute man's?) sprintf(): we want to 6911 * print the provider name, module name, function name and name of 6912 * the probe, along with the hex address of the ECB with the breakpoint 6913 * action -- all of which we must place in the character buffer by 6914 * hand. 6915 */ 6916 while (*msg != '\0') 6917 c[i++] = *msg++; 6918 6919 for (str = prov->dtpv_name; *str != '\0'; str++) 6920 c[i++] = *str; 6921 c[i++] = ':'; 6922 6923 for (str = probe->dtpr_mod; *str != '\0'; str++) 6924 c[i++] = *str; 6925 c[i++] = ':'; 6926 6927 for (str = probe->dtpr_func; *str != '\0'; str++) 6928 c[i++] = *str; 6929 c[i++] = ':'; 6930 6931 for (str = probe->dtpr_name; *str != '\0'; str++) 6932 c[i++] = *str; 6933 6934 while (*ecbmsg != '\0') 6935 c[i++] = *ecbmsg++; 6936 6937 while (shift >= 0) { 6938 mask = (uintptr_t)0xf << shift; 6939 6940 if (val >= ((uintptr_t)1 << shift)) 6941 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 6942 shift -= 4; 6943 } 6944 6945 c[i++] = ')'; 6946 c[i] = '\0'; 6947 6948 #ifdef illumos 6949 debug_enter(c); 6950 #else 6951 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 6952 #endif 6953 } 6954 6955 static void 6956 dtrace_action_panic(dtrace_ecb_t *ecb) 6957 { 6958 dtrace_probe_t *probe = ecb->dte_probe; 6959 6960 /* 6961 * It's impossible to be taking action on the NULL probe. 6962 */ 6963 ASSERT(probe != NULL); 6964 6965 if (dtrace_destructive_disallow) 6966 return; 6967 6968 if (dtrace_panicked != NULL) 6969 return; 6970 6971 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 6972 return; 6973 6974 /* 6975 * We won the right to panic. (We want to be sure that only one 6976 * thread calls panic() from dtrace_probe(), and that panic() is 6977 * called exactly once.) 6978 */ 6979 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 6980 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 6981 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 6982 } 6983 6984 static void 6985 dtrace_action_raise(uint64_t sig) 6986 { 6987 if (dtrace_destructive_disallow) 6988 return; 6989 6990 if (sig >= NSIG) { 6991 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6992 return; 6993 } 6994 6995 #ifdef illumos 6996 /* 6997 * raise() has a queue depth of 1 -- we ignore all subsequent 6998 * invocations of the raise() action. 6999 */ 7000 if (curthread->t_dtrace_sig == 0) 7001 curthread->t_dtrace_sig = (uint8_t)sig; 7002 7003 curthread->t_sig_check = 1; 7004 aston(curthread); 7005 #else 7006 struct proc *p = curproc; 7007 PROC_LOCK(p); 7008 kern_psignal(p, sig); 7009 PROC_UNLOCK(p); 7010 #endif 7011 } 7012 7013 static void 7014 dtrace_action_stop(void) 7015 { 7016 if (dtrace_destructive_disallow) 7017 return; 7018 7019 #ifdef illumos 7020 if (!curthread->t_dtrace_stop) { 7021 curthread->t_dtrace_stop = 1; 7022 curthread->t_sig_check = 1; 7023 aston(curthread); 7024 } 7025 #else 7026 struct proc *p = curproc; 7027 PROC_LOCK(p); 7028 kern_psignal(p, SIGSTOP); 7029 PROC_UNLOCK(p); 7030 #endif 7031 } 7032 7033 static void 7034 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 7035 { 7036 hrtime_t now; 7037 volatile uint16_t *flags; 7038 #ifdef illumos 7039 cpu_t *cpu = CPU; 7040 #else 7041 cpu_t *cpu = &solaris_cpu[curcpu]; 7042 #endif 7043 7044 if (dtrace_destructive_disallow) 7045 return; 7046 7047 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7048 7049 now = dtrace_gethrtime(); 7050 7051 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 7052 /* 7053 * We need to advance the mark to the current time. 7054 */ 7055 cpu->cpu_dtrace_chillmark = now; 7056 cpu->cpu_dtrace_chilled = 0; 7057 } 7058 7059 /* 7060 * Now check to see if the requested chill time would take us over 7061 * the maximum amount of time allowed in the chill interval. (Or 7062 * worse, if the calculation itself induces overflow.) 7063 */ 7064 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 7065 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 7066 *flags |= CPU_DTRACE_ILLOP; 7067 return; 7068 } 7069 7070 while (dtrace_gethrtime() - now < val) 7071 continue; 7072 7073 /* 7074 * Normally, we assure that the value of the variable "timestamp" does 7075 * not change within an ECB. The presence of chill() represents an 7076 * exception to this rule, however. 7077 */ 7078 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 7079 cpu->cpu_dtrace_chilled += val; 7080 } 7081 7082 static void 7083 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 7084 uint64_t *buf, uint64_t arg) 7085 { 7086 int nframes = DTRACE_USTACK_NFRAMES(arg); 7087 int strsize = DTRACE_USTACK_STRSIZE(arg); 7088 uint64_t *pcs = &buf[1], *fps; 7089 char *str = (char *)&pcs[nframes]; 7090 int size, offs = 0, i, j; 7091 size_t rem; 7092 uintptr_t old = mstate->dtms_scratch_ptr, saved; 7093 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 7094 char *sym; 7095 7096 /* 7097 * Should be taking a faster path if string space has not been 7098 * allocated. 7099 */ 7100 ASSERT(strsize != 0); 7101 7102 /* 7103 * We will first allocate some temporary space for the frame pointers. 7104 */ 7105 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 7106 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 7107 (nframes * sizeof (uint64_t)); 7108 7109 if (!DTRACE_INSCRATCH(mstate, size)) { 7110 /* 7111 * Not enough room for our frame pointers -- need to indicate 7112 * that we ran out of scratch space. 7113 */ 7114 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 7115 return; 7116 } 7117 7118 mstate->dtms_scratch_ptr += size; 7119 saved = mstate->dtms_scratch_ptr; 7120 7121 /* 7122 * Now get a stack with both program counters and frame pointers. 7123 */ 7124 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7125 dtrace_getufpstack(buf, fps, nframes + 1); 7126 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7127 7128 /* 7129 * If that faulted, we're cooked. 7130 */ 7131 if (*flags & CPU_DTRACE_FAULT) 7132 goto out; 7133 7134 /* 7135 * Now we want to walk up the stack, calling the USTACK helper. For 7136 * each iteration, we restore the scratch pointer. 7137 */ 7138 for (i = 0; i < nframes; i++) { 7139 mstate->dtms_scratch_ptr = saved; 7140 7141 if (offs >= strsize) 7142 break; 7143 7144 sym = (char *)(uintptr_t)dtrace_helper( 7145 DTRACE_HELPER_ACTION_USTACK, 7146 mstate, state, pcs[i], fps[i]); 7147 7148 /* 7149 * If we faulted while running the helper, we're going to 7150 * clear the fault and null out the corresponding string. 7151 */ 7152 if (*flags & CPU_DTRACE_FAULT) { 7153 *flags &= ~CPU_DTRACE_FAULT; 7154 str[offs++] = '\0'; 7155 continue; 7156 } 7157 7158 if (sym == NULL) { 7159 str[offs++] = '\0'; 7160 continue; 7161 } 7162 7163 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate, 7164 &(state->dts_vstate))) { 7165 str[offs++] = '\0'; 7166 continue; 7167 } 7168 7169 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7170 7171 /* 7172 * Now copy in the string that the helper returned to us. 7173 */ 7174 for (j = 0; offs + j < strsize && j < rem; j++) { 7175 if ((str[offs + j] = sym[j]) == '\0') 7176 break; 7177 } 7178 7179 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7180 7181 offs += j + 1; 7182 } 7183 7184 if (offs >= strsize) { 7185 /* 7186 * If we didn't have room for all of the strings, we don't 7187 * abort processing -- this needn't be a fatal error -- but we 7188 * still want to increment a counter (dts_stkstroverflows) to 7189 * allow this condition to be warned about. (If this is from 7190 * a jstack() action, it is easily tuned via jstackstrsize.) 7191 */ 7192 dtrace_error(&state->dts_stkstroverflows); 7193 } 7194 7195 while (offs < strsize) 7196 str[offs++] = '\0'; 7197 7198 out: 7199 mstate->dtms_scratch_ptr = old; 7200 } 7201 7202 static void 7203 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, 7204 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) 7205 { 7206 volatile uint16_t *flags; 7207 uint64_t val = *valp; 7208 size_t valoffs = *valoffsp; 7209 7210 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7211 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); 7212 7213 /* 7214 * If this is a string, we're going to only load until we find the zero 7215 * byte -- after which we'll store zero bytes. 7216 */ 7217 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 7218 char c = '\0' + 1; 7219 size_t s; 7220 7221 for (s = 0; s < size; s++) { 7222 if (c != '\0' && dtkind == DIF_TF_BYREF) { 7223 c = dtrace_load8(val++); 7224 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { 7225 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7226 c = dtrace_fuword8((void *)(uintptr_t)val++); 7227 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7228 if (*flags & CPU_DTRACE_FAULT) 7229 break; 7230 } 7231 7232 DTRACE_STORE(uint8_t, tomax, valoffs++, c); 7233 7234 if (c == '\0' && intuple) 7235 break; 7236 } 7237 } else { 7238 uint8_t c; 7239 while (valoffs < end) { 7240 if (dtkind == DIF_TF_BYREF) { 7241 c = dtrace_load8(val++); 7242 } else if (dtkind == DIF_TF_BYUREF) { 7243 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7244 c = dtrace_fuword8((void *)(uintptr_t)val++); 7245 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7246 if (*flags & CPU_DTRACE_FAULT) 7247 break; 7248 } 7249 7250 DTRACE_STORE(uint8_t, tomax, 7251 valoffs++, c); 7252 } 7253 } 7254 7255 *valp = val; 7256 *valoffsp = valoffs; 7257 } 7258 7259 /* 7260 * Disables interrupts and sets the per-thread inprobe flag. When DEBUG is 7261 * defined, we also assert that we are not recursing unless the probe ID is an 7262 * error probe. 7263 */ 7264 static dtrace_icookie_t 7265 dtrace_probe_enter(dtrace_id_t id) 7266 { 7267 dtrace_icookie_t cookie; 7268 7269 cookie = dtrace_interrupt_disable(); 7270 7271 /* 7272 * Unless this is an ERROR probe, we are not allowed to recurse in 7273 * dtrace_probe(). Recursing into DTrace probe usually means that a 7274 * function is instrumented that should not have been instrumented or 7275 * that the ordering guarantee of the records will be violated, 7276 * resulting in unexpected output. If there is an exception to this 7277 * assertion, a new case should be added. 7278 */ 7279 ASSERT(curthread->t_dtrace_inprobe == 0 || 7280 id == dtrace_probeid_error); 7281 curthread->t_dtrace_inprobe = 1; 7282 7283 return (cookie); 7284 } 7285 7286 /* 7287 * Clears the per-thread inprobe flag and enables interrupts. 7288 */ 7289 static void 7290 dtrace_probe_exit(dtrace_icookie_t cookie) 7291 { 7292 7293 curthread->t_dtrace_inprobe = 0; 7294 dtrace_interrupt_enable(cookie); 7295 } 7296 7297 /* 7298 * If you're looking for the epicenter of DTrace, you just found it. This 7299 * is the function called by the provider to fire a probe -- from which all 7300 * subsequent probe-context DTrace activity emanates. 7301 */ 7302 void 7303 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 7304 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 7305 { 7306 processorid_t cpuid; 7307 dtrace_icookie_t cookie; 7308 dtrace_probe_t *probe; 7309 dtrace_mstate_t mstate; 7310 dtrace_ecb_t *ecb; 7311 dtrace_action_t *act; 7312 intptr_t offs; 7313 size_t size; 7314 int vtime, onintr; 7315 volatile uint16_t *flags; 7316 hrtime_t now; 7317 7318 if (panicstr != NULL) 7319 return; 7320 7321 #ifdef illumos 7322 /* 7323 * Kick out immediately if this CPU is still being born (in which case 7324 * curthread will be set to -1) or the current thread can't allow 7325 * probes in its current context. 7326 */ 7327 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 7328 return; 7329 #endif 7330 7331 cookie = dtrace_probe_enter(id); 7332 probe = dtrace_probes[id - 1]; 7333 cpuid = curcpu; 7334 onintr = CPU_ON_INTR(CPU); 7335 7336 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 7337 probe->dtpr_predcache == curthread->t_predcache) { 7338 /* 7339 * We have hit in the predicate cache; we know that 7340 * this predicate would evaluate to be false. 7341 */ 7342 dtrace_probe_exit(cookie); 7343 return; 7344 } 7345 7346 #ifdef illumos 7347 if (panic_quiesce) { 7348 #else 7349 if (panicstr != NULL) { 7350 #endif 7351 /* 7352 * We don't trace anything if we're panicking. 7353 */ 7354 dtrace_probe_exit(cookie); 7355 return; 7356 } 7357 7358 now = mstate.dtms_timestamp = dtrace_gethrtime(); 7359 mstate.dtms_present = DTRACE_MSTATE_TIMESTAMP; 7360 vtime = dtrace_vtime_references != 0; 7361 7362 if (vtime && curthread->t_dtrace_start) 7363 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 7364 7365 mstate.dtms_difo = NULL; 7366 mstate.dtms_probe = probe; 7367 mstate.dtms_strtok = 0; 7368 mstate.dtms_arg[0] = arg0; 7369 mstate.dtms_arg[1] = arg1; 7370 mstate.dtms_arg[2] = arg2; 7371 mstate.dtms_arg[3] = arg3; 7372 mstate.dtms_arg[4] = arg4; 7373 7374 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 7375 7376 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 7377 dtrace_predicate_t *pred = ecb->dte_predicate; 7378 dtrace_state_t *state = ecb->dte_state; 7379 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 7380 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 7381 dtrace_vstate_t *vstate = &state->dts_vstate; 7382 dtrace_provider_t *prov = probe->dtpr_provider; 7383 uint64_t tracememsize = 0; 7384 int committed = 0; 7385 caddr_t tomax; 7386 7387 /* 7388 * A little subtlety with the following (seemingly innocuous) 7389 * declaration of the automatic 'val': by looking at the 7390 * code, you might think that it could be declared in the 7391 * action processing loop, below. (That is, it's only used in 7392 * the action processing loop.) However, it must be declared 7393 * out of that scope because in the case of DIF expression 7394 * arguments to aggregating actions, one iteration of the 7395 * action loop will use the last iteration's value. 7396 */ 7397 uint64_t val = 0; 7398 7399 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 7400 mstate.dtms_getf = NULL; 7401 7402 *flags &= ~CPU_DTRACE_ERROR; 7403 7404 if (prov == dtrace_provider) { 7405 /* 7406 * If dtrace itself is the provider of this probe, 7407 * we're only going to continue processing the ECB if 7408 * arg0 (the dtrace_state_t) is equal to the ECB's 7409 * creating state. (This prevents disjoint consumers 7410 * from seeing one another's metaprobes.) 7411 */ 7412 if (arg0 != (uint64_t)(uintptr_t)state) 7413 continue; 7414 } 7415 7416 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 7417 /* 7418 * We're not currently active. If our provider isn't 7419 * the dtrace pseudo provider, we're not interested. 7420 */ 7421 if (prov != dtrace_provider) 7422 continue; 7423 7424 /* 7425 * Now we must further check if we are in the BEGIN 7426 * probe. If we are, we will only continue processing 7427 * if we're still in WARMUP -- if one BEGIN enabling 7428 * has invoked the exit() action, we don't want to 7429 * evaluate subsequent BEGIN enablings. 7430 */ 7431 if (probe->dtpr_id == dtrace_probeid_begin && 7432 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 7433 ASSERT(state->dts_activity == 7434 DTRACE_ACTIVITY_DRAINING); 7435 continue; 7436 } 7437 } 7438 7439 if (ecb->dte_cond) { 7440 /* 7441 * If the dte_cond bits indicate that this 7442 * consumer is only allowed to see user-mode firings 7443 * of this probe, call the provider's dtps_usermode() 7444 * entry point to check that the probe was fired 7445 * while in a user context. Skip this ECB if that's 7446 * not the case. 7447 */ 7448 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 7449 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 7450 probe->dtpr_id, probe->dtpr_arg) == 0) 7451 continue; 7452 7453 #ifdef illumos 7454 /* 7455 * This is more subtle than it looks. We have to be 7456 * absolutely certain that CRED() isn't going to 7457 * change out from under us so it's only legit to 7458 * examine that structure if we're in constrained 7459 * situations. Currently, the only times we'll this 7460 * check is if a non-super-user has enabled the 7461 * profile or syscall providers -- providers that 7462 * allow visibility of all processes. For the 7463 * profile case, the check above will ensure that 7464 * we're examining a user context. 7465 */ 7466 if (ecb->dte_cond & DTRACE_COND_OWNER) { 7467 cred_t *cr; 7468 cred_t *s_cr = 7469 ecb->dte_state->dts_cred.dcr_cred; 7470 proc_t *proc; 7471 7472 ASSERT(s_cr != NULL); 7473 7474 if ((cr = CRED()) == NULL || 7475 s_cr->cr_uid != cr->cr_uid || 7476 s_cr->cr_uid != cr->cr_ruid || 7477 s_cr->cr_uid != cr->cr_suid || 7478 s_cr->cr_gid != cr->cr_gid || 7479 s_cr->cr_gid != cr->cr_rgid || 7480 s_cr->cr_gid != cr->cr_sgid || 7481 (proc = ttoproc(curthread)) == NULL || 7482 (proc->p_flag & SNOCD)) 7483 continue; 7484 } 7485 7486 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 7487 cred_t *cr; 7488 cred_t *s_cr = 7489 ecb->dte_state->dts_cred.dcr_cred; 7490 7491 ASSERT(s_cr != NULL); 7492 7493 if ((cr = CRED()) == NULL || 7494 s_cr->cr_zone->zone_id != 7495 cr->cr_zone->zone_id) 7496 continue; 7497 } 7498 #endif 7499 } 7500 7501 if (now - state->dts_alive > dtrace_deadman_timeout) { 7502 /* 7503 * We seem to be dead. Unless we (a) have kernel 7504 * destructive permissions (b) have explicitly enabled 7505 * destructive actions and (c) destructive actions have 7506 * not been disabled, we're going to transition into 7507 * the KILLED state, from which no further processing 7508 * on this state will be performed. 7509 */ 7510 if (!dtrace_priv_kernel_destructive(state) || 7511 !state->dts_cred.dcr_destructive || 7512 dtrace_destructive_disallow) { 7513 void *activity = &state->dts_activity; 7514 dtrace_activity_t current; 7515 7516 do { 7517 current = state->dts_activity; 7518 } while (dtrace_cas32(activity, current, 7519 DTRACE_ACTIVITY_KILLED) != current); 7520 7521 continue; 7522 } 7523 } 7524 7525 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 7526 ecb->dte_alignment, state, &mstate)) < 0) 7527 continue; 7528 7529 tomax = buf->dtb_tomax; 7530 ASSERT(tomax != NULL); 7531 7532 if (ecb->dte_size != 0) { 7533 dtrace_rechdr_t dtrh; 7534 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 7535 mstate.dtms_timestamp = dtrace_gethrtime(); 7536 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7537 } 7538 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 7539 dtrh.dtrh_epid = ecb->dte_epid; 7540 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 7541 mstate.dtms_timestamp); 7542 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 7543 } 7544 7545 mstate.dtms_epid = ecb->dte_epid; 7546 mstate.dtms_present |= DTRACE_MSTATE_EPID; 7547 7548 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 7549 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 7550 else 7551 mstate.dtms_access = 0; 7552 7553 if (pred != NULL) { 7554 dtrace_difo_t *dp = pred->dtp_difo; 7555 uint64_t rval; 7556 7557 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 7558 7559 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 7560 dtrace_cacheid_t cid = probe->dtpr_predcache; 7561 7562 if (cid != DTRACE_CACHEIDNONE && !onintr) { 7563 /* 7564 * Update the predicate cache... 7565 */ 7566 ASSERT(cid == pred->dtp_cacheid); 7567 curthread->t_predcache = cid; 7568 } 7569 7570 continue; 7571 } 7572 } 7573 7574 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 7575 act != NULL; act = act->dta_next) { 7576 size_t valoffs; 7577 dtrace_difo_t *dp; 7578 dtrace_recdesc_t *rec = &act->dta_rec; 7579 7580 size = rec->dtrd_size; 7581 valoffs = offs + rec->dtrd_offset; 7582 7583 if (DTRACEACT_ISAGG(act->dta_kind)) { 7584 uint64_t v = 0xbad; 7585 dtrace_aggregation_t *agg; 7586 7587 agg = (dtrace_aggregation_t *)act; 7588 7589 if ((dp = act->dta_difo) != NULL) 7590 v = dtrace_dif_emulate(dp, 7591 &mstate, vstate, state); 7592 7593 if (*flags & CPU_DTRACE_ERROR) 7594 continue; 7595 7596 /* 7597 * Note that we always pass the expression 7598 * value from the previous iteration of the 7599 * action loop. This value will only be used 7600 * if there is an expression argument to the 7601 * aggregating action, denoted by the 7602 * dtag_hasarg field. 7603 */ 7604 dtrace_aggregate(agg, buf, 7605 offs, aggbuf, v, val); 7606 continue; 7607 } 7608 7609 switch (act->dta_kind) { 7610 case DTRACEACT_STOP: 7611 if (dtrace_priv_proc_destructive(state)) 7612 dtrace_action_stop(); 7613 continue; 7614 7615 case DTRACEACT_BREAKPOINT: 7616 if (dtrace_priv_kernel_destructive(state)) 7617 dtrace_action_breakpoint(ecb); 7618 continue; 7619 7620 case DTRACEACT_PANIC: 7621 if (dtrace_priv_kernel_destructive(state)) 7622 dtrace_action_panic(ecb); 7623 continue; 7624 7625 case DTRACEACT_STACK: 7626 if (!dtrace_priv_kernel(state)) 7627 continue; 7628 7629 dtrace_getpcstack((pc_t *)(tomax + valoffs), 7630 size / sizeof (pc_t), probe->dtpr_aframes, 7631 DTRACE_ANCHORED(probe) ? NULL : 7632 (uint32_t *)arg0); 7633 continue; 7634 7635 case DTRACEACT_JSTACK: 7636 case DTRACEACT_USTACK: 7637 if (!dtrace_priv_proc(state)) 7638 continue; 7639 7640 /* 7641 * See comment in DIF_VAR_PID. 7642 */ 7643 if (DTRACE_ANCHORED(mstate.dtms_probe) && 7644 CPU_ON_INTR(CPU)) { 7645 int depth = DTRACE_USTACK_NFRAMES( 7646 rec->dtrd_arg) + 1; 7647 7648 dtrace_bzero((void *)(tomax + valoffs), 7649 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 7650 + depth * sizeof (uint64_t)); 7651 7652 continue; 7653 } 7654 7655 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 7656 curproc->p_dtrace_helpers != NULL) { 7657 /* 7658 * This is the slow path -- we have 7659 * allocated string space, and we're 7660 * getting the stack of a process that 7661 * has helpers. Call into a separate 7662 * routine to perform this processing. 7663 */ 7664 dtrace_action_ustack(&mstate, state, 7665 (uint64_t *)(tomax + valoffs), 7666 rec->dtrd_arg); 7667 continue; 7668 } 7669 7670 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7671 dtrace_getupcstack((uint64_t *) 7672 (tomax + valoffs), 7673 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 7674 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7675 continue; 7676 7677 default: 7678 break; 7679 } 7680 7681 dp = act->dta_difo; 7682 ASSERT(dp != NULL); 7683 7684 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 7685 7686 if (*flags & CPU_DTRACE_ERROR) 7687 continue; 7688 7689 switch (act->dta_kind) { 7690 case DTRACEACT_SPECULATE: { 7691 dtrace_rechdr_t *dtrh; 7692 7693 ASSERT(buf == &state->dts_buffer[cpuid]); 7694 buf = dtrace_speculation_buffer(state, 7695 cpuid, val); 7696 7697 if (buf == NULL) { 7698 *flags |= CPU_DTRACE_DROP; 7699 continue; 7700 } 7701 7702 offs = dtrace_buffer_reserve(buf, 7703 ecb->dte_needed, ecb->dte_alignment, 7704 state, NULL); 7705 7706 if (offs < 0) { 7707 *flags |= CPU_DTRACE_DROP; 7708 continue; 7709 } 7710 7711 tomax = buf->dtb_tomax; 7712 ASSERT(tomax != NULL); 7713 7714 if (ecb->dte_size == 0) 7715 continue; 7716 7717 ASSERT3U(ecb->dte_size, >=, 7718 sizeof (dtrace_rechdr_t)); 7719 dtrh = ((void *)(tomax + offs)); 7720 dtrh->dtrh_epid = ecb->dte_epid; 7721 /* 7722 * When the speculation is committed, all of 7723 * the records in the speculative buffer will 7724 * have their timestamps set to the commit 7725 * time. Until then, it is set to a sentinel 7726 * value, for debugability. 7727 */ 7728 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 7729 continue; 7730 } 7731 7732 case DTRACEACT_PRINTM: { 7733 /* The DIF returns a 'memref'. */ 7734 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 7735 7736 /* Get the size from the memref. */ 7737 size = memref[1]; 7738 7739 /* 7740 * Check if the size exceeds the allocated 7741 * buffer size. 7742 */ 7743 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7744 /* Flag a drop! */ 7745 *flags |= CPU_DTRACE_DROP; 7746 continue; 7747 } 7748 7749 /* Store the size in the buffer first. */ 7750 DTRACE_STORE(uintptr_t, tomax, 7751 valoffs, size); 7752 7753 /* 7754 * Offset the buffer address to the start 7755 * of the data. 7756 */ 7757 valoffs += sizeof(uintptr_t); 7758 7759 /* 7760 * Reset to the memory address rather than 7761 * the memref array, then let the BYREF 7762 * code below do the work to store the 7763 * memory data in the buffer. 7764 */ 7765 val = memref[0]; 7766 break; 7767 } 7768 7769 case DTRACEACT_CHILL: 7770 if (dtrace_priv_kernel_destructive(state)) 7771 dtrace_action_chill(&mstate, val); 7772 continue; 7773 7774 case DTRACEACT_RAISE: 7775 if (dtrace_priv_proc_destructive(state)) 7776 dtrace_action_raise(val); 7777 continue; 7778 7779 case DTRACEACT_COMMIT: 7780 ASSERT(!committed); 7781 7782 /* 7783 * We need to commit our buffer state. 7784 */ 7785 if (ecb->dte_size) 7786 buf->dtb_offset = offs + ecb->dte_size; 7787 buf = &state->dts_buffer[cpuid]; 7788 dtrace_speculation_commit(state, cpuid, val); 7789 committed = 1; 7790 continue; 7791 7792 case DTRACEACT_DISCARD: 7793 dtrace_speculation_discard(state, cpuid, val); 7794 continue; 7795 7796 case DTRACEACT_DIFEXPR: 7797 case DTRACEACT_LIBACT: 7798 case DTRACEACT_PRINTF: 7799 case DTRACEACT_PRINTA: 7800 case DTRACEACT_SYSTEM: 7801 case DTRACEACT_FREOPEN: 7802 case DTRACEACT_TRACEMEM: 7803 break; 7804 7805 case DTRACEACT_TRACEMEM_DYNSIZE: 7806 tracememsize = val; 7807 break; 7808 7809 case DTRACEACT_SYM: 7810 case DTRACEACT_MOD: 7811 if (!dtrace_priv_kernel(state)) 7812 continue; 7813 break; 7814 7815 case DTRACEACT_USYM: 7816 case DTRACEACT_UMOD: 7817 case DTRACEACT_UADDR: { 7818 #ifdef illumos 7819 struct pid *pid = curthread->t_procp->p_pidp; 7820 #endif 7821 7822 if (!dtrace_priv_proc(state)) 7823 continue; 7824 7825 DTRACE_STORE(uint64_t, tomax, 7826 #ifdef illumos 7827 valoffs, (uint64_t)pid->pid_id); 7828 #else 7829 valoffs, (uint64_t) curproc->p_pid); 7830 #endif 7831 DTRACE_STORE(uint64_t, tomax, 7832 valoffs + sizeof (uint64_t), val); 7833 7834 continue; 7835 } 7836 7837 case DTRACEACT_EXIT: { 7838 /* 7839 * For the exit action, we are going to attempt 7840 * to atomically set our activity to be 7841 * draining. If this fails (either because 7842 * another CPU has beat us to the exit action, 7843 * or because our current activity is something 7844 * other than ACTIVE or WARMUP), we will 7845 * continue. This assures that the exit action 7846 * can be successfully recorded at most once 7847 * when we're in the ACTIVE state. If we're 7848 * encountering the exit() action while in 7849 * COOLDOWN, however, we want to honor the new 7850 * status code. (We know that we're the only 7851 * thread in COOLDOWN, so there is no race.) 7852 */ 7853 void *activity = &state->dts_activity; 7854 dtrace_activity_t current = state->dts_activity; 7855 7856 if (current == DTRACE_ACTIVITY_COOLDOWN) 7857 break; 7858 7859 if (current != DTRACE_ACTIVITY_WARMUP) 7860 current = DTRACE_ACTIVITY_ACTIVE; 7861 7862 if (dtrace_cas32(activity, current, 7863 DTRACE_ACTIVITY_DRAINING) != current) { 7864 *flags |= CPU_DTRACE_DROP; 7865 continue; 7866 } 7867 7868 break; 7869 } 7870 7871 default: 7872 ASSERT(0); 7873 } 7874 7875 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || 7876 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { 7877 uintptr_t end = valoffs + size; 7878 7879 if (tracememsize != 0 && 7880 valoffs + tracememsize < end) { 7881 end = valoffs + tracememsize; 7882 tracememsize = 0; 7883 } 7884 7885 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && 7886 !dtrace_vcanload((void *)(uintptr_t)val, 7887 &dp->dtdo_rtype, NULL, &mstate, vstate)) 7888 continue; 7889 7890 dtrace_store_by_ref(dp, tomax, size, &valoffs, 7891 &val, end, act->dta_intuple, 7892 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? 7893 DIF_TF_BYREF: DIF_TF_BYUREF); 7894 continue; 7895 } 7896 7897 switch (size) { 7898 case 0: 7899 break; 7900 7901 case sizeof (uint8_t): 7902 DTRACE_STORE(uint8_t, tomax, valoffs, val); 7903 break; 7904 case sizeof (uint16_t): 7905 DTRACE_STORE(uint16_t, tomax, valoffs, val); 7906 break; 7907 case sizeof (uint32_t): 7908 DTRACE_STORE(uint32_t, tomax, valoffs, val); 7909 break; 7910 case sizeof (uint64_t): 7911 DTRACE_STORE(uint64_t, tomax, valoffs, val); 7912 break; 7913 default: 7914 /* 7915 * Any other size should have been returned by 7916 * reference, not by value. 7917 */ 7918 ASSERT(0); 7919 break; 7920 } 7921 } 7922 7923 if (*flags & CPU_DTRACE_DROP) 7924 continue; 7925 7926 if (*flags & CPU_DTRACE_FAULT) { 7927 int ndx; 7928 dtrace_action_t *err; 7929 7930 buf->dtb_errors++; 7931 7932 if (probe->dtpr_id == dtrace_probeid_error) { 7933 /* 7934 * There's nothing we can do -- we had an 7935 * error on the error probe. We bump an 7936 * error counter to at least indicate that 7937 * this condition happened. 7938 */ 7939 dtrace_error(&state->dts_dblerrors); 7940 continue; 7941 } 7942 7943 if (vtime) { 7944 /* 7945 * Before recursing on dtrace_probe(), we 7946 * need to explicitly clear out our start 7947 * time to prevent it from being accumulated 7948 * into t_dtrace_vtime. 7949 */ 7950 curthread->t_dtrace_start = 0; 7951 } 7952 7953 /* 7954 * Iterate over the actions to figure out which action 7955 * we were processing when we experienced the error. 7956 * Note that act points _past_ the faulting action; if 7957 * act is ecb->dte_action, the fault was in the 7958 * predicate, if it's ecb->dte_action->dta_next it's 7959 * in action #1, and so on. 7960 */ 7961 for (err = ecb->dte_action, ndx = 0; 7962 err != act; err = err->dta_next, ndx++) 7963 continue; 7964 7965 dtrace_probe_error(state, ecb->dte_epid, ndx, 7966 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 7967 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 7968 cpu_core[cpuid].cpuc_dtrace_illval); 7969 7970 continue; 7971 } 7972 7973 if (!committed) 7974 buf->dtb_offset = offs + ecb->dte_size; 7975 } 7976 7977 if (vtime) 7978 curthread->t_dtrace_start = dtrace_gethrtime(); 7979 7980 dtrace_probe_exit(cookie); 7981 } 7982 7983 /* 7984 * DTrace Probe Hashing Functions 7985 * 7986 * The functions in this section (and indeed, the functions in remaining 7987 * sections) are not _called_ from probe context. (Any exceptions to this are 7988 * marked with a "Note:".) Rather, they are called from elsewhere in the 7989 * DTrace framework to look-up probes in, add probes to and remove probes from 7990 * the DTrace probe hashes. (Each probe is hashed by each element of the 7991 * probe tuple -- allowing for fast lookups, regardless of what was 7992 * specified.) 7993 */ 7994 static uint_t 7995 dtrace_hash_str(const char *p) 7996 { 7997 unsigned int g; 7998 uint_t hval = 0; 7999 8000 while (*p) { 8001 hval = (hval << 4) + *p++; 8002 if ((g = (hval & 0xf0000000)) != 0) 8003 hval ^= g >> 24; 8004 hval &= ~g; 8005 } 8006 return (hval); 8007 } 8008 8009 static dtrace_hash_t * 8010 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 8011 { 8012 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 8013 8014 hash->dth_stroffs = stroffs; 8015 hash->dth_nextoffs = nextoffs; 8016 hash->dth_prevoffs = prevoffs; 8017 8018 hash->dth_size = 1; 8019 hash->dth_mask = hash->dth_size - 1; 8020 8021 hash->dth_tab = kmem_zalloc(hash->dth_size * 8022 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 8023 8024 return (hash); 8025 } 8026 8027 static void 8028 dtrace_hash_destroy(dtrace_hash_t *hash) 8029 { 8030 #ifdef DEBUG 8031 int i; 8032 8033 for (i = 0; i < hash->dth_size; i++) 8034 ASSERT(hash->dth_tab[i] == NULL); 8035 #endif 8036 8037 kmem_free(hash->dth_tab, 8038 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 8039 kmem_free(hash, sizeof (dtrace_hash_t)); 8040 } 8041 8042 static void 8043 dtrace_hash_resize(dtrace_hash_t *hash) 8044 { 8045 int size = hash->dth_size, i, ndx; 8046 int new_size = hash->dth_size << 1; 8047 int new_mask = new_size - 1; 8048 dtrace_hashbucket_t **new_tab, *bucket, *next; 8049 8050 ASSERT((new_size & new_mask) == 0); 8051 8052 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 8053 8054 for (i = 0; i < size; i++) { 8055 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 8056 dtrace_probe_t *probe = bucket->dthb_chain; 8057 8058 ASSERT(probe != NULL); 8059 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 8060 8061 next = bucket->dthb_next; 8062 bucket->dthb_next = new_tab[ndx]; 8063 new_tab[ndx] = bucket; 8064 } 8065 } 8066 8067 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 8068 hash->dth_tab = new_tab; 8069 hash->dth_size = new_size; 8070 hash->dth_mask = new_mask; 8071 } 8072 8073 static void 8074 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 8075 { 8076 int hashval = DTRACE_HASHSTR(hash, new); 8077 int ndx = hashval & hash->dth_mask; 8078 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8079 dtrace_probe_t **nextp, **prevp; 8080 8081 for (; bucket != NULL; bucket = bucket->dthb_next) { 8082 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 8083 goto add; 8084 } 8085 8086 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 8087 dtrace_hash_resize(hash); 8088 dtrace_hash_add(hash, new); 8089 return; 8090 } 8091 8092 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 8093 bucket->dthb_next = hash->dth_tab[ndx]; 8094 hash->dth_tab[ndx] = bucket; 8095 hash->dth_nbuckets++; 8096 8097 add: 8098 nextp = DTRACE_HASHNEXT(hash, new); 8099 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 8100 *nextp = bucket->dthb_chain; 8101 8102 if (bucket->dthb_chain != NULL) { 8103 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 8104 ASSERT(*prevp == NULL); 8105 *prevp = new; 8106 } 8107 8108 bucket->dthb_chain = new; 8109 bucket->dthb_len++; 8110 } 8111 8112 static dtrace_probe_t * 8113 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 8114 { 8115 int hashval = DTRACE_HASHSTR(hash, template); 8116 int ndx = hashval & hash->dth_mask; 8117 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8118 8119 for (; bucket != NULL; bucket = bucket->dthb_next) { 8120 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 8121 return (bucket->dthb_chain); 8122 } 8123 8124 return (NULL); 8125 } 8126 8127 static int 8128 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 8129 { 8130 int hashval = DTRACE_HASHSTR(hash, template); 8131 int ndx = hashval & hash->dth_mask; 8132 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8133 8134 for (; bucket != NULL; bucket = bucket->dthb_next) { 8135 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 8136 return (bucket->dthb_len); 8137 } 8138 8139 return (0); 8140 } 8141 8142 static void 8143 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 8144 { 8145 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 8146 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8147 8148 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 8149 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 8150 8151 /* 8152 * Find the bucket that we're removing this probe from. 8153 */ 8154 for (; bucket != NULL; bucket = bucket->dthb_next) { 8155 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 8156 break; 8157 } 8158 8159 ASSERT(bucket != NULL); 8160 8161 if (*prevp == NULL) { 8162 if (*nextp == NULL) { 8163 /* 8164 * The removed probe was the only probe on this 8165 * bucket; we need to remove the bucket. 8166 */ 8167 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 8168 8169 ASSERT(bucket->dthb_chain == probe); 8170 ASSERT(b != NULL); 8171 8172 if (b == bucket) { 8173 hash->dth_tab[ndx] = bucket->dthb_next; 8174 } else { 8175 while (b->dthb_next != bucket) 8176 b = b->dthb_next; 8177 b->dthb_next = bucket->dthb_next; 8178 } 8179 8180 ASSERT(hash->dth_nbuckets > 0); 8181 hash->dth_nbuckets--; 8182 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 8183 return; 8184 } 8185 8186 bucket->dthb_chain = *nextp; 8187 } else { 8188 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 8189 } 8190 8191 if (*nextp != NULL) 8192 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 8193 } 8194 8195 /* 8196 * DTrace Utility Functions 8197 * 8198 * These are random utility functions that are _not_ called from probe context. 8199 */ 8200 static int 8201 dtrace_badattr(const dtrace_attribute_t *a) 8202 { 8203 return (a->dtat_name > DTRACE_STABILITY_MAX || 8204 a->dtat_data > DTRACE_STABILITY_MAX || 8205 a->dtat_class > DTRACE_CLASS_MAX); 8206 } 8207 8208 /* 8209 * Return a duplicate copy of a string. If the specified string is NULL, 8210 * this function returns a zero-length string. 8211 */ 8212 static char * 8213 dtrace_strdup(const char *str) 8214 { 8215 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 8216 8217 if (str != NULL) 8218 (void) strcpy(new, str); 8219 8220 return (new); 8221 } 8222 8223 #define DTRACE_ISALPHA(c) \ 8224 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 8225 8226 static int 8227 dtrace_badname(const char *s) 8228 { 8229 char c; 8230 8231 if (s == NULL || (c = *s++) == '\0') 8232 return (0); 8233 8234 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 8235 return (1); 8236 8237 while ((c = *s++) != '\0') { 8238 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 8239 c != '-' && c != '_' && c != '.' && c != '`') 8240 return (1); 8241 } 8242 8243 return (0); 8244 } 8245 8246 static void 8247 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 8248 { 8249 uint32_t priv; 8250 8251 #ifdef illumos 8252 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 8253 /* 8254 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 8255 */ 8256 priv = DTRACE_PRIV_ALL; 8257 } else { 8258 *uidp = crgetuid(cr); 8259 *zoneidp = crgetzoneid(cr); 8260 8261 priv = 0; 8262 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 8263 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 8264 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 8265 priv |= DTRACE_PRIV_USER; 8266 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 8267 priv |= DTRACE_PRIV_PROC; 8268 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 8269 priv |= DTRACE_PRIV_OWNER; 8270 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 8271 priv |= DTRACE_PRIV_ZONEOWNER; 8272 } 8273 #else 8274 priv = DTRACE_PRIV_ALL; 8275 #endif 8276 8277 *privp = priv; 8278 } 8279 8280 #ifdef DTRACE_ERRDEBUG 8281 static void 8282 dtrace_errdebug(const char *str) 8283 { 8284 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 8285 int occupied = 0; 8286 8287 mutex_enter(&dtrace_errlock); 8288 dtrace_errlast = str; 8289 dtrace_errthread = curthread; 8290 8291 while (occupied++ < DTRACE_ERRHASHSZ) { 8292 if (dtrace_errhash[hval].dter_msg == str) { 8293 dtrace_errhash[hval].dter_count++; 8294 goto out; 8295 } 8296 8297 if (dtrace_errhash[hval].dter_msg != NULL) { 8298 hval = (hval + 1) % DTRACE_ERRHASHSZ; 8299 continue; 8300 } 8301 8302 dtrace_errhash[hval].dter_msg = str; 8303 dtrace_errhash[hval].dter_count = 1; 8304 goto out; 8305 } 8306 8307 panic("dtrace: undersized error hash"); 8308 out: 8309 mutex_exit(&dtrace_errlock); 8310 } 8311 #endif 8312 8313 /* 8314 * DTrace Matching Functions 8315 * 8316 * These functions are used to match groups of probes, given some elements of 8317 * a probe tuple, or some globbed expressions for elements of a probe tuple. 8318 */ 8319 static int 8320 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 8321 zoneid_t zoneid) 8322 { 8323 if (priv != DTRACE_PRIV_ALL) { 8324 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 8325 uint32_t match = priv & ppriv; 8326 8327 /* 8328 * No PRIV_DTRACE_* privileges... 8329 */ 8330 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 8331 DTRACE_PRIV_KERNEL)) == 0) 8332 return (0); 8333 8334 /* 8335 * No matching bits, but there were bits to match... 8336 */ 8337 if (match == 0 && ppriv != 0) 8338 return (0); 8339 8340 /* 8341 * Need to have permissions to the process, but don't... 8342 */ 8343 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 8344 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 8345 return (0); 8346 } 8347 8348 /* 8349 * Need to be in the same zone unless we possess the 8350 * privilege to examine all zones. 8351 */ 8352 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 8353 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 8354 return (0); 8355 } 8356 } 8357 8358 return (1); 8359 } 8360 8361 /* 8362 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 8363 * consists of input pattern strings and an ops-vector to evaluate them. 8364 * This function returns >0 for match, 0 for no match, and <0 for error. 8365 */ 8366 static int 8367 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 8368 uint32_t priv, uid_t uid, zoneid_t zoneid) 8369 { 8370 dtrace_provider_t *pvp = prp->dtpr_provider; 8371 int rv; 8372 8373 if (pvp->dtpv_defunct) 8374 return (0); 8375 8376 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 8377 return (rv); 8378 8379 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 8380 return (rv); 8381 8382 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 8383 return (rv); 8384 8385 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 8386 return (rv); 8387 8388 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 8389 return (0); 8390 8391 return (rv); 8392 } 8393 8394 /* 8395 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 8396 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 8397 * libc's version, the kernel version only applies to 8-bit ASCII strings. 8398 * In addition, all of the recursion cases except for '*' matching have been 8399 * unwound. For '*', we still implement recursive evaluation, but a depth 8400 * counter is maintained and matching is aborted if we recurse too deep. 8401 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 8402 */ 8403 static int 8404 dtrace_match_glob(const char *s, const char *p, int depth) 8405 { 8406 const char *olds; 8407 char s1, c; 8408 int gs; 8409 8410 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 8411 return (-1); 8412 8413 if (s == NULL) 8414 s = ""; /* treat NULL as empty string */ 8415 8416 top: 8417 olds = s; 8418 s1 = *s++; 8419 8420 if (p == NULL) 8421 return (0); 8422 8423 if ((c = *p++) == '\0') 8424 return (s1 == '\0'); 8425 8426 switch (c) { 8427 case '[': { 8428 int ok = 0, notflag = 0; 8429 char lc = '\0'; 8430 8431 if (s1 == '\0') 8432 return (0); 8433 8434 if (*p == '!') { 8435 notflag = 1; 8436 p++; 8437 } 8438 8439 if ((c = *p++) == '\0') 8440 return (0); 8441 8442 do { 8443 if (c == '-' && lc != '\0' && *p != ']') { 8444 if ((c = *p++) == '\0') 8445 return (0); 8446 if (c == '\\' && (c = *p++) == '\0') 8447 return (0); 8448 8449 if (notflag) { 8450 if (s1 < lc || s1 > c) 8451 ok++; 8452 else 8453 return (0); 8454 } else if (lc <= s1 && s1 <= c) 8455 ok++; 8456 8457 } else if (c == '\\' && (c = *p++) == '\0') 8458 return (0); 8459 8460 lc = c; /* save left-hand 'c' for next iteration */ 8461 8462 if (notflag) { 8463 if (s1 != c) 8464 ok++; 8465 else 8466 return (0); 8467 } else if (s1 == c) 8468 ok++; 8469 8470 if ((c = *p++) == '\0') 8471 return (0); 8472 8473 } while (c != ']'); 8474 8475 if (ok) 8476 goto top; 8477 8478 return (0); 8479 } 8480 8481 case '\\': 8482 if ((c = *p++) == '\0') 8483 return (0); 8484 /*FALLTHRU*/ 8485 8486 default: 8487 if (c != s1) 8488 return (0); 8489 /*FALLTHRU*/ 8490 8491 case '?': 8492 if (s1 != '\0') 8493 goto top; 8494 return (0); 8495 8496 case '*': 8497 while (*p == '*') 8498 p++; /* consecutive *'s are identical to a single one */ 8499 8500 if (*p == '\0') 8501 return (1); 8502 8503 for (s = olds; *s != '\0'; s++) { 8504 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 8505 return (gs); 8506 } 8507 8508 return (0); 8509 } 8510 } 8511 8512 /*ARGSUSED*/ 8513 static int 8514 dtrace_match_string(const char *s, const char *p, int depth) 8515 { 8516 return (s != NULL && strcmp(s, p) == 0); 8517 } 8518 8519 /*ARGSUSED*/ 8520 static int 8521 dtrace_match_nul(const char *s, const char *p, int depth) 8522 { 8523 return (1); /* always match the empty pattern */ 8524 } 8525 8526 /*ARGSUSED*/ 8527 static int 8528 dtrace_match_nonzero(const char *s, const char *p, int depth) 8529 { 8530 return (s != NULL && s[0] != '\0'); 8531 } 8532 8533 static int 8534 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 8535 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 8536 { 8537 dtrace_probe_t template, *probe; 8538 dtrace_hash_t *hash = NULL; 8539 int len, best = INT_MAX, nmatched = 0; 8540 dtrace_id_t i; 8541 8542 ASSERT(MUTEX_HELD(&dtrace_lock)); 8543 8544 /* 8545 * If the probe ID is specified in the key, just lookup by ID and 8546 * invoke the match callback once if a matching probe is found. 8547 */ 8548 if (pkp->dtpk_id != DTRACE_IDNONE) { 8549 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 8550 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 8551 (void) (*matched)(probe, arg); 8552 nmatched++; 8553 } 8554 return (nmatched); 8555 } 8556 8557 template.dtpr_mod = (char *)pkp->dtpk_mod; 8558 template.dtpr_func = (char *)pkp->dtpk_func; 8559 template.dtpr_name = (char *)pkp->dtpk_name; 8560 8561 /* 8562 * We want to find the most distinct of the module name, function 8563 * name, and name. So for each one that is not a glob pattern or 8564 * empty string, we perform a lookup in the corresponding hash and 8565 * use the hash table with the fewest collisions to do our search. 8566 */ 8567 if (pkp->dtpk_mmatch == &dtrace_match_string && 8568 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 8569 best = len; 8570 hash = dtrace_bymod; 8571 } 8572 8573 if (pkp->dtpk_fmatch == &dtrace_match_string && 8574 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 8575 best = len; 8576 hash = dtrace_byfunc; 8577 } 8578 8579 if (pkp->dtpk_nmatch == &dtrace_match_string && 8580 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 8581 best = len; 8582 hash = dtrace_byname; 8583 } 8584 8585 /* 8586 * If we did not select a hash table, iterate over every probe and 8587 * invoke our callback for each one that matches our input probe key. 8588 */ 8589 if (hash == NULL) { 8590 for (i = 0; i < dtrace_nprobes; i++) { 8591 if ((probe = dtrace_probes[i]) == NULL || 8592 dtrace_match_probe(probe, pkp, priv, uid, 8593 zoneid) <= 0) 8594 continue; 8595 8596 nmatched++; 8597 8598 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8599 break; 8600 } 8601 8602 return (nmatched); 8603 } 8604 8605 /* 8606 * If we selected a hash table, iterate over each probe of the same key 8607 * name and invoke the callback for every probe that matches the other 8608 * attributes of our input probe key. 8609 */ 8610 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 8611 probe = *(DTRACE_HASHNEXT(hash, probe))) { 8612 8613 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 8614 continue; 8615 8616 nmatched++; 8617 8618 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8619 break; 8620 } 8621 8622 return (nmatched); 8623 } 8624 8625 /* 8626 * Return the function pointer dtrace_probecmp() should use to compare the 8627 * specified pattern with a string. For NULL or empty patterns, we select 8628 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 8629 * For non-empty non-glob strings, we use dtrace_match_string(). 8630 */ 8631 static dtrace_probekey_f * 8632 dtrace_probekey_func(const char *p) 8633 { 8634 char c; 8635 8636 if (p == NULL || *p == '\0') 8637 return (&dtrace_match_nul); 8638 8639 while ((c = *p++) != '\0') { 8640 if (c == '[' || c == '?' || c == '*' || c == '\\') 8641 return (&dtrace_match_glob); 8642 } 8643 8644 return (&dtrace_match_string); 8645 } 8646 8647 /* 8648 * Build a probe comparison key for use with dtrace_match_probe() from the 8649 * given probe description. By convention, a null key only matches anchored 8650 * probes: if each field is the empty string, reset dtpk_fmatch to 8651 * dtrace_match_nonzero(). 8652 */ 8653 static void 8654 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 8655 { 8656 pkp->dtpk_prov = pdp->dtpd_provider; 8657 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 8658 8659 pkp->dtpk_mod = pdp->dtpd_mod; 8660 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 8661 8662 pkp->dtpk_func = pdp->dtpd_func; 8663 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 8664 8665 pkp->dtpk_name = pdp->dtpd_name; 8666 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 8667 8668 pkp->dtpk_id = pdp->dtpd_id; 8669 8670 if (pkp->dtpk_id == DTRACE_IDNONE && 8671 pkp->dtpk_pmatch == &dtrace_match_nul && 8672 pkp->dtpk_mmatch == &dtrace_match_nul && 8673 pkp->dtpk_fmatch == &dtrace_match_nul && 8674 pkp->dtpk_nmatch == &dtrace_match_nul) 8675 pkp->dtpk_fmatch = &dtrace_match_nonzero; 8676 } 8677 8678 /* 8679 * DTrace Provider-to-Framework API Functions 8680 * 8681 * These functions implement much of the Provider-to-Framework API, as 8682 * described in <sys/dtrace.h>. The parts of the API not in this section are 8683 * the functions in the API for probe management (found below), and 8684 * dtrace_probe() itself (found above). 8685 */ 8686 8687 /* 8688 * Register the calling provider with the DTrace framework. This should 8689 * generally be called by DTrace providers in their attach(9E) entry point. 8690 */ 8691 int 8692 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 8693 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 8694 { 8695 dtrace_provider_t *provider; 8696 8697 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 8698 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8699 "arguments", name ? name : "<NULL>"); 8700 return (EINVAL); 8701 } 8702 8703 if (name[0] == '\0' || dtrace_badname(name)) { 8704 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8705 "provider name", name); 8706 return (EINVAL); 8707 } 8708 8709 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 8710 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 8711 pops->dtps_destroy == NULL || 8712 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 8713 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8714 "provider ops", name); 8715 return (EINVAL); 8716 } 8717 8718 if (dtrace_badattr(&pap->dtpa_provider) || 8719 dtrace_badattr(&pap->dtpa_mod) || 8720 dtrace_badattr(&pap->dtpa_func) || 8721 dtrace_badattr(&pap->dtpa_name) || 8722 dtrace_badattr(&pap->dtpa_args)) { 8723 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8724 "provider attributes", name); 8725 return (EINVAL); 8726 } 8727 8728 if (priv & ~DTRACE_PRIV_ALL) { 8729 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8730 "privilege attributes", name); 8731 return (EINVAL); 8732 } 8733 8734 if ((priv & DTRACE_PRIV_KERNEL) && 8735 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 8736 pops->dtps_usermode == NULL) { 8737 cmn_err(CE_WARN, "failed to register provider '%s': need " 8738 "dtps_usermode() op for given privilege attributes", name); 8739 return (EINVAL); 8740 } 8741 8742 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 8743 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8744 (void) strcpy(provider->dtpv_name, name); 8745 8746 provider->dtpv_attr = *pap; 8747 provider->dtpv_priv.dtpp_flags = priv; 8748 if (cr != NULL) { 8749 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 8750 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 8751 } 8752 provider->dtpv_pops = *pops; 8753 8754 if (pops->dtps_provide == NULL) { 8755 ASSERT(pops->dtps_provide_module != NULL); 8756 provider->dtpv_pops.dtps_provide = 8757 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 8758 } 8759 8760 if (pops->dtps_provide_module == NULL) { 8761 ASSERT(pops->dtps_provide != NULL); 8762 provider->dtpv_pops.dtps_provide_module = 8763 (void (*)(void *, modctl_t *))dtrace_nullop; 8764 } 8765 8766 if (pops->dtps_suspend == NULL) { 8767 ASSERT(pops->dtps_resume == NULL); 8768 provider->dtpv_pops.dtps_suspend = 8769 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8770 provider->dtpv_pops.dtps_resume = 8771 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8772 } 8773 8774 provider->dtpv_arg = arg; 8775 *idp = (dtrace_provider_id_t)provider; 8776 8777 if (pops == &dtrace_provider_ops) { 8778 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8779 ASSERT(MUTEX_HELD(&dtrace_lock)); 8780 ASSERT(dtrace_anon.dta_enabling == NULL); 8781 8782 /* 8783 * We make sure that the DTrace provider is at the head of 8784 * the provider chain. 8785 */ 8786 provider->dtpv_next = dtrace_provider; 8787 dtrace_provider = provider; 8788 return (0); 8789 } 8790 8791 mutex_enter(&dtrace_provider_lock); 8792 mutex_enter(&dtrace_lock); 8793 8794 /* 8795 * If there is at least one provider registered, we'll add this 8796 * provider after the first provider. 8797 */ 8798 if (dtrace_provider != NULL) { 8799 provider->dtpv_next = dtrace_provider->dtpv_next; 8800 dtrace_provider->dtpv_next = provider; 8801 } else { 8802 dtrace_provider = provider; 8803 } 8804 8805 if (dtrace_retained != NULL) { 8806 dtrace_enabling_provide(provider); 8807 8808 /* 8809 * Now we need to call dtrace_enabling_matchall() -- which 8810 * will acquire cpu_lock and dtrace_lock. We therefore need 8811 * to drop all of our locks before calling into it... 8812 */ 8813 mutex_exit(&dtrace_lock); 8814 mutex_exit(&dtrace_provider_lock); 8815 dtrace_enabling_matchall(); 8816 8817 return (0); 8818 } 8819 8820 mutex_exit(&dtrace_lock); 8821 mutex_exit(&dtrace_provider_lock); 8822 8823 return (0); 8824 } 8825 8826 /* 8827 * Unregister the specified provider from the DTrace framework. This should 8828 * generally be called by DTrace providers in their detach(9E) entry point. 8829 */ 8830 int 8831 dtrace_unregister(dtrace_provider_id_t id) 8832 { 8833 dtrace_provider_t *old = (dtrace_provider_t *)id; 8834 dtrace_provider_t *prev = NULL; 8835 int i, self = 0, noreap = 0; 8836 dtrace_probe_t *probe, *first = NULL; 8837 8838 if (old->dtpv_pops.dtps_enable == 8839 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 8840 /* 8841 * If DTrace itself is the provider, we're called with locks 8842 * already held. 8843 */ 8844 ASSERT(old == dtrace_provider); 8845 #ifdef illumos 8846 ASSERT(dtrace_devi != NULL); 8847 #endif 8848 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8849 ASSERT(MUTEX_HELD(&dtrace_lock)); 8850 self = 1; 8851 8852 if (dtrace_provider->dtpv_next != NULL) { 8853 /* 8854 * There's another provider here; return failure. 8855 */ 8856 return (EBUSY); 8857 } 8858 } else { 8859 mutex_enter(&dtrace_provider_lock); 8860 #ifdef illumos 8861 mutex_enter(&mod_lock); 8862 #endif 8863 mutex_enter(&dtrace_lock); 8864 } 8865 8866 /* 8867 * If anyone has /dev/dtrace open, or if there are anonymous enabled 8868 * probes, we refuse to let providers slither away, unless this 8869 * provider has already been explicitly invalidated. 8870 */ 8871 if (!old->dtpv_defunct && 8872 (dtrace_opens || (dtrace_anon.dta_state != NULL && 8873 dtrace_anon.dta_state->dts_necbs > 0))) { 8874 if (!self) { 8875 mutex_exit(&dtrace_lock); 8876 #ifdef illumos 8877 mutex_exit(&mod_lock); 8878 #endif 8879 mutex_exit(&dtrace_provider_lock); 8880 } 8881 return (EBUSY); 8882 } 8883 8884 /* 8885 * Attempt to destroy the probes associated with this provider. 8886 */ 8887 for (i = 0; i < dtrace_nprobes; i++) { 8888 if ((probe = dtrace_probes[i]) == NULL) 8889 continue; 8890 8891 if (probe->dtpr_provider != old) 8892 continue; 8893 8894 if (probe->dtpr_ecb == NULL) 8895 continue; 8896 8897 /* 8898 * If we are trying to unregister a defunct provider, and the 8899 * provider was made defunct within the interval dictated by 8900 * dtrace_unregister_defunct_reap, we'll (asynchronously) 8901 * attempt to reap our enablings. To denote that the provider 8902 * should reattempt to unregister itself at some point in the 8903 * future, we will return a differentiable error code (EAGAIN 8904 * instead of EBUSY) in this case. 8905 */ 8906 if (dtrace_gethrtime() - old->dtpv_defunct > 8907 dtrace_unregister_defunct_reap) 8908 noreap = 1; 8909 8910 if (!self) { 8911 mutex_exit(&dtrace_lock); 8912 #ifdef illumos 8913 mutex_exit(&mod_lock); 8914 #endif 8915 mutex_exit(&dtrace_provider_lock); 8916 } 8917 8918 if (noreap) 8919 return (EBUSY); 8920 8921 (void) taskq_dispatch(dtrace_taskq, 8922 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 8923 8924 return (EAGAIN); 8925 } 8926 8927 /* 8928 * All of the probes for this provider are disabled; we can safely 8929 * remove all of them from their hash chains and from the probe array. 8930 */ 8931 for (i = 0; i < dtrace_nprobes; i++) { 8932 if ((probe = dtrace_probes[i]) == NULL) 8933 continue; 8934 8935 if (probe->dtpr_provider != old) 8936 continue; 8937 8938 dtrace_probes[i] = NULL; 8939 8940 dtrace_hash_remove(dtrace_bymod, probe); 8941 dtrace_hash_remove(dtrace_byfunc, probe); 8942 dtrace_hash_remove(dtrace_byname, probe); 8943 8944 if (first == NULL) { 8945 first = probe; 8946 probe->dtpr_nextmod = NULL; 8947 } else { 8948 probe->dtpr_nextmod = first; 8949 first = probe; 8950 } 8951 } 8952 8953 /* 8954 * The provider's probes have been removed from the hash chains and 8955 * from the probe array. Now issue a dtrace_sync() to be sure that 8956 * everyone has cleared out from any probe array processing. 8957 */ 8958 dtrace_sync(); 8959 8960 for (probe = first; probe != NULL; probe = first) { 8961 first = probe->dtpr_nextmod; 8962 8963 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 8964 probe->dtpr_arg); 8965 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8966 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8967 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8968 #ifdef illumos 8969 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 8970 #else 8971 free_unr(dtrace_arena, probe->dtpr_id); 8972 #endif 8973 kmem_free(probe, sizeof (dtrace_probe_t)); 8974 } 8975 8976 if ((prev = dtrace_provider) == old) { 8977 #ifdef illumos 8978 ASSERT(self || dtrace_devi == NULL); 8979 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 8980 #endif 8981 dtrace_provider = old->dtpv_next; 8982 } else { 8983 while (prev != NULL && prev->dtpv_next != old) 8984 prev = prev->dtpv_next; 8985 8986 if (prev == NULL) { 8987 panic("attempt to unregister non-existent " 8988 "dtrace provider %p\n", (void *)id); 8989 } 8990 8991 prev->dtpv_next = old->dtpv_next; 8992 } 8993 8994 if (!self) { 8995 mutex_exit(&dtrace_lock); 8996 #ifdef illumos 8997 mutex_exit(&mod_lock); 8998 #endif 8999 mutex_exit(&dtrace_provider_lock); 9000 } 9001 9002 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 9003 kmem_free(old, sizeof (dtrace_provider_t)); 9004 9005 return (0); 9006 } 9007 9008 /* 9009 * Invalidate the specified provider. All subsequent probe lookups for the 9010 * specified provider will fail, but its probes will not be removed. 9011 */ 9012 void 9013 dtrace_invalidate(dtrace_provider_id_t id) 9014 { 9015 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 9016 9017 ASSERT(pvp->dtpv_pops.dtps_enable != 9018 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 9019 9020 mutex_enter(&dtrace_provider_lock); 9021 mutex_enter(&dtrace_lock); 9022 9023 pvp->dtpv_defunct = dtrace_gethrtime(); 9024 9025 mutex_exit(&dtrace_lock); 9026 mutex_exit(&dtrace_provider_lock); 9027 } 9028 9029 /* 9030 * Indicate whether or not DTrace has attached. 9031 */ 9032 int 9033 dtrace_attached(void) 9034 { 9035 /* 9036 * dtrace_provider will be non-NULL iff the DTrace driver has 9037 * attached. (It's non-NULL because DTrace is always itself a 9038 * provider.) 9039 */ 9040 return (dtrace_provider != NULL); 9041 } 9042 9043 /* 9044 * Remove all the unenabled probes for the given provider. This function is 9045 * not unlike dtrace_unregister(), except that it doesn't remove the provider 9046 * -- just as many of its associated probes as it can. 9047 */ 9048 int 9049 dtrace_condense(dtrace_provider_id_t id) 9050 { 9051 dtrace_provider_t *prov = (dtrace_provider_t *)id; 9052 int i; 9053 dtrace_probe_t *probe; 9054 9055 /* 9056 * Make sure this isn't the dtrace provider itself. 9057 */ 9058 ASSERT(prov->dtpv_pops.dtps_enable != 9059 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 9060 9061 mutex_enter(&dtrace_provider_lock); 9062 mutex_enter(&dtrace_lock); 9063 9064 /* 9065 * Attempt to destroy the probes associated with this provider. 9066 */ 9067 for (i = 0; i < dtrace_nprobes; i++) { 9068 if ((probe = dtrace_probes[i]) == NULL) 9069 continue; 9070 9071 if (probe->dtpr_provider != prov) 9072 continue; 9073 9074 if (probe->dtpr_ecb != NULL) 9075 continue; 9076 9077 dtrace_probes[i] = NULL; 9078 9079 dtrace_hash_remove(dtrace_bymod, probe); 9080 dtrace_hash_remove(dtrace_byfunc, probe); 9081 dtrace_hash_remove(dtrace_byname, probe); 9082 9083 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 9084 probe->dtpr_arg); 9085 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 9086 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 9087 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 9088 kmem_free(probe, sizeof (dtrace_probe_t)); 9089 #ifdef illumos 9090 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 9091 #else 9092 free_unr(dtrace_arena, i + 1); 9093 #endif 9094 } 9095 9096 mutex_exit(&dtrace_lock); 9097 mutex_exit(&dtrace_provider_lock); 9098 9099 return (0); 9100 } 9101 9102 /* 9103 * DTrace Probe Management Functions 9104 * 9105 * The functions in this section perform the DTrace probe management, 9106 * including functions to create probes, look-up probes, and call into the 9107 * providers to request that probes be provided. Some of these functions are 9108 * in the Provider-to-Framework API; these functions can be identified by the 9109 * fact that they are not declared "static". 9110 */ 9111 9112 /* 9113 * Create a probe with the specified module name, function name, and name. 9114 */ 9115 dtrace_id_t 9116 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 9117 const char *func, const char *name, int aframes, void *arg) 9118 { 9119 dtrace_probe_t *probe, **probes; 9120 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 9121 dtrace_id_t id; 9122 9123 if (provider == dtrace_provider) { 9124 ASSERT(MUTEX_HELD(&dtrace_lock)); 9125 } else { 9126 mutex_enter(&dtrace_lock); 9127 } 9128 9129 #ifdef illumos 9130 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 9131 VM_BESTFIT | VM_SLEEP); 9132 #else 9133 id = alloc_unr(dtrace_arena); 9134 #endif 9135 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 9136 9137 probe->dtpr_id = id; 9138 probe->dtpr_gen = dtrace_probegen++; 9139 probe->dtpr_mod = dtrace_strdup(mod); 9140 probe->dtpr_func = dtrace_strdup(func); 9141 probe->dtpr_name = dtrace_strdup(name); 9142 probe->dtpr_arg = arg; 9143 probe->dtpr_aframes = aframes; 9144 probe->dtpr_provider = provider; 9145 9146 dtrace_hash_add(dtrace_bymod, probe); 9147 dtrace_hash_add(dtrace_byfunc, probe); 9148 dtrace_hash_add(dtrace_byname, probe); 9149 9150 if (id - 1 >= dtrace_nprobes) { 9151 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 9152 size_t nsize = osize << 1; 9153 9154 if (nsize == 0) { 9155 ASSERT(osize == 0); 9156 ASSERT(dtrace_probes == NULL); 9157 nsize = sizeof (dtrace_probe_t *); 9158 } 9159 9160 probes = kmem_zalloc(nsize, KM_SLEEP); 9161 9162 if (dtrace_probes == NULL) { 9163 ASSERT(osize == 0); 9164 dtrace_probes = probes; 9165 dtrace_nprobes = 1; 9166 } else { 9167 dtrace_probe_t **oprobes = dtrace_probes; 9168 9169 bcopy(oprobes, probes, osize); 9170 dtrace_membar_producer(); 9171 dtrace_probes = probes; 9172 9173 dtrace_sync(); 9174 9175 /* 9176 * All CPUs are now seeing the new probes array; we can 9177 * safely free the old array. 9178 */ 9179 kmem_free(oprobes, osize); 9180 dtrace_nprobes <<= 1; 9181 } 9182 9183 ASSERT(id - 1 < dtrace_nprobes); 9184 } 9185 9186 ASSERT(dtrace_probes[id - 1] == NULL); 9187 dtrace_probes[id - 1] = probe; 9188 9189 if (provider != dtrace_provider) 9190 mutex_exit(&dtrace_lock); 9191 9192 return (id); 9193 } 9194 9195 static dtrace_probe_t * 9196 dtrace_probe_lookup_id(dtrace_id_t id) 9197 { 9198 ASSERT(MUTEX_HELD(&dtrace_lock)); 9199 9200 if (id == 0 || id > dtrace_nprobes) 9201 return (NULL); 9202 9203 return (dtrace_probes[id - 1]); 9204 } 9205 9206 static int 9207 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 9208 { 9209 *((dtrace_id_t *)arg) = probe->dtpr_id; 9210 9211 return (DTRACE_MATCH_DONE); 9212 } 9213 9214 /* 9215 * Look up a probe based on provider and one or more of module name, function 9216 * name and probe name. 9217 */ 9218 dtrace_id_t 9219 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 9220 char *func, char *name) 9221 { 9222 dtrace_probekey_t pkey; 9223 dtrace_id_t id; 9224 int match; 9225 9226 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 9227 pkey.dtpk_pmatch = &dtrace_match_string; 9228 pkey.dtpk_mod = mod; 9229 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 9230 pkey.dtpk_func = func; 9231 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 9232 pkey.dtpk_name = name; 9233 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 9234 pkey.dtpk_id = DTRACE_IDNONE; 9235 9236 mutex_enter(&dtrace_lock); 9237 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 9238 dtrace_probe_lookup_match, &id); 9239 mutex_exit(&dtrace_lock); 9240 9241 ASSERT(match == 1 || match == 0); 9242 return (match ? id : 0); 9243 } 9244 9245 /* 9246 * Returns the probe argument associated with the specified probe. 9247 */ 9248 void * 9249 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 9250 { 9251 dtrace_probe_t *probe; 9252 void *rval = NULL; 9253 9254 mutex_enter(&dtrace_lock); 9255 9256 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 9257 probe->dtpr_provider == (dtrace_provider_t *)id) 9258 rval = probe->dtpr_arg; 9259 9260 mutex_exit(&dtrace_lock); 9261 9262 return (rval); 9263 } 9264 9265 /* 9266 * Copy a probe into a probe description. 9267 */ 9268 static void 9269 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 9270 { 9271 bzero(pdp, sizeof (dtrace_probedesc_t)); 9272 pdp->dtpd_id = prp->dtpr_id; 9273 9274 (void) strncpy(pdp->dtpd_provider, 9275 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 9276 9277 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 9278 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 9279 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 9280 } 9281 9282 /* 9283 * Called to indicate that a probe -- or probes -- should be provided by a 9284 * specfied provider. If the specified description is NULL, the provider will 9285 * be told to provide all of its probes. (This is done whenever a new 9286 * consumer comes along, or whenever a retained enabling is to be matched.) If 9287 * the specified description is non-NULL, the provider is given the 9288 * opportunity to dynamically provide the specified probe, allowing providers 9289 * to support the creation of probes on-the-fly. (So-called _autocreated_ 9290 * probes.) If the provider is NULL, the operations will be applied to all 9291 * providers; if the provider is non-NULL the operations will only be applied 9292 * to the specified provider. The dtrace_provider_lock must be held, and the 9293 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 9294 * will need to grab the dtrace_lock when it reenters the framework through 9295 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 9296 */ 9297 static void 9298 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 9299 { 9300 #ifdef illumos 9301 modctl_t *ctl; 9302 #endif 9303 int all = 0; 9304 9305 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9306 9307 if (prv == NULL) { 9308 all = 1; 9309 prv = dtrace_provider; 9310 } 9311 9312 do { 9313 /* 9314 * First, call the blanket provide operation. 9315 */ 9316 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 9317 9318 #ifdef illumos 9319 /* 9320 * Now call the per-module provide operation. We will grab 9321 * mod_lock to prevent the list from being modified. Note 9322 * that this also prevents the mod_busy bits from changing. 9323 * (mod_busy can only be changed with mod_lock held.) 9324 */ 9325 mutex_enter(&mod_lock); 9326 9327 ctl = &modules; 9328 do { 9329 if (ctl->mod_busy || ctl->mod_mp == NULL) 9330 continue; 9331 9332 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 9333 9334 } while ((ctl = ctl->mod_next) != &modules); 9335 9336 mutex_exit(&mod_lock); 9337 #endif 9338 } while (all && (prv = prv->dtpv_next) != NULL); 9339 } 9340 9341 #ifdef illumos 9342 /* 9343 * Iterate over each probe, and call the Framework-to-Provider API function 9344 * denoted by offs. 9345 */ 9346 static void 9347 dtrace_probe_foreach(uintptr_t offs) 9348 { 9349 dtrace_provider_t *prov; 9350 void (*func)(void *, dtrace_id_t, void *); 9351 dtrace_probe_t *probe; 9352 dtrace_icookie_t cookie; 9353 int i; 9354 9355 /* 9356 * We disable interrupts to walk through the probe array. This is 9357 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 9358 * won't see stale data. 9359 */ 9360 cookie = dtrace_interrupt_disable(); 9361 9362 for (i = 0; i < dtrace_nprobes; i++) { 9363 if ((probe = dtrace_probes[i]) == NULL) 9364 continue; 9365 9366 if (probe->dtpr_ecb == NULL) { 9367 /* 9368 * This probe isn't enabled -- don't call the function. 9369 */ 9370 continue; 9371 } 9372 9373 prov = probe->dtpr_provider; 9374 func = *((void(**)(void *, dtrace_id_t, void *)) 9375 ((uintptr_t)&prov->dtpv_pops + offs)); 9376 9377 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 9378 } 9379 9380 dtrace_interrupt_enable(cookie); 9381 } 9382 #endif 9383 9384 static int 9385 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 9386 { 9387 dtrace_probekey_t pkey; 9388 uint32_t priv; 9389 uid_t uid; 9390 zoneid_t zoneid; 9391 9392 ASSERT(MUTEX_HELD(&dtrace_lock)); 9393 dtrace_ecb_create_cache = NULL; 9394 9395 if (desc == NULL) { 9396 /* 9397 * If we're passed a NULL description, we're being asked to 9398 * create an ECB with a NULL probe. 9399 */ 9400 (void) dtrace_ecb_create_enable(NULL, enab); 9401 return (0); 9402 } 9403 9404 dtrace_probekey(desc, &pkey); 9405 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 9406 &priv, &uid, &zoneid); 9407 9408 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 9409 enab)); 9410 } 9411 9412 /* 9413 * DTrace Helper Provider Functions 9414 */ 9415 static void 9416 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 9417 { 9418 attr->dtat_name = DOF_ATTR_NAME(dofattr); 9419 attr->dtat_data = DOF_ATTR_DATA(dofattr); 9420 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 9421 } 9422 9423 static void 9424 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 9425 const dof_provider_t *dofprov, char *strtab) 9426 { 9427 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 9428 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 9429 dofprov->dofpv_provattr); 9430 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 9431 dofprov->dofpv_modattr); 9432 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 9433 dofprov->dofpv_funcattr); 9434 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 9435 dofprov->dofpv_nameattr); 9436 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 9437 dofprov->dofpv_argsattr); 9438 } 9439 9440 static void 9441 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9442 { 9443 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9444 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9445 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 9446 dof_provider_t *provider; 9447 dof_probe_t *probe; 9448 uint32_t *off, *enoff; 9449 uint8_t *arg; 9450 char *strtab; 9451 uint_t i, nprobes; 9452 dtrace_helper_provdesc_t dhpv; 9453 dtrace_helper_probedesc_t dhpb; 9454 dtrace_meta_t *meta = dtrace_meta_pid; 9455 dtrace_mops_t *mops = &meta->dtm_mops; 9456 void *parg; 9457 9458 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9459 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9460 provider->dofpv_strtab * dof->dofh_secsize); 9461 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9462 provider->dofpv_probes * dof->dofh_secsize); 9463 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9464 provider->dofpv_prargs * dof->dofh_secsize); 9465 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9466 provider->dofpv_proffs * dof->dofh_secsize); 9467 9468 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9469 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 9470 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 9471 enoff = NULL; 9472 9473 /* 9474 * See dtrace_helper_provider_validate(). 9475 */ 9476 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 9477 provider->dofpv_prenoffs != DOF_SECT_NONE) { 9478 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9479 provider->dofpv_prenoffs * dof->dofh_secsize); 9480 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 9481 } 9482 9483 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 9484 9485 /* 9486 * Create the provider. 9487 */ 9488 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9489 9490 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 9491 return; 9492 9493 meta->dtm_count++; 9494 9495 /* 9496 * Create the probes. 9497 */ 9498 for (i = 0; i < nprobes; i++) { 9499 probe = (dof_probe_t *)(uintptr_t)(daddr + 9500 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 9501 9502 /* See the check in dtrace_helper_provider_validate(). */ 9503 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) 9504 continue; 9505 9506 dhpb.dthpb_mod = dhp->dofhp_mod; 9507 dhpb.dthpb_func = strtab + probe->dofpr_func; 9508 dhpb.dthpb_name = strtab + probe->dofpr_name; 9509 dhpb.dthpb_base = probe->dofpr_addr; 9510 dhpb.dthpb_offs = off + probe->dofpr_offidx; 9511 dhpb.dthpb_noffs = probe->dofpr_noffs; 9512 if (enoff != NULL) { 9513 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 9514 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 9515 } else { 9516 dhpb.dthpb_enoffs = NULL; 9517 dhpb.dthpb_nenoffs = 0; 9518 } 9519 dhpb.dthpb_args = arg + probe->dofpr_argidx; 9520 dhpb.dthpb_nargc = probe->dofpr_nargc; 9521 dhpb.dthpb_xargc = probe->dofpr_xargc; 9522 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 9523 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 9524 9525 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 9526 } 9527 } 9528 9529 static void 9530 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 9531 { 9532 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9533 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9534 int i; 9535 9536 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9537 9538 for (i = 0; i < dof->dofh_secnum; i++) { 9539 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9540 dof->dofh_secoff + i * dof->dofh_secsize); 9541 9542 if (sec->dofs_type != DOF_SECT_PROVIDER) 9543 continue; 9544 9545 dtrace_helper_provide_one(dhp, sec, pid); 9546 } 9547 9548 /* 9549 * We may have just created probes, so we must now rematch against 9550 * any retained enablings. Note that this call will acquire both 9551 * cpu_lock and dtrace_lock; the fact that we are holding 9552 * dtrace_meta_lock now is what defines the ordering with respect to 9553 * these three locks. 9554 */ 9555 dtrace_enabling_matchall(); 9556 } 9557 9558 static void 9559 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9560 { 9561 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9562 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9563 dof_sec_t *str_sec; 9564 dof_provider_t *provider; 9565 char *strtab; 9566 dtrace_helper_provdesc_t dhpv; 9567 dtrace_meta_t *meta = dtrace_meta_pid; 9568 dtrace_mops_t *mops = &meta->dtm_mops; 9569 9570 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9571 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9572 provider->dofpv_strtab * dof->dofh_secsize); 9573 9574 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9575 9576 /* 9577 * Create the provider. 9578 */ 9579 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9580 9581 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 9582 9583 meta->dtm_count--; 9584 } 9585 9586 static void 9587 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 9588 { 9589 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9590 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9591 int i; 9592 9593 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9594 9595 for (i = 0; i < dof->dofh_secnum; i++) { 9596 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9597 dof->dofh_secoff + i * dof->dofh_secsize); 9598 9599 if (sec->dofs_type != DOF_SECT_PROVIDER) 9600 continue; 9601 9602 dtrace_helper_provider_remove_one(dhp, sec, pid); 9603 } 9604 } 9605 9606 /* 9607 * DTrace Meta Provider-to-Framework API Functions 9608 * 9609 * These functions implement the Meta Provider-to-Framework API, as described 9610 * in <sys/dtrace.h>. 9611 */ 9612 int 9613 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 9614 dtrace_meta_provider_id_t *idp) 9615 { 9616 dtrace_meta_t *meta; 9617 dtrace_helpers_t *help, *next; 9618 int i; 9619 9620 *idp = DTRACE_METAPROVNONE; 9621 9622 /* 9623 * We strictly don't need the name, but we hold onto it for 9624 * debuggability. All hail error queues! 9625 */ 9626 if (name == NULL) { 9627 cmn_err(CE_WARN, "failed to register meta-provider: " 9628 "invalid name"); 9629 return (EINVAL); 9630 } 9631 9632 if (mops == NULL || 9633 mops->dtms_create_probe == NULL || 9634 mops->dtms_provide_pid == NULL || 9635 mops->dtms_remove_pid == NULL) { 9636 cmn_err(CE_WARN, "failed to register meta-register %s: " 9637 "invalid ops", name); 9638 return (EINVAL); 9639 } 9640 9641 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 9642 meta->dtm_mops = *mops; 9643 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 9644 (void) strcpy(meta->dtm_name, name); 9645 meta->dtm_arg = arg; 9646 9647 mutex_enter(&dtrace_meta_lock); 9648 mutex_enter(&dtrace_lock); 9649 9650 if (dtrace_meta_pid != NULL) { 9651 mutex_exit(&dtrace_lock); 9652 mutex_exit(&dtrace_meta_lock); 9653 cmn_err(CE_WARN, "failed to register meta-register %s: " 9654 "user-land meta-provider exists", name); 9655 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 9656 kmem_free(meta, sizeof (dtrace_meta_t)); 9657 return (EINVAL); 9658 } 9659 9660 dtrace_meta_pid = meta; 9661 *idp = (dtrace_meta_provider_id_t)meta; 9662 9663 /* 9664 * If there are providers and probes ready to go, pass them 9665 * off to the new meta provider now. 9666 */ 9667 9668 help = dtrace_deferred_pid; 9669 dtrace_deferred_pid = NULL; 9670 9671 mutex_exit(&dtrace_lock); 9672 9673 while (help != NULL) { 9674 for (i = 0; i < help->dthps_nprovs; i++) { 9675 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 9676 help->dthps_pid); 9677 } 9678 9679 next = help->dthps_next; 9680 help->dthps_next = NULL; 9681 help->dthps_prev = NULL; 9682 help->dthps_deferred = 0; 9683 help = next; 9684 } 9685 9686 mutex_exit(&dtrace_meta_lock); 9687 9688 return (0); 9689 } 9690 9691 int 9692 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 9693 { 9694 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 9695 9696 mutex_enter(&dtrace_meta_lock); 9697 mutex_enter(&dtrace_lock); 9698 9699 if (old == dtrace_meta_pid) { 9700 pp = &dtrace_meta_pid; 9701 } else { 9702 panic("attempt to unregister non-existent " 9703 "dtrace meta-provider %p\n", (void *)old); 9704 } 9705 9706 if (old->dtm_count != 0) { 9707 mutex_exit(&dtrace_lock); 9708 mutex_exit(&dtrace_meta_lock); 9709 return (EBUSY); 9710 } 9711 9712 *pp = NULL; 9713 9714 mutex_exit(&dtrace_lock); 9715 mutex_exit(&dtrace_meta_lock); 9716 9717 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 9718 kmem_free(old, sizeof (dtrace_meta_t)); 9719 9720 return (0); 9721 } 9722 9723 9724 /* 9725 * DTrace DIF Object Functions 9726 */ 9727 static int 9728 dtrace_difo_err(uint_t pc, const char *format, ...) 9729 { 9730 if (dtrace_err_verbose) { 9731 va_list alist; 9732 9733 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 9734 va_start(alist, format); 9735 (void) vuprintf(format, alist); 9736 va_end(alist); 9737 } 9738 9739 #ifdef DTRACE_ERRDEBUG 9740 dtrace_errdebug(format); 9741 #endif 9742 return (1); 9743 } 9744 9745 /* 9746 * Validate a DTrace DIF object by checking the IR instructions. The following 9747 * rules are currently enforced by dtrace_difo_validate(): 9748 * 9749 * 1. Each instruction must have a valid opcode 9750 * 2. Each register, string, variable, or subroutine reference must be valid 9751 * 3. No instruction can modify register %r0 (must be zero) 9752 * 4. All instruction reserved bits must be set to zero 9753 * 5. The last instruction must be a "ret" instruction 9754 * 6. All branch targets must reference a valid instruction _after_ the branch 9755 */ 9756 static int 9757 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 9758 cred_t *cr) 9759 { 9760 int err = 0, i; 9761 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 9762 int kcheckload; 9763 uint_t pc; 9764 int maxglobal = -1, maxlocal = -1, maxtlocal = -1; 9765 9766 kcheckload = cr == NULL || 9767 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 9768 9769 dp->dtdo_destructive = 0; 9770 9771 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 9772 dif_instr_t instr = dp->dtdo_buf[pc]; 9773 9774 uint_t r1 = DIF_INSTR_R1(instr); 9775 uint_t r2 = DIF_INSTR_R2(instr); 9776 uint_t rd = DIF_INSTR_RD(instr); 9777 uint_t rs = DIF_INSTR_RS(instr); 9778 uint_t label = DIF_INSTR_LABEL(instr); 9779 uint_t v = DIF_INSTR_VAR(instr); 9780 uint_t subr = DIF_INSTR_SUBR(instr); 9781 uint_t type = DIF_INSTR_TYPE(instr); 9782 uint_t op = DIF_INSTR_OP(instr); 9783 9784 switch (op) { 9785 case DIF_OP_OR: 9786 case DIF_OP_XOR: 9787 case DIF_OP_AND: 9788 case DIF_OP_SLL: 9789 case DIF_OP_SRL: 9790 case DIF_OP_SRA: 9791 case DIF_OP_SUB: 9792 case DIF_OP_ADD: 9793 case DIF_OP_MUL: 9794 case DIF_OP_SDIV: 9795 case DIF_OP_UDIV: 9796 case DIF_OP_SREM: 9797 case DIF_OP_UREM: 9798 case DIF_OP_COPYS: 9799 if (r1 >= nregs) 9800 err += efunc(pc, "invalid register %u\n", r1); 9801 if (r2 >= nregs) 9802 err += efunc(pc, "invalid register %u\n", r2); 9803 if (rd >= nregs) 9804 err += efunc(pc, "invalid register %u\n", rd); 9805 if (rd == 0) 9806 err += efunc(pc, "cannot write to %r0\n"); 9807 break; 9808 case DIF_OP_NOT: 9809 case DIF_OP_MOV: 9810 case DIF_OP_ALLOCS: 9811 if (r1 >= nregs) 9812 err += efunc(pc, "invalid register %u\n", r1); 9813 if (r2 != 0) 9814 err += efunc(pc, "non-zero reserved bits\n"); 9815 if (rd >= nregs) 9816 err += efunc(pc, "invalid register %u\n", rd); 9817 if (rd == 0) 9818 err += efunc(pc, "cannot write to %r0\n"); 9819 break; 9820 case DIF_OP_LDSB: 9821 case DIF_OP_LDSH: 9822 case DIF_OP_LDSW: 9823 case DIF_OP_LDUB: 9824 case DIF_OP_LDUH: 9825 case DIF_OP_LDUW: 9826 case DIF_OP_LDX: 9827 if (r1 >= nregs) 9828 err += efunc(pc, "invalid register %u\n", r1); 9829 if (r2 != 0) 9830 err += efunc(pc, "non-zero reserved bits\n"); 9831 if (rd >= nregs) 9832 err += efunc(pc, "invalid register %u\n", rd); 9833 if (rd == 0) 9834 err += efunc(pc, "cannot write to %r0\n"); 9835 if (kcheckload) 9836 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 9837 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 9838 break; 9839 case DIF_OP_RLDSB: 9840 case DIF_OP_RLDSH: 9841 case DIF_OP_RLDSW: 9842 case DIF_OP_RLDUB: 9843 case DIF_OP_RLDUH: 9844 case DIF_OP_RLDUW: 9845 case DIF_OP_RLDX: 9846 if (r1 >= nregs) 9847 err += efunc(pc, "invalid register %u\n", r1); 9848 if (r2 != 0) 9849 err += efunc(pc, "non-zero reserved bits\n"); 9850 if (rd >= nregs) 9851 err += efunc(pc, "invalid register %u\n", rd); 9852 if (rd == 0) 9853 err += efunc(pc, "cannot write to %r0\n"); 9854 break; 9855 case DIF_OP_ULDSB: 9856 case DIF_OP_ULDSH: 9857 case DIF_OP_ULDSW: 9858 case DIF_OP_ULDUB: 9859 case DIF_OP_ULDUH: 9860 case DIF_OP_ULDUW: 9861 case DIF_OP_ULDX: 9862 if (r1 >= nregs) 9863 err += efunc(pc, "invalid register %u\n", r1); 9864 if (r2 != 0) 9865 err += efunc(pc, "non-zero reserved bits\n"); 9866 if (rd >= nregs) 9867 err += efunc(pc, "invalid register %u\n", rd); 9868 if (rd == 0) 9869 err += efunc(pc, "cannot write to %r0\n"); 9870 break; 9871 case DIF_OP_STB: 9872 case DIF_OP_STH: 9873 case DIF_OP_STW: 9874 case DIF_OP_STX: 9875 if (r1 >= nregs) 9876 err += efunc(pc, "invalid register %u\n", r1); 9877 if (r2 != 0) 9878 err += efunc(pc, "non-zero reserved bits\n"); 9879 if (rd >= nregs) 9880 err += efunc(pc, "invalid register %u\n", rd); 9881 if (rd == 0) 9882 err += efunc(pc, "cannot write to 0 address\n"); 9883 break; 9884 case DIF_OP_CMP: 9885 case DIF_OP_SCMP: 9886 if (r1 >= nregs) 9887 err += efunc(pc, "invalid register %u\n", r1); 9888 if (r2 >= nregs) 9889 err += efunc(pc, "invalid register %u\n", r2); 9890 if (rd != 0) 9891 err += efunc(pc, "non-zero reserved bits\n"); 9892 break; 9893 case DIF_OP_TST: 9894 if (r1 >= nregs) 9895 err += efunc(pc, "invalid register %u\n", r1); 9896 if (r2 != 0 || rd != 0) 9897 err += efunc(pc, "non-zero reserved bits\n"); 9898 break; 9899 case DIF_OP_BA: 9900 case DIF_OP_BE: 9901 case DIF_OP_BNE: 9902 case DIF_OP_BG: 9903 case DIF_OP_BGU: 9904 case DIF_OP_BGE: 9905 case DIF_OP_BGEU: 9906 case DIF_OP_BL: 9907 case DIF_OP_BLU: 9908 case DIF_OP_BLE: 9909 case DIF_OP_BLEU: 9910 if (label >= dp->dtdo_len) { 9911 err += efunc(pc, "invalid branch target %u\n", 9912 label); 9913 } 9914 if (label <= pc) { 9915 err += efunc(pc, "backward branch to %u\n", 9916 label); 9917 } 9918 break; 9919 case DIF_OP_RET: 9920 if (r1 != 0 || r2 != 0) 9921 err += efunc(pc, "non-zero reserved bits\n"); 9922 if (rd >= nregs) 9923 err += efunc(pc, "invalid register %u\n", rd); 9924 break; 9925 case DIF_OP_NOP: 9926 case DIF_OP_POPTS: 9927 case DIF_OP_FLUSHTS: 9928 if (r1 != 0 || r2 != 0 || rd != 0) 9929 err += efunc(pc, "non-zero reserved bits\n"); 9930 break; 9931 case DIF_OP_SETX: 9932 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 9933 err += efunc(pc, "invalid integer ref %u\n", 9934 DIF_INSTR_INTEGER(instr)); 9935 } 9936 if (rd >= nregs) 9937 err += efunc(pc, "invalid register %u\n", rd); 9938 if (rd == 0) 9939 err += efunc(pc, "cannot write to %r0\n"); 9940 break; 9941 case DIF_OP_SETS: 9942 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 9943 err += efunc(pc, "invalid string ref %u\n", 9944 DIF_INSTR_STRING(instr)); 9945 } 9946 if (rd >= nregs) 9947 err += efunc(pc, "invalid register %u\n", rd); 9948 if (rd == 0) 9949 err += efunc(pc, "cannot write to %r0\n"); 9950 break; 9951 case DIF_OP_LDGA: 9952 case DIF_OP_LDTA: 9953 if (r1 > DIF_VAR_ARRAY_MAX) 9954 err += efunc(pc, "invalid array %u\n", r1); 9955 if (r2 >= nregs) 9956 err += efunc(pc, "invalid register %u\n", r2); 9957 if (rd >= nregs) 9958 err += efunc(pc, "invalid register %u\n", rd); 9959 if (rd == 0) 9960 err += efunc(pc, "cannot write to %r0\n"); 9961 break; 9962 case DIF_OP_LDGS: 9963 case DIF_OP_LDTS: 9964 case DIF_OP_LDLS: 9965 case DIF_OP_LDGAA: 9966 case DIF_OP_LDTAA: 9967 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 9968 err += efunc(pc, "invalid variable %u\n", v); 9969 if (rd >= nregs) 9970 err += efunc(pc, "invalid register %u\n", rd); 9971 if (rd == 0) 9972 err += efunc(pc, "cannot write to %r0\n"); 9973 break; 9974 case DIF_OP_STGS: 9975 case DIF_OP_STTS: 9976 case DIF_OP_STLS: 9977 case DIF_OP_STGAA: 9978 case DIF_OP_STTAA: 9979 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 9980 err += efunc(pc, "invalid variable %u\n", v); 9981 if (rs >= nregs) 9982 err += efunc(pc, "invalid register %u\n", rd); 9983 break; 9984 case DIF_OP_CALL: 9985 if (subr > DIF_SUBR_MAX) 9986 err += efunc(pc, "invalid subr %u\n", subr); 9987 if (rd >= nregs) 9988 err += efunc(pc, "invalid register %u\n", rd); 9989 if (rd == 0) 9990 err += efunc(pc, "cannot write to %r0\n"); 9991 9992 if (subr == DIF_SUBR_COPYOUT || 9993 subr == DIF_SUBR_COPYOUTSTR) { 9994 dp->dtdo_destructive = 1; 9995 } 9996 9997 if (subr == DIF_SUBR_GETF) { 9998 /* 9999 * If we have a getf() we need to record that 10000 * in our state. Note that our state can be 10001 * NULL if this is a helper -- but in that 10002 * case, the call to getf() is itself illegal, 10003 * and will be caught (slightly later) when 10004 * the helper is validated. 10005 */ 10006 if (vstate->dtvs_state != NULL) 10007 vstate->dtvs_state->dts_getf++; 10008 } 10009 10010 break; 10011 case DIF_OP_PUSHTR: 10012 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 10013 err += efunc(pc, "invalid ref type %u\n", type); 10014 if (r2 >= nregs) 10015 err += efunc(pc, "invalid register %u\n", r2); 10016 if (rs >= nregs) 10017 err += efunc(pc, "invalid register %u\n", rs); 10018 break; 10019 case DIF_OP_PUSHTV: 10020 if (type != DIF_TYPE_CTF) 10021 err += efunc(pc, "invalid val type %u\n", type); 10022 if (r2 >= nregs) 10023 err += efunc(pc, "invalid register %u\n", r2); 10024 if (rs >= nregs) 10025 err += efunc(pc, "invalid register %u\n", rs); 10026 break; 10027 default: 10028 err += efunc(pc, "invalid opcode %u\n", 10029 DIF_INSTR_OP(instr)); 10030 } 10031 } 10032 10033 if (dp->dtdo_len != 0 && 10034 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 10035 err += efunc(dp->dtdo_len - 1, 10036 "expected 'ret' as last DIF instruction\n"); 10037 } 10038 10039 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { 10040 /* 10041 * If we're not returning by reference, the size must be either 10042 * 0 or the size of one of the base types. 10043 */ 10044 switch (dp->dtdo_rtype.dtdt_size) { 10045 case 0: 10046 case sizeof (uint8_t): 10047 case sizeof (uint16_t): 10048 case sizeof (uint32_t): 10049 case sizeof (uint64_t): 10050 break; 10051 10052 default: 10053 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 10054 } 10055 } 10056 10057 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 10058 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 10059 dtrace_diftype_t *vt, *et; 10060 uint_t id, ndx; 10061 10062 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 10063 v->dtdv_scope != DIFV_SCOPE_THREAD && 10064 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 10065 err += efunc(i, "unrecognized variable scope %d\n", 10066 v->dtdv_scope); 10067 break; 10068 } 10069 10070 if (v->dtdv_kind != DIFV_KIND_ARRAY && 10071 v->dtdv_kind != DIFV_KIND_SCALAR) { 10072 err += efunc(i, "unrecognized variable type %d\n", 10073 v->dtdv_kind); 10074 break; 10075 } 10076 10077 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 10078 err += efunc(i, "%d exceeds variable id limit\n", id); 10079 break; 10080 } 10081 10082 if (id < DIF_VAR_OTHER_UBASE) 10083 continue; 10084 10085 /* 10086 * For user-defined variables, we need to check that this 10087 * definition is identical to any previous definition that we 10088 * encountered. 10089 */ 10090 ndx = id - DIF_VAR_OTHER_UBASE; 10091 10092 switch (v->dtdv_scope) { 10093 case DIFV_SCOPE_GLOBAL: 10094 if (maxglobal == -1 || ndx > maxglobal) 10095 maxglobal = ndx; 10096 10097 if (ndx < vstate->dtvs_nglobals) { 10098 dtrace_statvar_t *svar; 10099 10100 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 10101 existing = &svar->dtsv_var; 10102 } 10103 10104 break; 10105 10106 case DIFV_SCOPE_THREAD: 10107 if (maxtlocal == -1 || ndx > maxtlocal) 10108 maxtlocal = ndx; 10109 10110 if (ndx < vstate->dtvs_ntlocals) 10111 existing = &vstate->dtvs_tlocals[ndx]; 10112 break; 10113 10114 case DIFV_SCOPE_LOCAL: 10115 if (maxlocal == -1 || ndx > maxlocal) 10116 maxlocal = ndx; 10117 10118 if (ndx < vstate->dtvs_nlocals) { 10119 dtrace_statvar_t *svar; 10120 10121 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 10122 existing = &svar->dtsv_var; 10123 } 10124 10125 break; 10126 } 10127 10128 vt = &v->dtdv_type; 10129 10130 if (vt->dtdt_flags & DIF_TF_BYREF) { 10131 if (vt->dtdt_size == 0) { 10132 err += efunc(i, "zero-sized variable\n"); 10133 break; 10134 } 10135 10136 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL || 10137 v->dtdv_scope == DIFV_SCOPE_LOCAL) && 10138 vt->dtdt_size > dtrace_statvar_maxsize) { 10139 err += efunc(i, "oversized by-ref static\n"); 10140 break; 10141 } 10142 } 10143 10144 if (existing == NULL || existing->dtdv_id == 0) 10145 continue; 10146 10147 ASSERT(existing->dtdv_id == v->dtdv_id); 10148 ASSERT(existing->dtdv_scope == v->dtdv_scope); 10149 10150 if (existing->dtdv_kind != v->dtdv_kind) 10151 err += efunc(i, "%d changed variable kind\n", id); 10152 10153 et = &existing->dtdv_type; 10154 10155 if (vt->dtdt_flags != et->dtdt_flags) { 10156 err += efunc(i, "%d changed variable type flags\n", id); 10157 break; 10158 } 10159 10160 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 10161 err += efunc(i, "%d changed variable type size\n", id); 10162 break; 10163 } 10164 } 10165 10166 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 10167 dif_instr_t instr = dp->dtdo_buf[pc]; 10168 10169 uint_t v = DIF_INSTR_VAR(instr); 10170 uint_t op = DIF_INSTR_OP(instr); 10171 10172 switch (op) { 10173 case DIF_OP_LDGS: 10174 case DIF_OP_LDGAA: 10175 case DIF_OP_STGS: 10176 case DIF_OP_STGAA: 10177 if (v > DIF_VAR_OTHER_UBASE + maxglobal) 10178 err += efunc(pc, "invalid variable %u\n", v); 10179 break; 10180 case DIF_OP_LDTS: 10181 case DIF_OP_LDTAA: 10182 case DIF_OP_STTS: 10183 case DIF_OP_STTAA: 10184 if (v > DIF_VAR_OTHER_UBASE + maxtlocal) 10185 err += efunc(pc, "invalid variable %u\n", v); 10186 break; 10187 case DIF_OP_LDLS: 10188 case DIF_OP_STLS: 10189 if (v > DIF_VAR_OTHER_UBASE + maxlocal) 10190 err += efunc(pc, "invalid variable %u\n", v); 10191 break; 10192 default: 10193 break; 10194 } 10195 } 10196 10197 return (err); 10198 } 10199 10200 /* 10201 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 10202 * are much more constrained than normal DIFOs. Specifically, they may 10203 * not: 10204 * 10205 * 1. Make calls to subroutines other than copyin(), copyinstr() or 10206 * miscellaneous string routines 10207 * 2. Access DTrace variables other than the args[] array, and the 10208 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 10209 * 3. Have thread-local variables. 10210 * 4. Have dynamic variables. 10211 */ 10212 static int 10213 dtrace_difo_validate_helper(dtrace_difo_t *dp) 10214 { 10215 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 10216 int err = 0; 10217 uint_t pc; 10218 10219 for (pc = 0; pc < dp->dtdo_len; pc++) { 10220 dif_instr_t instr = dp->dtdo_buf[pc]; 10221 10222 uint_t v = DIF_INSTR_VAR(instr); 10223 uint_t subr = DIF_INSTR_SUBR(instr); 10224 uint_t op = DIF_INSTR_OP(instr); 10225 10226 switch (op) { 10227 case DIF_OP_OR: 10228 case DIF_OP_XOR: 10229 case DIF_OP_AND: 10230 case DIF_OP_SLL: 10231 case DIF_OP_SRL: 10232 case DIF_OP_SRA: 10233 case DIF_OP_SUB: 10234 case DIF_OP_ADD: 10235 case DIF_OP_MUL: 10236 case DIF_OP_SDIV: 10237 case DIF_OP_UDIV: 10238 case DIF_OP_SREM: 10239 case DIF_OP_UREM: 10240 case DIF_OP_COPYS: 10241 case DIF_OP_NOT: 10242 case DIF_OP_MOV: 10243 case DIF_OP_RLDSB: 10244 case DIF_OP_RLDSH: 10245 case DIF_OP_RLDSW: 10246 case DIF_OP_RLDUB: 10247 case DIF_OP_RLDUH: 10248 case DIF_OP_RLDUW: 10249 case DIF_OP_RLDX: 10250 case DIF_OP_ULDSB: 10251 case DIF_OP_ULDSH: 10252 case DIF_OP_ULDSW: 10253 case DIF_OP_ULDUB: 10254 case DIF_OP_ULDUH: 10255 case DIF_OP_ULDUW: 10256 case DIF_OP_ULDX: 10257 case DIF_OP_STB: 10258 case DIF_OP_STH: 10259 case DIF_OP_STW: 10260 case DIF_OP_STX: 10261 case DIF_OP_ALLOCS: 10262 case DIF_OP_CMP: 10263 case DIF_OP_SCMP: 10264 case DIF_OP_TST: 10265 case DIF_OP_BA: 10266 case DIF_OP_BE: 10267 case DIF_OP_BNE: 10268 case DIF_OP_BG: 10269 case DIF_OP_BGU: 10270 case DIF_OP_BGE: 10271 case DIF_OP_BGEU: 10272 case DIF_OP_BL: 10273 case DIF_OP_BLU: 10274 case DIF_OP_BLE: 10275 case DIF_OP_BLEU: 10276 case DIF_OP_RET: 10277 case DIF_OP_NOP: 10278 case DIF_OP_POPTS: 10279 case DIF_OP_FLUSHTS: 10280 case DIF_OP_SETX: 10281 case DIF_OP_SETS: 10282 case DIF_OP_LDGA: 10283 case DIF_OP_LDLS: 10284 case DIF_OP_STGS: 10285 case DIF_OP_STLS: 10286 case DIF_OP_PUSHTR: 10287 case DIF_OP_PUSHTV: 10288 break; 10289 10290 case DIF_OP_LDGS: 10291 if (v >= DIF_VAR_OTHER_UBASE) 10292 break; 10293 10294 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 10295 break; 10296 10297 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 10298 v == DIF_VAR_PPID || v == DIF_VAR_TID || 10299 v == DIF_VAR_EXECARGS || 10300 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 10301 v == DIF_VAR_UID || v == DIF_VAR_GID) 10302 break; 10303 10304 err += efunc(pc, "illegal variable %u\n", v); 10305 break; 10306 10307 case DIF_OP_LDTA: 10308 case DIF_OP_LDTS: 10309 case DIF_OP_LDGAA: 10310 case DIF_OP_LDTAA: 10311 err += efunc(pc, "illegal dynamic variable load\n"); 10312 break; 10313 10314 case DIF_OP_STTS: 10315 case DIF_OP_STGAA: 10316 case DIF_OP_STTAA: 10317 err += efunc(pc, "illegal dynamic variable store\n"); 10318 break; 10319 10320 case DIF_OP_CALL: 10321 if (subr == DIF_SUBR_ALLOCA || 10322 subr == DIF_SUBR_BCOPY || 10323 subr == DIF_SUBR_COPYIN || 10324 subr == DIF_SUBR_COPYINTO || 10325 subr == DIF_SUBR_COPYINSTR || 10326 subr == DIF_SUBR_INDEX || 10327 subr == DIF_SUBR_INET_NTOA || 10328 subr == DIF_SUBR_INET_NTOA6 || 10329 subr == DIF_SUBR_INET_NTOP || 10330 subr == DIF_SUBR_JSON || 10331 subr == DIF_SUBR_LLTOSTR || 10332 subr == DIF_SUBR_STRTOLL || 10333 subr == DIF_SUBR_RINDEX || 10334 subr == DIF_SUBR_STRCHR || 10335 subr == DIF_SUBR_STRJOIN || 10336 subr == DIF_SUBR_STRRCHR || 10337 subr == DIF_SUBR_STRSTR || 10338 subr == DIF_SUBR_HTONS || 10339 subr == DIF_SUBR_HTONL || 10340 subr == DIF_SUBR_HTONLL || 10341 subr == DIF_SUBR_NTOHS || 10342 subr == DIF_SUBR_NTOHL || 10343 subr == DIF_SUBR_NTOHLL || 10344 subr == DIF_SUBR_MEMREF) 10345 break; 10346 #ifdef __FreeBSD__ 10347 if (subr == DIF_SUBR_MEMSTR) 10348 break; 10349 #endif 10350 10351 err += efunc(pc, "invalid subr %u\n", subr); 10352 break; 10353 10354 default: 10355 err += efunc(pc, "invalid opcode %u\n", 10356 DIF_INSTR_OP(instr)); 10357 } 10358 } 10359 10360 return (err); 10361 } 10362 10363 /* 10364 * Returns 1 if the expression in the DIF object can be cached on a per-thread 10365 * basis; 0 if not. 10366 */ 10367 static int 10368 dtrace_difo_cacheable(dtrace_difo_t *dp) 10369 { 10370 int i; 10371 10372 if (dp == NULL) 10373 return (0); 10374 10375 for (i = 0; i < dp->dtdo_varlen; i++) { 10376 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10377 10378 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 10379 continue; 10380 10381 switch (v->dtdv_id) { 10382 case DIF_VAR_CURTHREAD: 10383 case DIF_VAR_PID: 10384 case DIF_VAR_TID: 10385 case DIF_VAR_EXECARGS: 10386 case DIF_VAR_EXECNAME: 10387 case DIF_VAR_ZONENAME: 10388 break; 10389 10390 default: 10391 return (0); 10392 } 10393 } 10394 10395 /* 10396 * This DIF object may be cacheable. Now we need to look for any 10397 * array loading instructions, any memory loading instructions, or 10398 * any stores to thread-local variables. 10399 */ 10400 for (i = 0; i < dp->dtdo_len; i++) { 10401 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 10402 10403 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 10404 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 10405 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 10406 op == DIF_OP_LDGA || op == DIF_OP_STTS) 10407 return (0); 10408 } 10409 10410 return (1); 10411 } 10412 10413 static void 10414 dtrace_difo_hold(dtrace_difo_t *dp) 10415 { 10416 int i; 10417 10418 ASSERT(MUTEX_HELD(&dtrace_lock)); 10419 10420 dp->dtdo_refcnt++; 10421 ASSERT(dp->dtdo_refcnt != 0); 10422 10423 /* 10424 * We need to check this DIF object for references to the variable 10425 * DIF_VAR_VTIMESTAMP. 10426 */ 10427 for (i = 0; i < dp->dtdo_varlen; i++) { 10428 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10429 10430 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10431 continue; 10432 10433 if (dtrace_vtime_references++ == 0) 10434 dtrace_vtime_enable(); 10435 } 10436 } 10437 10438 /* 10439 * This routine calculates the dynamic variable chunksize for a given DIF 10440 * object. The calculation is not fool-proof, and can probably be tricked by 10441 * malicious DIF -- but it works for all compiler-generated DIF. Because this 10442 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 10443 * if a dynamic variable size exceeds the chunksize. 10444 */ 10445 static void 10446 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10447 { 10448 uint64_t sval = 0; 10449 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 10450 const dif_instr_t *text = dp->dtdo_buf; 10451 uint_t pc, srd = 0; 10452 uint_t ttop = 0; 10453 size_t size, ksize; 10454 uint_t id, i; 10455 10456 for (pc = 0; pc < dp->dtdo_len; pc++) { 10457 dif_instr_t instr = text[pc]; 10458 uint_t op = DIF_INSTR_OP(instr); 10459 uint_t rd = DIF_INSTR_RD(instr); 10460 uint_t r1 = DIF_INSTR_R1(instr); 10461 uint_t nkeys = 0; 10462 uchar_t scope = 0; 10463 10464 dtrace_key_t *key = tupregs; 10465 10466 switch (op) { 10467 case DIF_OP_SETX: 10468 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 10469 srd = rd; 10470 continue; 10471 10472 case DIF_OP_STTS: 10473 key = &tupregs[DIF_DTR_NREGS]; 10474 key[0].dttk_size = 0; 10475 key[1].dttk_size = 0; 10476 nkeys = 2; 10477 scope = DIFV_SCOPE_THREAD; 10478 break; 10479 10480 case DIF_OP_STGAA: 10481 case DIF_OP_STTAA: 10482 nkeys = ttop; 10483 10484 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 10485 key[nkeys++].dttk_size = 0; 10486 10487 key[nkeys++].dttk_size = 0; 10488 10489 if (op == DIF_OP_STTAA) { 10490 scope = DIFV_SCOPE_THREAD; 10491 } else { 10492 scope = DIFV_SCOPE_GLOBAL; 10493 } 10494 10495 break; 10496 10497 case DIF_OP_PUSHTR: 10498 if (ttop == DIF_DTR_NREGS) 10499 return; 10500 10501 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 10502 /* 10503 * If the register for the size of the "pushtr" 10504 * is %r0 (or the value is 0) and the type is 10505 * a string, we'll use the system-wide default 10506 * string size. 10507 */ 10508 tupregs[ttop++].dttk_size = 10509 dtrace_strsize_default; 10510 } else { 10511 if (srd == 0) 10512 return; 10513 10514 if (sval > LONG_MAX) 10515 return; 10516 10517 tupregs[ttop++].dttk_size = sval; 10518 } 10519 10520 break; 10521 10522 case DIF_OP_PUSHTV: 10523 if (ttop == DIF_DTR_NREGS) 10524 return; 10525 10526 tupregs[ttop++].dttk_size = 0; 10527 break; 10528 10529 case DIF_OP_FLUSHTS: 10530 ttop = 0; 10531 break; 10532 10533 case DIF_OP_POPTS: 10534 if (ttop != 0) 10535 ttop--; 10536 break; 10537 } 10538 10539 sval = 0; 10540 srd = 0; 10541 10542 if (nkeys == 0) 10543 continue; 10544 10545 /* 10546 * We have a dynamic variable allocation; calculate its size. 10547 */ 10548 for (ksize = 0, i = 0; i < nkeys; i++) 10549 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 10550 10551 size = sizeof (dtrace_dynvar_t); 10552 size += sizeof (dtrace_key_t) * (nkeys - 1); 10553 size += ksize; 10554 10555 /* 10556 * Now we need to determine the size of the stored data. 10557 */ 10558 id = DIF_INSTR_VAR(instr); 10559 10560 for (i = 0; i < dp->dtdo_varlen; i++) { 10561 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10562 10563 if (v->dtdv_id == id && v->dtdv_scope == scope) { 10564 size += v->dtdv_type.dtdt_size; 10565 break; 10566 } 10567 } 10568 10569 if (i == dp->dtdo_varlen) 10570 return; 10571 10572 /* 10573 * We have the size. If this is larger than the chunk size 10574 * for our dynamic variable state, reset the chunk size. 10575 */ 10576 size = P2ROUNDUP(size, sizeof (uint64_t)); 10577 10578 /* 10579 * Before setting the chunk size, check that we're not going 10580 * to set it to a negative value... 10581 */ 10582 if (size > LONG_MAX) 10583 return; 10584 10585 /* 10586 * ...and make certain that we didn't badly overflow. 10587 */ 10588 if (size < ksize || size < sizeof (dtrace_dynvar_t)) 10589 return; 10590 10591 if (size > vstate->dtvs_dynvars.dtds_chunksize) 10592 vstate->dtvs_dynvars.dtds_chunksize = size; 10593 } 10594 } 10595 10596 static void 10597 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10598 { 10599 int i, oldsvars, osz, nsz, otlocals, ntlocals; 10600 uint_t id; 10601 10602 ASSERT(MUTEX_HELD(&dtrace_lock)); 10603 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 10604 10605 for (i = 0; i < dp->dtdo_varlen; i++) { 10606 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10607 dtrace_statvar_t *svar, ***svarp = NULL; 10608 size_t dsize = 0; 10609 uint8_t scope = v->dtdv_scope; 10610 int *np = NULL; 10611 10612 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10613 continue; 10614 10615 id -= DIF_VAR_OTHER_UBASE; 10616 10617 switch (scope) { 10618 case DIFV_SCOPE_THREAD: 10619 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 10620 dtrace_difv_t *tlocals; 10621 10622 if ((ntlocals = (otlocals << 1)) == 0) 10623 ntlocals = 1; 10624 10625 osz = otlocals * sizeof (dtrace_difv_t); 10626 nsz = ntlocals * sizeof (dtrace_difv_t); 10627 10628 tlocals = kmem_zalloc(nsz, KM_SLEEP); 10629 10630 if (osz != 0) { 10631 bcopy(vstate->dtvs_tlocals, 10632 tlocals, osz); 10633 kmem_free(vstate->dtvs_tlocals, osz); 10634 } 10635 10636 vstate->dtvs_tlocals = tlocals; 10637 vstate->dtvs_ntlocals = ntlocals; 10638 } 10639 10640 vstate->dtvs_tlocals[id] = *v; 10641 continue; 10642 10643 case DIFV_SCOPE_LOCAL: 10644 np = &vstate->dtvs_nlocals; 10645 svarp = &vstate->dtvs_locals; 10646 10647 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10648 dsize = NCPU * (v->dtdv_type.dtdt_size + 10649 sizeof (uint64_t)); 10650 else 10651 dsize = NCPU * sizeof (uint64_t); 10652 10653 break; 10654 10655 case DIFV_SCOPE_GLOBAL: 10656 np = &vstate->dtvs_nglobals; 10657 svarp = &vstate->dtvs_globals; 10658 10659 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10660 dsize = v->dtdv_type.dtdt_size + 10661 sizeof (uint64_t); 10662 10663 break; 10664 10665 default: 10666 ASSERT(0); 10667 } 10668 10669 while (id >= (oldsvars = *np)) { 10670 dtrace_statvar_t **statics; 10671 int newsvars, oldsize, newsize; 10672 10673 if ((newsvars = (oldsvars << 1)) == 0) 10674 newsvars = 1; 10675 10676 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 10677 newsize = newsvars * sizeof (dtrace_statvar_t *); 10678 10679 statics = kmem_zalloc(newsize, KM_SLEEP); 10680 10681 if (oldsize != 0) { 10682 bcopy(*svarp, statics, oldsize); 10683 kmem_free(*svarp, oldsize); 10684 } 10685 10686 *svarp = statics; 10687 *np = newsvars; 10688 } 10689 10690 if ((svar = (*svarp)[id]) == NULL) { 10691 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 10692 svar->dtsv_var = *v; 10693 10694 if ((svar->dtsv_size = dsize) != 0) { 10695 svar->dtsv_data = (uint64_t)(uintptr_t) 10696 kmem_zalloc(dsize, KM_SLEEP); 10697 } 10698 10699 (*svarp)[id] = svar; 10700 } 10701 10702 svar->dtsv_refcnt++; 10703 } 10704 10705 dtrace_difo_chunksize(dp, vstate); 10706 dtrace_difo_hold(dp); 10707 } 10708 10709 static dtrace_difo_t * 10710 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10711 { 10712 dtrace_difo_t *new; 10713 size_t sz; 10714 10715 ASSERT(dp->dtdo_buf != NULL); 10716 ASSERT(dp->dtdo_refcnt != 0); 10717 10718 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10719 10720 ASSERT(dp->dtdo_buf != NULL); 10721 sz = dp->dtdo_len * sizeof (dif_instr_t); 10722 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 10723 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 10724 new->dtdo_len = dp->dtdo_len; 10725 10726 if (dp->dtdo_strtab != NULL) { 10727 ASSERT(dp->dtdo_strlen != 0); 10728 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 10729 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 10730 new->dtdo_strlen = dp->dtdo_strlen; 10731 } 10732 10733 if (dp->dtdo_inttab != NULL) { 10734 ASSERT(dp->dtdo_intlen != 0); 10735 sz = dp->dtdo_intlen * sizeof (uint64_t); 10736 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 10737 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 10738 new->dtdo_intlen = dp->dtdo_intlen; 10739 } 10740 10741 if (dp->dtdo_vartab != NULL) { 10742 ASSERT(dp->dtdo_varlen != 0); 10743 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 10744 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 10745 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 10746 new->dtdo_varlen = dp->dtdo_varlen; 10747 } 10748 10749 dtrace_difo_init(new, vstate); 10750 return (new); 10751 } 10752 10753 static void 10754 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10755 { 10756 int i; 10757 10758 ASSERT(dp->dtdo_refcnt == 0); 10759 10760 for (i = 0; i < dp->dtdo_varlen; i++) { 10761 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10762 dtrace_statvar_t *svar, **svarp = NULL; 10763 uint_t id; 10764 uint8_t scope = v->dtdv_scope; 10765 int *np = NULL; 10766 10767 switch (scope) { 10768 case DIFV_SCOPE_THREAD: 10769 continue; 10770 10771 case DIFV_SCOPE_LOCAL: 10772 np = &vstate->dtvs_nlocals; 10773 svarp = vstate->dtvs_locals; 10774 break; 10775 10776 case DIFV_SCOPE_GLOBAL: 10777 np = &vstate->dtvs_nglobals; 10778 svarp = vstate->dtvs_globals; 10779 break; 10780 10781 default: 10782 ASSERT(0); 10783 } 10784 10785 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10786 continue; 10787 10788 id -= DIF_VAR_OTHER_UBASE; 10789 ASSERT(id < *np); 10790 10791 svar = svarp[id]; 10792 ASSERT(svar != NULL); 10793 ASSERT(svar->dtsv_refcnt > 0); 10794 10795 if (--svar->dtsv_refcnt > 0) 10796 continue; 10797 10798 if (svar->dtsv_size != 0) { 10799 ASSERT(svar->dtsv_data != 0); 10800 kmem_free((void *)(uintptr_t)svar->dtsv_data, 10801 svar->dtsv_size); 10802 } 10803 10804 kmem_free(svar, sizeof (dtrace_statvar_t)); 10805 svarp[id] = NULL; 10806 } 10807 10808 if (dp->dtdo_buf != NULL) 10809 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10810 if (dp->dtdo_inttab != NULL) 10811 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10812 if (dp->dtdo_strtab != NULL) 10813 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10814 if (dp->dtdo_vartab != NULL) 10815 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10816 10817 kmem_free(dp, sizeof (dtrace_difo_t)); 10818 } 10819 10820 static void 10821 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10822 { 10823 int i; 10824 10825 ASSERT(MUTEX_HELD(&dtrace_lock)); 10826 ASSERT(dp->dtdo_refcnt != 0); 10827 10828 for (i = 0; i < dp->dtdo_varlen; i++) { 10829 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10830 10831 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10832 continue; 10833 10834 ASSERT(dtrace_vtime_references > 0); 10835 if (--dtrace_vtime_references == 0) 10836 dtrace_vtime_disable(); 10837 } 10838 10839 if (--dp->dtdo_refcnt == 0) 10840 dtrace_difo_destroy(dp, vstate); 10841 } 10842 10843 /* 10844 * DTrace Format Functions 10845 */ 10846 static uint16_t 10847 dtrace_format_add(dtrace_state_t *state, char *str) 10848 { 10849 char *fmt, **new; 10850 uint16_t ndx, len = strlen(str) + 1; 10851 10852 fmt = kmem_zalloc(len, KM_SLEEP); 10853 bcopy(str, fmt, len); 10854 10855 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 10856 if (state->dts_formats[ndx] == NULL) { 10857 state->dts_formats[ndx] = fmt; 10858 return (ndx + 1); 10859 } 10860 } 10861 10862 if (state->dts_nformats == USHRT_MAX) { 10863 /* 10864 * This is only likely if a denial-of-service attack is being 10865 * attempted. As such, it's okay to fail silently here. 10866 */ 10867 kmem_free(fmt, len); 10868 return (0); 10869 } 10870 10871 /* 10872 * For simplicity, we always resize the formats array to be exactly the 10873 * number of formats. 10874 */ 10875 ndx = state->dts_nformats++; 10876 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 10877 10878 if (state->dts_formats != NULL) { 10879 ASSERT(ndx != 0); 10880 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 10881 kmem_free(state->dts_formats, ndx * sizeof (char *)); 10882 } 10883 10884 state->dts_formats = new; 10885 state->dts_formats[ndx] = fmt; 10886 10887 return (ndx + 1); 10888 } 10889 10890 static void 10891 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 10892 { 10893 char *fmt; 10894 10895 ASSERT(state->dts_formats != NULL); 10896 ASSERT(format <= state->dts_nformats); 10897 ASSERT(state->dts_formats[format - 1] != NULL); 10898 10899 fmt = state->dts_formats[format - 1]; 10900 kmem_free(fmt, strlen(fmt) + 1); 10901 state->dts_formats[format - 1] = NULL; 10902 } 10903 10904 static void 10905 dtrace_format_destroy(dtrace_state_t *state) 10906 { 10907 int i; 10908 10909 if (state->dts_nformats == 0) { 10910 ASSERT(state->dts_formats == NULL); 10911 return; 10912 } 10913 10914 ASSERT(state->dts_formats != NULL); 10915 10916 for (i = 0; i < state->dts_nformats; i++) { 10917 char *fmt = state->dts_formats[i]; 10918 10919 if (fmt == NULL) 10920 continue; 10921 10922 kmem_free(fmt, strlen(fmt) + 1); 10923 } 10924 10925 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 10926 state->dts_nformats = 0; 10927 state->dts_formats = NULL; 10928 } 10929 10930 /* 10931 * DTrace Predicate Functions 10932 */ 10933 static dtrace_predicate_t * 10934 dtrace_predicate_create(dtrace_difo_t *dp) 10935 { 10936 dtrace_predicate_t *pred; 10937 10938 ASSERT(MUTEX_HELD(&dtrace_lock)); 10939 ASSERT(dp->dtdo_refcnt != 0); 10940 10941 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 10942 pred->dtp_difo = dp; 10943 pred->dtp_refcnt = 1; 10944 10945 if (!dtrace_difo_cacheable(dp)) 10946 return (pred); 10947 10948 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 10949 /* 10950 * This is only theoretically possible -- we have had 2^32 10951 * cacheable predicates on this machine. We cannot allow any 10952 * more predicates to become cacheable: as unlikely as it is, 10953 * there may be a thread caching a (now stale) predicate cache 10954 * ID. (N.B.: the temptation is being successfully resisted to 10955 * have this cmn_err() "Holy shit -- we executed this code!") 10956 */ 10957 return (pred); 10958 } 10959 10960 pred->dtp_cacheid = dtrace_predcache_id++; 10961 10962 return (pred); 10963 } 10964 10965 static void 10966 dtrace_predicate_hold(dtrace_predicate_t *pred) 10967 { 10968 ASSERT(MUTEX_HELD(&dtrace_lock)); 10969 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 10970 ASSERT(pred->dtp_refcnt > 0); 10971 10972 pred->dtp_refcnt++; 10973 } 10974 10975 static void 10976 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 10977 { 10978 dtrace_difo_t *dp = pred->dtp_difo; 10979 10980 ASSERT(MUTEX_HELD(&dtrace_lock)); 10981 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 10982 ASSERT(pred->dtp_refcnt > 0); 10983 10984 if (--pred->dtp_refcnt == 0) { 10985 dtrace_difo_release(pred->dtp_difo, vstate); 10986 kmem_free(pred, sizeof (dtrace_predicate_t)); 10987 } 10988 } 10989 10990 /* 10991 * DTrace Action Description Functions 10992 */ 10993 static dtrace_actdesc_t * 10994 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 10995 uint64_t uarg, uint64_t arg) 10996 { 10997 dtrace_actdesc_t *act; 10998 10999 #ifdef illumos 11000 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 11001 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 11002 #endif 11003 11004 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 11005 act->dtad_kind = kind; 11006 act->dtad_ntuple = ntuple; 11007 act->dtad_uarg = uarg; 11008 act->dtad_arg = arg; 11009 act->dtad_refcnt = 1; 11010 11011 return (act); 11012 } 11013 11014 static void 11015 dtrace_actdesc_hold(dtrace_actdesc_t *act) 11016 { 11017 ASSERT(act->dtad_refcnt >= 1); 11018 act->dtad_refcnt++; 11019 } 11020 11021 static void 11022 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 11023 { 11024 dtrace_actkind_t kind = act->dtad_kind; 11025 dtrace_difo_t *dp; 11026 11027 ASSERT(act->dtad_refcnt >= 1); 11028 11029 if (--act->dtad_refcnt != 0) 11030 return; 11031 11032 if ((dp = act->dtad_difo) != NULL) 11033 dtrace_difo_release(dp, vstate); 11034 11035 if (DTRACEACT_ISPRINTFLIKE(kind)) { 11036 char *str = (char *)(uintptr_t)act->dtad_arg; 11037 11038 #ifdef illumos 11039 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 11040 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 11041 #endif 11042 11043 if (str != NULL) 11044 kmem_free(str, strlen(str) + 1); 11045 } 11046 11047 kmem_free(act, sizeof (dtrace_actdesc_t)); 11048 } 11049 11050 /* 11051 * DTrace ECB Functions 11052 */ 11053 static dtrace_ecb_t * 11054 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 11055 { 11056 dtrace_ecb_t *ecb; 11057 dtrace_epid_t epid; 11058 11059 ASSERT(MUTEX_HELD(&dtrace_lock)); 11060 11061 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 11062 ecb->dte_predicate = NULL; 11063 ecb->dte_probe = probe; 11064 11065 /* 11066 * The default size is the size of the default action: recording 11067 * the header. 11068 */ 11069 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 11070 ecb->dte_alignment = sizeof (dtrace_epid_t); 11071 11072 epid = state->dts_epid++; 11073 11074 if (epid - 1 >= state->dts_necbs) { 11075 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 11076 int necbs = state->dts_necbs << 1; 11077 11078 ASSERT(epid == state->dts_necbs + 1); 11079 11080 if (necbs == 0) { 11081 ASSERT(oecbs == NULL); 11082 necbs = 1; 11083 } 11084 11085 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 11086 11087 if (oecbs != NULL) 11088 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 11089 11090 dtrace_membar_producer(); 11091 state->dts_ecbs = ecbs; 11092 11093 if (oecbs != NULL) { 11094 /* 11095 * If this state is active, we must dtrace_sync() 11096 * before we can free the old dts_ecbs array: we're 11097 * coming in hot, and there may be active ring 11098 * buffer processing (which indexes into the dts_ecbs 11099 * array) on another CPU. 11100 */ 11101 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11102 dtrace_sync(); 11103 11104 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 11105 } 11106 11107 dtrace_membar_producer(); 11108 state->dts_necbs = necbs; 11109 } 11110 11111 ecb->dte_state = state; 11112 11113 ASSERT(state->dts_ecbs[epid - 1] == NULL); 11114 dtrace_membar_producer(); 11115 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 11116 11117 return (ecb); 11118 } 11119 11120 static void 11121 dtrace_ecb_enable(dtrace_ecb_t *ecb) 11122 { 11123 dtrace_probe_t *probe = ecb->dte_probe; 11124 11125 ASSERT(MUTEX_HELD(&cpu_lock)); 11126 ASSERT(MUTEX_HELD(&dtrace_lock)); 11127 ASSERT(ecb->dte_next == NULL); 11128 11129 if (probe == NULL) { 11130 /* 11131 * This is the NULL probe -- there's nothing to do. 11132 */ 11133 return; 11134 } 11135 11136 if (probe->dtpr_ecb == NULL) { 11137 dtrace_provider_t *prov = probe->dtpr_provider; 11138 11139 /* 11140 * We're the first ECB on this probe. 11141 */ 11142 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 11143 11144 if (ecb->dte_predicate != NULL) 11145 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 11146 11147 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 11148 probe->dtpr_id, probe->dtpr_arg); 11149 } else { 11150 /* 11151 * This probe is already active. Swing the last pointer to 11152 * point to the new ECB, and issue a dtrace_sync() to assure 11153 * that all CPUs have seen the change. 11154 */ 11155 ASSERT(probe->dtpr_ecb_last != NULL); 11156 probe->dtpr_ecb_last->dte_next = ecb; 11157 probe->dtpr_ecb_last = ecb; 11158 probe->dtpr_predcache = 0; 11159 11160 dtrace_sync(); 11161 } 11162 } 11163 11164 static int 11165 dtrace_ecb_resize(dtrace_ecb_t *ecb) 11166 { 11167 dtrace_action_t *act; 11168 uint32_t curneeded = UINT32_MAX; 11169 uint32_t aggbase = UINT32_MAX; 11170 11171 /* 11172 * If we record anything, we always record the dtrace_rechdr_t. (And 11173 * we always record it first.) 11174 */ 11175 ecb->dte_size = sizeof (dtrace_rechdr_t); 11176 ecb->dte_alignment = sizeof (dtrace_epid_t); 11177 11178 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11179 dtrace_recdesc_t *rec = &act->dta_rec; 11180 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 11181 11182 ecb->dte_alignment = MAX(ecb->dte_alignment, 11183 rec->dtrd_alignment); 11184 11185 if (DTRACEACT_ISAGG(act->dta_kind)) { 11186 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11187 11188 ASSERT(rec->dtrd_size != 0); 11189 ASSERT(agg->dtag_first != NULL); 11190 ASSERT(act->dta_prev->dta_intuple); 11191 ASSERT(aggbase != UINT32_MAX); 11192 ASSERT(curneeded != UINT32_MAX); 11193 11194 agg->dtag_base = aggbase; 11195 11196 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11197 rec->dtrd_offset = curneeded; 11198 if (curneeded + rec->dtrd_size < curneeded) 11199 return (EINVAL); 11200 curneeded += rec->dtrd_size; 11201 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 11202 11203 aggbase = UINT32_MAX; 11204 curneeded = UINT32_MAX; 11205 } else if (act->dta_intuple) { 11206 if (curneeded == UINT32_MAX) { 11207 /* 11208 * This is the first record in a tuple. Align 11209 * curneeded to be at offset 4 in an 8-byte 11210 * aligned block. 11211 */ 11212 ASSERT(act->dta_prev == NULL || 11213 !act->dta_prev->dta_intuple); 11214 ASSERT3U(aggbase, ==, UINT32_MAX); 11215 curneeded = P2PHASEUP(ecb->dte_size, 11216 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 11217 11218 aggbase = curneeded - sizeof (dtrace_aggid_t); 11219 ASSERT(IS_P2ALIGNED(aggbase, 11220 sizeof (uint64_t))); 11221 } 11222 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11223 rec->dtrd_offset = curneeded; 11224 if (curneeded + rec->dtrd_size < curneeded) 11225 return (EINVAL); 11226 curneeded += rec->dtrd_size; 11227 } else { 11228 /* tuples must be followed by an aggregation */ 11229 ASSERT(act->dta_prev == NULL || 11230 !act->dta_prev->dta_intuple); 11231 11232 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 11233 rec->dtrd_alignment); 11234 rec->dtrd_offset = ecb->dte_size; 11235 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size) 11236 return (EINVAL); 11237 ecb->dte_size += rec->dtrd_size; 11238 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 11239 } 11240 } 11241 11242 if ((act = ecb->dte_action) != NULL && 11243 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 11244 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 11245 /* 11246 * If the size is still sizeof (dtrace_rechdr_t), then all 11247 * actions store no data; set the size to 0. 11248 */ 11249 ecb->dte_size = 0; 11250 } 11251 11252 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 11253 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 11254 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 11255 ecb->dte_needed); 11256 return (0); 11257 } 11258 11259 static dtrace_action_t * 11260 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11261 { 11262 dtrace_aggregation_t *agg; 11263 size_t size = sizeof (uint64_t); 11264 int ntuple = desc->dtad_ntuple; 11265 dtrace_action_t *act; 11266 dtrace_recdesc_t *frec; 11267 dtrace_aggid_t aggid; 11268 dtrace_state_t *state = ecb->dte_state; 11269 11270 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 11271 agg->dtag_ecb = ecb; 11272 11273 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 11274 11275 switch (desc->dtad_kind) { 11276 case DTRACEAGG_MIN: 11277 agg->dtag_initial = INT64_MAX; 11278 agg->dtag_aggregate = dtrace_aggregate_min; 11279 break; 11280 11281 case DTRACEAGG_MAX: 11282 agg->dtag_initial = INT64_MIN; 11283 agg->dtag_aggregate = dtrace_aggregate_max; 11284 break; 11285 11286 case DTRACEAGG_COUNT: 11287 agg->dtag_aggregate = dtrace_aggregate_count; 11288 break; 11289 11290 case DTRACEAGG_QUANTIZE: 11291 agg->dtag_aggregate = dtrace_aggregate_quantize; 11292 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 11293 sizeof (uint64_t); 11294 break; 11295 11296 case DTRACEAGG_LQUANTIZE: { 11297 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 11298 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 11299 11300 agg->dtag_initial = desc->dtad_arg; 11301 agg->dtag_aggregate = dtrace_aggregate_lquantize; 11302 11303 if (step == 0 || levels == 0) 11304 goto err; 11305 11306 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 11307 break; 11308 } 11309 11310 case DTRACEAGG_LLQUANTIZE: { 11311 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 11312 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 11313 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 11314 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 11315 int64_t v; 11316 11317 agg->dtag_initial = desc->dtad_arg; 11318 agg->dtag_aggregate = dtrace_aggregate_llquantize; 11319 11320 if (factor < 2 || low >= high || nsteps < factor) 11321 goto err; 11322 11323 /* 11324 * Now check that the number of steps evenly divides a power 11325 * of the factor. (This assures both integer bucket size and 11326 * linearity within each magnitude.) 11327 */ 11328 for (v = factor; v < nsteps; v *= factor) 11329 continue; 11330 11331 if ((v % nsteps) || (nsteps % factor)) 11332 goto err; 11333 11334 size = (dtrace_aggregate_llquantize_bucket(factor, 11335 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 11336 break; 11337 } 11338 11339 case DTRACEAGG_AVG: 11340 agg->dtag_aggregate = dtrace_aggregate_avg; 11341 size = sizeof (uint64_t) * 2; 11342 break; 11343 11344 case DTRACEAGG_STDDEV: 11345 agg->dtag_aggregate = dtrace_aggregate_stddev; 11346 size = sizeof (uint64_t) * 4; 11347 break; 11348 11349 case DTRACEAGG_SUM: 11350 agg->dtag_aggregate = dtrace_aggregate_sum; 11351 break; 11352 11353 default: 11354 goto err; 11355 } 11356 11357 agg->dtag_action.dta_rec.dtrd_size = size; 11358 11359 if (ntuple == 0) 11360 goto err; 11361 11362 /* 11363 * We must make sure that we have enough actions for the n-tuple. 11364 */ 11365 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 11366 if (DTRACEACT_ISAGG(act->dta_kind)) 11367 break; 11368 11369 if (--ntuple == 0) { 11370 /* 11371 * This is the action with which our n-tuple begins. 11372 */ 11373 agg->dtag_first = act; 11374 goto success; 11375 } 11376 } 11377 11378 /* 11379 * This n-tuple is short by ntuple elements. Return failure. 11380 */ 11381 ASSERT(ntuple != 0); 11382 err: 11383 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11384 return (NULL); 11385 11386 success: 11387 /* 11388 * If the last action in the tuple has a size of zero, it's actually 11389 * an expression argument for the aggregating action. 11390 */ 11391 ASSERT(ecb->dte_action_last != NULL); 11392 act = ecb->dte_action_last; 11393 11394 if (act->dta_kind == DTRACEACT_DIFEXPR) { 11395 ASSERT(act->dta_difo != NULL); 11396 11397 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 11398 agg->dtag_hasarg = 1; 11399 } 11400 11401 /* 11402 * We need to allocate an id for this aggregation. 11403 */ 11404 #ifdef illumos 11405 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 11406 VM_BESTFIT | VM_SLEEP); 11407 #else 11408 aggid = alloc_unr(state->dts_aggid_arena); 11409 #endif 11410 11411 if (aggid - 1 >= state->dts_naggregations) { 11412 dtrace_aggregation_t **oaggs = state->dts_aggregations; 11413 dtrace_aggregation_t **aggs; 11414 int naggs = state->dts_naggregations << 1; 11415 int onaggs = state->dts_naggregations; 11416 11417 ASSERT(aggid == state->dts_naggregations + 1); 11418 11419 if (naggs == 0) { 11420 ASSERT(oaggs == NULL); 11421 naggs = 1; 11422 } 11423 11424 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 11425 11426 if (oaggs != NULL) { 11427 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 11428 kmem_free(oaggs, onaggs * sizeof (*aggs)); 11429 } 11430 11431 state->dts_aggregations = aggs; 11432 state->dts_naggregations = naggs; 11433 } 11434 11435 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 11436 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 11437 11438 frec = &agg->dtag_first->dta_rec; 11439 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 11440 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 11441 11442 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 11443 ASSERT(!act->dta_intuple); 11444 act->dta_intuple = 1; 11445 } 11446 11447 return (&agg->dtag_action); 11448 } 11449 11450 static void 11451 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 11452 { 11453 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11454 dtrace_state_t *state = ecb->dte_state; 11455 dtrace_aggid_t aggid = agg->dtag_id; 11456 11457 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 11458 #ifdef illumos 11459 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 11460 #else 11461 free_unr(state->dts_aggid_arena, aggid); 11462 #endif 11463 11464 ASSERT(state->dts_aggregations[aggid - 1] == agg); 11465 state->dts_aggregations[aggid - 1] = NULL; 11466 11467 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11468 } 11469 11470 static int 11471 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11472 { 11473 dtrace_action_t *action, *last; 11474 dtrace_difo_t *dp = desc->dtad_difo; 11475 uint32_t size = 0, align = sizeof (uint8_t), mask; 11476 uint16_t format = 0; 11477 dtrace_recdesc_t *rec; 11478 dtrace_state_t *state = ecb->dte_state; 11479 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 11480 uint64_t arg = desc->dtad_arg; 11481 11482 ASSERT(MUTEX_HELD(&dtrace_lock)); 11483 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 11484 11485 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 11486 /* 11487 * If this is an aggregating action, there must be neither 11488 * a speculate nor a commit on the action chain. 11489 */ 11490 dtrace_action_t *act; 11491 11492 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11493 if (act->dta_kind == DTRACEACT_COMMIT) 11494 return (EINVAL); 11495 11496 if (act->dta_kind == DTRACEACT_SPECULATE) 11497 return (EINVAL); 11498 } 11499 11500 action = dtrace_ecb_aggregation_create(ecb, desc); 11501 11502 if (action == NULL) 11503 return (EINVAL); 11504 } else { 11505 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 11506 (desc->dtad_kind == DTRACEACT_DIFEXPR && 11507 dp != NULL && dp->dtdo_destructive)) { 11508 state->dts_destructive = 1; 11509 } 11510 11511 switch (desc->dtad_kind) { 11512 case DTRACEACT_PRINTF: 11513 case DTRACEACT_PRINTA: 11514 case DTRACEACT_SYSTEM: 11515 case DTRACEACT_FREOPEN: 11516 case DTRACEACT_DIFEXPR: 11517 /* 11518 * We know that our arg is a string -- turn it into a 11519 * format. 11520 */ 11521 if (arg == 0) { 11522 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 11523 desc->dtad_kind == DTRACEACT_DIFEXPR); 11524 format = 0; 11525 } else { 11526 ASSERT(arg != 0); 11527 #ifdef illumos 11528 ASSERT(arg > KERNELBASE); 11529 #endif 11530 format = dtrace_format_add(state, 11531 (char *)(uintptr_t)arg); 11532 } 11533 11534 /*FALLTHROUGH*/ 11535 case DTRACEACT_LIBACT: 11536 case DTRACEACT_TRACEMEM: 11537 case DTRACEACT_TRACEMEM_DYNSIZE: 11538 if (dp == NULL) 11539 return (EINVAL); 11540 11541 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 11542 break; 11543 11544 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 11545 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11546 return (EINVAL); 11547 11548 size = opt[DTRACEOPT_STRSIZE]; 11549 } 11550 11551 break; 11552 11553 case DTRACEACT_STACK: 11554 if ((nframes = arg) == 0) { 11555 nframes = opt[DTRACEOPT_STACKFRAMES]; 11556 ASSERT(nframes > 0); 11557 arg = nframes; 11558 } 11559 11560 size = nframes * sizeof (pc_t); 11561 break; 11562 11563 case DTRACEACT_JSTACK: 11564 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 11565 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 11566 11567 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 11568 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 11569 11570 arg = DTRACE_USTACK_ARG(nframes, strsize); 11571 11572 /*FALLTHROUGH*/ 11573 case DTRACEACT_USTACK: 11574 if (desc->dtad_kind != DTRACEACT_JSTACK && 11575 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 11576 strsize = DTRACE_USTACK_STRSIZE(arg); 11577 nframes = opt[DTRACEOPT_USTACKFRAMES]; 11578 ASSERT(nframes > 0); 11579 arg = DTRACE_USTACK_ARG(nframes, strsize); 11580 } 11581 11582 /* 11583 * Save a slot for the pid. 11584 */ 11585 size = (nframes + 1) * sizeof (uint64_t); 11586 size += DTRACE_USTACK_STRSIZE(arg); 11587 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 11588 11589 break; 11590 11591 case DTRACEACT_SYM: 11592 case DTRACEACT_MOD: 11593 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 11594 sizeof (uint64_t)) || 11595 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11596 return (EINVAL); 11597 break; 11598 11599 case DTRACEACT_USYM: 11600 case DTRACEACT_UMOD: 11601 case DTRACEACT_UADDR: 11602 if (dp == NULL || 11603 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 11604 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11605 return (EINVAL); 11606 11607 /* 11608 * We have a slot for the pid, plus a slot for the 11609 * argument. To keep things simple (aligned with 11610 * bitness-neutral sizing), we store each as a 64-bit 11611 * quantity. 11612 */ 11613 size = 2 * sizeof (uint64_t); 11614 break; 11615 11616 case DTRACEACT_STOP: 11617 case DTRACEACT_BREAKPOINT: 11618 case DTRACEACT_PANIC: 11619 break; 11620 11621 case DTRACEACT_CHILL: 11622 case DTRACEACT_DISCARD: 11623 case DTRACEACT_RAISE: 11624 if (dp == NULL) 11625 return (EINVAL); 11626 break; 11627 11628 case DTRACEACT_EXIT: 11629 if (dp == NULL || 11630 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 11631 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11632 return (EINVAL); 11633 break; 11634 11635 case DTRACEACT_SPECULATE: 11636 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 11637 return (EINVAL); 11638 11639 if (dp == NULL) 11640 return (EINVAL); 11641 11642 state->dts_speculates = 1; 11643 break; 11644 11645 case DTRACEACT_PRINTM: 11646 size = dp->dtdo_rtype.dtdt_size; 11647 break; 11648 11649 case DTRACEACT_COMMIT: { 11650 dtrace_action_t *act = ecb->dte_action; 11651 11652 for (; act != NULL; act = act->dta_next) { 11653 if (act->dta_kind == DTRACEACT_COMMIT) 11654 return (EINVAL); 11655 } 11656 11657 if (dp == NULL) 11658 return (EINVAL); 11659 break; 11660 } 11661 11662 default: 11663 return (EINVAL); 11664 } 11665 11666 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 11667 /* 11668 * If this is a data-storing action or a speculate, 11669 * we must be sure that there isn't a commit on the 11670 * action chain. 11671 */ 11672 dtrace_action_t *act = ecb->dte_action; 11673 11674 for (; act != NULL; act = act->dta_next) { 11675 if (act->dta_kind == DTRACEACT_COMMIT) 11676 return (EINVAL); 11677 } 11678 } 11679 11680 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 11681 action->dta_rec.dtrd_size = size; 11682 } 11683 11684 action->dta_refcnt = 1; 11685 rec = &action->dta_rec; 11686 size = rec->dtrd_size; 11687 11688 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 11689 if (!(size & mask)) { 11690 align = mask + 1; 11691 break; 11692 } 11693 } 11694 11695 action->dta_kind = desc->dtad_kind; 11696 11697 if ((action->dta_difo = dp) != NULL) 11698 dtrace_difo_hold(dp); 11699 11700 rec->dtrd_action = action->dta_kind; 11701 rec->dtrd_arg = arg; 11702 rec->dtrd_uarg = desc->dtad_uarg; 11703 rec->dtrd_alignment = (uint16_t)align; 11704 rec->dtrd_format = format; 11705 11706 if ((last = ecb->dte_action_last) != NULL) { 11707 ASSERT(ecb->dte_action != NULL); 11708 action->dta_prev = last; 11709 last->dta_next = action; 11710 } else { 11711 ASSERT(ecb->dte_action == NULL); 11712 ecb->dte_action = action; 11713 } 11714 11715 ecb->dte_action_last = action; 11716 11717 return (0); 11718 } 11719 11720 static void 11721 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 11722 { 11723 dtrace_action_t *act = ecb->dte_action, *next; 11724 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 11725 dtrace_difo_t *dp; 11726 uint16_t format; 11727 11728 if (act != NULL && act->dta_refcnt > 1) { 11729 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 11730 act->dta_refcnt--; 11731 } else { 11732 for (; act != NULL; act = next) { 11733 next = act->dta_next; 11734 ASSERT(next != NULL || act == ecb->dte_action_last); 11735 ASSERT(act->dta_refcnt == 1); 11736 11737 if ((format = act->dta_rec.dtrd_format) != 0) 11738 dtrace_format_remove(ecb->dte_state, format); 11739 11740 if ((dp = act->dta_difo) != NULL) 11741 dtrace_difo_release(dp, vstate); 11742 11743 if (DTRACEACT_ISAGG(act->dta_kind)) { 11744 dtrace_ecb_aggregation_destroy(ecb, act); 11745 } else { 11746 kmem_free(act, sizeof (dtrace_action_t)); 11747 } 11748 } 11749 } 11750 11751 ecb->dte_action = NULL; 11752 ecb->dte_action_last = NULL; 11753 ecb->dte_size = 0; 11754 } 11755 11756 static void 11757 dtrace_ecb_disable(dtrace_ecb_t *ecb) 11758 { 11759 /* 11760 * We disable the ECB by removing it from its probe. 11761 */ 11762 dtrace_ecb_t *pecb, *prev = NULL; 11763 dtrace_probe_t *probe = ecb->dte_probe; 11764 11765 ASSERT(MUTEX_HELD(&dtrace_lock)); 11766 11767 if (probe == NULL) { 11768 /* 11769 * This is the NULL probe; there is nothing to disable. 11770 */ 11771 return; 11772 } 11773 11774 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 11775 if (pecb == ecb) 11776 break; 11777 prev = pecb; 11778 } 11779 11780 ASSERT(pecb != NULL); 11781 11782 if (prev == NULL) { 11783 probe->dtpr_ecb = ecb->dte_next; 11784 } else { 11785 prev->dte_next = ecb->dte_next; 11786 } 11787 11788 if (ecb == probe->dtpr_ecb_last) { 11789 ASSERT(ecb->dte_next == NULL); 11790 probe->dtpr_ecb_last = prev; 11791 } 11792 11793 /* 11794 * The ECB has been disconnected from the probe; now sync to assure 11795 * that all CPUs have seen the change before returning. 11796 */ 11797 dtrace_sync(); 11798 11799 if (probe->dtpr_ecb == NULL) { 11800 /* 11801 * That was the last ECB on the probe; clear the predicate 11802 * cache ID for the probe, disable it and sync one more time 11803 * to assure that we'll never hit it again. 11804 */ 11805 dtrace_provider_t *prov = probe->dtpr_provider; 11806 11807 ASSERT(ecb->dte_next == NULL); 11808 ASSERT(probe->dtpr_ecb_last == NULL); 11809 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 11810 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 11811 probe->dtpr_id, probe->dtpr_arg); 11812 dtrace_sync(); 11813 } else { 11814 /* 11815 * There is at least one ECB remaining on the probe. If there 11816 * is _exactly_ one, set the probe's predicate cache ID to be 11817 * the predicate cache ID of the remaining ECB. 11818 */ 11819 ASSERT(probe->dtpr_ecb_last != NULL); 11820 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 11821 11822 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 11823 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 11824 11825 ASSERT(probe->dtpr_ecb->dte_next == NULL); 11826 11827 if (p != NULL) 11828 probe->dtpr_predcache = p->dtp_cacheid; 11829 } 11830 11831 ecb->dte_next = NULL; 11832 } 11833 } 11834 11835 static void 11836 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 11837 { 11838 dtrace_state_t *state = ecb->dte_state; 11839 dtrace_vstate_t *vstate = &state->dts_vstate; 11840 dtrace_predicate_t *pred; 11841 dtrace_epid_t epid = ecb->dte_epid; 11842 11843 ASSERT(MUTEX_HELD(&dtrace_lock)); 11844 ASSERT(ecb->dte_next == NULL); 11845 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 11846 11847 if ((pred = ecb->dte_predicate) != NULL) 11848 dtrace_predicate_release(pred, vstate); 11849 11850 dtrace_ecb_action_remove(ecb); 11851 11852 ASSERT(state->dts_ecbs[epid - 1] == ecb); 11853 state->dts_ecbs[epid - 1] = NULL; 11854 11855 kmem_free(ecb, sizeof (dtrace_ecb_t)); 11856 } 11857 11858 static dtrace_ecb_t * 11859 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 11860 dtrace_enabling_t *enab) 11861 { 11862 dtrace_ecb_t *ecb; 11863 dtrace_predicate_t *pred; 11864 dtrace_actdesc_t *act; 11865 dtrace_provider_t *prov; 11866 dtrace_ecbdesc_t *desc = enab->dten_current; 11867 11868 ASSERT(MUTEX_HELD(&dtrace_lock)); 11869 ASSERT(state != NULL); 11870 11871 ecb = dtrace_ecb_add(state, probe); 11872 ecb->dte_uarg = desc->dted_uarg; 11873 11874 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 11875 dtrace_predicate_hold(pred); 11876 ecb->dte_predicate = pred; 11877 } 11878 11879 if (probe != NULL) { 11880 /* 11881 * If the provider shows more leg than the consumer is old 11882 * enough to see, we need to enable the appropriate implicit 11883 * predicate bits to prevent the ecb from activating at 11884 * revealing times. 11885 * 11886 * Providers specifying DTRACE_PRIV_USER at register time 11887 * are stating that they need the /proc-style privilege 11888 * model to be enforced, and this is what DTRACE_COND_OWNER 11889 * and DTRACE_COND_ZONEOWNER will then do at probe time. 11890 */ 11891 prov = probe->dtpr_provider; 11892 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 11893 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11894 ecb->dte_cond |= DTRACE_COND_OWNER; 11895 11896 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 11897 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11898 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 11899 11900 /* 11901 * If the provider shows us kernel innards and the user 11902 * is lacking sufficient privilege, enable the 11903 * DTRACE_COND_USERMODE implicit predicate. 11904 */ 11905 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 11906 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 11907 ecb->dte_cond |= DTRACE_COND_USERMODE; 11908 } 11909 11910 if (dtrace_ecb_create_cache != NULL) { 11911 /* 11912 * If we have a cached ecb, we'll use its action list instead 11913 * of creating our own (saving both time and space). 11914 */ 11915 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 11916 dtrace_action_t *act = cached->dte_action; 11917 11918 if (act != NULL) { 11919 ASSERT(act->dta_refcnt > 0); 11920 act->dta_refcnt++; 11921 ecb->dte_action = act; 11922 ecb->dte_action_last = cached->dte_action_last; 11923 ecb->dte_needed = cached->dte_needed; 11924 ecb->dte_size = cached->dte_size; 11925 ecb->dte_alignment = cached->dte_alignment; 11926 } 11927 11928 return (ecb); 11929 } 11930 11931 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 11932 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 11933 dtrace_ecb_destroy(ecb); 11934 return (NULL); 11935 } 11936 } 11937 11938 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) { 11939 dtrace_ecb_destroy(ecb); 11940 return (NULL); 11941 } 11942 11943 return (dtrace_ecb_create_cache = ecb); 11944 } 11945 11946 static int 11947 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 11948 { 11949 dtrace_ecb_t *ecb; 11950 dtrace_enabling_t *enab = arg; 11951 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 11952 11953 ASSERT(state != NULL); 11954 11955 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 11956 /* 11957 * This probe was created in a generation for which this 11958 * enabling has previously created ECBs; we don't want to 11959 * enable it again, so just kick out. 11960 */ 11961 return (DTRACE_MATCH_NEXT); 11962 } 11963 11964 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 11965 return (DTRACE_MATCH_DONE); 11966 11967 dtrace_ecb_enable(ecb); 11968 return (DTRACE_MATCH_NEXT); 11969 } 11970 11971 static dtrace_ecb_t * 11972 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 11973 { 11974 dtrace_ecb_t *ecb; 11975 11976 ASSERT(MUTEX_HELD(&dtrace_lock)); 11977 11978 if (id == 0 || id > state->dts_necbs) 11979 return (NULL); 11980 11981 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 11982 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 11983 11984 return (state->dts_ecbs[id - 1]); 11985 } 11986 11987 static dtrace_aggregation_t * 11988 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 11989 { 11990 dtrace_aggregation_t *agg; 11991 11992 ASSERT(MUTEX_HELD(&dtrace_lock)); 11993 11994 if (id == 0 || id > state->dts_naggregations) 11995 return (NULL); 11996 11997 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 11998 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 11999 agg->dtag_id == id); 12000 12001 return (state->dts_aggregations[id - 1]); 12002 } 12003 12004 /* 12005 * DTrace Buffer Functions 12006 * 12007 * The following functions manipulate DTrace buffers. Most of these functions 12008 * are called in the context of establishing or processing consumer state; 12009 * exceptions are explicitly noted. 12010 */ 12011 12012 /* 12013 * Note: called from cross call context. This function switches the two 12014 * buffers on a given CPU. The atomicity of this operation is assured by 12015 * disabling interrupts while the actual switch takes place; the disabling of 12016 * interrupts serializes the execution with any execution of dtrace_probe() on 12017 * the same CPU. 12018 */ 12019 static void 12020 dtrace_buffer_switch(dtrace_buffer_t *buf) 12021 { 12022 caddr_t tomax = buf->dtb_tomax; 12023 caddr_t xamot = buf->dtb_xamot; 12024 dtrace_icookie_t cookie; 12025 hrtime_t now; 12026 12027 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12028 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 12029 12030 cookie = dtrace_interrupt_disable(); 12031 now = dtrace_gethrtime(); 12032 buf->dtb_tomax = xamot; 12033 buf->dtb_xamot = tomax; 12034 buf->dtb_xamot_drops = buf->dtb_drops; 12035 buf->dtb_xamot_offset = buf->dtb_offset; 12036 buf->dtb_xamot_errors = buf->dtb_errors; 12037 buf->dtb_xamot_flags = buf->dtb_flags; 12038 buf->dtb_offset = 0; 12039 buf->dtb_drops = 0; 12040 buf->dtb_errors = 0; 12041 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 12042 buf->dtb_interval = now - buf->dtb_switched; 12043 buf->dtb_switched = now; 12044 dtrace_interrupt_enable(cookie); 12045 } 12046 12047 /* 12048 * Note: called from cross call context. This function activates a buffer 12049 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 12050 * is guaranteed by the disabling of interrupts. 12051 */ 12052 static void 12053 dtrace_buffer_activate(dtrace_state_t *state) 12054 { 12055 dtrace_buffer_t *buf; 12056 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 12057 12058 buf = &state->dts_buffer[curcpu]; 12059 12060 if (buf->dtb_tomax != NULL) { 12061 /* 12062 * We might like to assert that the buffer is marked inactive, 12063 * but this isn't necessarily true: the buffer for the CPU 12064 * that processes the BEGIN probe has its buffer activated 12065 * manually. In this case, we take the (harmless) action 12066 * re-clearing the bit INACTIVE bit. 12067 */ 12068 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 12069 } 12070 12071 dtrace_interrupt_enable(cookie); 12072 } 12073 12074 #ifdef __FreeBSD__ 12075 /* 12076 * Activate the specified per-CPU buffer. This is used instead of 12077 * dtrace_buffer_activate() when APs have not yet started, i.e. when 12078 * activating anonymous state. 12079 */ 12080 static void 12081 dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu) 12082 { 12083 12084 if (state->dts_buffer[cpu].dtb_tomax != NULL) 12085 state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12086 } 12087 #endif 12088 12089 static int 12090 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 12091 processorid_t cpu, int *factor) 12092 { 12093 #ifdef illumos 12094 cpu_t *cp; 12095 #endif 12096 dtrace_buffer_t *buf; 12097 int allocated = 0, desired = 0; 12098 12099 #ifdef illumos 12100 ASSERT(MUTEX_HELD(&cpu_lock)); 12101 ASSERT(MUTEX_HELD(&dtrace_lock)); 12102 12103 *factor = 1; 12104 12105 if (size > dtrace_nonroot_maxsize && 12106 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 12107 return (EFBIG); 12108 12109 cp = cpu_list; 12110 12111 do { 12112 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 12113 continue; 12114 12115 buf = &bufs[cp->cpu_id]; 12116 12117 /* 12118 * If there is already a buffer allocated for this CPU, it 12119 * is only possible that this is a DR event. In this case, 12120 */ 12121 if (buf->dtb_tomax != NULL) { 12122 ASSERT(buf->dtb_size == size); 12123 continue; 12124 } 12125 12126 ASSERT(buf->dtb_xamot == NULL); 12127 12128 if ((buf->dtb_tomax = kmem_zalloc(size, 12129 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12130 goto err; 12131 12132 buf->dtb_size = size; 12133 buf->dtb_flags = flags; 12134 buf->dtb_offset = 0; 12135 buf->dtb_drops = 0; 12136 12137 if (flags & DTRACEBUF_NOSWITCH) 12138 continue; 12139 12140 if ((buf->dtb_xamot = kmem_zalloc(size, 12141 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12142 goto err; 12143 } while ((cp = cp->cpu_next) != cpu_list); 12144 12145 return (0); 12146 12147 err: 12148 cp = cpu_list; 12149 12150 do { 12151 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 12152 continue; 12153 12154 buf = &bufs[cp->cpu_id]; 12155 desired += 2; 12156 12157 if (buf->dtb_xamot != NULL) { 12158 ASSERT(buf->dtb_tomax != NULL); 12159 ASSERT(buf->dtb_size == size); 12160 kmem_free(buf->dtb_xamot, size); 12161 allocated++; 12162 } 12163 12164 if (buf->dtb_tomax != NULL) { 12165 ASSERT(buf->dtb_size == size); 12166 kmem_free(buf->dtb_tomax, size); 12167 allocated++; 12168 } 12169 12170 buf->dtb_tomax = NULL; 12171 buf->dtb_xamot = NULL; 12172 buf->dtb_size = 0; 12173 } while ((cp = cp->cpu_next) != cpu_list); 12174 #else 12175 int i; 12176 12177 *factor = 1; 12178 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ 12179 defined(__mips__) || defined(__powerpc__) || defined(__riscv) 12180 /* 12181 * FreeBSD isn't good at limiting the amount of memory we 12182 * ask to malloc, so let's place a limit here before trying 12183 * to do something that might well end in tears at bedtime. 12184 */ 12185 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 12186 return (ENOMEM); 12187 #endif 12188 12189 ASSERT(MUTEX_HELD(&dtrace_lock)); 12190 CPU_FOREACH(i) { 12191 if (cpu != DTRACE_CPUALL && cpu != i) 12192 continue; 12193 12194 buf = &bufs[i]; 12195 12196 /* 12197 * If there is already a buffer allocated for this CPU, it 12198 * is only possible that this is a DR event. In this case, 12199 * the buffer size must match our specified size. 12200 */ 12201 if (buf->dtb_tomax != NULL) { 12202 ASSERT(buf->dtb_size == size); 12203 continue; 12204 } 12205 12206 ASSERT(buf->dtb_xamot == NULL); 12207 12208 if ((buf->dtb_tomax = kmem_zalloc(size, 12209 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12210 goto err; 12211 12212 buf->dtb_size = size; 12213 buf->dtb_flags = flags; 12214 buf->dtb_offset = 0; 12215 buf->dtb_drops = 0; 12216 12217 if (flags & DTRACEBUF_NOSWITCH) 12218 continue; 12219 12220 if ((buf->dtb_xamot = kmem_zalloc(size, 12221 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12222 goto err; 12223 } 12224 12225 return (0); 12226 12227 err: 12228 /* 12229 * Error allocating memory, so free the buffers that were 12230 * allocated before the failed allocation. 12231 */ 12232 CPU_FOREACH(i) { 12233 if (cpu != DTRACE_CPUALL && cpu != i) 12234 continue; 12235 12236 buf = &bufs[i]; 12237 desired += 2; 12238 12239 if (buf->dtb_xamot != NULL) { 12240 ASSERT(buf->dtb_tomax != NULL); 12241 ASSERT(buf->dtb_size == size); 12242 kmem_free(buf->dtb_xamot, size); 12243 allocated++; 12244 } 12245 12246 if (buf->dtb_tomax != NULL) { 12247 ASSERT(buf->dtb_size == size); 12248 kmem_free(buf->dtb_tomax, size); 12249 allocated++; 12250 } 12251 12252 buf->dtb_tomax = NULL; 12253 buf->dtb_xamot = NULL; 12254 buf->dtb_size = 0; 12255 12256 } 12257 #endif 12258 *factor = desired / (allocated > 0 ? allocated : 1); 12259 12260 return (ENOMEM); 12261 } 12262 12263 /* 12264 * Note: called from probe context. This function just increments the drop 12265 * count on a buffer. It has been made a function to allow for the 12266 * possibility of understanding the source of mysterious drop counts. (A 12267 * problem for which one may be particularly disappointed that DTrace cannot 12268 * be used to understand DTrace.) 12269 */ 12270 static void 12271 dtrace_buffer_drop(dtrace_buffer_t *buf) 12272 { 12273 buf->dtb_drops++; 12274 } 12275 12276 /* 12277 * Note: called from probe context. This function is called to reserve space 12278 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 12279 * mstate. Returns the new offset in the buffer, or a negative value if an 12280 * error has occurred. 12281 */ 12282 static intptr_t 12283 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 12284 dtrace_state_t *state, dtrace_mstate_t *mstate) 12285 { 12286 intptr_t offs = buf->dtb_offset, soffs; 12287 intptr_t woffs; 12288 caddr_t tomax; 12289 size_t total; 12290 12291 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 12292 return (-1); 12293 12294 if ((tomax = buf->dtb_tomax) == NULL) { 12295 dtrace_buffer_drop(buf); 12296 return (-1); 12297 } 12298 12299 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 12300 while (offs & (align - 1)) { 12301 /* 12302 * Assert that our alignment is off by a number which 12303 * is itself sizeof (uint32_t) aligned. 12304 */ 12305 ASSERT(!((align - (offs & (align - 1))) & 12306 (sizeof (uint32_t) - 1))); 12307 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12308 offs += sizeof (uint32_t); 12309 } 12310 12311 if ((soffs = offs + needed) > buf->dtb_size) { 12312 dtrace_buffer_drop(buf); 12313 return (-1); 12314 } 12315 12316 if (mstate == NULL) 12317 return (offs); 12318 12319 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 12320 mstate->dtms_scratch_size = buf->dtb_size - soffs; 12321 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12322 12323 return (offs); 12324 } 12325 12326 if (buf->dtb_flags & DTRACEBUF_FILL) { 12327 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 12328 (buf->dtb_flags & DTRACEBUF_FULL)) 12329 return (-1); 12330 goto out; 12331 } 12332 12333 total = needed + (offs & (align - 1)); 12334 12335 /* 12336 * For a ring buffer, life is quite a bit more complicated. Before 12337 * we can store any padding, we need to adjust our wrapping offset. 12338 * (If we've never before wrapped or we're not about to, no adjustment 12339 * is required.) 12340 */ 12341 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 12342 offs + total > buf->dtb_size) { 12343 woffs = buf->dtb_xamot_offset; 12344 12345 if (offs + total > buf->dtb_size) { 12346 /* 12347 * We can't fit in the end of the buffer. First, a 12348 * sanity check that we can fit in the buffer at all. 12349 */ 12350 if (total > buf->dtb_size) { 12351 dtrace_buffer_drop(buf); 12352 return (-1); 12353 } 12354 12355 /* 12356 * We're going to be storing at the top of the buffer, 12357 * so now we need to deal with the wrapped offset. We 12358 * only reset our wrapped offset to 0 if it is 12359 * currently greater than the current offset. If it 12360 * is less than the current offset, it is because a 12361 * previous allocation induced a wrap -- but the 12362 * allocation didn't subsequently take the space due 12363 * to an error or false predicate evaluation. In this 12364 * case, we'll just leave the wrapped offset alone: if 12365 * the wrapped offset hasn't been advanced far enough 12366 * for this allocation, it will be adjusted in the 12367 * lower loop. 12368 */ 12369 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 12370 if (woffs >= offs) 12371 woffs = 0; 12372 } else { 12373 woffs = 0; 12374 } 12375 12376 /* 12377 * Now we know that we're going to be storing to the 12378 * top of the buffer and that there is room for us 12379 * there. We need to clear the buffer from the current 12380 * offset to the end (there may be old gunk there). 12381 */ 12382 while (offs < buf->dtb_size) 12383 tomax[offs++] = 0; 12384 12385 /* 12386 * We need to set our offset to zero. And because we 12387 * are wrapping, we need to set the bit indicating as 12388 * much. We can also adjust our needed space back 12389 * down to the space required by the ECB -- we know 12390 * that the top of the buffer is aligned. 12391 */ 12392 offs = 0; 12393 total = needed; 12394 buf->dtb_flags |= DTRACEBUF_WRAPPED; 12395 } else { 12396 /* 12397 * There is room for us in the buffer, so we simply 12398 * need to check the wrapped offset. 12399 */ 12400 if (woffs < offs) { 12401 /* 12402 * The wrapped offset is less than the offset. 12403 * This can happen if we allocated buffer space 12404 * that induced a wrap, but then we didn't 12405 * subsequently take the space due to an error 12406 * or false predicate evaluation. This is 12407 * okay; we know that _this_ allocation isn't 12408 * going to induce a wrap. We still can't 12409 * reset the wrapped offset to be zero, 12410 * however: the space may have been trashed in 12411 * the previous failed probe attempt. But at 12412 * least the wrapped offset doesn't need to 12413 * be adjusted at all... 12414 */ 12415 goto out; 12416 } 12417 } 12418 12419 while (offs + total > woffs) { 12420 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 12421 size_t size; 12422 12423 if (epid == DTRACE_EPIDNONE) { 12424 size = sizeof (uint32_t); 12425 } else { 12426 ASSERT3U(epid, <=, state->dts_necbs); 12427 ASSERT(state->dts_ecbs[epid - 1] != NULL); 12428 12429 size = state->dts_ecbs[epid - 1]->dte_size; 12430 } 12431 12432 ASSERT(woffs + size <= buf->dtb_size); 12433 ASSERT(size != 0); 12434 12435 if (woffs + size == buf->dtb_size) { 12436 /* 12437 * We've reached the end of the buffer; we want 12438 * to set the wrapped offset to 0 and break 12439 * out. However, if the offs is 0, then we're 12440 * in a strange edge-condition: the amount of 12441 * space that we want to reserve plus the size 12442 * of the record that we're overwriting is 12443 * greater than the size of the buffer. This 12444 * is problematic because if we reserve the 12445 * space but subsequently don't consume it (due 12446 * to a failed predicate or error) the wrapped 12447 * offset will be 0 -- yet the EPID at offset 0 12448 * will not be committed. This situation is 12449 * relatively easy to deal with: if we're in 12450 * this case, the buffer is indistinguishable 12451 * from one that hasn't wrapped; we need only 12452 * finish the job by clearing the wrapped bit, 12453 * explicitly setting the offset to be 0, and 12454 * zero'ing out the old data in the buffer. 12455 */ 12456 if (offs == 0) { 12457 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 12458 buf->dtb_offset = 0; 12459 woffs = total; 12460 12461 while (woffs < buf->dtb_size) 12462 tomax[woffs++] = 0; 12463 } 12464 12465 woffs = 0; 12466 break; 12467 } 12468 12469 woffs += size; 12470 } 12471 12472 /* 12473 * We have a wrapped offset. It may be that the wrapped offset 12474 * has become zero -- that's okay. 12475 */ 12476 buf->dtb_xamot_offset = woffs; 12477 } 12478 12479 out: 12480 /* 12481 * Now we can plow the buffer with any necessary padding. 12482 */ 12483 while (offs & (align - 1)) { 12484 /* 12485 * Assert that our alignment is off by a number which 12486 * is itself sizeof (uint32_t) aligned. 12487 */ 12488 ASSERT(!((align - (offs & (align - 1))) & 12489 (sizeof (uint32_t) - 1))); 12490 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12491 offs += sizeof (uint32_t); 12492 } 12493 12494 if (buf->dtb_flags & DTRACEBUF_FILL) { 12495 if (offs + needed > buf->dtb_size - state->dts_reserve) { 12496 buf->dtb_flags |= DTRACEBUF_FULL; 12497 return (-1); 12498 } 12499 } 12500 12501 if (mstate == NULL) 12502 return (offs); 12503 12504 /* 12505 * For ring buffers and fill buffers, the scratch space is always 12506 * the inactive buffer. 12507 */ 12508 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 12509 mstate->dtms_scratch_size = buf->dtb_size; 12510 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12511 12512 return (offs); 12513 } 12514 12515 static void 12516 dtrace_buffer_polish(dtrace_buffer_t *buf) 12517 { 12518 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 12519 ASSERT(MUTEX_HELD(&dtrace_lock)); 12520 12521 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 12522 return; 12523 12524 /* 12525 * We need to polish the ring buffer. There are three cases: 12526 * 12527 * - The first (and presumably most common) is that there is no gap 12528 * between the buffer offset and the wrapped offset. In this case, 12529 * there is nothing in the buffer that isn't valid data; we can 12530 * mark the buffer as polished and return. 12531 * 12532 * - The second (less common than the first but still more common 12533 * than the third) is that there is a gap between the buffer offset 12534 * and the wrapped offset, and the wrapped offset is larger than the 12535 * buffer offset. This can happen because of an alignment issue, or 12536 * can happen because of a call to dtrace_buffer_reserve() that 12537 * didn't subsequently consume the buffer space. In this case, 12538 * we need to zero the data from the buffer offset to the wrapped 12539 * offset. 12540 * 12541 * - The third (and least common) is that there is a gap between the 12542 * buffer offset and the wrapped offset, but the wrapped offset is 12543 * _less_ than the buffer offset. This can only happen because a 12544 * call to dtrace_buffer_reserve() induced a wrap, but the space 12545 * was not subsequently consumed. In this case, we need to zero the 12546 * space from the offset to the end of the buffer _and_ from the 12547 * top of the buffer to the wrapped offset. 12548 */ 12549 if (buf->dtb_offset < buf->dtb_xamot_offset) { 12550 bzero(buf->dtb_tomax + buf->dtb_offset, 12551 buf->dtb_xamot_offset - buf->dtb_offset); 12552 } 12553 12554 if (buf->dtb_offset > buf->dtb_xamot_offset) { 12555 bzero(buf->dtb_tomax + buf->dtb_offset, 12556 buf->dtb_size - buf->dtb_offset); 12557 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 12558 } 12559 } 12560 12561 /* 12562 * This routine determines if data generated at the specified time has likely 12563 * been entirely consumed at user-level. This routine is called to determine 12564 * if an ECB on a defunct probe (but for an active enabling) can be safely 12565 * disabled and destroyed. 12566 */ 12567 static int 12568 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 12569 { 12570 int i; 12571 12572 for (i = 0; i < NCPU; i++) { 12573 dtrace_buffer_t *buf = &bufs[i]; 12574 12575 if (buf->dtb_size == 0) 12576 continue; 12577 12578 if (buf->dtb_flags & DTRACEBUF_RING) 12579 return (0); 12580 12581 if (!buf->dtb_switched && buf->dtb_offset != 0) 12582 return (0); 12583 12584 if (buf->dtb_switched - buf->dtb_interval < when) 12585 return (0); 12586 } 12587 12588 return (1); 12589 } 12590 12591 static void 12592 dtrace_buffer_free(dtrace_buffer_t *bufs) 12593 { 12594 int i; 12595 12596 for (i = 0; i < NCPU; i++) { 12597 dtrace_buffer_t *buf = &bufs[i]; 12598 12599 if (buf->dtb_tomax == NULL) { 12600 ASSERT(buf->dtb_xamot == NULL); 12601 ASSERT(buf->dtb_size == 0); 12602 continue; 12603 } 12604 12605 if (buf->dtb_xamot != NULL) { 12606 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12607 kmem_free(buf->dtb_xamot, buf->dtb_size); 12608 } 12609 12610 kmem_free(buf->dtb_tomax, buf->dtb_size); 12611 buf->dtb_size = 0; 12612 buf->dtb_tomax = NULL; 12613 buf->dtb_xamot = NULL; 12614 } 12615 } 12616 12617 /* 12618 * DTrace Enabling Functions 12619 */ 12620 static dtrace_enabling_t * 12621 dtrace_enabling_create(dtrace_vstate_t *vstate) 12622 { 12623 dtrace_enabling_t *enab; 12624 12625 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 12626 enab->dten_vstate = vstate; 12627 12628 return (enab); 12629 } 12630 12631 static void 12632 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 12633 { 12634 dtrace_ecbdesc_t **ndesc; 12635 size_t osize, nsize; 12636 12637 /* 12638 * We can't add to enablings after we've enabled them, or after we've 12639 * retained them. 12640 */ 12641 ASSERT(enab->dten_probegen == 0); 12642 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12643 12644 if (enab->dten_ndesc < enab->dten_maxdesc) { 12645 enab->dten_desc[enab->dten_ndesc++] = ecb; 12646 return; 12647 } 12648 12649 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12650 12651 if (enab->dten_maxdesc == 0) { 12652 enab->dten_maxdesc = 1; 12653 } else { 12654 enab->dten_maxdesc <<= 1; 12655 } 12656 12657 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 12658 12659 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12660 ndesc = kmem_zalloc(nsize, KM_SLEEP); 12661 bcopy(enab->dten_desc, ndesc, osize); 12662 if (enab->dten_desc != NULL) 12663 kmem_free(enab->dten_desc, osize); 12664 12665 enab->dten_desc = ndesc; 12666 enab->dten_desc[enab->dten_ndesc++] = ecb; 12667 } 12668 12669 static void 12670 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 12671 dtrace_probedesc_t *pd) 12672 { 12673 dtrace_ecbdesc_t *new; 12674 dtrace_predicate_t *pred; 12675 dtrace_actdesc_t *act; 12676 12677 /* 12678 * We're going to create a new ECB description that matches the 12679 * specified ECB in every way, but has the specified probe description. 12680 */ 12681 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12682 12683 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 12684 dtrace_predicate_hold(pred); 12685 12686 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 12687 dtrace_actdesc_hold(act); 12688 12689 new->dted_action = ecb->dted_action; 12690 new->dted_pred = ecb->dted_pred; 12691 new->dted_probe = *pd; 12692 new->dted_uarg = ecb->dted_uarg; 12693 12694 dtrace_enabling_add(enab, new); 12695 } 12696 12697 static void 12698 dtrace_enabling_dump(dtrace_enabling_t *enab) 12699 { 12700 int i; 12701 12702 for (i = 0; i < enab->dten_ndesc; i++) { 12703 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 12704 12705 #ifdef __FreeBSD__ 12706 printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i, 12707 desc->dtpd_provider, desc->dtpd_mod, 12708 desc->dtpd_func, desc->dtpd_name); 12709 #else 12710 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 12711 desc->dtpd_provider, desc->dtpd_mod, 12712 desc->dtpd_func, desc->dtpd_name); 12713 #endif 12714 } 12715 } 12716 12717 static void 12718 dtrace_enabling_destroy(dtrace_enabling_t *enab) 12719 { 12720 int i; 12721 dtrace_ecbdesc_t *ep; 12722 dtrace_vstate_t *vstate = enab->dten_vstate; 12723 12724 ASSERT(MUTEX_HELD(&dtrace_lock)); 12725 12726 for (i = 0; i < enab->dten_ndesc; i++) { 12727 dtrace_actdesc_t *act, *next; 12728 dtrace_predicate_t *pred; 12729 12730 ep = enab->dten_desc[i]; 12731 12732 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 12733 dtrace_predicate_release(pred, vstate); 12734 12735 for (act = ep->dted_action; act != NULL; act = next) { 12736 next = act->dtad_next; 12737 dtrace_actdesc_release(act, vstate); 12738 } 12739 12740 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12741 } 12742 12743 if (enab->dten_desc != NULL) 12744 kmem_free(enab->dten_desc, 12745 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 12746 12747 /* 12748 * If this was a retained enabling, decrement the dts_nretained count 12749 * and take it off of the dtrace_retained list. 12750 */ 12751 if (enab->dten_prev != NULL || enab->dten_next != NULL || 12752 dtrace_retained == enab) { 12753 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12754 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 12755 enab->dten_vstate->dtvs_state->dts_nretained--; 12756 dtrace_retained_gen++; 12757 } 12758 12759 if (enab->dten_prev == NULL) { 12760 if (dtrace_retained == enab) { 12761 dtrace_retained = enab->dten_next; 12762 12763 if (dtrace_retained != NULL) 12764 dtrace_retained->dten_prev = NULL; 12765 } 12766 } else { 12767 ASSERT(enab != dtrace_retained); 12768 ASSERT(dtrace_retained != NULL); 12769 enab->dten_prev->dten_next = enab->dten_next; 12770 } 12771 12772 if (enab->dten_next != NULL) { 12773 ASSERT(dtrace_retained != NULL); 12774 enab->dten_next->dten_prev = enab->dten_prev; 12775 } 12776 12777 kmem_free(enab, sizeof (dtrace_enabling_t)); 12778 } 12779 12780 static int 12781 dtrace_enabling_retain(dtrace_enabling_t *enab) 12782 { 12783 dtrace_state_t *state; 12784 12785 ASSERT(MUTEX_HELD(&dtrace_lock)); 12786 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12787 ASSERT(enab->dten_vstate != NULL); 12788 12789 state = enab->dten_vstate->dtvs_state; 12790 ASSERT(state != NULL); 12791 12792 /* 12793 * We only allow each state to retain dtrace_retain_max enablings. 12794 */ 12795 if (state->dts_nretained >= dtrace_retain_max) 12796 return (ENOSPC); 12797 12798 state->dts_nretained++; 12799 dtrace_retained_gen++; 12800 12801 if (dtrace_retained == NULL) { 12802 dtrace_retained = enab; 12803 return (0); 12804 } 12805 12806 enab->dten_next = dtrace_retained; 12807 dtrace_retained->dten_prev = enab; 12808 dtrace_retained = enab; 12809 12810 return (0); 12811 } 12812 12813 static int 12814 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 12815 dtrace_probedesc_t *create) 12816 { 12817 dtrace_enabling_t *new, *enab; 12818 int found = 0, err = ENOENT; 12819 12820 ASSERT(MUTEX_HELD(&dtrace_lock)); 12821 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 12822 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 12823 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 12824 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 12825 12826 new = dtrace_enabling_create(&state->dts_vstate); 12827 12828 /* 12829 * Iterate over all retained enablings, looking for enablings that 12830 * match the specified state. 12831 */ 12832 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12833 int i; 12834 12835 /* 12836 * dtvs_state can only be NULL for helper enablings -- and 12837 * helper enablings can't be retained. 12838 */ 12839 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12840 12841 if (enab->dten_vstate->dtvs_state != state) 12842 continue; 12843 12844 /* 12845 * Now iterate over each probe description; we're looking for 12846 * an exact match to the specified probe description. 12847 */ 12848 for (i = 0; i < enab->dten_ndesc; i++) { 12849 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12850 dtrace_probedesc_t *pd = &ep->dted_probe; 12851 12852 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 12853 continue; 12854 12855 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 12856 continue; 12857 12858 if (strcmp(pd->dtpd_func, match->dtpd_func)) 12859 continue; 12860 12861 if (strcmp(pd->dtpd_name, match->dtpd_name)) 12862 continue; 12863 12864 /* 12865 * We have a winning probe! Add it to our growing 12866 * enabling. 12867 */ 12868 found = 1; 12869 dtrace_enabling_addlike(new, ep, create); 12870 } 12871 } 12872 12873 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 12874 dtrace_enabling_destroy(new); 12875 return (err); 12876 } 12877 12878 return (0); 12879 } 12880 12881 static void 12882 dtrace_enabling_retract(dtrace_state_t *state) 12883 { 12884 dtrace_enabling_t *enab, *next; 12885 12886 ASSERT(MUTEX_HELD(&dtrace_lock)); 12887 12888 /* 12889 * Iterate over all retained enablings, destroy the enablings retained 12890 * for the specified state. 12891 */ 12892 for (enab = dtrace_retained; enab != NULL; enab = next) { 12893 next = enab->dten_next; 12894 12895 /* 12896 * dtvs_state can only be NULL for helper enablings -- and 12897 * helper enablings can't be retained. 12898 */ 12899 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12900 12901 if (enab->dten_vstate->dtvs_state == state) { 12902 ASSERT(state->dts_nretained > 0); 12903 dtrace_enabling_destroy(enab); 12904 } 12905 } 12906 12907 ASSERT(state->dts_nretained == 0); 12908 } 12909 12910 static int 12911 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 12912 { 12913 int i = 0; 12914 int matched = 0; 12915 12916 ASSERT(MUTEX_HELD(&cpu_lock)); 12917 ASSERT(MUTEX_HELD(&dtrace_lock)); 12918 12919 for (i = 0; i < enab->dten_ndesc; i++) { 12920 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12921 12922 enab->dten_current = ep; 12923 enab->dten_error = 0; 12924 12925 matched += dtrace_probe_enable(&ep->dted_probe, enab); 12926 12927 if (enab->dten_error != 0) { 12928 /* 12929 * If we get an error half-way through enabling the 12930 * probes, we kick out -- perhaps with some number of 12931 * them enabled. Leaving enabled probes enabled may 12932 * be slightly confusing for user-level, but we expect 12933 * that no one will attempt to actually drive on in 12934 * the face of such errors. If this is an anonymous 12935 * enabling (indicated with a NULL nmatched pointer), 12936 * we cmn_err() a message. We aren't expecting to 12937 * get such an error -- such as it can exist at all, 12938 * it would be a result of corrupted DOF in the driver 12939 * properties. 12940 */ 12941 if (nmatched == NULL) { 12942 cmn_err(CE_WARN, "dtrace_enabling_match() " 12943 "error on %p: %d", (void *)ep, 12944 enab->dten_error); 12945 } 12946 12947 return (enab->dten_error); 12948 } 12949 } 12950 12951 enab->dten_probegen = dtrace_probegen; 12952 if (nmatched != NULL) 12953 *nmatched = matched; 12954 12955 return (0); 12956 } 12957 12958 static void 12959 dtrace_enabling_matchall(void) 12960 { 12961 dtrace_enabling_t *enab; 12962 12963 mutex_enter(&cpu_lock); 12964 mutex_enter(&dtrace_lock); 12965 12966 /* 12967 * Iterate over all retained enablings to see if any probes match 12968 * against them. We only perform this operation on enablings for which 12969 * we have sufficient permissions by virtue of being in the global zone 12970 * or in the same zone as the DTrace client. Because we can be called 12971 * after dtrace_detach() has been called, we cannot assert that there 12972 * are retained enablings. We can safely load from dtrace_retained, 12973 * however: the taskq_destroy() at the end of dtrace_detach() will 12974 * block pending our completion. 12975 */ 12976 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12977 #ifdef illumos 12978 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 12979 12980 if (INGLOBALZONE(curproc) || 12981 cr != NULL && getzoneid() == crgetzoneid(cr)) 12982 #endif 12983 (void) dtrace_enabling_match(enab, NULL); 12984 } 12985 12986 mutex_exit(&dtrace_lock); 12987 mutex_exit(&cpu_lock); 12988 } 12989 12990 /* 12991 * If an enabling is to be enabled without having matched probes (that is, if 12992 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 12993 * enabling must be _primed_ by creating an ECB for every ECB description. 12994 * This must be done to assure that we know the number of speculations, the 12995 * number of aggregations, the minimum buffer size needed, etc. before we 12996 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 12997 * enabling any probes, we create ECBs for every ECB decription, but with a 12998 * NULL probe -- which is exactly what this function does. 12999 */ 13000 static void 13001 dtrace_enabling_prime(dtrace_state_t *state) 13002 { 13003 dtrace_enabling_t *enab; 13004 int i; 13005 13006 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 13007 ASSERT(enab->dten_vstate->dtvs_state != NULL); 13008 13009 if (enab->dten_vstate->dtvs_state != state) 13010 continue; 13011 13012 /* 13013 * We don't want to prime an enabling more than once, lest 13014 * we allow a malicious user to induce resource exhaustion. 13015 * (The ECBs that result from priming an enabling aren't 13016 * leaked -- but they also aren't deallocated until the 13017 * consumer state is destroyed.) 13018 */ 13019 if (enab->dten_primed) 13020 continue; 13021 13022 for (i = 0; i < enab->dten_ndesc; i++) { 13023 enab->dten_current = enab->dten_desc[i]; 13024 (void) dtrace_probe_enable(NULL, enab); 13025 } 13026 13027 enab->dten_primed = 1; 13028 } 13029 } 13030 13031 /* 13032 * Called to indicate that probes should be provided due to retained 13033 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 13034 * must take an initial lap through the enabling calling the dtps_provide() 13035 * entry point explicitly to allow for autocreated probes. 13036 */ 13037 static void 13038 dtrace_enabling_provide(dtrace_provider_t *prv) 13039 { 13040 int i, all = 0; 13041 dtrace_probedesc_t desc; 13042 dtrace_genid_t gen; 13043 13044 ASSERT(MUTEX_HELD(&dtrace_lock)); 13045 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 13046 13047 if (prv == NULL) { 13048 all = 1; 13049 prv = dtrace_provider; 13050 } 13051 13052 do { 13053 dtrace_enabling_t *enab; 13054 void *parg = prv->dtpv_arg; 13055 13056 retry: 13057 gen = dtrace_retained_gen; 13058 for (enab = dtrace_retained; enab != NULL; 13059 enab = enab->dten_next) { 13060 for (i = 0; i < enab->dten_ndesc; i++) { 13061 desc = enab->dten_desc[i]->dted_probe; 13062 mutex_exit(&dtrace_lock); 13063 prv->dtpv_pops.dtps_provide(parg, &desc); 13064 mutex_enter(&dtrace_lock); 13065 /* 13066 * Process the retained enablings again if 13067 * they have changed while we weren't holding 13068 * dtrace_lock. 13069 */ 13070 if (gen != dtrace_retained_gen) 13071 goto retry; 13072 } 13073 } 13074 } while (all && (prv = prv->dtpv_next) != NULL); 13075 13076 mutex_exit(&dtrace_lock); 13077 dtrace_probe_provide(NULL, all ? NULL : prv); 13078 mutex_enter(&dtrace_lock); 13079 } 13080 13081 /* 13082 * Called to reap ECBs that are attached to probes from defunct providers. 13083 */ 13084 static void 13085 dtrace_enabling_reap(void) 13086 { 13087 dtrace_provider_t *prov; 13088 dtrace_probe_t *probe; 13089 dtrace_ecb_t *ecb; 13090 hrtime_t when; 13091 int i; 13092 13093 mutex_enter(&cpu_lock); 13094 mutex_enter(&dtrace_lock); 13095 13096 for (i = 0; i < dtrace_nprobes; i++) { 13097 if ((probe = dtrace_probes[i]) == NULL) 13098 continue; 13099 13100 if (probe->dtpr_ecb == NULL) 13101 continue; 13102 13103 prov = probe->dtpr_provider; 13104 13105 if ((when = prov->dtpv_defunct) == 0) 13106 continue; 13107 13108 /* 13109 * We have ECBs on a defunct provider: we want to reap these 13110 * ECBs to allow the provider to unregister. The destruction 13111 * of these ECBs must be done carefully: if we destroy the ECB 13112 * and the consumer later wishes to consume an EPID that 13113 * corresponds to the destroyed ECB (and if the EPID metadata 13114 * has not been previously consumed), the consumer will abort 13115 * processing on the unknown EPID. To reduce (but not, sadly, 13116 * eliminate) the possibility of this, we will only destroy an 13117 * ECB for a defunct provider if, for the state that 13118 * corresponds to the ECB: 13119 * 13120 * (a) There is no speculative tracing (which can effectively 13121 * cache an EPID for an arbitrary amount of time). 13122 * 13123 * (b) The principal buffers have been switched twice since the 13124 * provider became defunct. 13125 * 13126 * (c) The aggregation buffers are of zero size or have been 13127 * switched twice since the provider became defunct. 13128 * 13129 * We use dts_speculates to determine (a) and call a function 13130 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 13131 * that as soon as we've been unable to destroy one of the ECBs 13132 * associated with the probe, we quit trying -- reaping is only 13133 * fruitful in as much as we can destroy all ECBs associated 13134 * with the defunct provider's probes. 13135 */ 13136 while ((ecb = probe->dtpr_ecb) != NULL) { 13137 dtrace_state_t *state = ecb->dte_state; 13138 dtrace_buffer_t *buf = state->dts_buffer; 13139 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 13140 13141 if (state->dts_speculates) 13142 break; 13143 13144 if (!dtrace_buffer_consumed(buf, when)) 13145 break; 13146 13147 if (!dtrace_buffer_consumed(aggbuf, when)) 13148 break; 13149 13150 dtrace_ecb_disable(ecb); 13151 ASSERT(probe->dtpr_ecb != ecb); 13152 dtrace_ecb_destroy(ecb); 13153 } 13154 } 13155 13156 mutex_exit(&dtrace_lock); 13157 mutex_exit(&cpu_lock); 13158 } 13159 13160 /* 13161 * DTrace DOF Functions 13162 */ 13163 /*ARGSUSED*/ 13164 static void 13165 dtrace_dof_error(dof_hdr_t *dof, const char *str) 13166 { 13167 if (dtrace_err_verbose) 13168 cmn_err(CE_WARN, "failed to process DOF: %s", str); 13169 13170 #ifdef DTRACE_ERRDEBUG 13171 dtrace_errdebug(str); 13172 #endif 13173 } 13174 13175 /* 13176 * Create DOF out of a currently enabled state. Right now, we only create 13177 * DOF containing the run-time options -- but this could be expanded to create 13178 * complete DOF representing the enabled state. 13179 */ 13180 static dof_hdr_t * 13181 dtrace_dof_create(dtrace_state_t *state) 13182 { 13183 dof_hdr_t *dof; 13184 dof_sec_t *sec; 13185 dof_optdesc_t *opt; 13186 int i, len = sizeof (dof_hdr_t) + 13187 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 13188 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13189 13190 ASSERT(MUTEX_HELD(&dtrace_lock)); 13191 13192 dof = kmem_zalloc(len, KM_SLEEP); 13193 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 13194 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 13195 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 13196 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 13197 13198 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 13199 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 13200 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 13201 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 13202 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 13203 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 13204 13205 dof->dofh_flags = 0; 13206 dof->dofh_hdrsize = sizeof (dof_hdr_t); 13207 dof->dofh_secsize = sizeof (dof_sec_t); 13208 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 13209 dof->dofh_secoff = sizeof (dof_hdr_t); 13210 dof->dofh_loadsz = len; 13211 dof->dofh_filesz = len; 13212 dof->dofh_pad = 0; 13213 13214 /* 13215 * Fill in the option section header... 13216 */ 13217 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 13218 sec->dofs_type = DOF_SECT_OPTDESC; 13219 sec->dofs_align = sizeof (uint64_t); 13220 sec->dofs_flags = DOF_SECF_LOAD; 13221 sec->dofs_entsize = sizeof (dof_optdesc_t); 13222 13223 opt = (dof_optdesc_t *)((uintptr_t)sec + 13224 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 13225 13226 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 13227 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13228 13229 for (i = 0; i < DTRACEOPT_MAX; i++) { 13230 opt[i].dofo_option = i; 13231 opt[i].dofo_strtab = DOF_SECIDX_NONE; 13232 opt[i].dofo_value = state->dts_options[i]; 13233 } 13234 13235 return (dof); 13236 } 13237 13238 static dof_hdr_t * 13239 dtrace_dof_copyin(uintptr_t uarg, int *errp) 13240 { 13241 dof_hdr_t hdr, *dof; 13242 13243 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13244 13245 /* 13246 * First, we're going to copyin() the sizeof (dof_hdr_t). 13247 */ 13248 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 13249 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13250 *errp = EFAULT; 13251 return (NULL); 13252 } 13253 13254 /* 13255 * Now we'll allocate the entire DOF and copy it in -- provided 13256 * that the length isn't outrageous. 13257 */ 13258 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13259 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13260 *errp = E2BIG; 13261 return (NULL); 13262 } 13263 13264 if (hdr.dofh_loadsz < sizeof (hdr)) { 13265 dtrace_dof_error(&hdr, "invalid load size"); 13266 *errp = EINVAL; 13267 return (NULL); 13268 } 13269 13270 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 13271 13272 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 13273 dof->dofh_loadsz != hdr.dofh_loadsz) { 13274 kmem_free(dof, hdr.dofh_loadsz); 13275 *errp = EFAULT; 13276 return (NULL); 13277 } 13278 13279 return (dof); 13280 } 13281 13282 #ifdef __FreeBSD__ 13283 static dof_hdr_t * 13284 dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp) 13285 { 13286 dof_hdr_t hdr, *dof; 13287 struct thread *td; 13288 size_t loadsz; 13289 13290 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13291 13292 td = curthread; 13293 13294 /* 13295 * First, we're going to copyin() the sizeof (dof_hdr_t). 13296 */ 13297 if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) { 13298 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13299 *errp = EFAULT; 13300 return (NULL); 13301 } 13302 13303 /* 13304 * Now we'll allocate the entire DOF and copy it in -- provided 13305 * that the length isn't outrageous. 13306 */ 13307 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13308 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13309 *errp = E2BIG; 13310 return (NULL); 13311 } 13312 loadsz = (size_t)hdr.dofh_loadsz; 13313 13314 if (loadsz < sizeof (hdr)) { 13315 dtrace_dof_error(&hdr, "invalid load size"); 13316 *errp = EINVAL; 13317 return (NULL); 13318 } 13319 13320 dof = kmem_alloc(loadsz, KM_SLEEP); 13321 13322 if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz || 13323 dof->dofh_loadsz != loadsz) { 13324 kmem_free(dof, hdr.dofh_loadsz); 13325 *errp = EFAULT; 13326 return (NULL); 13327 } 13328 13329 return (dof); 13330 } 13331 13332 static __inline uchar_t 13333 dtrace_dof_char(char c) 13334 { 13335 13336 switch (c) { 13337 case '0': 13338 case '1': 13339 case '2': 13340 case '3': 13341 case '4': 13342 case '5': 13343 case '6': 13344 case '7': 13345 case '8': 13346 case '9': 13347 return (c - '0'); 13348 case 'A': 13349 case 'B': 13350 case 'C': 13351 case 'D': 13352 case 'E': 13353 case 'F': 13354 return (c - 'A' + 10); 13355 case 'a': 13356 case 'b': 13357 case 'c': 13358 case 'd': 13359 case 'e': 13360 case 'f': 13361 return (c - 'a' + 10); 13362 } 13363 /* Should not reach here. */ 13364 return (UCHAR_MAX); 13365 } 13366 #endif /* __FreeBSD__ */ 13367 13368 static dof_hdr_t * 13369 dtrace_dof_property(const char *name) 13370 { 13371 #ifdef __FreeBSD__ 13372 uint8_t *dofbuf; 13373 u_char *data, *eol; 13374 caddr_t doffile; 13375 size_t bytes, len, i; 13376 dof_hdr_t *dof; 13377 u_char c1, c2; 13378 13379 dof = NULL; 13380 13381 doffile = preload_search_by_type("dtrace_dof"); 13382 if (doffile == NULL) 13383 return (NULL); 13384 13385 data = preload_fetch_addr(doffile); 13386 len = preload_fetch_size(doffile); 13387 for (;;) { 13388 /* Look for the end of the line. All lines end in a newline. */ 13389 eol = memchr(data, '\n', len); 13390 if (eol == NULL) 13391 return (NULL); 13392 13393 if (strncmp(name, data, strlen(name)) == 0) 13394 break; 13395 13396 eol++; /* skip past the newline */ 13397 len -= eol - data; 13398 data = eol; 13399 } 13400 13401 /* We've found the data corresponding to the specified key. */ 13402 13403 data += strlen(name) + 1; /* skip past the '=' */ 13404 len = eol - data; 13405 if (len % 2 != 0) { 13406 dtrace_dof_error(NULL, "invalid DOF encoding length"); 13407 goto doferr; 13408 } 13409 bytes = len / 2; 13410 if (bytes < sizeof(dof_hdr_t)) { 13411 dtrace_dof_error(NULL, "truncated header"); 13412 goto doferr; 13413 } 13414 13415 /* 13416 * Each byte is represented by the two ASCII characters in its hex 13417 * representation. 13418 */ 13419 dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK); 13420 for (i = 0; i < bytes; i++) { 13421 c1 = dtrace_dof_char(data[i * 2]); 13422 c2 = dtrace_dof_char(data[i * 2 + 1]); 13423 if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) { 13424 dtrace_dof_error(NULL, "invalid hex char in DOF"); 13425 goto doferr; 13426 } 13427 dofbuf[i] = c1 * 16 + c2; 13428 } 13429 13430 dof = (dof_hdr_t *)dofbuf; 13431 if (bytes < dof->dofh_loadsz) { 13432 dtrace_dof_error(NULL, "truncated DOF"); 13433 goto doferr; 13434 } 13435 13436 if (dof->dofh_loadsz >= dtrace_dof_maxsize) { 13437 dtrace_dof_error(NULL, "oversized DOF"); 13438 goto doferr; 13439 } 13440 13441 return (dof); 13442 13443 doferr: 13444 free(dof, M_SOLARIS); 13445 return (NULL); 13446 #else /* __FreeBSD__ */ 13447 uchar_t *buf; 13448 uint64_t loadsz; 13449 unsigned int len, i; 13450 dof_hdr_t *dof; 13451 13452 /* 13453 * Unfortunately, array of values in .conf files are always (and 13454 * only) interpreted to be integer arrays. We must read our DOF 13455 * as an integer array, and then squeeze it into a byte array. 13456 */ 13457 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 13458 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 13459 return (NULL); 13460 13461 for (i = 0; i < len; i++) 13462 buf[i] = (uchar_t)(((int *)buf)[i]); 13463 13464 if (len < sizeof (dof_hdr_t)) { 13465 ddi_prop_free(buf); 13466 dtrace_dof_error(NULL, "truncated header"); 13467 return (NULL); 13468 } 13469 13470 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 13471 ddi_prop_free(buf); 13472 dtrace_dof_error(NULL, "truncated DOF"); 13473 return (NULL); 13474 } 13475 13476 if (loadsz >= dtrace_dof_maxsize) { 13477 ddi_prop_free(buf); 13478 dtrace_dof_error(NULL, "oversized DOF"); 13479 return (NULL); 13480 } 13481 13482 dof = kmem_alloc(loadsz, KM_SLEEP); 13483 bcopy(buf, dof, loadsz); 13484 ddi_prop_free(buf); 13485 13486 return (dof); 13487 #endif /* !__FreeBSD__ */ 13488 } 13489 13490 static void 13491 dtrace_dof_destroy(dof_hdr_t *dof) 13492 { 13493 kmem_free(dof, dof->dofh_loadsz); 13494 } 13495 13496 /* 13497 * Return the dof_sec_t pointer corresponding to a given section index. If the 13498 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 13499 * a type other than DOF_SECT_NONE is specified, the header is checked against 13500 * this type and NULL is returned if the types do not match. 13501 */ 13502 static dof_sec_t * 13503 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 13504 { 13505 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 13506 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 13507 13508 if (i >= dof->dofh_secnum) { 13509 dtrace_dof_error(dof, "referenced section index is invalid"); 13510 return (NULL); 13511 } 13512 13513 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 13514 dtrace_dof_error(dof, "referenced section is not loadable"); 13515 return (NULL); 13516 } 13517 13518 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 13519 dtrace_dof_error(dof, "referenced section is the wrong type"); 13520 return (NULL); 13521 } 13522 13523 return (sec); 13524 } 13525 13526 static dtrace_probedesc_t * 13527 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 13528 { 13529 dof_probedesc_t *probe; 13530 dof_sec_t *strtab; 13531 uintptr_t daddr = (uintptr_t)dof; 13532 uintptr_t str; 13533 size_t size; 13534 13535 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 13536 dtrace_dof_error(dof, "invalid probe section"); 13537 return (NULL); 13538 } 13539 13540 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13541 dtrace_dof_error(dof, "bad alignment in probe description"); 13542 return (NULL); 13543 } 13544 13545 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 13546 dtrace_dof_error(dof, "truncated probe description"); 13547 return (NULL); 13548 } 13549 13550 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 13551 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 13552 13553 if (strtab == NULL) 13554 return (NULL); 13555 13556 str = daddr + strtab->dofs_offset; 13557 size = strtab->dofs_size; 13558 13559 if (probe->dofp_provider >= strtab->dofs_size) { 13560 dtrace_dof_error(dof, "corrupt probe provider"); 13561 return (NULL); 13562 } 13563 13564 (void) strncpy(desc->dtpd_provider, 13565 (char *)(str + probe->dofp_provider), 13566 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 13567 13568 if (probe->dofp_mod >= strtab->dofs_size) { 13569 dtrace_dof_error(dof, "corrupt probe module"); 13570 return (NULL); 13571 } 13572 13573 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 13574 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 13575 13576 if (probe->dofp_func >= strtab->dofs_size) { 13577 dtrace_dof_error(dof, "corrupt probe function"); 13578 return (NULL); 13579 } 13580 13581 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 13582 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 13583 13584 if (probe->dofp_name >= strtab->dofs_size) { 13585 dtrace_dof_error(dof, "corrupt probe name"); 13586 return (NULL); 13587 } 13588 13589 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 13590 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 13591 13592 return (desc); 13593 } 13594 13595 static dtrace_difo_t * 13596 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13597 cred_t *cr) 13598 { 13599 dtrace_difo_t *dp; 13600 size_t ttl = 0; 13601 dof_difohdr_t *dofd; 13602 uintptr_t daddr = (uintptr_t)dof; 13603 size_t max = dtrace_difo_maxsize; 13604 int i, l, n; 13605 13606 static const struct { 13607 int section; 13608 int bufoffs; 13609 int lenoffs; 13610 int entsize; 13611 int align; 13612 const char *msg; 13613 } difo[] = { 13614 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 13615 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 13616 sizeof (dif_instr_t), "multiple DIF sections" }, 13617 13618 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 13619 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 13620 sizeof (uint64_t), "multiple integer tables" }, 13621 13622 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 13623 offsetof(dtrace_difo_t, dtdo_strlen), 0, 13624 sizeof (char), "multiple string tables" }, 13625 13626 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 13627 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 13628 sizeof (uint_t), "multiple variable tables" }, 13629 13630 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 13631 }; 13632 13633 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 13634 dtrace_dof_error(dof, "invalid DIFO header section"); 13635 return (NULL); 13636 } 13637 13638 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13639 dtrace_dof_error(dof, "bad alignment in DIFO header"); 13640 return (NULL); 13641 } 13642 13643 if (sec->dofs_size < sizeof (dof_difohdr_t) || 13644 sec->dofs_size % sizeof (dof_secidx_t)) { 13645 dtrace_dof_error(dof, "bad size in DIFO header"); 13646 return (NULL); 13647 } 13648 13649 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13650 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 13651 13652 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 13653 dp->dtdo_rtype = dofd->dofd_rtype; 13654 13655 for (l = 0; l < n; l++) { 13656 dof_sec_t *subsec; 13657 void **bufp; 13658 uint32_t *lenp; 13659 13660 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 13661 dofd->dofd_links[l])) == NULL) 13662 goto err; /* invalid section link */ 13663 13664 if (ttl + subsec->dofs_size > max) { 13665 dtrace_dof_error(dof, "exceeds maximum size"); 13666 goto err; 13667 } 13668 13669 ttl += subsec->dofs_size; 13670 13671 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 13672 if (subsec->dofs_type != difo[i].section) 13673 continue; 13674 13675 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 13676 dtrace_dof_error(dof, "section not loaded"); 13677 goto err; 13678 } 13679 13680 if (subsec->dofs_align != difo[i].align) { 13681 dtrace_dof_error(dof, "bad alignment"); 13682 goto err; 13683 } 13684 13685 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 13686 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 13687 13688 if (*bufp != NULL) { 13689 dtrace_dof_error(dof, difo[i].msg); 13690 goto err; 13691 } 13692 13693 if (difo[i].entsize != subsec->dofs_entsize) { 13694 dtrace_dof_error(dof, "entry size mismatch"); 13695 goto err; 13696 } 13697 13698 if (subsec->dofs_entsize != 0 && 13699 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 13700 dtrace_dof_error(dof, "corrupt entry size"); 13701 goto err; 13702 } 13703 13704 *lenp = subsec->dofs_size; 13705 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 13706 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 13707 *bufp, subsec->dofs_size); 13708 13709 if (subsec->dofs_entsize != 0) 13710 *lenp /= subsec->dofs_entsize; 13711 13712 break; 13713 } 13714 13715 /* 13716 * If we encounter a loadable DIFO sub-section that is not 13717 * known to us, assume this is a broken program and fail. 13718 */ 13719 if (difo[i].section == DOF_SECT_NONE && 13720 (subsec->dofs_flags & DOF_SECF_LOAD)) { 13721 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 13722 goto err; 13723 } 13724 } 13725 13726 if (dp->dtdo_buf == NULL) { 13727 /* 13728 * We can't have a DIF object without DIF text. 13729 */ 13730 dtrace_dof_error(dof, "missing DIF text"); 13731 goto err; 13732 } 13733 13734 /* 13735 * Before we validate the DIF object, run through the variable table 13736 * looking for the strings -- if any of their size are under, we'll set 13737 * their size to be the system-wide default string size. Note that 13738 * this should _not_ happen if the "strsize" option has been set -- 13739 * in this case, the compiler should have set the size to reflect the 13740 * setting of the option. 13741 */ 13742 for (i = 0; i < dp->dtdo_varlen; i++) { 13743 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 13744 dtrace_diftype_t *t = &v->dtdv_type; 13745 13746 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 13747 continue; 13748 13749 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 13750 t->dtdt_size = dtrace_strsize_default; 13751 } 13752 13753 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 13754 goto err; 13755 13756 dtrace_difo_init(dp, vstate); 13757 return (dp); 13758 13759 err: 13760 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 13761 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 13762 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 13763 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 13764 13765 kmem_free(dp, sizeof (dtrace_difo_t)); 13766 return (NULL); 13767 } 13768 13769 static dtrace_predicate_t * 13770 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13771 cred_t *cr) 13772 { 13773 dtrace_difo_t *dp; 13774 13775 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 13776 return (NULL); 13777 13778 return (dtrace_predicate_create(dp)); 13779 } 13780 13781 static dtrace_actdesc_t * 13782 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13783 cred_t *cr) 13784 { 13785 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 13786 dof_actdesc_t *desc; 13787 dof_sec_t *difosec; 13788 size_t offs; 13789 uintptr_t daddr = (uintptr_t)dof; 13790 uint64_t arg; 13791 dtrace_actkind_t kind; 13792 13793 if (sec->dofs_type != DOF_SECT_ACTDESC) { 13794 dtrace_dof_error(dof, "invalid action section"); 13795 return (NULL); 13796 } 13797 13798 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 13799 dtrace_dof_error(dof, "truncated action description"); 13800 return (NULL); 13801 } 13802 13803 if (sec->dofs_align != sizeof (uint64_t)) { 13804 dtrace_dof_error(dof, "bad alignment in action description"); 13805 return (NULL); 13806 } 13807 13808 if (sec->dofs_size < sec->dofs_entsize) { 13809 dtrace_dof_error(dof, "section entry size exceeds total size"); 13810 return (NULL); 13811 } 13812 13813 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 13814 dtrace_dof_error(dof, "bad entry size in action description"); 13815 return (NULL); 13816 } 13817 13818 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 13819 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 13820 return (NULL); 13821 } 13822 13823 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 13824 desc = (dof_actdesc_t *)(daddr + 13825 (uintptr_t)sec->dofs_offset + offs); 13826 kind = (dtrace_actkind_t)desc->dofa_kind; 13827 13828 if ((DTRACEACT_ISPRINTFLIKE(kind) && 13829 (kind != DTRACEACT_PRINTA || 13830 desc->dofa_strtab != DOF_SECIDX_NONE)) || 13831 (kind == DTRACEACT_DIFEXPR && 13832 desc->dofa_strtab != DOF_SECIDX_NONE)) { 13833 dof_sec_t *strtab; 13834 char *str, *fmt; 13835 uint64_t i; 13836 13837 /* 13838 * The argument to these actions is an index into the 13839 * DOF string table. For printf()-like actions, this 13840 * is the format string. For print(), this is the 13841 * CTF type of the expression result. 13842 */ 13843 if ((strtab = dtrace_dof_sect(dof, 13844 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 13845 goto err; 13846 13847 str = (char *)((uintptr_t)dof + 13848 (uintptr_t)strtab->dofs_offset); 13849 13850 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 13851 if (str[i] == '\0') 13852 break; 13853 } 13854 13855 if (i >= strtab->dofs_size) { 13856 dtrace_dof_error(dof, "bogus format string"); 13857 goto err; 13858 } 13859 13860 if (i == desc->dofa_arg) { 13861 dtrace_dof_error(dof, "empty format string"); 13862 goto err; 13863 } 13864 13865 i -= desc->dofa_arg; 13866 fmt = kmem_alloc(i + 1, KM_SLEEP); 13867 bcopy(&str[desc->dofa_arg], fmt, i + 1); 13868 arg = (uint64_t)(uintptr_t)fmt; 13869 } else { 13870 if (kind == DTRACEACT_PRINTA) { 13871 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 13872 arg = 0; 13873 } else { 13874 arg = desc->dofa_arg; 13875 } 13876 } 13877 13878 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 13879 desc->dofa_uarg, arg); 13880 13881 if (last != NULL) { 13882 last->dtad_next = act; 13883 } else { 13884 first = act; 13885 } 13886 13887 last = act; 13888 13889 if (desc->dofa_difo == DOF_SECIDX_NONE) 13890 continue; 13891 13892 if ((difosec = dtrace_dof_sect(dof, 13893 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 13894 goto err; 13895 13896 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 13897 13898 if (act->dtad_difo == NULL) 13899 goto err; 13900 } 13901 13902 ASSERT(first != NULL); 13903 return (first); 13904 13905 err: 13906 for (act = first; act != NULL; act = next) { 13907 next = act->dtad_next; 13908 dtrace_actdesc_release(act, vstate); 13909 } 13910 13911 return (NULL); 13912 } 13913 13914 static dtrace_ecbdesc_t * 13915 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13916 cred_t *cr) 13917 { 13918 dtrace_ecbdesc_t *ep; 13919 dof_ecbdesc_t *ecb; 13920 dtrace_probedesc_t *desc; 13921 dtrace_predicate_t *pred = NULL; 13922 13923 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 13924 dtrace_dof_error(dof, "truncated ECB description"); 13925 return (NULL); 13926 } 13927 13928 if (sec->dofs_align != sizeof (uint64_t)) { 13929 dtrace_dof_error(dof, "bad alignment in ECB description"); 13930 return (NULL); 13931 } 13932 13933 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 13934 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 13935 13936 if (sec == NULL) 13937 return (NULL); 13938 13939 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 13940 ep->dted_uarg = ecb->dofe_uarg; 13941 desc = &ep->dted_probe; 13942 13943 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 13944 goto err; 13945 13946 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 13947 if ((sec = dtrace_dof_sect(dof, 13948 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 13949 goto err; 13950 13951 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 13952 goto err; 13953 13954 ep->dted_pred.dtpdd_predicate = pred; 13955 } 13956 13957 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 13958 if ((sec = dtrace_dof_sect(dof, 13959 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 13960 goto err; 13961 13962 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 13963 13964 if (ep->dted_action == NULL) 13965 goto err; 13966 } 13967 13968 return (ep); 13969 13970 err: 13971 if (pred != NULL) 13972 dtrace_predicate_release(pred, vstate); 13973 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 13974 return (NULL); 13975 } 13976 13977 /* 13978 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 13979 * specified DOF. SETX relocations are computed using 'ubase', the base load 13980 * address of the object containing the DOF, and DOFREL relocations are relative 13981 * to the relocation offset within the DOF. 13982 */ 13983 static int 13984 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase, 13985 uint64_t udaddr) 13986 { 13987 uintptr_t daddr = (uintptr_t)dof; 13988 uintptr_t ts_end; 13989 dof_relohdr_t *dofr = 13990 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13991 dof_sec_t *ss, *rs, *ts; 13992 dof_relodesc_t *r; 13993 uint_t i, n; 13994 13995 if (sec->dofs_size < sizeof (dof_relohdr_t) || 13996 sec->dofs_align != sizeof (dof_secidx_t)) { 13997 dtrace_dof_error(dof, "invalid relocation header"); 13998 return (-1); 13999 } 14000 14001 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 14002 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 14003 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 14004 ts_end = (uintptr_t)ts + sizeof (dof_sec_t); 14005 14006 if (ss == NULL || rs == NULL || ts == NULL) 14007 return (-1); /* dtrace_dof_error() has been called already */ 14008 14009 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 14010 rs->dofs_align != sizeof (uint64_t)) { 14011 dtrace_dof_error(dof, "invalid relocation section"); 14012 return (-1); 14013 } 14014 14015 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 14016 n = rs->dofs_size / rs->dofs_entsize; 14017 14018 for (i = 0; i < n; i++) { 14019 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 14020 14021 switch (r->dofr_type) { 14022 case DOF_RELO_NONE: 14023 break; 14024 case DOF_RELO_SETX: 14025 case DOF_RELO_DOFREL: 14026 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 14027 sizeof (uint64_t) > ts->dofs_size) { 14028 dtrace_dof_error(dof, "bad relocation offset"); 14029 return (-1); 14030 } 14031 14032 if (taddr >= (uintptr_t)ts && taddr < ts_end) { 14033 dtrace_dof_error(dof, "bad relocation offset"); 14034 return (-1); 14035 } 14036 14037 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 14038 dtrace_dof_error(dof, "misaligned setx relo"); 14039 return (-1); 14040 } 14041 14042 if (r->dofr_type == DOF_RELO_SETX) 14043 *(uint64_t *)taddr += ubase; 14044 else 14045 *(uint64_t *)taddr += 14046 udaddr + ts->dofs_offset + r->dofr_offset; 14047 break; 14048 default: 14049 dtrace_dof_error(dof, "invalid relocation type"); 14050 return (-1); 14051 } 14052 14053 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 14054 } 14055 14056 return (0); 14057 } 14058 14059 /* 14060 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 14061 * header: it should be at the front of a memory region that is at least 14062 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 14063 * size. It need not be validated in any other way. 14064 */ 14065 static int 14066 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 14067 dtrace_enabling_t **enabp, uint64_t ubase, uint64_t udaddr, int noprobes) 14068 { 14069 uint64_t len = dof->dofh_loadsz, seclen; 14070 uintptr_t daddr = (uintptr_t)dof; 14071 dtrace_ecbdesc_t *ep; 14072 dtrace_enabling_t *enab; 14073 uint_t i; 14074 14075 ASSERT(MUTEX_HELD(&dtrace_lock)); 14076 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 14077 14078 /* 14079 * Check the DOF header identification bytes. In addition to checking 14080 * valid settings, we also verify that unused bits/bytes are zeroed so 14081 * we can use them later without fear of regressing existing binaries. 14082 */ 14083 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 14084 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 14085 dtrace_dof_error(dof, "DOF magic string mismatch"); 14086 return (-1); 14087 } 14088 14089 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 14090 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 14091 dtrace_dof_error(dof, "DOF has invalid data model"); 14092 return (-1); 14093 } 14094 14095 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 14096 dtrace_dof_error(dof, "DOF encoding mismatch"); 14097 return (-1); 14098 } 14099 14100 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14101 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 14102 dtrace_dof_error(dof, "DOF version mismatch"); 14103 return (-1); 14104 } 14105 14106 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 14107 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 14108 return (-1); 14109 } 14110 14111 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 14112 dtrace_dof_error(dof, "DOF uses too many integer registers"); 14113 return (-1); 14114 } 14115 14116 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 14117 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 14118 return (-1); 14119 } 14120 14121 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 14122 if (dof->dofh_ident[i] != 0) { 14123 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 14124 return (-1); 14125 } 14126 } 14127 14128 if (dof->dofh_flags & ~DOF_FL_VALID) { 14129 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 14130 return (-1); 14131 } 14132 14133 if (dof->dofh_secsize == 0) { 14134 dtrace_dof_error(dof, "zero section header size"); 14135 return (-1); 14136 } 14137 14138 /* 14139 * Check that the section headers don't exceed the amount of DOF 14140 * data. Note that we cast the section size and number of sections 14141 * to uint64_t's to prevent possible overflow in the multiplication. 14142 */ 14143 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 14144 14145 if (dof->dofh_secoff > len || seclen > len || 14146 dof->dofh_secoff + seclen > len) { 14147 dtrace_dof_error(dof, "truncated section headers"); 14148 return (-1); 14149 } 14150 14151 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 14152 dtrace_dof_error(dof, "misaligned section headers"); 14153 return (-1); 14154 } 14155 14156 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 14157 dtrace_dof_error(dof, "misaligned section size"); 14158 return (-1); 14159 } 14160 14161 /* 14162 * Take an initial pass through the section headers to be sure that 14163 * the headers don't have stray offsets. If the 'noprobes' flag is 14164 * set, do not permit sections relating to providers, probes, or args. 14165 */ 14166 for (i = 0; i < dof->dofh_secnum; i++) { 14167 dof_sec_t *sec = (dof_sec_t *)(daddr + 14168 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14169 14170 if (noprobes) { 14171 switch (sec->dofs_type) { 14172 case DOF_SECT_PROVIDER: 14173 case DOF_SECT_PROBES: 14174 case DOF_SECT_PRARGS: 14175 case DOF_SECT_PROFFS: 14176 dtrace_dof_error(dof, "illegal sections " 14177 "for enabling"); 14178 return (-1); 14179 } 14180 } 14181 14182 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 14183 !(sec->dofs_flags & DOF_SECF_LOAD)) { 14184 dtrace_dof_error(dof, "loadable section with load " 14185 "flag unset"); 14186 return (-1); 14187 } 14188 14189 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14190 continue; /* just ignore non-loadable sections */ 14191 14192 if (!ISP2(sec->dofs_align)) { 14193 dtrace_dof_error(dof, "bad section alignment"); 14194 return (-1); 14195 } 14196 14197 if (sec->dofs_offset & (sec->dofs_align - 1)) { 14198 dtrace_dof_error(dof, "misaligned section"); 14199 return (-1); 14200 } 14201 14202 if (sec->dofs_offset > len || sec->dofs_size > len || 14203 sec->dofs_offset + sec->dofs_size > len) { 14204 dtrace_dof_error(dof, "corrupt section header"); 14205 return (-1); 14206 } 14207 14208 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 14209 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 14210 dtrace_dof_error(dof, "non-terminating string table"); 14211 return (-1); 14212 } 14213 } 14214 14215 /* 14216 * Take a second pass through the sections and locate and perform any 14217 * relocations that are present. We do this after the first pass to 14218 * be sure that all sections have had their headers validated. 14219 */ 14220 for (i = 0; i < dof->dofh_secnum; i++) { 14221 dof_sec_t *sec = (dof_sec_t *)(daddr + 14222 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14223 14224 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14225 continue; /* skip sections that are not loadable */ 14226 14227 switch (sec->dofs_type) { 14228 case DOF_SECT_URELHDR: 14229 if (dtrace_dof_relocate(dof, sec, ubase, udaddr) != 0) 14230 return (-1); 14231 break; 14232 } 14233 } 14234 14235 if ((enab = *enabp) == NULL) 14236 enab = *enabp = dtrace_enabling_create(vstate); 14237 14238 for (i = 0; i < dof->dofh_secnum; i++) { 14239 dof_sec_t *sec = (dof_sec_t *)(daddr + 14240 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14241 14242 if (sec->dofs_type != DOF_SECT_ECBDESC) 14243 continue; 14244 14245 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 14246 dtrace_enabling_destroy(enab); 14247 *enabp = NULL; 14248 return (-1); 14249 } 14250 14251 dtrace_enabling_add(enab, ep); 14252 } 14253 14254 return (0); 14255 } 14256 14257 /* 14258 * Process DOF for any options. This routine assumes that the DOF has been 14259 * at least processed by dtrace_dof_slurp(). 14260 */ 14261 static int 14262 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 14263 { 14264 int i, rval; 14265 uint32_t entsize; 14266 size_t offs; 14267 dof_optdesc_t *desc; 14268 14269 for (i = 0; i < dof->dofh_secnum; i++) { 14270 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 14271 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14272 14273 if (sec->dofs_type != DOF_SECT_OPTDESC) 14274 continue; 14275 14276 if (sec->dofs_align != sizeof (uint64_t)) { 14277 dtrace_dof_error(dof, "bad alignment in " 14278 "option description"); 14279 return (EINVAL); 14280 } 14281 14282 if ((entsize = sec->dofs_entsize) == 0) { 14283 dtrace_dof_error(dof, "zeroed option entry size"); 14284 return (EINVAL); 14285 } 14286 14287 if (entsize < sizeof (dof_optdesc_t)) { 14288 dtrace_dof_error(dof, "bad option entry size"); 14289 return (EINVAL); 14290 } 14291 14292 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 14293 desc = (dof_optdesc_t *)((uintptr_t)dof + 14294 (uintptr_t)sec->dofs_offset + offs); 14295 14296 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 14297 dtrace_dof_error(dof, "non-zero option string"); 14298 return (EINVAL); 14299 } 14300 14301 if (desc->dofo_value == DTRACEOPT_UNSET) { 14302 dtrace_dof_error(dof, "unset option"); 14303 return (EINVAL); 14304 } 14305 14306 if ((rval = dtrace_state_option(state, 14307 desc->dofo_option, desc->dofo_value)) != 0) { 14308 dtrace_dof_error(dof, "rejected option"); 14309 return (rval); 14310 } 14311 } 14312 } 14313 14314 return (0); 14315 } 14316 14317 /* 14318 * DTrace Consumer State Functions 14319 */ 14320 static int 14321 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 14322 { 14323 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 14324 void *base; 14325 uintptr_t limit; 14326 dtrace_dynvar_t *dvar, *next, *start; 14327 int i; 14328 14329 ASSERT(MUTEX_HELD(&dtrace_lock)); 14330 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 14331 14332 bzero(dstate, sizeof (dtrace_dstate_t)); 14333 14334 if ((dstate->dtds_chunksize = chunksize) == 0) 14335 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 14336 14337 VERIFY(dstate->dtds_chunksize < LONG_MAX); 14338 14339 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 14340 size = min; 14341 14342 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 14343 return (ENOMEM); 14344 14345 dstate->dtds_size = size; 14346 dstate->dtds_base = base; 14347 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 14348 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 14349 14350 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 14351 14352 if (hashsize != 1 && (hashsize & 1)) 14353 hashsize--; 14354 14355 dstate->dtds_hashsize = hashsize; 14356 dstate->dtds_hash = dstate->dtds_base; 14357 14358 /* 14359 * Set all of our hash buckets to point to the single sink, and (if 14360 * it hasn't already been set), set the sink's hash value to be the 14361 * sink sentinel value. The sink is needed for dynamic variable 14362 * lookups to know that they have iterated over an entire, valid hash 14363 * chain. 14364 */ 14365 for (i = 0; i < hashsize; i++) 14366 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 14367 14368 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 14369 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 14370 14371 /* 14372 * Determine number of active CPUs. Divide free list evenly among 14373 * active CPUs. 14374 */ 14375 start = (dtrace_dynvar_t *) 14376 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 14377 limit = (uintptr_t)base + size; 14378 14379 VERIFY((uintptr_t)start < limit); 14380 VERIFY((uintptr_t)start >= (uintptr_t)base); 14381 14382 maxper = (limit - (uintptr_t)start) / NCPU; 14383 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 14384 14385 #ifndef illumos 14386 CPU_FOREACH(i) { 14387 #else 14388 for (i = 0; i < NCPU; i++) { 14389 #endif 14390 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 14391 14392 /* 14393 * If we don't even have enough chunks to make it once through 14394 * NCPUs, we're just going to allocate everything to the first 14395 * CPU. And if we're on the last CPU, we're going to allocate 14396 * whatever is left over. In either case, we set the limit to 14397 * be the limit of the dynamic variable space. 14398 */ 14399 if (maxper == 0 || i == NCPU - 1) { 14400 limit = (uintptr_t)base + size; 14401 start = NULL; 14402 } else { 14403 limit = (uintptr_t)start + maxper; 14404 start = (dtrace_dynvar_t *)limit; 14405 } 14406 14407 VERIFY(limit <= (uintptr_t)base + size); 14408 14409 for (;;) { 14410 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 14411 dstate->dtds_chunksize); 14412 14413 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 14414 break; 14415 14416 VERIFY((uintptr_t)dvar >= (uintptr_t)base && 14417 (uintptr_t)dvar <= (uintptr_t)base + size); 14418 dvar->dtdv_next = next; 14419 dvar = next; 14420 } 14421 14422 if (maxper == 0) 14423 break; 14424 } 14425 14426 return (0); 14427 } 14428 14429 static void 14430 dtrace_dstate_fini(dtrace_dstate_t *dstate) 14431 { 14432 ASSERT(MUTEX_HELD(&cpu_lock)); 14433 14434 if (dstate->dtds_base == NULL) 14435 return; 14436 14437 kmem_free(dstate->dtds_base, dstate->dtds_size); 14438 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 14439 } 14440 14441 static void 14442 dtrace_vstate_fini(dtrace_vstate_t *vstate) 14443 { 14444 /* 14445 * Logical XOR, where are you? 14446 */ 14447 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 14448 14449 if (vstate->dtvs_nglobals > 0) { 14450 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 14451 sizeof (dtrace_statvar_t *)); 14452 } 14453 14454 if (vstate->dtvs_ntlocals > 0) { 14455 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 14456 sizeof (dtrace_difv_t)); 14457 } 14458 14459 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 14460 14461 if (vstate->dtvs_nlocals > 0) { 14462 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 14463 sizeof (dtrace_statvar_t *)); 14464 } 14465 } 14466 14467 #ifdef illumos 14468 static void 14469 dtrace_state_clean(dtrace_state_t *state) 14470 { 14471 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14472 return; 14473 14474 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14475 dtrace_speculation_clean(state); 14476 } 14477 14478 static void 14479 dtrace_state_deadman(dtrace_state_t *state) 14480 { 14481 hrtime_t now; 14482 14483 dtrace_sync(); 14484 14485 now = dtrace_gethrtime(); 14486 14487 if (state != dtrace_anon.dta_state && 14488 now - state->dts_laststatus >= dtrace_deadman_user) 14489 return; 14490 14491 /* 14492 * We must be sure that dts_alive never appears to be less than the 14493 * value upon entry to dtrace_state_deadman(), and because we lack a 14494 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14495 * store INT64_MAX to it, followed by a memory barrier, followed by 14496 * the new value. This assures that dts_alive never appears to be 14497 * less than its true value, regardless of the order in which the 14498 * stores to the underlying storage are issued. 14499 */ 14500 state->dts_alive = INT64_MAX; 14501 dtrace_membar_producer(); 14502 state->dts_alive = now; 14503 } 14504 #else /* !illumos */ 14505 static void 14506 dtrace_state_clean(void *arg) 14507 { 14508 dtrace_state_t *state = arg; 14509 dtrace_optval_t *opt = state->dts_options; 14510 14511 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14512 return; 14513 14514 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14515 dtrace_speculation_clean(state); 14516 14517 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14518 dtrace_state_clean, state); 14519 } 14520 14521 static void 14522 dtrace_state_deadman(void *arg) 14523 { 14524 dtrace_state_t *state = arg; 14525 hrtime_t now; 14526 14527 dtrace_sync(); 14528 14529 dtrace_debug_output(); 14530 14531 now = dtrace_gethrtime(); 14532 14533 if (state != dtrace_anon.dta_state && 14534 now - state->dts_laststatus >= dtrace_deadman_user) 14535 return; 14536 14537 /* 14538 * We must be sure that dts_alive never appears to be less than the 14539 * value upon entry to dtrace_state_deadman(), and because we lack a 14540 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14541 * store INT64_MAX to it, followed by a memory barrier, followed by 14542 * the new value. This assures that dts_alive never appears to be 14543 * less than its true value, regardless of the order in which the 14544 * stores to the underlying storage are issued. 14545 */ 14546 state->dts_alive = INT64_MAX; 14547 dtrace_membar_producer(); 14548 state->dts_alive = now; 14549 14550 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14551 dtrace_state_deadman, state); 14552 } 14553 #endif /* illumos */ 14554 14555 static dtrace_state_t * 14556 #ifdef illumos 14557 dtrace_state_create(dev_t *devp, cred_t *cr) 14558 #else 14559 dtrace_state_create(struct cdev *dev, struct ucred *cred __unused) 14560 #endif 14561 { 14562 #ifdef illumos 14563 minor_t minor; 14564 major_t major; 14565 #else 14566 cred_t *cr = NULL; 14567 int m = 0; 14568 #endif 14569 char c[30]; 14570 dtrace_state_t *state; 14571 dtrace_optval_t *opt; 14572 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 14573 int cpu_it; 14574 14575 ASSERT(MUTEX_HELD(&dtrace_lock)); 14576 ASSERT(MUTEX_HELD(&cpu_lock)); 14577 14578 #ifdef illumos 14579 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 14580 VM_BESTFIT | VM_SLEEP); 14581 14582 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 14583 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14584 return (NULL); 14585 } 14586 14587 state = ddi_get_soft_state(dtrace_softstate, minor); 14588 #else 14589 if (dev != NULL) { 14590 cr = dev->si_cred; 14591 m = dev2unit(dev); 14592 } 14593 14594 /* Allocate memory for the state. */ 14595 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 14596 #endif 14597 14598 state->dts_epid = DTRACE_EPIDNONE + 1; 14599 14600 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 14601 #ifdef illumos 14602 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 14603 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14604 14605 if (devp != NULL) { 14606 major = getemajor(*devp); 14607 } else { 14608 major = ddi_driver_major(dtrace_devi); 14609 } 14610 14611 state->dts_dev = makedevice(major, minor); 14612 14613 if (devp != NULL) 14614 *devp = state->dts_dev; 14615 #else 14616 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 14617 state->dts_dev = dev; 14618 #endif 14619 14620 /* 14621 * We allocate NCPU buffers. On the one hand, this can be quite 14622 * a bit of memory per instance (nearly 36K on a Starcat). On the 14623 * other hand, it saves an additional memory reference in the probe 14624 * path. 14625 */ 14626 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 14627 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 14628 14629 /* 14630 * Allocate and initialise the per-process per-CPU random state. 14631 * SI_SUB_RANDOM < SI_SUB_DTRACE_ANON therefore entropy device is 14632 * assumed to be seeded at this point (if from Fortuna seed file). 14633 */ 14634 arc4random_buf(&state->dts_rstate[0], 2 * sizeof(uint64_t)); 14635 for (cpu_it = 1; cpu_it < NCPU; cpu_it++) { 14636 /* 14637 * Each CPU is assigned a 2^64 period, non-overlapping 14638 * subsequence. 14639 */ 14640 dtrace_xoroshiro128_plus_jump(state->dts_rstate[cpu_it-1], 14641 state->dts_rstate[cpu_it]); 14642 } 14643 14644 #ifdef illumos 14645 state->dts_cleaner = CYCLIC_NONE; 14646 state->dts_deadman = CYCLIC_NONE; 14647 #else 14648 callout_init(&state->dts_cleaner, 1); 14649 callout_init(&state->dts_deadman, 1); 14650 #endif 14651 state->dts_vstate.dtvs_state = state; 14652 14653 for (i = 0; i < DTRACEOPT_MAX; i++) 14654 state->dts_options[i] = DTRACEOPT_UNSET; 14655 14656 /* 14657 * Set the default options. 14658 */ 14659 opt = state->dts_options; 14660 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 14661 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 14662 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 14663 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 14664 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 14665 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 14666 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 14667 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 14668 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 14669 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 14670 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 14671 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 14672 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 14673 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 14674 14675 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 14676 14677 /* 14678 * Depending on the user credentials, we set flag bits which alter probe 14679 * visibility or the amount of destructiveness allowed. In the case of 14680 * actual anonymous tracing, or the possession of all privileges, all of 14681 * the normal checks are bypassed. 14682 */ 14683 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 14684 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 14685 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 14686 } else { 14687 /* 14688 * Set up the credentials for this instantiation. We take a 14689 * hold on the credential to prevent it from disappearing on 14690 * us; this in turn prevents the zone_t referenced by this 14691 * credential from disappearing. This means that we can 14692 * examine the credential and the zone from probe context. 14693 */ 14694 crhold(cr); 14695 state->dts_cred.dcr_cred = cr; 14696 14697 /* 14698 * CRA_PROC means "we have *some* privilege for dtrace" and 14699 * unlocks the use of variables like pid, zonename, etc. 14700 */ 14701 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 14702 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14703 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 14704 } 14705 14706 /* 14707 * dtrace_user allows use of syscall and profile providers. 14708 * If the user also has proc_owner and/or proc_zone, we 14709 * extend the scope to include additional visibility and 14710 * destructive power. 14711 */ 14712 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 14713 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 14714 state->dts_cred.dcr_visible |= 14715 DTRACE_CRV_ALLPROC; 14716 14717 state->dts_cred.dcr_action |= 14718 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14719 } 14720 14721 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 14722 state->dts_cred.dcr_visible |= 14723 DTRACE_CRV_ALLZONE; 14724 14725 state->dts_cred.dcr_action |= 14726 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14727 } 14728 14729 /* 14730 * If we have all privs in whatever zone this is, 14731 * we can do destructive things to processes which 14732 * have altered credentials. 14733 */ 14734 #ifdef illumos 14735 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14736 cr->cr_zone->zone_privset)) { 14737 state->dts_cred.dcr_action |= 14738 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14739 } 14740 #endif 14741 } 14742 14743 /* 14744 * Holding the dtrace_kernel privilege also implies that 14745 * the user has the dtrace_user privilege from a visibility 14746 * perspective. But without further privileges, some 14747 * destructive actions are not available. 14748 */ 14749 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 14750 /* 14751 * Make all probes in all zones visible. However, 14752 * this doesn't mean that all actions become available 14753 * to all zones. 14754 */ 14755 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 14756 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 14757 14758 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 14759 DTRACE_CRA_PROC; 14760 /* 14761 * Holding proc_owner means that destructive actions 14762 * for *this* zone are allowed. 14763 */ 14764 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14765 state->dts_cred.dcr_action |= 14766 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14767 14768 /* 14769 * Holding proc_zone means that destructive actions 14770 * for this user/group ID in all zones is allowed. 14771 */ 14772 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14773 state->dts_cred.dcr_action |= 14774 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14775 14776 #ifdef illumos 14777 /* 14778 * If we have all privs in whatever zone this is, 14779 * we can do destructive things to processes which 14780 * have altered credentials. 14781 */ 14782 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14783 cr->cr_zone->zone_privset)) { 14784 state->dts_cred.dcr_action |= 14785 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14786 } 14787 #endif 14788 } 14789 14790 /* 14791 * Holding the dtrace_proc privilege gives control over fasttrap 14792 * and pid providers. We need to grant wider destructive 14793 * privileges in the event that the user has proc_owner and/or 14794 * proc_zone. 14795 */ 14796 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14797 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14798 state->dts_cred.dcr_action |= 14799 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14800 14801 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14802 state->dts_cred.dcr_action |= 14803 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14804 } 14805 } 14806 14807 return (state); 14808 } 14809 14810 static int 14811 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 14812 { 14813 dtrace_optval_t *opt = state->dts_options, size; 14814 processorid_t cpu = 0;; 14815 int flags = 0, rval, factor, divisor = 1; 14816 14817 ASSERT(MUTEX_HELD(&dtrace_lock)); 14818 ASSERT(MUTEX_HELD(&cpu_lock)); 14819 ASSERT(which < DTRACEOPT_MAX); 14820 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 14821 (state == dtrace_anon.dta_state && 14822 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 14823 14824 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 14825 return (0); 14826 14827 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 14828 cpu = opt[DTRACEOPT_CPU]; 14829 14830 if (which == DTRACEOPT_SPECSIZE) 14831 flags |= DTRACEBUF_NOSWITCH; 14832 14833 if (which == DTRACEOPT_BUFSIZE) { 14834 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 14835 flags |= DTRACEBUF_RING; 14836 14837 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 14838 flags |= DTRACEBUF_FILL; 14839 14840 if (state != dtrace_anon.dta_state || 14841 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14842 flags |= DTRACEBUF_INACTIVE; 14843 } 14844 14845 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) { 14846 /* 14847 * The size must be 8-byte aligned. If the size is not 8-byte 14848 * aligned, drop it down by the difference. 14849 */ 14850 if (size & (sizeof (uint64_t) - 1)) 14851 size -= size & (sizeof (uint64_t) - 1); 14852 14853 if (size < state->dts_reserve) { 14854 /* 14855 * Buffers always must be large enough to accommodate 14856 * their prereserved space. We return E2BIG instead 14857 * of ENOMEM in this case to allow for user-level 14858 * software to differentiate the cases. 14859 */ 14860 return (E2BIG); 14861 } 14862 14863 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor); 14864 14865 if (rval != ENOMEM) { 14866 opt[which] = size; 14867 return (rval); 14868 } 14869 14870 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14871 return (rval); 14872 14873 for (divisor = 2; divisor < factor; divisor <<= 1) 14874 continue; 14875 } 14876 14877 return (ENOMEM); 14878 } 14879 14880 static int 14881 dtrace_state_buffers(dtrace_state_t *state) 14882 { 14883 dtrace_speculation_t *spec = state->dts_speculations; 14884 int rval, i; 14885 14886 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 14887 DTRACEOPT_BUFSIZE)) != 0) 14888 return (rval); 14889 14890 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 14891 DTRACEOPT_AGGSIZE)) != 0) 14892 return (rval); 14893 14894 for (i = 0; i < state->dts_nspeculations; i++) { 14895 if ((rval = dtrace_state_buffer(state, 14896 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 14897 return (rval); 14898 } 14899 14900 return (0); 14901 } 14902 14903 static void 14904 dtrace_state_prereserve(dtrace_state_t *state) 14905 { 14906 dtrace_ecb_t *ecb; 14907 dtrace_probe_t *probe; 14908 14909 state->dts_reserve = 0; 14910 14911 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 14912 return; 14913 14914 /* 14915 * If our buffer policy is a "fill" buffer policy, we need to set the 14916 * prereserved space to be the space required by the END probes. 14917 */ 14918 probe = dtrace_probes[dtrace_probeid_end - 1]; 14919 ASSERT(probe != NULL); 14920 14921 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 14922 if (ecb->dte_state != state) 14923 continue; 14924 14925 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 14926 } 14927 } 14928 14929 static int 14930 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 14931 { 14932 dtrace_optval_t *opt = state->dts_options, sz, nspec; 14933 dtrace_speculation_t *spec; 14934 dtrace_buffer_t *buf; 14935 #ifdef illumos 14936 cyc_handler_t hdlr; 14937 cyc_time_t when; 14938 #endif 14939 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 14940 dtrace_icookie_t cookie; 14941 14942 mutex_enter(&cpu_lock); 14943 mutex_enter(&dtrace_lock); 14944 14945 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14946 rval = EBUSY; 14947 goto out; 14948 } 14949 14950 /* 14951 * Before we can perform any checks, we must prime all of the 14952 * retained enablings that correspond to this state. 14953 */ 14954 dtrace_enabling_prime(state); 14955 14956 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 14957 rval = EACCES; 14958 goto out; 14959 } 14960 14961 dtrace_state_prereserve(state); 14962 14963 /* 14964 * Now we want to do is try to allocate our speculations. 14965 * We do not automatically resize the number of speculations; if 14966 * this fails, we will fail the operation. 14967 */ 14968 nspec = opt[DTRACEOPT_NSPEC]; 14969 ASSERT(nspec != DTRACEOPT_UNSET); 14970 14971 if (nspec > INT_MAX) { 14972 rval = ENOMEM; 14973 goto out; 14974 } 14975 14976 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 14977 KM_NOSLEEP | KM_NORMALPRI); 14978 14979 if (spec == NULL) { 14980 rval = ENOMEM; 14981 goto out; 14982 } 14983 14984 state->dts_speculations = spec; 14985 state->dts_nspeculations = (int)nspec; 14986 14987 for (i = 0; i < nspec; i++) { 14988 if ((buf = kmem_zalloc(bufsize, 14989 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 14990 rval = ENOMEM; 14991 goto err; 14992 } 14993 14994 spec[i].dtsp_buffer = buf; 14995 } 14996 14997 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 14998 if (dtrace_anon.dta_state == NULL) { 14999 rval = ENOENT; 15000 goto out; 15001 } 15002 15003 if (state->dts_necbs != 0) { 15004 rval = EALREADY; 15005 goto out; 15006 } 15007 15008 state->dts_anon = dtrace_anon_grab(); 15009 ASSERT(state->dts_anon != NULL); 15010 state = state->dts_anon; 15011 15012 /* 15013 * We want "grabanon" to be set in the grabbed state, so we'll 15014 * copy that option value from the grabbing state into the 15015 * grabbed state. 15016 */ 15017 state->dts_options[DTRACEOPT_GRABANON] = 15018 opt[DTRACEOPT_GRABANON]; 15019 15020 *cpu = dtrace_anon.dta_beganon; 15021 15022 /* 15023 * If the anonymous state is active (as it almost certainly 15024 * is if the anonymous enabling ultimately matched anything), 15025 * we don't allow any further option processing -- but we 15026 * don't return failure. 15027 */ 15028 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 15029 goto out; 15030 } 15031 15032 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 15033 opt[DTRACEOPT_AGGSIZE] != 0) { 15034 if (state->dts_aggregations == NULL) { 15035 /* 15036 * We're not going to create an aggregation buffer 15037 * because we don't have any ECBs that contain 15038 * aggregations -- set this option to 0. 15039 */ 15040 opt[DTRACEOPT_AGGSIZE] = 0; 15041 } else { 15042 /* 15043 * If we have an aggregation buffer, we must also have 15044 * a buffer to use as scratch. 15045 */ 15046 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 15047 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 15048 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 15049 } 15050 } 15051 } 15052 15053 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 15054 opt[DTRACEOPT_SPECSIZE] != 0) { 15055 if (!state->dts_speculates) { 15056 /* 15057 * We're not going to create speculation buffers 15058 * because we don't have any ECBs that actually 15059 * speculate -- set the speculation size to 0. 15060 */ 15061 opt[DTRACEOPT_SPECSIZE] = 0; 15062 } 15063 } 15064 15065 /* 15066 * The bare minimum size for any buffer that we're actually going to 15067 * do anything to is sizeof (uint64_t). 15068 */ 15069 sz = sizeof (uint64_t); 15070 15071 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 15072 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 15073 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 15074 /* 15075 * A buffer size has been explicitly set to 0 (or to a size 15076 * that will be adjusted to 0) and we need the space -- we 15077 * need to return failure. We return ENOSPC to differentiate 15078 * it from failing to allocate a buffer due to failure to meet 15079 * the reserve (for which we return E2BIG). 15080 */ 15081 rval = ENOSPC; 15082 goto out; 15083 } 15084 15085 if ((rval = dtrace_state_buffers(state)) != 0) 15086 goto err; 15087 15088 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 15089 sz = dtrace_dstate_defsize; 15090 15091 do { 15092 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 15093 15094 if (rval == 0) 15095 break; 15096 15097 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 15098 goto err; 15099 } while (sz >>= 1); 15100 15101 opt[DTRACEOPT_DYNVARSIZE] = sz; 15102 15103 if (rval != 0) 15104 goto err; 15105 15106 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 15107 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 15108 15109 if (opt[DTRACEOPT_CLEANRATE] == 0) 15110 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 15111 15112 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 15113 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 15114 15115 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 15116 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 15117 15118 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 15119 #ifdef illumos 15120 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 15121 hdlr.cyh_arg = state; 15122 hdlr.cyh_level = CY_LOW_LEVEL; 15123 15124 when.cyt_when = 0; 15125 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 15126 15127 state->dts_cleaner = cyclic_add(&hdlr, &when); 15128 15129 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 15130 hdlr.cyh_arg = state; 15131 hdlr.cyh_level = CY_LOW_LEVEL; 15132 15133 when.cyt_when = 0; 15134 when.cyt_interval = dtrace_deadman_interval; 15135 15136 state->dts_deadman = cyclic_add(&hdlr, &when); 15137 #else 15138 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 15139 dtrace_state_clean, state); 15140 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 15141 dtrace_state_deadman, state); 15142 #endif 15143 15144 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 15145 15146 #ifdef illumos 15147 if (state->dts_getf != 0 && 15148 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 15149 /* 15150 * We don't have kernel privs but we have at least one call 15151 * to getf(); we need to bump our zone's count, and (if 15152 * this is the first enabling to have an unprivileged call 15153 * to getf()) we need to hook into closef(). 15154 */ 15155 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 15156 15157 if (dtrace_getf++ == 0) { 15158 ASSERT(dtrace_closef == NULL); 15159 dtrace_closef = dtrace_getf_barrier; 15160 } 15161 } 15162 #endif 15163 15164 /* 15165 * Now it's time to actually fire the BEGIN probe. We need to disable 15166 * interrupts here both to record the CPU on which we fired the BEGIN 15167 * probe (the data from this CPU will be processed first at user 15168 * level) and to manually activate the buffer for this CPU. 15169 */ 15170 cookie = dtrace_interrupt_disable(); 15171 *cpu = curcpu; 15172 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 15173 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 15174 15175 dtrace_probe(dtrace_probeid_begin, 15176 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 15177 dtrace_interrupt_enable(cookie); 15178 /* 15179 * We may have had an exit action from a BEGIN probe; only change our 15180 * state to ACTIVE if we're still in WARMUP. 15181 */ 15182 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 15183 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 15184 15185 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 15186 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 15187 15188 #ifdef __FreeBSD__ 15189 /* 15190 * We enable anonymous tracing before APs are started, so we must 15191 * activate buffers using the current CPU. 15192 */ 15193 if (state == dtrace_anon.dta_state) 15194 for (int i = 0; i < NCPU; i++) 15195 dtrace_buffer_activate_cpu(state, i); 15196 else 15197 dtrace_xcall(DTRACE_CPUALL, 15198 (dtrace_xcall_t)dtrace_buffer_activate, state); 15199 #else 15200 /* 15201 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 15202 * want each CPU to transition its principal buffer out of the 15203 * INACTIVE state. Doing this assures that no CPU will suddenly begin 15204 * processing an ECB halfway down a probe's ECB chain; all CPUs will 15205 * atomically transition from processing none of a state's ECBs to 15206 * processing all of them. 15207 */ 15208 dtrace_xcall(DTRACE_CPUALL, 15209 (dtrace_xcall_t)dtrace_buffer_activate, state); 15210 #endif 15211 goto out; 15212 15213 err: 15214 dtrace_buffer_free(state->dts_buffer); 15215 dtrace_buffer_free(state->dts_aggbuffer); 15216 15217 if ((nspec = state->dts_nspeculations) == 0) { 15218 ASSERT(state->dts_speculations == NULL); 15219 goto out; 15220 } 15221 15222 spec = state->dts_speculations; 15223 ASSERT(spec != NULL); 15224 15225 for (i = 0; i < state->dts_nspeculations; i++) { 15226 if ((buf = spec[i].dtsp_buffer) == NULL) 15227 break; 15228 15229 dtrace_buffer_free(buf); 15230 kmem_free(buf, bufsize); 15231 } 15232 15233 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15234 state->dts_nspeculations = 0; 15235 state->dts_speculations = NULL; 15236 15237 out: 15238 mutex_exit(&dtrace_lock); 15239 mutex_exit(&cpu_lock); 15240 15241 return (rval); 15242 } 15243 15244 static int 15245 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 15246 { 15247 dtrace_icookie_t cookie; 15248 15249 ASSERT(MUTEX_HELD(&dtrace_lock)); 15250 15251 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 15252 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 15253 return (EINVAL); 15254 15255 /* 15256 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 15257 * to be sure that every CPU has seen it. See below for the details 15258 * on why this is done. 15259 */ 15260 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 15261 dtrace_sync(); 15262 15263 /* 15264 * By this point, it is impossible for any CPU to be still processing 15265 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 15266 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 15267 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 15268 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 15269 * iff we're in the END probe. 15270 */ 15271 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 15272 dtrace_sync(); 15273 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 15274 15275 /* 15276 * Finally, we can release the reserve and call the END probe. We 15277 * disable interrupts across calling the END probe to allow us to 15278 * return the CPU on which we actually called the END probe. This 15279 * allows user-land to be sure that this CPU's principal buffer is 15280 * processed last. 15281 */ 15282 state->dts_reserve = 0; 15283 15284 cookie = dtrace_interrupt_disable(); 15285 *cpu = curcpu; 15286 dtrace_probe(dtrace_probeid_end, 15287 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 15288 dtrace_interrupt_enable(cookie); 15289 15290 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 15291 dtrace_sync(); 15292 15293 #ifdef illumos 15294 if (state->dts_getf != 0 && 15295 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 15296 /* 15297 * We don't have kernel privs but we have at least one call 15298 * to getf(); we need to lower our zone's count, and (if 15299 * this is the last enabling to have an unprivileged call 15300 * to getf()) we need to clear the closef() hook. 15301 */ 15302 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0); 15303 ASSERT(dtrace_closef == dtrace_getf_barrier); 15304 ASSERT(dtrace_getf > 0); 15305 15306 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--; 15307 15308 if (--dtrace_getf == 0) 15309 dtrace_closef = NULL; 15310 } 15311 #endif 15312 15313 return (0); 15314 } 15315 15316 static int 15317 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 15318 dtrace_optval_t val) 15319 { 15320 ASSERT(MUTEX_HELD(&dtrace_lock)); 15321 15322 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 15323 return (EBUSY); 15324 15325 if (option >= DTRACEOPT_MAX) 15326 return (EINVAL); 15327 15328 if (option != DTRACEOPT_CPU && val < 0) 15329 return (EINVAL); 15330 15331 switch (option) { 15332 case DTRACEOPT_DESTRUCTIVE: 15333 if (dtrace_destructive_disallow) 15334 return (EACCES); 15335 15336 state->dts_cred.dcr_destructive = 1; 15337 break; 15338 15339 case DTRACEOPT_BUFSIZE: 15340 case DTRACEOPT_DYNVARSIZE: 15341 case DTRACEOPT_AGGSIZE: 15342 case DTRACEOPT_SPECSIZE: 15343 case DTRACEOPT_STRSIZE: 15344 if (val < 0) 15345 return (EINVAL); 15346 15347 if (val >= LONG_MAX) { 15348 /* 15349 * If this is an otherwise negative value, set it to 15350 * the highest multiple of 128m less than LONG_MAX. 15351 * Technically, we're adjusting the size without 15352 * regard to the buffer resizing policy, but in fact, 15353 * this has no effect -- if we set the buffer size to 15354 * ~LONG_MAX and the buffer policy is ultimately set to 15355 * be "manual", the buffer allocation is guaranteed to 15356 * fail, if only because the allocation requires two 15357 * buffers. (We set the the size to the highest 15358 * multiple of 128m because it ensures that the size 15359 * will remain a multiple of a megabyte when 15360 * repeatedly halved -- all the way down to 15m.) 15361 */ 15362 val = LONG_MAX - (1 << 27) + 1; 15363 } 15364 } 15365 15366 state->dts_options[option] = val; 15367 15368 return (0); 15369 } 15370 15371 static void 15372 dtrace_state_destroy(dtrace_state_t *state) 15373 { 15374 dtrace_ecb_t *ecb; 15375 dtrace_vstate_t *vstate = &state->dts_vstate; 15376 #ifdef illumos 15377 minor_t minor = getminor(state->dts_dev); 15378 #endif 15379 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 15380 dtrace_speculation_t *spec = state->dts_speculations; 15381 int nspec = state->dts_nspeculations; 15382 uint32_t match; 15383 15384 ASSERT(MUTEX_HELD(&dtrace_lock)); 15385 ASSERT(MUTEX_HELD(&cpu_lock)); 15386 15387 /* 15388 * First, retract any retained enablings for this state. 15389 */ 15390 dtrace_enabling_retract(state); 15391 ASSERT(state->dts_nretained == 0); 15392 15393 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 15394 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 15395 /* 15396 * We have managed to come into dtrace_state_destroy() on a 15397 * hot enabling -- almost certainly because of a disorderly 15398 * shutdown of a consumer. (That is, a consumer that is 15399 * exiting without having called dtrace_stop().) In this case, 15400 * we're going to set our activity to be KILLED, and then 15401 * issue a sync to be sure that everyone is out of probe 15402 * context before we start blowing away ECBs. 15403 */ 15404 state->dts_activity = DTRACE_ACTIVITY_KILLED; 15405 dtrace_sync(); 15406 } 15407 15408 /* 15409 * Release the credential hold we took in dtrace_state_create(). 15410 */ 15411 if (state->dts_cred.dcr_cred != NULL) 15412 crfree(state->dts_cred.dcr_cred); 15413 15414 /* 15415 * Now we can safely disable and destroy any enabled probes. Because 15416 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 15417 * (especially if they're all enabled), we take two passes through the 15418 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 15419 * in the second we disable whatever is left over. 15420 */ 15421 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 15422 for (i = 0; i < state->dts_necbs; i++) { 15423 if ((ecb = state->dts_ecbs[i]) == NULL) 15424 continue; 15425 15426 if (match && ecb->dte_probe != NULL) { 15427 dtrace_probe_t *probe = ecb->dte_probe; 15428 dtrace_provider_t *prov = probe->dtpr_provider; 15429 15430 if (!(prov->dtpv_priv.dtpp_flags & match)) 15431 continue; 15432 } 15433 15434 dtrace_ecb_disable(ecb); 15435 dtrace_ecb_destroy(ecb); 15436 } 15437 15438 if (!match) 15439 break; 15440 } 15441 15442 /* 15443 * Before we free the buffers, perform one more sync to assure that 15444 * every CPU is out of probe context. 15445 */ 15446 dtrace_sync(); 15447 15448 dtrace_buffer_free(state->dts_buffer); 15449 dtrace_buffer_free(state->dts_aggbuffer); 15450 15451 for (i = 0; i < nspec; i++) 15452 dtrace_buffer_free(spec[i].dtsp_buffer); 15453 15454 #ifdef illumos 15455 if (state->dts_cleaner != CYCLIC_NONE) 15456 cyclic_remove(state->dts_cleaner); 15457 15458 if (state->dts_deadman != CYCLIC_NONE) 15459 cyclic_remove(state->dts_deadman); 15460 #else 15461 callout_stop(&state->dts_cleaner); 15462 callout_drain(&state->dts_cleaner); 15463 callout_stop(&state->dts_deadman); 15464 callout_drain(&state->dts_deadman); 15465 #endif 15466 15467 dtrace_dstate_fini(&vstate->dtvs_dynvars); 15468 dtrace_vstate_fini(vstate); 15469 if (state->dts_ecbs != NULL) 15470 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 15471 15472 if (state->dts_aggregations != NULL) { 15473 #ifdef DEBUG 15474 for (i = 0; i < state->dts_naggregations; i++) 15475 ASSERT(state->dts_aggregations[i] == NULL); 15476 #endif 15477 ASSERT(state->dts_naggregations > 0); 15478 kmem_free(state->dts_aggregations, 15479 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 15480 } 15481 15482 kmem_free(state->dts_buffer, bufsize); 15483 kmem_free(state->dts_aggbuffer, bufsize); 15484 15485 for (i = 0; i < nspec; i++) 15486 kmem_free(spec[i].dtsp_buffer, bufsize); 15487 15488 if (spec != NULL) 15489 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15490 15491 dtrace_format_destroy(state); 15492 15493 if (state->dts_aggid_arena != NULL) { 15494 #ifdef illumos 15495 vmem_destroy(state->dts_aggid_arena); 15496 #else 15497 delete_unrhdr(state->dts_aggid_arena); 15498 #endif 15499 state->dts_aggid_arena = NULL; 15500 } 15501 #ifdef illumos 15502 ddi_soft_state_free(dtrace_softstate, minor); 15503 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 15504 #endif 15505 } 15506 15507 /* 15508 * DTrace Anonymous Enabling Functions 15509 */ 15510 static dtrace_state_t * 15511 dtrace_anon_grab(void) 15512 { 15513 dtrace_state_t *state; 15514 15515 ASSERT(MUTEX_HELD(&dtrace_lock)); 15516 15517 if ((state = dtrace_anon.dta_state) == NULL) { 15518 ASSERT(dtrace_anon.dta_enabling == NULL); 15519 return (NULL); 15520 } 15521 15522 ASSERT(dtrace_anon.dta_enabling != NULL); 15523 ASSERT(dtrace_retained != NULL); 15524 15525 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 15526 dtrace_anon.dta_enabling = NULL; 15527 dtrace_anon.dta_state = NULL; 15528 15529 return (state); 15530 } 15531 15532 static void 15533 dtrace_anon_property(void) 15534 { 15535 int i, rv; 15536 dtrace_state_t *state; 15537 dof_hdr_t *dof; 15538 char c[32]; /* enough for "dof-data-" + digits */ 15539 15540 ASSERT(MUTEX_HELD(&dtrace_lock)); 15541 ASSERT(MUTEX_HELD(&cpu_lock)); 15542 15543 for (i = 0; ; i++) { 15544 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 15545 15546 dtrace_err_verbose = 1; 15547 15548 if ((dof = dtrace_dof_property(c)) == NULL) { 15549 dtrace_err_verbose = 0; 15550 break; 15551 } 15552 15553 #ifdef illumos 15554 /* 15555 * We want to create anonymous state, so we need to transition 15556 * the kernel debugger to indicate that DTrace is active. If 15557 * this fails (e.g. because the debugger has modified text in 15558 * some way), we won't continue with the processing. 15559 */ 15560 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15561 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 15562 "enabling ignored."); 15563 dtrace_dof_destroy(dof); 15564 break; 15565 } 15566 #endif 15567 15568 /* 15569 * If we haven't allocated an anonymous state, we'll do so now. 15570 */ 15571 if ((state = dtrace_anon.dta_state) == NULL) { 15572 state = dtrace_state_create(NULL, NULL); 15573 dtrace_anon.dta_state = state; 15574 15575 if (state == NULL) { 15576 /* 15577 * This basically shouldn't happen: the only 15578 * failure mode from dtrace_state_create() is a 15579 * failure of ddi_soft_state_zalloc() that 15580 * itself should never happen. Still, the 15581 * interface allows for a failure mode, and 15582 * we want to fail as gracefully as possible: 15583 * we'll emit an error message and cease 15584 * processing anonymous state in this case. 15585 */ 15586 cmn_err(CE_WARN, "failed to create " 15587 "anonymous state"); 15588 dtrace_dof_destroy(dof); 15589 break; 15590 } 15591 } 15592 15593 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 15594 &dtrace_anon.dta_enabling, 0, 0, B_TRUE); 15595 15596 if (rv == 0) 15597 rv = dtrace_dof_options(dof, state); 15598 15599 dtrace_err_verbose = 0; 15600 dtrace_dof_destroy(dof); 15601 15602 if (rv != 0) { 15603 /* 15604 * This is malformed DOF; chuck any anonymous state 15605 * that we created. 15606 */ 15607 ASSERT(dtrace_anon.dta_enabling == NULL); 15608 dtrace_state_destroy(state); 15609 dtrace_anon.dta_state = NULL; 15610 break; 15611 } 15612 15613 ASSERT(dtrace_anon.dta_enabling != NULL); 15614 } 15615 15616 if (dtrace_anon.dta_enabling != NULL) { 15617 int rval; 15618 15619 /* 15620 * dtrace_enabling_retain() can only fail because we are 15621 * trying to retain more enablings than are allowed -- but 15622 * we only have one anonymous enabling, and we are guaranteed 15623 * to be allowed at least one retained enabling; we assert 15624 * that dtrace_enabling_retain() returns success. 15625 */ 15626 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 15627 ASSERT(rval == 0); 15628 15629 dtrace_enabling_dump(dtrace_anon.dta_enabling); 15630 } 15631 } 15632 15633 /* 15634 * DTrace Helper Functions 15635 */ 15636 static void 15637 dtrace_helper_trace(dtrace_helper_action_t *helper, 15638 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 15639 { 15640 uint32_t size, next, nnext, i; 15641 dtrace_helptrace_t *ent, *buffer; 15642 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 15643 15644 if ((buffer = dtrace_helptrace_buffer) == NULL) 15645 return; 15646 15647 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 15648 15649 /* 15650 * What would a tracing framework be without its own tracing 15651 * framework? (Well, a hell of a lot simpler, for starters...) 15652 */ 15653 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 15654 sizeof (uint64_t) - sizeof (uint64_t); 15655 15656 /* 15657 * Iterate until we can allocate a slot in the trace buffer. 15658 */ 15659 do { 15660 next = dtrace_helptrace_next; 15661 15662 if (next + size < dtrace_helptrace_bufsize) { 15663 nnext = next + size; 15664 } else { 15665 nnext = size; 15666 } 15667 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 15668 15669 /* 15670 * We have our slot; fill it in. 15671 */ 15672 if (nnext == size) { 15673 dtrace_helptrace_wrapped++; 15674 next = 0; 15675 } 15676 15677 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next); 15678 ent->dtht_helper = helper; 15679 ent->dtht_where = where; 15680 ent->dtht_nlocals = vstate->dtvs_nlocals; 15681 15682 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 15683 mstate->dtms_fltoffs : -1; 15684 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 15685 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 15686 15687 for (i = 0; i < vstate->dtvs_nlocals; i++) { 15688 dtrace_statvar_t *svar; 15689 15690 if ((svar = vstate->dtvs_locals[i]) == NULL) 15691 continue; 15692 15693 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 15694 ent->dtht_locals[i] = 15695 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 15696 } 15697 } 15698 15699 static uint64_t 15700 dtrace_helper(int which, dtrace_mstate_t *mstate, 15701 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 15702 { 15703 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 15704 uint64_t sarg0 = mstate->dtms_arg[0]; 15705 uint64_t sarg1 = mstate->dtms_arg[1]; 15706 uint64_t rval = 0; 15707 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 15708 dtrace_helper_action_t *helper; 15709 dtrace_vstate_t *vstate; 15710 dtrace_difo_t *pred; 15711 int i, trace = dtrace_helptrace_buffer != NULL; 15712 15713 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 15714 15715 if (helpers == NULL) 15716 return (0); 15717 15718 if ((helper = helpers->dthps_actions[which]) == NULL) 15719 return (0); 15720 15721 vstate = &helpers->dthps_vstate; 15722 mstate->dtms_arg[0] = arg0; 15723 mstate->dtms_arg[1] = arg1; 15724 15725 /* 15726 * Now iterate over each helper. If its predicate evaluates to 'true', 15727 * we'll call the corresponding actions. Note that the below calls 15728 * to dtrace_dif_emulate() may set faults in machine state. This is 15729 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 15730 * the stored DIF offset with its own (which is the desired behavior). 15731 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 15732 * from machine state; this is okay, too. 15733 */ 15734 for (; helper != NULL; helper = helper->dtha_next) { 15735 if ((pred = helper->dtha_predicate) != NULL) { 15736 if (trace) 15737 dtrace_helper_trace(helper, mstate, vstate, 0); 15738 15739 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 15740 goto next; 15741 15742 if (*flags & CPU_DTRACE_FAULT) 15743 goto err; 15744 } 15745 15746 for (i = 0; i < helper->dtha_nactions; i++) { 15747 if (trace) 15748 dtrace_helper_trace(helper, 15749 mstate, vstate, i + 1); 15750 15751 rval = dtrace_dif_emulate(helper->dtha_actions[i], 15752 mstate, vstate, state); 15753 15754 if (*flags & CPU_DTRACE_FAULT) 15755 goto err; 15756 } 15757 15758 next: 15759 if (trace) 15760 dtrace_helper_trace(helper, mstate, vstate, 15761 DTRACE_HELPTRACE_NEXT); 15762 } 15763 15764 if (trace) 15765 dtrace_helper_trace(helper, mstate, vstate, 15766 DTRACE_HELPTRACE_DONE); 15767 15768 /* 15769 * Restore the arg0 that we saved upon entry. 15770 */ 15771 mstate->dtms_arg[0] = sarg0; 15772 mstate->dtms_arg[1] = sarg1; 15773 15774 return (rval); 15775 15776 err: 15777 if (trace) 15778 dtrace_helper_trace(helper, mstate, vstate, 15779 DTRACE_HELPTRACE_ERR); 15780 15781 /* 15782 * Restore the arg0 that we saved upon entry. 15783 */ 15784 mstate->dtms_arg[0] = sarg0; 15785 mstate->dtms_arg[1] = sarg1; 15786 15787 return (0); 15788 } 15789 15790 static void 15791 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 15792 dtrace_vstate_t *vstate) 15793 { 15794 int i; 15795 15796 if (helper->dtha_predicate != NULL) 15797 dtrace_difo_release(helper->dtha_predicate, vstate); 15798 15799 for (i = 0; i < helper->dtha_nactions; i++) { 15800 ASSERT(helper->dtha_actions[i] != NULL); 15801 dtrace_difo_release(helper->dtha_actions[i], vstate); 15802 } 15803 15804 kmem_free(helper->dtha_actions, 15805 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 15806 kmem_free(helper, sizeof (dtrace_helper_action_t)); 15807 } 15808 15809 static int 15810 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen) 15811 { 15812 proc_t *p = curproc; 15813 dtrace_vstate_t *vstate; 15814 int i; 15815 15816 if (help == NULL) 15817 help = p->p_dtrace_helpers; 15818 15819 ASSERT(MUTEX_HELD(&dtrace_lock)); 15820 15821 if (help == NULL || gen > help->dthps_generation) 15822 return (EINVAL); 15823 15824 vstate = &help->dthps_vstate; 15825 15826 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15827 dtrace_helper_action_t *last = NULL, *h, *next; 15828 15829 for (h = help->dthps_actions[i]; h != NULL; h = next) { 15830 next = h->dtha_next; 15831 15832 if (h->dtha_generation == gen) { 15833 if (last != NULL) { 15834 last->dtha_next = next; 15835 } else { 15836 help->dthps_actions[i] = next; 15837 } 15838 15839 dtrace_helper_action_destroy(h, vstate); 15840 } else { 15841 last = h; 15842 } 15843 } 15844 } 15845 15846 /* 15847 * Interate until we've cleared out all helper providers with the 15848 * given generation number. 15849 */ 15850 for (;;) { 15851 dtrace_helper_provider_t *prov; 15852 15853 /* 15854 * Look for a helper provider with the right generation. We 15855 * have to start back at the beginning of the list each time 15856 * because we drop dtrace_lock. It's unlikely that we'll make 15857 * more than two passes. 15858 */ 15859 for (i = 0; i < help->dthps_nprovs; i++) { 15860 prov = help->dthps_provs[i]; 15861 15862 if (prov->dthp_generation == gen) 15863 break; 15864 } 15865 15866 /* 15867 * If there were no matches, we're done. 15868 */ 15869 if (i == help->dthps_nprovs) 15870 break; 15871 15872 /* 15873 * Move the last helper provider into this slot. 15874 */ 15875 help->dthps_nprovs--; 15876 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 15877 help->dthps_provs[help->dthps_nprovs] = NULL; 15878 15879 mutex_exit(&dtrace_lock); 15880 15881 /* 15882 * If we have a meta provider, remove this helper provider. 15883 */ 15884 mutex_enter(&dtrace_meta_lock); 15885 if (dtrace_meta_pid != NULL) { 15886 ASSERT(dtrace_deferred_pid == NULL); 15887 dtrace_helper_provider_remove(&prov->dthp_prov, 15888 p->p_pid); 15889 } 15890 mutex_exit(&dtrace_meta_lock); 15891 15892 dtrace_helper_provider_destroy(prov); 15893 15894 mutex_enter(&dtrace_lock); 15895 } 15896 15897 return (0); 15898 } 15899 15900 static int 15901 dtrace_helper_validate(dtrace_helper_action_t *helper) 15902 { 15903 int err = 0, i; 15904 dtrace_difo_t *dp; 15905 15906 if ((dp = helper->dtha_predicate) != NULL) 15907 err += dtrace_difo_validate_helper(dp); 15908 15909 for (i = 0; i < helper->dtha_nactions; i++) 15910 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 15911 15912 return (err == 0); 15913 } 15914 15915 static int 15916 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep, 15917 dtrace_helpers_t *help) 15918 { 15919 dtrace_helper_action_t *helper, *last; 15920 dtrace_actdesc_t *act; 15921 dtrace_vstate_t *vstate; 15922 dtrace_predicate_t *pred; 15923 int count = 0, nactions = 0, i; 15924 15925 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 15926 return (EINVAL); 15927 15928 last = help->dthps_actions[which]; 15929 vstate = &help->dthps_vstate; 15930 15931 for (count = 0; last != NULL; last = last->dtha_next) { 15932 count++; 15933 if (last->dtha_next == NULL) 15934 break; 15935 } 15936 15937 /* 15938 * If we already have dtrace_helper_actions_max helper actions for this 15939 * helper action type, we'll refuse to add a new one. 15940 */ 15941 if (count >= dtrace_helper_actions_max) 15942 return (ENOSPC); 15943 15944 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 15945 helper->dtha_generation = help->dthps_generation; 15946 15947 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 15948 ASSERT(pred->dtp_difo != NULL); 15949 dtrace_difo_hold(pred->dtp_difo); 15950 helper->dtha_predicate = pred->dtp_difo; 15951 } 15952 15953 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 15954 if (act->dtad_kind != DTRACEACT_DIFEXPR) 15955 goto err; 15956 15957 if (act->dtad_difo == NULL) 15958 goto err; 15959 15960 nactions++; 15961 } 15962 15963 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 15964 (helper->dtha_nactions = nactions), KM_SLEEP); 15965 15966 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 15967 dtrace_difo_hold(act->dtad_difo); 15968 helper->dtha_actions[i++] = act->dtad_difo; 15969 } 15970 15971 if (!dtrace_helper_validate(helper)) 15972 goto err; 15973 15974 if (last == NULL) { 15975 help->dthps_actions[which] = helper; 15976 } else { 15977 last->dtha_next = helper; 15978 } 15979 15980 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 15981 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 15982 dtrace_helptrace_next = 0; 15983 } 15984 15985 return (0); 15986 err: 15987 dtrace_helper_action_destroy(helper, vstate); 15988 return (EINVAL); 15989 } 15990 15991 static void 15992 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 15993 dof_helper_t *dofhp) 15994 { 15995 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 15996 15997 mutex_enter(&dtrace_meta_lock); 15998 mutex_enter(&dtrace_lock); 15999 16000 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 16001 /* 16002 * If the dtrace module is loaded but not attached, or if 16003 * there aren't isn't a meta provider registered to deal with 16004 * these provider descriptions, we need to postpone creating 16005 * the actual providers until later. 16006 */ 16007 16008 if (help->dthps_next == NULL && help->dthps_prev == NULL && 16009 dtrace_deferred_pid != help) { 16010 help->dthps_deferred = 1; 16011 help->dthps_pid = p->p_pid; 16012 help->dthps_next = dtrace_deferred_pid; 16013 help->dthps_prev = NULL; 16014 if (dtrace_deferred_pid != NULL) 16015 dtrace_deferred_pid->dthps_prev = help; 16016 dtrace_deferred_pid = help; 16017 } 16018 16019 mutex_exit(&dtrace_lock); 16020 16021 } else if (dofhp != NULL) { 16022 /* 16023 * If the dtrace module is loaded and we have a particular 16024 * helper provider description, pass that off to the 16025 * meta provider. 16026 */ 16027 16028 mutex_exit(&dtrace_lock); 16029 16030 dtrace_helper_provide(dofhp, p->p_pid); 16031 16032 } else { 16033 /* 16034 * Otherwise, just pass all the helper provider descriptions 16035 * off to the meta provider. 16036 */ 16037 16038 int i; 16039 mutex_exit(&dtrace_lock); 16040 16041 for (i = 0; i < help->dthps_nprovs; i++) { 16042 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 16043 p->p_pid); 16044 } 16045 } 16046 16047 mutex_exit(&dtrace_meta_lock); 16048 } 16049 16050 static int 16051 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen) 16052 { 16053 dtrace_helper_provider_t *hprov, **tmp_provs; 16054 uint_t tmp_maxprovs, i; 16055 16056 ASSERT(MUTEX_HELD(&dtrace_lock)); 16057 ASSERT(help != NULL); 16058 16059 /* 16060 * If we already have dtrace_helper_providers_max helper providers, 16061 * we're refuse to add a new one. 16062 */ 16063 if (help->dthps_nprovs >= dtrace_helper_providers_max) 16064 return (ENOSPC); 16065 16066 /* 16067 * Check to make sure this isn't a duplicate. 16068 */ 16069 for (i = 0; i < help->dthps_nprovs; i++) { 16070 if (dofhp->dofhp_addr == 16071 help->dthps_provs[i]->dthp_prov.dofhp_addr) 16072 return (EALREADY); 16073 } 16074 16075 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 16076 hprov->dthp_prov = *dofhp; 16077 hprov->dthp_ref = 1; 16078 hprov->dthp_generation = gen; 16079 16080 /* 16081 * Allocate a bigger table for helper providers if it's already full. 16082 */ 16083 if (help->dthps_maxprovs == help->dthps_nprovs) { 16084 tmp_maxprovs = help->dthps_maxprovs; 16085 tmp_provs = help->dthps_provs; 16086 16087 if (help->dthps_maxprovs == 0) 16088 help->dthps_maxprovs = 2; 16089 else 16090 help->dthps_maxprovs *= 2; 16091 if (help->dthps_maxprovs > dtrace_helper_providers_max) 16092 help->dthps_maxprovs = dtrace_helper_providers_max; 16093 16094 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 16095 16096 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 16097 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 16098 16099 if (tmp_provs != NULL) { 16100 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 16101 sizeof (dtrace_helper_provider_t *)); 16102 kmem_free(tmp_provs, tmp_maxprovs * 16103 sizeof (dtrace_helper_provider_t *)); 16104 } 16105 } 16106 16107 help->dthps_provs[help->dthps_nprovs] = hprov; 16108 help->dthps_nprovs++; 16109 16110 return (0); 16111 } 16112 16113 static void 16114 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 16115 { 16116 mutex_enter(&dtrace_lock); 16117 16118 if (--hprov->dthp_ref == 0) { 16119 dof_hdr_t *dof; 16120 mutex_exit(&dtrace_lock); 16121 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 16122 dtrace_dof_destroy(dof); 16123 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 16124 } else { 16125 mutex_exit(&dtrace_lock); 16126 } 16127 } 16128 16129 static int 16130 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 16131 { 16132 uintptr_t daddr = (uintptr_t)dof; 16133 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 16134 dof_provider_t *provider; 16135 dof_probe_t *probe; 16136 uint8_t *arg; 16137 char *strtab, *typestr; 16138 dof_stridx_t typeidx; 16139 size_t typesz; 16140 uint_t nprobes, j, k; 16141 16142 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 16143 16144 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 16145 dtrace_dof_error(dof, "misaligned section offset"); 16146 return (-1); 16147 } 16148 16149 /* 16150 * The section needs to be large enough to contain the DOF provider 16151 * structure appropriate for the given version. 16152 */ 16153 if (sec->dofs_size < 16154 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 16155 offsetof(dof_provider_t, dofpv_prenoffs) : 16156 sizeof (dof_provider_t))) { 16157 dtrace_dof_error(dof, "provider section too small"); 16158 return (-1); 16159 } 16160 16161 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 16162 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 16163 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 16164 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 16165 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 16166 16167 if (str_sec == NULL || prb_sec == NULL || 16168 arg_sec == NULL || off_sec == NULL) 16169 return (-1); 16170 16171 enoff_sec = NULL; 16172 16173 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 16174 provider->dofpv_prenoffs != DOF_SECT_NONE && 16175 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 16176 provider->dofpv_prenoffs)) == NULL) 16177 return (-1); 16178 16179 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 16180 16181 if (provider->dofpv_name >= str_sec->dofs_size || 16182 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 16183 dtrace_dof_error(dof, "invalid provider name"); 16184 return (-1); 16185 } 16186 16187 if (prb_sec->dofs_entsize == 0 || 16188 prb_sec->dofs_entsize > prb_sec->dofs_size) { 16189 dtrace_dof_error(dof, "invalid entry size"); 16190 return (-1); 16191 } 16192 16193 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 16194 dtrace_dof_error(dof, "misaligned entry size"); 16195 return (-1); 16196 } 16197 16198 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 16199 dtrace_dof_error(dof, "invalid entry size"); 16200 return (-1); 16201 } 16202 16203 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 16204 dtrace_dof_error(dof, "misaligned section offset"); 16205 return (-1); 16206 } 16207 16208 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 16209 dtrace_dof_error(dof, "invalid entry size"); 16210 return (-1); 16211 } 16212 16213 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 16214 16215 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 16216 16217 /* 16218 * Take a pass through the probes to check for errors. 16219 */ 16220 for (j = 0; j < nprobes; j++) { 16221 probe = (dof_probe_t *)(uintptr_t)(daddr + 16222 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 16223 16224 if (probe->dofpr_func >= str_sec->dofs_size) { 16225 dtrace_dof_error(dof, "invalid function name"); 16226 return (-1); 16227 } 16228 16229 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 16230 dtrace_dof_error(dof, "function name too long"); 16231 /* 16232 * Keep going if the function name is too long. 16233 * Unlike provider and probe names, we cannot reasonably 16234 * impose restrictions on function names, since they're 16235 * a property of the code being instrumented. We will 16236 * skip this probe in dtrace_helper_provide_one(). 16237 */ 16238 } 16239 16240 if (probe->dofpr_name >= str_sec->dofs_size || 16241 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 16242 dtrace_dof_error(dof, "invalid probe name"); 16243 return (-1); 16244 } 16245 16246 /* 16247 * The offset count must not wrap the index, and the offsets 16248 * must also not overflow the section's data. 16249 */ 16250 if (probe->dofpr_offidx + probe->dofpr_noffs < 16251 probe->dofpr_offidx || 16252 (probe->dofpr_offidx + probe->dofpr_noffs) * 16253 off_sec->dofs_entsize > off_sec->dofs_size) { 16254 dtrace_dof_error(dof, "invalid probe offset"); 16255 return (-1); 16256 } 16257 16258 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 16259 /* 16260 * If there's no is-enabled offset section, make sure 16261 * there aren't any is-enabled offsets. Otherwise 16262 * perform the same checks as for probe offsets 16263 * (immediately above). 16264 */ 16265 if (enoff_sec == NULL) { 16266 if (probe->dofpr_enoffidx != 0 || 16267 probe->dofpr_nenoffs != 0) { 16268 dtrace_dof_error(dof, "is-enabled " 16269 "offsets with null section"); 16270 return (-1); 16271 } 16272 } else if (probe->dofpr_enoffidx + 16273 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 16274 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 16275 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 16276 dtrace_dof_error(dof, "invalid is-enabled " 16277 "offset"); 16278 return (-1); 16279 } 16280 16281 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 16282 dtrace_dof_error(dof, "zero probe and " 16283 "is-enabled offsets"); 16284 return (-1); 16285 } 16286 } else if (probe->dofpr_noffs == 0) { 16287 dtrace_dof_error(dof, "zero probe offsets"); 16288 return (-1); 16289 } 16290 16291 if (probe->dofpr_argidx + probe->dofpr_xargc < 16292 probe->dofpr_argidx || 16293 (probe->dofpr_argidx + probe->dofpr_xargc) * 16294 arg_sec->dofs_entsize > arg_sec->dofs_size) { 16295 dtrace_dof_error(dof, "invalid args"); 16296 return (-1); 16297 } 16298 16299 typeidx = probe->dofpr_nargv; 16300 typestr = strtab + probe->dofpr_nargv; 16301 for (k = 0; k < probe->dofpr_nargc; k++) { 16302 if (typeidx >= str_sec->dofs_size) { 16303 dtrace_dof_error(dof, "bad " 16304 "native argument type"); 16305 return (-1); 16306 } 16307 16308 typesz = strlen(typestr) + 1; 16309 if (typesz > DTRACE_ARGTYPELEN) { 16310 dtrace_dof_error(dof, "native " 16311 "argument type too long"); 16312 return (-1); 16313 } 16314 typeidx += typesz; 16315 typestr += typesz; 16316 } 16317 16318 typeidx = probe->dofpr_xargv; 16319 typestr = strtab + probe->dofpr_xargv; 16320 for (k = 0; k < probe->dofpr_xargc; k++) { 16321 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 16322 dtrace_dof_error(dof, "bad " 16323 "native argument index"); 16324 return (-1); 16325 } 16326 16327 if (typeidx >= str_sec->dofs_size) { 16328 dtrace_dof_error(dof, "bad " 16329 "translated argument type"); 16330 return (-1); 16331 } 16332 16333 typesz = strlen(typestr) + 1; 16334 if (typesz > DTRACE_ARGTYPELEN) { 16335 dtrace_dof_error(dof, "translated argument " 16336 "type too long"); 16337 return (-1); 16338 } 16339 16340 typeidx += typesz; 16341 typestr += typesz; 16342 } 16343 } 16344 16345 return (0); 16346 } 16347 16348 static int 16349 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p) 16350 { 16351 dtrace_helpers_t *help; 16352 dtrace_vstate_t *vstate; 16353 dtrace_enabling_t *enab = NULL; 16354 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 16355 uintptr_t daddr = (uintptr_t)dof; 16356 16357 ASSERT(MUTEX_HELD(&dtrace_lock)); 16358 16359 if ((help = p->p_dtrace_helpers) == NULL) 16360 help = dtrace_helpers_create(p); 16361 16362 vstate = &help->dthps_vstate; 16363 16364 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, dhp->dofhp_addr, 16365 dhp->dofhp_dof, B_FALSE)) != 0) { 16366 dtrace_dof_destroy(dof); 16367 return (rv); 16368 } 16369 16370 /* 16371 * Look for helper providers and validate their descriptions. 16372 */ 16373 for (i = 0; i < dof->dofh_secnum; i++) { 16374 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 16375 dof->dofh_secoff + i * dof->dofh_secsize); 16376 16377 if (sec->dofs_type != DOF_SECT_PROVIDER) 16378 continue; 16379 16380 if (dtrace_helper_provider_validate(dof, sec) != 0) { 16381 dtrace_enabling_destroy(enab); 16382 dtrace_dof_destroy(dof); 16383 return (-1); 16384 } 16385 16386 nprovs++; 16387 } 16388 16389 /* 16390 * Now we need to walk through the ECB descriptions in the enabling. 16391 */ 16392 for (i = 0; i < enab->dten_ndesc; i++) { 16393 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 16394 dtrace_probedesc_t *desc = &ep->dted_probe; 16395 16396 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 16397 continue; 16398 16399 if (strcmp(desc->dtpd_mod, "helper") != 0) 16400 continue; 16401 16402 if (strcmp(desc->dtpd_func, "ustack") != 0) 16403 continue; 16404 16405 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 16406 ep, help)) != 0) { 16407 /* 16408 * Adding this helper action failed -- we are now going 16409 * to rip out the entire generation and return failure. 16410 */ 16411 (void) dtrace_helper_destroygen(help, 16412 help->dthps_generation); 16413 dtrace_enabling_destroy(enab); 16414 dtrace_dof_destroy(dof); 16415 return (-1); 16416 } 16417 16418 nhelpers++; 16419 } 16420 16421 if (nhelpers < enab->dten_ndesc) 16422 dtrace_dof_error(dof, "unmatched helpers"); 16423 16424 gen = help->dthps_generation++; 16425 dtrace_enabling_destroy(enab); 16426 16427 if (nprovs > 0) { 16428 /* 16429 * Now that this is in-kernel, we change the sense of the 16430 * members: dofhp_dof denotes the in-kernel copy of the DOF 16431 * and dofhp_addr denotes the address at user-level. 16432 */ 16433 dhp->dofhp_addr = dhp->dofhp_dof; 16434 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 16435 16436 if (dtrace_helper_provider_add(dhp, help, gen) == 0) { 16437 mutex_exit(&dtrace_lock); 16438 dtrace_helper_provider_register(p, help, dhp); 16439 mutex_enter(&dtrace_lock); 16440 16441 destroy = 0; 16442 } 16443 } 16444 16445 if (destroy) 16446 dtrace_dof_destroy(dof); 16447 16448 return (gen); 16449 } 16450 16451 static dtrace_helpers_t * 16452 dtrace_helpers_create(proc_t *p) 16453 { 16454 dtrace_helpers_t *help; 16455 16456 ASSERT(MUTEX_HELD(&dtrace_lock)); 16457 ASSERT(p->p_dtrace_helpers == NULL); 16458 16459 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 16460 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 16461 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 16462 16463 p->p_dtrace_helpers = help; 16464 dtrace_helpers++; 16465 16466 return (help); 16467 } 16468 16469 #ifdef illumos 16470 static 16471 #endif 16472 void 16473 dtrace_helpers_destroy(proc_t *p) 16474 { 16475 dtrace_helpers_t *help; 16476 dtrace_vstate_t *vstate; 16477 #ifdef illumos 16478 proc_t *p = curproc; 16479 #endif 16480 int i; 16481 16482 mutex_enter(&dtrace_lock); 16483 16484 ASSERT(p->p_dtrace_helpers != NULL); 16485 ASSERT(dtrace_helpers > 0); 16486 16487 help = p->p_dtrace_helpers; 16488 vstate = &help->dthps_vstate; 16489 16490 /* 16491 * We're now going to lose the help from this process. 16492 */ 16493 p->p_dtrace_helpers = NULL; 16494 dtrace_sync(); 16495 16496 /* 16497 * Destory the helper actions. 16498 */ 16499 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16500 dtrace_helper_action_t *h, *next; 16501 16502 for (h = help->dthps_actions[i]; h != NULL; h = next) { 16503 next = h->dtha_next; 16504 dtrace_helper_action_destroy(h, vstate); 16505 h = next; 16506 } 16507 } 16508 16509 mutex_exit(&dtrace_lock); 16510 16511 /* 16512 * Destroy the helper providers. 16513 */ 16514 if (help->dthps_maxprovs > 0) { 16515 mutex_enter(&dtrace_meta_lock); 16516 if (dtrace_meta_pid != NULL) { 16517 ASSERT(dtrace_deferred_pid == NULL); 16518 16519 for (i = 0; i < help->dthps_nprovs; i++) { 16520 dtrace_helper_provider_remove( 16521 &help->dthps_provs[i]->dthp_prov, p->p_pid); 16522 } 16523 } else { 16524 mutex_enter(&dtrace_lock); 16525 ASSERT(help->dthps_deferred == 0 || 16526 help->dthps_next != NULL || 16527 help->dthps_prev != NULL || 16528 help == dtrace_deferred_pid); 16529 16530 /* 16531 * Remove the helper from the deferred list. 16532 */ 16533 if (help->dthps_next != NULL) 16534 help->dthps_next->dthps_prev = help->dthps_prev; 16535 if (help->dthps_prev != NULL) 16536 help->dthps_prev->dthps_next = help->dthps_next; 16537 if (dtrace_deferred_pid == help) { 16538 dtrace_deferred_pid = help->dthps_next; 16539 ASSERT(help->dthps_prev == NULL); 16540 } 16541 16542 mutex_exit(&dtrace_lock); 16543 } 16544 16545 mutex_exit(&dtrace_meta_lock); 16546 16547 for (i = 0; i < help->dthps_nprovs; i++) { 16548 dtrace_helper_provider_destroy(help->dthps_provs[i]); 16549 } 16550 16551 kmem_free(help->dthps_provs, help->dthps_maxprovs * 16552 sizeof (dtrace_helper_provider_t *)); 16553 } 16554 16555 mutex_enter(&dtrace_lock); 16556 16557 dtrace_vstate_fini(&help->dthps_vstate); 16558 kmem_free(help->dthps_actions, 16559 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 16560 kmem_free(help, sizeof (dtrace_helpers_t)); 16561 16562 --dtrace_helpers; 16563 mutex_exit(&dtrace_lock); 16564 } 16565 16566 #ifdef illumos 16567 static 16568 #endif 16569 void 16570 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 16571 { 16572 dtrace_helpers_t *help, *newhelp; 16573 dtrace_helper_action_t *helper, *new, *last; 16574 dtrace_difo_t *dp; 16575 dtrace_vstate_t *vstate; 16576 int i, j, sz, hasprovs = 0; 16577 16578 mutex_enter(&dtrace_lock); 16579 ASSERT(from->p_dtrace_helpers != NULL); 16580 ASSERT(dtrace_helpers > 0); 16581 16582 help = from->p_dtrace_helpers; 16583 newhelp = dtrace_helpers_create(to); 16584 ASSERT(to->p_dtrace_helpers != NULL); 16585 16586 newhelp->dthps_generation = help->dthps_generation; 16587 vstate = &newhelp->dthps_vstate; 16588 16589 /* 16590 * Duplicate the helper actions. 16591 */ 16592 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16593 if ((helper = help->dthps_actions[i]) == NULL) 16594 continue; 16595 16596 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 16597 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 16598 KM_SLEEP); 16599 new->dtha_generation = helper->dtha_generation; 16600 16601 if ((dp = helper->dtha_predicate) != NULL) { 16602 dp = dtrace_difo_duplicate(dp, vstate); 16603 new->dtha_predicate = dp; 16604 } 16605 16606 new->dtha_nactions = helper->dtha_nactions; 16607 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 16608 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 16609 16610 for (j = 0; j < new->dtha_nactions; j++) { 16611 dtrace_difo_t *dp = helper->dtha_actions[j]; 16612 16613 ASSERT(dp != NULL); 16614 dp = dtrace_difo_duplicate(dp, vstate); 16615 new->dtha_actions[j] = dp; 16616 } 16617 16618 if (last != NULL) { 16619 last->dtha_next = new; 16620 } else { 16621 newhelp->dthps_actions[i] = new; 16622 } 16623 16624 last = new; 16625 } 16626 } 16627 16628 /* 16629 * Duplicate the helper providers and register them with the 16630 * DTrace framework. 16631 */ 16632 if (help->dthps_nprovs > 0) { 16633 newhelp->dthps_nprovs = help->dthps_nprovs; 16634 newhelp->dthps_maxprovs = help->dthps_nprovs; 16635 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 16636 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 16637 for (i = 0; i < newhelp->dthps_nprovs; i++) { 16638 newhelp->dthps_provs[i] = help->dthps_provs[i]; 16639 newhelp->dthps_provs[i]->dthp_ref++; 16640 } 16641 16642 hasprovs = 1; 16643 } 16644 16645 mutex_exit(&dtrace_lock); 16646 16647 if (hasprovs) 16648 dtrace_helper_provider_register(to, newhelp, NULL); 16649 } 16650 16651 /* 16652 * DTrace Hook Functions 16653 */ 16654 static void 16655 dtrace_module_loaded(modctl_t *ctl) 16656 { 16657 dtrace_provider_t *prv; 16658 16659 mutex_enter(&dtrace_provider_lock); 16660 #ifdef illumos 16661 mutex_enter(&mod_lock); 16662 #endif 16663 16664 #ifdef illumos 16665 ASSERT(ctl->mod_busy); 16666 #endif 16667 16668 /* 16669 * We're going to call each providers per-module provide operation 16670 * specifying only this module. 16671 */ 16672 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 16673 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 16674 16675 #ifdef illumos 16676 mutex_exit(&mod_lock); 16677 #endif 16678 mutex_exit(&dtrace_provider_lock); 16679 16680 /* 16681 * If we have any retained enablings, we need to match against them. 16682 * Enabling probes requires that cpu_lock be held, and we cannot hold 16683 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 16684 * module. (In particular, this happens when loading scheduling 16685 * classes.) So if we have any retained enablings, we need to dispatch 16686 * our task queue to do the match for us. 16687 */ 16688 mutex_enter(&dtrace_lock); 16689 16690 if (dtrace_retained == NULL) { 16691 mutex_exit(&dtrace_lock); 16692 return; 16693 } 16694 16695 (void) taskq_dispatch(dtrace_taskq, 16696 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 16697 16698 mutex_exit(&dtrace_lock); 16699 16700 /* 16701 * And now, for a little heuristic sleaze: in general, we want to 16702 * match modules as soon as they load. However, we cannot guarantee 16703 * this, because it would lead us to the lock ordering violation 16704 * outlined above. The common case, of course, is that cpu_lock is 16705 * _not_ held -- so we delay here for a clock tick, hoping that that's 16706 * long enough for the task queue to do its work. If it's not, it's 16707 * not a serious problem -- it just means that the module that we 16708 * just loaded may not be immediately instrumentable. 16709 */ 16710 delay(1); 16711 } 16712 16713 static void 16714 #ifdef illumos 16715 dtrace_module_unloaded(modctl_t *ctl) 16716 #else 16717 dtrace_module_unloaded(modctl_t *ctl, int *error) 16718 #endif 16719 { 16720 dtrace_probe_t template, *probe, *first, *next; 16721 dtrace_provider_t *prov; 16722 #ifndef illumos 16723 char modname[DTRACE_MODNAMELEN]; 16724 size_t len; 16725 #endif 16726 16727 #ifdef illumos 16728 template.dtpr_mod = ctl->mod_modname; 16729 #else 16730 /* Handle the fact that ctl->filename may end in ".ko". */ 16731 strlcpy(modname, ctl->filename, sizeof(modname)); 16732 len = strlen(ctl->filename); 16733 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 16734 modname[len - 3] = '\0'; 16735 template.dtpr_mod = modname; 16736 #endif 16737 16738 mutex_enter(&dtrace_provider_lock); 16739 #ifdef illumos 16740 mutex_enter(&mod_lock); 16741 #endif 16742 mutex_enter(&dtrace_lock); 16743 16744 #ifndef illumos 16745 if (ctl->nenabled > 0) { 16746 /* Don't allow unloads if a probe is enabled. */ 16747 mutex_exit(&dtrace_provider_lock); 16748 mutex_exit(&dtrace_lock); 16749 *error = -1; 16750 printf( 16751 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 16752 return; 16753 } 16754 #endif 16755 16756 if (dtrace_bymod == NULL) { 16757 /* 16758 * The DTrace module is loaded (obviously) but not attached; 16759 * we don't have any work to do. 16760 */ 16761 mutex_exit(&dtrace_provider_lock); 16762 #ifdef illumos 16763 mutex_exit(&mod_lock); 16764 #endif 16765 mutex_exit(&dtrace_lock); 16766 return; 16767 } 16768 16769 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 16770 probe != NULL; probe = probe->dtpr_nextmod) { 16771 if (probe->dtpr_ecb != NULL) { 16772 mutex_exit(&dtrace_provider_lock); 16773 #ifdef illumos 16774 mutex_exit(&mod_lock); 16775 #endif 16776 mutex_exit(&dtrace_lock); 16777 16778 /* 16779 * This shouldn't _actually_ be possible -- we're 16780 * unloading a module that has an enabled probe in it. 16781 * (It's normally up to the provider to make sure that 16782 * this can't happen.) However, because dtps_enable() 16783 * doesn't have a failure mode, there can be an 16784 * enable/unload race. Upshot: we don't want to 16785 * assert, but we're not going to disable the 16786 * probe, either. 16787 */ 16788 if (dtrace_err_verbose) { 16789 #ifdef illumos 16790 cmn_err(CE_WARN, "unloaded module '%s' had " 16791 "enabled probes", ctl->mod_modname); 16792 #else 16793 cmn_err(CE_WARN, "unloaded module '%s' had " 16794 "enabled probes", modname); 16795 #endif 16796 } 16797 16798 return; 16799 } 16800 } 16801 16802 probe = first; 16803 16804 for (first = NULL; probe != NULL; probe = next) { 16805 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 16806 16807 dtrace_probes[probe->dtpr_id - 1] = NULL; 16808 16809 next = probe->dtpr_nextmod; 16810 dtrace_hash_remove(dtrace_bymod, probe); 16811 dtrace_hash_remove(dtrace_byfunc, probe); 16812 dtrace_hash_remove(dtrace_byname, probe); 16813 16814 if (first == NULL) { 16815 first = probe; 16816 probe->dtpr_nextmod = NULL; 16817 } else { 16818 probe->dtpr_nextmod = first; 16819 first = probe; 16820 } 16821 } 16822 16823 /* 16824 * We've removed all of the module's probes from the hash chains and 16825 * from the probe array. Now issue a dtrace_sync() to be sure that 16826 * everyone has cleared out from any probe array processing. 16827 */ 16828 dtrace_sync(); 16829 16830 for (probe = first; probe != NULL; probe = first) { 16831 first = probe->dtpr_nextmod; 16832 prov = probe->dtpr_provider; 16833 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 16834 probe->dtpr_arg); 16835 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 16836 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 16837 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 16838 #ifdef illumos 16839 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 16840 #else 16841 free_unr(dtrace_arena, probe->dtpr_id); 16842 #endif 16843 kmem_free(probe, sizeof (dtrace_probe_t)); 16844 } 16845 16846 mutex_exit(&dtrace_lock); 16847 #ifdef illumos 16848 mutex_exit(&mod_lock); 16849 #endif 16850 mutex_exit(&dtrace_provider_lock); 16851 } 16852 16853 #ifndef illumos 16854 static void 16855 dtrace_kld_load(void *arg __unused, linker_file_t lf) 16856 { 16857 16858 dtrace_module_loaded(lf); 16859 } 16860 16861 static void 16862 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 16863 { 16864 16865 if (*error != 0) 16866 /* We already have an error, so don't do anything. */ 16867 return; 16868 dtrace_module_unloaded(lf, error); 16869 } 16870 #endif 16871 16872 #ifdef illumos 16873 static void 16874 dtrace_suspend(void) 16875 { 16876 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 16877 } 16878 16879 static void 16880 dtrace_resume(void) 16881 { 16882 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 16883 } 16884 #endif 16885 16886 static int 16887 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 16888 { 16889 ASSERT(MUTEX_HELD(&cpu_lock)); 16890 mutex_enter(&dtrace_lock); 16891 16892 switch (what) { 16893 case CPU_CONFIG: { 16894 dtrace_state_t *state; 16895 dtrace_optval_t *opt, rs, c; 16896 16897 /* 16898 * For now, we only allocate a new buffer for anonymous state. 16899 */ 16900 if ((state = dtrace_anon.dta_state) == NULL) 16901 break; 16902 16903 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 16904 break; 16905 16906 opt = state->dts_options; 16907 c = opt[DTRACEOPT_CPU]; 16908 16909 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 16910 break; 16911 16912 /* 16913 * Regardless of what the actual policy is, we're going to 16914 * temporarily set our resize policy to be manual. We're 16915 * also going to temporarily set our CPU option to denote 16916 * the newly configured CPU. 16917 */ 16918 rs = opt[DTRACEOPT_BUFRESIZE]; 16919 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 16920 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 16921 16922 (void) dtrace_state_buffers(state); 16923 16924 opt[DTRACEOPT_BUFRESIZE] = rs; 16925 opt[DTRACEOPT_CPU] = c; 16926 16927 break; 16928 } 16929 16930 case CPU_UNCONFIG: 16931 /* 16932 * We don't free the buffer in the CPU_UNCONFIG case. (The 16933 * buffer will be freed when the consumer exits.) 16934 */ 16935 break; 16936 16937 default: 16938 break; 16939 } 16940 16941 mutex_exit(&dtrace_lock); 16942 return (0); 16943 } 16944 16945 #ifdef illumos 16946 static void 16947 dtrace_cpu_setup_initial(processorid_t cpu) 16948 { 16949 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 16950 } 16951 #endif 16952 16953 static void 16954 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 16955 { 16956 if (dtrace_toxranges >= dtrace_toxranges_max) { 16957 int osize, nsize; 16958 dtrace_toxrange_t *range; 16959 16960 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16961 16962 if (osize == 0) { 16963 ASSERT(dtrace_toxrange == NULL); 16964 ASSERT(dtrace_toxranges_max == 0); 16965 dtrace_toxranges_max = 1; 16966 } else { 16967 dtrace_toxranges_max <<= 1; 16968 } 16969 16970 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16971 range = kmem_zalloc(nsize, KM_SLEEP); 16972 16973 if (dtrace_toxrange != NULL) { 16974 ASSERT(osize != 0); 16975 bcopy(dtrace_toxrange, range, osize); 16976 kmem_free(dtrace_toxrange, osize); 16977 } 16978 16979 dtrace_toxrange = range; 16980 } 16981 16982 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 16983 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 16984 16985 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 16986 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 16987 dtrace_toxranges++; 16988 } 16989 16990 static void 16991 dtrace_getf_barrier() 16992 { 16993 #ifdef illumos 16994 /* 16995 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 16996 * that contain calls to getf(), this routine will be called on every 16997 * closef() before either the underlying vnode is released or the 16998 * file_t itself is freed. By the time we are here, it is essential 16999 * that the file_t can no longer be accessed from a call to getf() 17000 * in probe context -- that assures that a dtrace_sync() can be used 17001 * to clear out any enablings referring to the old structures. 17002 */ 17003 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 17004 kcred->cr_zone->zone_dtrace_getf != 0) 17005 dtrace_sync(); 17006 #endif 17007 } 17008 17009 /* 17010 * DTrace Driver Cookbook Functions 17011 */ 17012 #ifdef illumos 17013 /*ARGSUSED*/ 17014 static int 17015 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 17016 { 17017 dtrace_provider_id_t id; 17018 dtrace_state_t *state = NULL; 17019 dtrace_enabling_t *enab; 17020 17021 mutex_enter(&cpu_lock); 17022 mutex_enter(&dtrace_provider_lock); 17023 mutex_enter(&dtrace_lock); 17024 17025 if (ddi_soft_state_init(&dtrace_softstate, 17026 sizeof (dtrace_state_t), 0) != 0) { 17027 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 17028 mutex_exit(&cpu_lock); 17029 mutex_exit(&dtrace_provider_lock); 17030 mutex_exit(&dtrace_lock); 17031 return (DDI_FAILURE); 17032 } 17033 17034 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 17035 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 17036 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 17037 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 17038 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 17039 ddi_remove_minor_node(devi, NULL); 17040 ddi_soft_state_fini(&dtrace_softstate); 17041 mutex_exit(&cpu_lock); 17042 mutex_exit(&dtrace_provider_lock); 17043 mutex_exit(&dtrace_lock); 17044 return (DDI_FAILURE); 17045 } 17046 17047 ddi_report_dev(devi); 17048 dtrace_devi = devi; 17049 17050 dtrace_modload = dtrace_module_loaded; 17051 dtrace_modunload = dtrace_module_unloaded; 17052 dtrace_cpu_init = dtrace_cpu_setup_initial; 17053 dtrace_helpers_cleanup = dtrace_helpers_destroy; 17054 dtrace_helpers_fork = dtrace_helpers_duplicate; 17055 dtrace_cpustart_init = dtrace_suspend; 17056 dtrace_cpustart_fini = dtrace_resume; 17057 dtrace_debugger_init = dtrace_suspend; 17058 dtrace_debugger_fini = dtrace_resume; 17059 17060 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 17061 17062 ASSERT(MUTEX_HELD(&cpu_lock)); 17063 17064 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 17065 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 17066 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 17067 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 17068 VM_SLEEP | VMC_IDENTIFIER); 17069 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 17070 1, INT_MAX, 0); 17071 17072 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 17073 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 17074 NULL, NULL, NULL, NULL, NULL, 0); 17075 17076 ASSERT(MUTEX_HELD(&cpu_lock)); 17077 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 17078 offsetof(dtrace_probe_t, dtpr_nextmod), 17079 offsetof(dtrace_probe_t, dtpr_prevmod)); 17080 17081 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 17082 offsetof(dtrace_probe_t, dtpr_nextfunc), 17083 offsetof(dtrace_probe_t, dtpr_prevfunc)); 17084 17085 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 17086 offsetof(dtrace_probe_t, dtpr_nextname), 17087 offsetof(dtrace_probe_t, dtpr_prevname)); 17088 17089 if (dtrace_retain_max < 1) { 17090 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 17091 "setting to 1", dtrace_retain_max); 17092 dtrace_retain_max = 1; 17093 } 17094 17095 /* 17096 * Now discover our toxic ranges. 17097 */ 17098 dtrace_toxic_ranges(dtrace_toxrange_add); 17099 17100 /* 17101 * Before we register ourselves as a provider to our own framework, 17102 * we would like to assert that dtrace_provider is NULL -- but that's 17103 * not true if we were loaded as a dependency of a DTrace provider. 17104 * Once we've registered, we can assert that dtrace_provider is our 17105 * pseudo provider. 17106 */ 17107 (void) dtrace_register("dtrace", &dtrace_provider_attr, 17108 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 17109 17110 ASSERT(dtrace_provider != NULL); 17111 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 17112 17113 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 17114 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 17115 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 17116 dtrace_provider, NULL, NULL, "END", 0, NULL); 17117 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 17118 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 17119 17120 dtrace_anon_property(); 17121 mutex_exit(&cpu_lock); 17122 17123 /* 17124 * If there are already providers, we must ask them to provide their 17125 * probes, and then match any anonymous enabling against them. Note 17126 * that there should be no other retained enablings at this time: 17127 * the only retained enablings at this time should be the anonymous 17128 * enabling. 17129 */ 17130 if (dtrace_anon.dta_enabling != NULL) { 17131 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 17132 17133 dtrace_enabling_provide(NULL); 17134 state = dtrace_anon.dta_state; 17135 17136 /* 17137 * We couldn't hold cpu_lock across the above call to 17138 * dtrace_enabling_provide(), but we must hold it to actually 17139 * enable the probes. We have to drop all of our locks, pick 17140 * up cpu_lock, and regain our locks before matching the 17141 * retained anonymous enabling. 17142 */ 17143 mutex_exit(&dtrace_lock); 17144 mutex_exit(&dtrace_provider_lock); 17145 17146 mutex_enter(&cpu_lock); 17147 mutex_enter(&dtrace_provider_lock); 17148 mutex_enter(&dtrace_lock); 17149 17150 if ((enab = dtrace_anon.dta_enabling) != NULL) 17151 (void) dtrace_enabling_match(enab, NULL); 17152 17153 mutex_exit(&cpu_lock); 17154 } 17155 17156 mutex_exit(&dtrace_lock); 17157 mutex_exit(&dtrace_provider_lock); 17158 17159 if (state != NULL) { 17160 /* 17161 * If we created any anonymous state, set it going now. 17162 */ 17163 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 17164 } 17165 17166 return (DDI_SUCCESS); 17167 } 17168 #endif /* illumos */ 17169 17170 #ifndef illumos 17171 static void dtrace_dtr(void *); 17172 #endif 17173 17174 /*ARGSUSED*/ 17175 static int 17176 #ifdef illumos 17177 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 17178 #else 17179 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 17180 #endif 17181 { 17182 dtrace_state_t *state; 17183 uint32_t priv; 17184 uid_t uid; 17185 zoneid_t zoneid; 17186 17187 #ifdef illumos 17188 if (getminor(*devp) == DTRACEMNRN_HELPER) 17189 return (0); 17190 17191 /* 17192 * If this wasn't an open with the "helper" minor, then it must be 17193 * the "dtrace" minor. 17194 */ 17195 if (getminor(*devp) == DTRACEMNRN_DTRACE) 17196 return (ENXIO); 17197 #else 17198 cred_t *cred_p = NULL; 17199 cred_p = dev->si_cred; 17200 17201 /* 17202 * If no DTRACE_PRIV_* bits are set in the credential, then the 17203 * caller lacks sufficient permission to do anything with DTrace. 17204 */ 17205 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 17206 if (priv == DTRACE_PRIV_NONE) { 17207 #endif 17208 17209 return (EACCES); 17210 } 17211 17212 /* 17213 * Ask all providers to provide all their probes. 17214 */ 17215 mutex_enter(&dtrace_provider_lock); 17216 dtrace_probe_provide(NULL, NULL); 17217 mutex_exit(&dtrace_provider_lock); 17218 17219 mutex_enter(&cpu_lock); 17220 mutex_enter(&dtrace_lock); 17221 dtrace_opens++; 17222 dtrace_membar_producer(); 17223 17224 #ifdef illumos 17225 /* 17226 * If the kernel debugger is active (that is, if the kernel debugger 17227 * modified text in some way), we won't allow the open. 17228 */ 17229 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 17230 dtrace_opens--; 17231 mutex_exit(&cpu_lock); 17232 mutex_exit(&dtrace_lock); 17233 return (EBUSY); 17234 } 17235 17236 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) { 17237 /* 17238 * If DTrace helper tracing is enabled, we need to allocate the 17239 * trace buffer and initialize the values. 17240 */ 17241 dtrace_helptrace_buffer = 17242 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 17243 dtrace_helptrace_next = 0; 17244 dtrace_helptrace_wrapped = 0; 17245 dtrace_helptrace_enable = 0; 17246 } 17247 17248 state = dtrace_state_create(devp, cred_p); 17249 #else 17250 state = dtrace_state_create(dev, NULL); 17251 devfs_set_cdevpriv(state, dtrace_dtr); 17252 #endif 17253 17254 mutex_exit(&cpu_lock); 17255 17256 if (state == NULL) { 17257 #ifdef illumos 17258 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17259 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17260 #else 17261 --dtrace_opens; 17262 #endif 17263 mutex_exit(&dtrace_lock); 17264 return (EAGAIN); 17265 } 17266 17267 mutex_exit(&dtrace_lock); 17268 17269 return (0); 17270 } 17271 17272 /*ARGSUSED*/ 17273 #ifdef illumos 17274 static int 17275 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 17276 #else 17277 static void 17278 dtrace_dtr(void *data) 17279 #endif 17280 { 17281 #ifdef illumos 17282 minor_t minor = getminor(dev); 17283 dtrace_state_t *state; 17284 #endif 17285 dtrace_helptrace_t *buf = NULL; 17286 17287 #ifdef illumos 17288 if (minor == DTRACEMNRN_HELPER) 17289 return (0); 17290 17291 state = ddi_get_soft_state(dtrace_softstate, minor); 17292 #else 17293 dtrace_state_t *state = data; 17294 #endif 17295 17296 mutex_enter(&cpu_lock); 17297 mutex_enter(&dtrace_lock); 17298 17299 #ifdef illumos 17300 if (state->dts_anon) 17301 #else 17302 if (state != NULL && state->dts_anon) 17303 #endif 17304 { 17305 /* 17306 * There is anonymous state. Destroy that first. 17307 */ 17308 ASSERT(dtrace_anon.dta_state == NULL); 17309 dtrace_state_destroy(state->dts_anon); 17310 } 17311 17312 if (dtrace_helptrace_disable) { 17313 /* 17314 * If we have been told to disable helper tracing, set the 17315 * buffer to NULL before calling into dtrace_state_destroy(); 17316 * we take advantage of its dtrace_sync() to know that no 17317 * CPU is in probe context with enabled helper tracing 17318 * after it returns. 17319 */ 17320 buf = dtrace_helptrace_buffer; 17321 dtrace_helptrace_buffer = NULL; 17322 } 17323 17324 #ifdef illumos 17325 dtrace_state_destroy(state); 17326 #else 17327 if (state != NULL) { 17328 dtrace_state_destroy(state); 17329 kmem_free(state, 0); 17330 } 17331 #endif 17332 ASSERT(dtrace_opens > 0); 17333 17334 #ifdef illumos 17335 /* 17336 * Only relinquish control of the kernel debugger interface when there 17337 * are no consumers and no anonymous enablings. 17338 */ 17339 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17340 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17341 #else 17342 --dtrace_opens; 17343 #endif 17344 17345 if (buf != NULL) { 17346 kmem_free(buf, dtrace_helptrace_bufsize); 17347 dtrace_helptrace_disable = 0; 17348 } 17349 17350 mutex_exit(&dtrace_lock); 17351 mutex_exit(&cpu_lock); 17352 17353 #ifdef illumos 17354 return (0); 17355 #endif 17356 } 17357 17358 #ifdef illumos 17359 /*ARGSUSED*/ 17360 static int 17361 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 17362 { 17363 int rval; 17364 dof_helper_t help, *dhp = NULL; 17365 17366 switch (cmd) { 17367 case DTRACEHIOC_ADDDOF: 17368 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 17369 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 17370 return (EFAULT); 17371 } 17372 17373 dhp = &help; 17374 arg = (intptr_t)help.dofhp_dof; 17375 /*FALLTHROUGH*/ 17376 17377 case DTRACEHIOC_ADD: { 17378 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 17379 17380 if (dof == NULL) 17381 return (rval); 17382 17383 mutex_enter(&dtrace_lock); 17384 17385 /* 17386 * dtrace_helper_slurp() takes responsibility for the dof -- 17387 * it may free it now or it may save it and free it later. 17388 */ 17389 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 17390 *rv = rval; 17391 rval = 0; 17392 } else { 17393 rval = EINVAL; 17394 } 17395 17396 mutex_exit(&dtrace_lock); 17397 return (rval); 17398 } 17399 17400 case DTRACEHIOC_REMOVE: { 17401 mutex_enter(&dtrace_lock); 17402 rval = dtrace_helper_destroygen(NULL, arg); 17403 mutex_exit(&dtrace_lock); 17404 17405 return (rval); 17406 } 17407 17408 default: 17409 break; 17410 } 17411 17412 return (ENOTTY); 17413 } 17414 17415 /*ARGSUSED*/ 17416 static int 17417 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 17418 { 17419 minor_t minor = getminor(dev); 17420 dtrace_state_t *state; 17421 int rval; 17422 17423 if (minor == DTRACEMNRN_HELPER) 17424 return (dtrace_ioctl_helper(cmd, arg, rv)); 17425 17426 state = ddi_get_soft_state(dtrace_softstate, minor); 17427 17428 if (state->dts_anon) { 17429 ASSERT(dtrace_anon.dta_state == NULL); 17430 state = state->dts_anon; 17431 } 17432 17433 switch (cmd) { 17434 case DTRACEIOC_PROVIDER: { 17435 dtrace_providerdesc_t pvd; 17436 dtrace_provider_t *pvp; 17437 17438 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 17439 return (EFAULT); 17440 17441 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 17442 mutex_enter(&dtrace_provider_lock); 17443 17444 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 17445 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 17446 break; 17447 } 17448 17449 mutex_exit(&dtrace_provider_lock); 17450 17451 if (pvp == NULL) 17452 return (ESRCH); 17453 17454 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 17455 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 17456 17457 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 17458 return (EFAULT); 17459 17460 return (0); 17461 } 17462 17463 case DTRACEIOC_EPROBE: { 17464 dtrace_eprobedesc_t epdesc; 17465 dtrace_ecb_t *ecb; 17466 dtrace_action_t *act; 17467 void *buf; 17468 size_t size; 17469 uintptr_t dest; 17470 int nrecs; 17471 17472 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 17473 return (EFAULT); 17474 17475 mutex_enter(&dtrace_lock); 17476 17477 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 17478 mutex_exit(&dtrace_lock); 17479 return (EINVAL); 17480 } 17481 17482 if (ecb->dte_probe == NULL) { 17483 mutex_exit(&dtrace_lock); 17484 return (EINVAL); 17485 } 17486 17487 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 17488 epdesc.dtepd_uarg = ecb->dte_uarg; 17489 epdesc.dtepd_size = ecb->dte_size; 17490 17491 nrecs = epdesc.dtepd_nrecs; 17492 epdesc.dtepd_nrecs = 0; 17493 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17494 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17495 continue; 17496 17497 epdesc.dtepd_nrecs++; 17498 } 17499 17500 /* 17501 * Now that we have the size, we need to allocate a temporary 17502 * buffer in which to store the complete description. We need 17503 * the temporary buffer to be able to drop dtrace_lock() 17504 * across the copyout(), below. 17505 */ 17506 size = sizeof (dtrace_eprobedesc_t) + 17507 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 17508 17509 buf = kmem_alloc(size, KM_SLEEP); 17510 dest = (uintptr_t)buf; 17511 17512 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 17513 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 17514 17515 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17516 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17517 continue; 17518 17519 if (nrecs-- == 0) 17520 break; 17521 17522 bcopy(&act->dta_rec, (void *)dest, 17523 sizeof (dtrace_recdesc_t)); 17524 dest += sizeof (dtrace_recdesc_t); 17525 } 17526 17527 mutex_exit(&dtrace_lock); 17528 17529 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17530 kmem_free(buf, size); 17531 return (EFAULT); 17532 } 17533 17534 kmem_free(buf, size); 17535 return (0); 17536 } 17537 17538 case DTRACEIOC_AGGDESC: { 17539 dtrace_aggdesc_t aggdesc; 17540 dtrace_action_t *act; 17541 dtrace_aggregation_t *agg; 17542 int nrecs; 17543 uint32_t offs; 17544 dtrace_recdesc_t *lrec; 17545 void *buf; 17546 size_t size; 17547 uintptr_t dest; 17548 17549 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 17550 return (EFAULT); 17551 17552 mutex_enter(&dtrace_lock); 17553 17554 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 17555 mutex_exit(&dtrace_lock); 17556 return (EINVAL); 17557 } 17558 17559 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 17560 17561 nrecs = aggdesc.dtagd_nrecs; 17562 aggdesc.dtagd_nrecs = 0; 17563 17564 offs = agg->dtag_base; 17565 lrec = &agg->dtag_action.dta_rec; 17566 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 17567 17568 for (act = agg->dtag_first; ; act = act->dta_next) { 17569 ASSERT(act->dta_intuple || 17570 DTRACEACT_ISAGG(act->dta_kind)); 17571 17572 /* 17573 * If this action has a record size of zero, it 17574 * denotes an argument to the aggregating action. 17575 * Because the presence of this record doesn't (or 17576 * shouldn't) affect the way the data is interpreted, 17577 * we don't copy it out to save user-level the 17578 * confusion of dealing with a zero-length record. 17579 */ 17580 if (act->dta_rec.dtrd_size == 0) { 17581 ASSERT(agg->dtag_hasarg); 17582 continue; 17583 } 17584 17585 aggdesc.dtagd_nrecs++; 17586 17587 if (act == &agg->dtag_action) 17588 break; 17589 } 17590 17591 /* 17592 * Now that we have the size, we need to allocate a temporary 17593 * buffer in which to store the complete description. We need 17594 * the temporary buffer to be able to drop dtrace_lock() 17595 * across the copyout(), below. 17596 */ 17597 size = sizeof (dtrace_aggdesc_t) + 17598 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 17599 17600 buf = kmem_alloc(size, KM_SLEEP); 17601 dest = (uintptr_t)buf; 17602 17603 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 17604 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 17605 17606 for (act = agg->dtag_first; ; act = act->dta_next) { 17607 dtrace_recdesc_t rec = act->dta_rec; 17608 17609 /* 17610 * See the comment in the above loop for why we pass 17611 * over zero-length records. 17612 */ 17613 if (rec.dtrd_size == 0) { 17614 ASSERT(agg->dtag_hasarg); 17615 continue; 17616 } 17617 17618 if (nrecs-- == 0) 17619 break; 17620 17621 rec.dtrd_offset -= offs; 17622 bcopy(&rec, (void *)dest, sizeof (rec)); 17623 dest += sizeof (dtrace_recdesc_t); 17624 17625 if (act == &agg->dtag_action) 17626 break; 17627 } 17628 17629 mutex_exit(&dtrace_lock); 17630 17631 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17632 kmem_free(buf, size); 17633 return (EFAULT); 17634 } 17635 17636 kmem_free(buf, size); 17637 return (0); 17638 } 17639 17640 case DTRACEIOC_ENABLE: { 17641 dof_hdr_t *dof; 17642 dtrace_enabling_t *enab = NULL; 17643 dtrace_vstate_t *vstate; 17644 int err = 0; 17645 17646 *rv = 0; 17647 17648 /* 17649 * If a NULL argument has been passed, we take this as our 17650 * cue to reevaluate our enablings. 17651 */ 17652 if (arg == NULL) { 17653 dtrace_enabling_matchall(); 17654 17655 return (0); 17656 } 17657 17658 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 17659 return (rval); 17660 17661 mutex_enter(&cpu_lock); 17662 mutex_enter(&dtrace_lock); 17663 vstate = &state->dts_vstate; 17664 17665 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 17666 mutex_exit(&dtrace_lock); 17667 mutex_exit(&cpu_lock); 17668 dtrace_dof_destroy(dof); 17669 return (EBUSY); 17670 } 17671 17672 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 17673 mutex_exit(&dtrace_lock); 17674 mutex_exit(&cpu_lock); 17675 dtrace_dof_destroy(dof); 17676 return (EINVAL); 17677 } 17678 17679 if ((rval = dtrace_dof_options(dof, state)) != 0) { 17680 dtrace_enabling_destroy(enab); 17681 mutex_exit(&dtrace_lock); 17682 mutex_exit(&cpu_lock); 17683 dtrace_dof_destroy(dof); 17684 return (rval); 17685 } 17686 17687 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 17688 err = dtrace_enabling_retain(enab); 17689 } else { 17690 dtrace_enabling_destroy(enab); 17691 } 17692 17693 mutex_exit(&cpu_lock); 17694 mutex_exit(&dtrace_lock); 17695 dtrace_dof_destroy(dof); 17696 17697 return (err); 17698 } 17699 17700 case DTRACEIOC_REPLICATE: { 17701 dtrace_repldesc_t desc; 17702 dtrace_probedesc_t *match = &desc.dtrpd_match; 17703 dtrace_probedesc_t *create = &desc.dtrpd_create; 17704 int err; 17705 17706 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17707 return (EFAULT); 17708 17709 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17710 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17711 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17712 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17713 17714 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17715 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17716 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17717 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17718 17719 mutex_enter(&dtrace_lock); 17720 err = dtrace_enabling_replicate(state, match, create); 17721 mutex_exit(&dtrace_lock); 17722 17723 return (err); 17724 } 17725 17726 case DTRACEIOC_PROBEMATCH: 17727 case DTRACEIOC_PROBES: { 17728 dtrace_probe_t *probe = NULL; 17729 dtrace_probedesc_t desc; 17730 dtrace_probekey_t pkey; 17731 dtrace_id_t i; 17732 int m = 0; 17733 uint32_t priv; 17734 uid_t uid; 17735 zoneid_t zoneid; 17736 17737 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17738 return (EFAULT); 17739 17740 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17741 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17742 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17743 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17744 17745 /* 17746 * Before we attempt to match this probe, we want to give 17747 * all providers the opportunity to provide it. 17748 */ 17749 if (desc.dtpd_id == DTRACE_IDNONE) { 17750 mutex_enter(&dtrace_provider_lock); 17751 dtrace_probe_provide(&desc, NULL); 17752 mutex_exit(&dtrace_provider_lock); 17753 desc.dtpd_id++; 17754 } 17755 17756 if (cmd == DTRACEIOC_PROBEMATCH) { 17757 dtrace_probekey(&desc, &pkey); 17758 pkey.dtpk_id = DTRACE_IDNONE; 17759 } 17760 17761 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 17762 17763 mutex_enter(&dtrace_lock); 17764 17765 if (cmd == DTRACEIOC_PROBEMATCH) { 17766 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17767 if ((probe = dtrace_probes[i - 1]) != NULL && 17768 (m = dtrace_match_probe(probe, &pkey, 17769 priv, uid, zoneid)) != 0) 17770 break; 17771 } 17772 17773 if (m < 0) { 17774 mutex_exit(&dtrace_lock); 17775 return (EINVAL); 17776 } 17777 17778 } else { 17779 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17780 if ((probe = dtrace_probes[i - 1]) != NULL && 17781 dtrace_match_priv(probe, priv, uid, zoneid)) 17782 break; 17783 } 17784 } 17785 17786 if (probe == NULL) { 17787 mutex_exit(&dtrace_lock); 17788 return (ESRCH); 17789 } 17790 17791 dtrace_probe_description(probe, &desc); 17792 mutex_exit(&dtrace_lock); 17793 17794 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17795 return (EFAULT); 17796 17797 return (0); 17798 } 17799 17800 case DTRACEIOC_PROBEARG: { 17801 dtrace_argdesc_t desc; 17802 dtrace_probe_t *probe; 17803 dtrace_provider_t *prov; 17804 17805 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17806 return (EFAULT); 17807 17808 if (desc.dtargd_id == DTRACE_IDNONE) 17809 return (EINVAL); 17810 17811 if (desc.dtargd_ndx == DTRACE_ARGNONE) 17812 return (EINVAL); 17813 17814 mutex_enter(&dtrace_provider_lock); 17815 mutex_enter(&mod_lock); 17816 mutex_enter(&dtrace_lock); 17817 17818 if (desc.dtargd_id > dtrace_nprobes) { 17819 mutex_exit(&dtrace_lock); 17820 mutex_exit(&mod_lock); 17821 mutex_exit(&dtrace_provider_lock); 17822 return (EINVAL); 17823 } 17824 17825 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 17826 mutex_exit(&dtrace_lock); 17827 mutex_exit(&mod_lock); 17828 mutex_exit(&dtrace_provider_lock); 17829 return (EINVAL); 17830 } 17831 17832 mutex_exit(&dtrace_lock); 17833 17834 prov = probe->dtpr_provider; 17835 17836 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 17837 /* 17838 * There isn't any typed information for this probe. 17839 * Set the argument number to DTRACE_ARGNONE. 17840 */ 17841 desc.dtargd_ndx = DTRACE_ARGNONE; 17842 } else { 17843 desc.dtargd_native[0] = '\0'; 17844 desc.dtargd_xlate[0] = '\0'; 17845 desc.dtargd_mapping = desc.dtargd_ndx; 17846 17847 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 17848 probe->dtpr_id, probe->dtpr_arg, &desc); 17849 } 17850 17851 mutex_exit(&mod_lock); 17852 mutex_exit(&dtrace_provider_lock); 17853 17854 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17855 return (EFAULT); 17856 17857 return (0); 17858 } 17859 17860 case DTRACEIOC_GO: { 17861 processorid_t cpuid; 17862 rval = dtrace_state_go(state, &cpuid); 17863 17864 if (rval != 0) 17865 return (rval); 17866 17867 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17868 return (EFAULT); 17869 17870 return (0); 17871 } 17872 17873 case DTRACEIOC_STOP: { 17874 processorid_t cpuid; 17875 17876 mutex_enter(&dtrace_lock); 17877 rval = dtrace_state_stop(state, &cpuid); 17878 mutex_exit(&dtrace_lock); 17879 17880 if (rval != 0) 17881 return (rval); 17882 17883 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17884 return (EFAULT); 17885 17886 return (0); 17887 } 17888 17889 case DTRACEIOC_DOFGET: { 17890 dof_hdr_t hdr, *dof; 17891 uint64_t len; 17892 17893 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 17894 return (EFAULT); 17895 17896 mutex_enter(&dtrace_lock); 17897 dof = dtrace_dof_create(state); 17898 mutex_exit(&dtrace_lock); 17899 17900 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 17901 rval = copyout(dof, (void *)arg, len); 17902 dtrace_dof_destroy(dof); 17903 17904 return (rval == 0 ? 0 : EFAULT); 17905 } 17906 17907 case DTRACEIOC_AGGSNAP: 17908 case DTRACEIOC_BUFSNAP: { 17909 dtrace_bufdesc_t desc; 17910 caddr_t cached; 17911 dtrace_buffer_t *buf; 17912 17913 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17914 return (EFAULT); 17915 17916 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 17917 return (EINVAL); 17918 17919 mutex_enter(&dtrace_lock); 17920 17921 if (cmd == DTRACEIOC_BUFSNAP) { 17922 buf = &state->dts_buffer[desc.dtbd_cpu]; 17923 } else { 17924 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 17925 } 17926 17927 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 17928 size_t sz = buf->dtb_offset; 17929 17930 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 17931 mutex_exit(&dtrace_lock); 17932 return (EBUSY); 17933 } 17934 17935 /* 17936 * If this buffer has already been consumed, we're 17937 * going to indicate that there's nothing left here 17938 * to consume. 17939 */ 17940 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 17941 mutex_exit(&dtrace_lock); 17942 17943 desc.dtbd_size = 0; 17944 desc.dtbd_drops = 0; 17945 desc.dtbd_errors = 0; 17946 desc.dtbd_oldest = 0; 17947 sz = sizeof (desc); 17948 17949 if (copyout(&desc, (void *)arg, sz) != 0) 17950 return (EFAULT); 17951 17952 return (0); 17953 } 17954 17955 /* 17956 * If this is a ring buffer that has wrapped, we want 17957 * to copy the whole thing out. 17958 */ 17959 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 17960 dtrace_buffer_polish(buf); 17961 sz = buf->dtb_size; 17962 } 17963 17964 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 17965 mutex_exit(&dtrace_lock); 17966 return (EFAULT); 17967 } 17968 17969 desc.dtbd_size = sz; 17970 desc.dtbd_drops = buf->dtb_drops; 17971 desc.dtbd_errors = buf->dtb_errors; 17972 desc.dtbd_oldest = buf->dtb_xamot_offset; 17973 desc.dtbd_timestamp = dtrace_gethrtime(); 17974 17975 mutex_exit(&dtrace_lock); 17976 17977 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17978 return (EFAULT); 17979 17980 buf->dtb_flags |= DTRACEBUF_CONSUMED; 17981 17982 return (0); 17983 } 17984 17985 if (buf->dtb_tomax == NULL) { 17986 ASSERT(buf->dtb_xamot == NULL); 17987 mutex_exit(&dtrace_lock); 17988 return (ENOENT); 17989 } 17990 17991 cached = buf->dtb_tomax; 17992 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 17993 17994 dtrace_xcall(desc.dtbd_cpu, 17995 (dtrace_xcall_t)dtrace_buffer_switch, buf); 17996 17997 state->dts_errors += buf->dtb_xamot_errors; 17998 17999 /* 18000 * If the buffers did not actually switch, then the cross call 18001 * did not take place -- presumably because the given CPU is 18002 * not in the ready set. If this is the case, we'll return 18003 * ENOENT. 18004 */ 18005 if (buf->dtb_tomax == cached) { 18006 ASSERT(buf->dtb_xamot != cached); 18007 mutex_exit(&dtrace_lock); 18008 return (ENOENT); 18009 } 18010 18011 ASSERT(cached == buf->dtb_xamot); 18012 18013 /* 18014 * We have our snapshot; now copy it out. 18015 */ 18016 if (copyout(buf->dtb_xamot, desc.dtbd_data, 18017 buf->dtb_xamot_offset) != 0) { 18018 mutex_exit(&dtrace_lock); 18019 return (EFAULT); 18020 } 18021 18022 desc.dtbd_size = buf->dtb_xamot_offset; 18023 desc.dtbd_drops = buf->dtb_xamot_drops; 18024 desc.dtbd_errors = buf->dtb_xamot_errors; 18025 desc.dtbd_oldest = 0; 18026 desc.dtbd_timestamp = buf->dtb_switched; 18027 18028 mutex_exit(&dtrace_lock); 18029 18030 /* 18031 * Finally, copy out the buffer description. 18032 */ 18033 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 18034 return (EFAULT); 18035 18036 return (0); 18037 } 18038 18039 case DTRACEIOC_CONF: { 18040 dtrace_conf_t conf; 18041 18042 bzero(&conf, sizeof (conf)); 18043 conf.dtc_difversion = DIF_VERSION; 18044 conf.dtc_difintregs = DIF_DIR_NREGS; 18045 conf.dtc_diftupregs = DIF_DTR_NREGS; 18046 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 18047 18048 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 18049 return (EFAULT); 18050 18051 return (0); 18052 } 18053 18054 case DTRACEIOC_STATUS: { 18055 dtrace_status_t stat; 18056 dtrace_dstate_t *dstate; 18057 int i, j; 18058 uint64_t nerrs; 18059 18060 /* 18061 * See the comment in dtrace_state_deadman() for the reason 18062 * for setting dts_laststatus to INT64_MAX before setting 18063 * it to the correct value. 18064 */ 18065 state->dts_laststatus = INT64_MAX; 18066 dtrace_membar_producer(); 18067 state->dts_laststatus = dtrace_gethrtime(); 18068 18069 bzero(&stat, sizeof (stat)); 18070 18071 mutex_enter(&dtrace_lock); 18072 18073 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 18074 mutex_exit(&dtrace_lock); 18075 return (ENOENT); 18076 } 18077 18078 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 18079 stat.dtst_exiting = 1; 18080 18081 nerrs = state->dts_errors; 18082 dstate = &state->dts_vstate.dtvs_dynvars; 18083 18084 for (i = 0; i < NCPU; i++) { 18085 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 18086 18087 stat.dtst_dyndrops += dcpu->dtdsc_drops; 18088 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 18089 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 18090 18091 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 18092 stat.dtst_filled++; 18093 18094 nerrs += state->dts_buffer[i].dtb_errors; 18095 18096 for (j = 0; j < state->dts_nspeculations; j++) { 18097 dtrace_speculation_t *spec; 18098 dtrace_buffer_t *buf; 18099 18100 spec = &state->dts_speculations[j]; 18101 buf = &spec->dtsp_buffer[i]; 18102 stat.dtst_specdrops += buf->dtb_xamot_drops; 18103 } 18104 } 18105 18106 stat.dtst_specdrops_busy = state->dts_speculations_busy; 18107 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 18108 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 18109 stat.dtst_dblerrors = state->dts_dblerrors; 18110 stat.dtst_killed = 18111 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 18112 stat.dtst_errors = nerrs; 18113 18114 mutex_exit(&dtrace_lock); 18115 18116 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 18117 return (EFAULT); 18118 18119 return (0); 18120 } 18121 18122 case DTRACEIOC_FORMAT: { 18123 dtrace_fmtdesc_t fmt; 18124 char *str; 18125 int len; 18126 18127 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 18128 return (EFAULT); 18129 18130 mutex_enter(&dtrace_lock); 18131 18132 if (fmt.dtfd_format == 0 || 18133 fmt.dtfd_format > state->dts_nformats) { 18134 mutex_exit(&dtrace_lock); 18135 return (EINVAL); 18136 } 18137 18138 /* 18139 * Format strings are allocated contiguously and they are 18140 * never freed; if a format index is less than the number 18141 * of formats, we can assert that the format map is non-NULL 18142 * and that the format for the specified index is non-NULL. 18143 */ 18144 ASSERT(state->dts_formats != NULL); 18145 str = state->dts_formats[fmt.dtfd_format - 1]; 18146 ASSERT(str != NULL); 18147 18148 len = strlen(str) + 1; 18149 18150 if (len > fmt.dtfd_length) { 18151 fmt.dtfd_length = len; 18152 18153 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 18154 mutex_exit(&dtrace_lock); 18155 return (EINVAL); 18156 } 18157 } else { 18158 if (copyout(str, fmt.dtfd_string, len) != 0) { 18159 mutex_exit(&dtrace_lock); 18160 return (EINVAL); 18161 } 18162 } 18163 18164 mutex_exit(&dtrace_lock); 18165 return (0); 18166 } 18167 18168 default: 18169 break; 18170 } 18171 18172 return (ENOTTY); 18173 } 18174 18175 /*ARGSUSED*/ 18176 static int 18177 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 18178 { 18179 dtrace_state_t *state; 18180 18181 switch (cmd) { 18182 case DDI_DETACH: 18183 break; 18184 18185 case DDI_SUSPEND: 18186 return (DDI_SUCCESS); 18187 18188 default: 18189 return (DDI_FAILURE); 18190 } 18191 18192 mutex_enter(&cpu_lock); 18193 mutex_enter(&dtrace_provider_lock); 18194 mutex_enter(&dtrace_lock); 18195 18196 ASSERT(dtrace_opens == 0); 18197 18198 if (dtrace_helpers > 0) { 18199 mutex_exit(&dtrace_provider_lock); 18200 mutex_exit(&dtrace_lock); 18201 mutex_exit(&cpu_lock); 18202 return (DDI_FAILURE); 18203 } 18204 18205 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 18206 mutex_exit(&dtrace_provider_lock); 18207 mutex_exit(&dtrace_lock); 18208 mutex_exit(&cpu_lock); 18209 return (DDI_FAILURE); 18210 } 18211 18212 dtrace_provider = NULL; 18213 18214 if ((state = dtrace_anon_grab()) != NULL) { 18215 /* 18216 * If there were ECBs on this state, the provider should 18217 * have not been allowed to detach; assert that there is 18218 * none. 18219 */ 18220 ASSERT(state->dts_necbs == 0); 18221 dtrace_state_destroy(state); 18222 18223 /* 18224 * If we're being detached with anonymous state, we need to 18225 * indicate to the kernel debugger that DTrace is now inactive. 18226 */ 18227 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 18228 } 18229 18230 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 18231 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 18232 dtrace_cpu_init = NULL; 18233 dtrace_helpers_cleanup = NULL; 18234 dtrace_helpers_fork = NULL; 18235 dtrace_cpustart_init = NULL; 18236 dtrace_cpustart_fini = NULL; 18237 dtrace_debugger_init = NULL; 18238 dtrace_debugger_fini = NULL; 18239 dtrace_modload = NULL; 18240 dtrace_modunload = NULL; 18241 18242 ASSERT(dtrace_getf == 0); 18243 ASSERT(dtrace_closef == NULL); 18244 18245 mutex_exit(&cpu_lock); 18246 18247 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 18248 dtrace_probes = NULL; 18249 dtrace_nprobes = 0; 18250 18251 dtrace_hash_destroy(dtrace_bymod); 18252 dtrace_hash_destroy(dtrace_byfunc); 18253 dtrace_hash_destroy(dtrace_byname); 18254 dtrace_bymod = NULL; 18255 dtrace_byfunc = NULL; 18256 dtrace_byname = NULL; 18257 18258 kmem_cache_destroy(dtrace_state_cache); 18259 vmem_destroy(dtrace_minor); 18260 vmem_destroy(dtrace_arena); 18261 18262 if (dtrace_toxrange != NULL) { 18263 kmem_free(dtrace_toxrange, 18264 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 18265 dtrace_toxrange = NULL; 18266 dtrace_toxranges = 0; 18267 dtrace_toxranges_max = 0; 18268 } 18269 18270 ddi_remove_minor_node(dtrace_devi, NULL); 18271 dtrace_devi = NULL; 18272 18273 ddi_soft_state_fini(&dtrace_softstate); 18274 18275 ASSERT(dtrace_vtime_references == 0); 18276 ASSERT(dtrace_opens == 0); 18277 ASSERT(dtrace_retained == NULL); 18278 18279 mutex_exit(&dtrace_lock); 18280 mutex_exit(&dtrace_provider_lock); 18281 18282 /* 18283 * We don't destroy the task queue until after we have dropped our 18284 * locks (taskq_destroy() may block on running tasks). To prevent 18285 * attempting to do work after we have effectively detached but before 18286 * the task queue has been destroyed, all tasks dispatched via the 18287 * task queue must check that DTrace is still attached before 18288 * performing any operation. 18289 */ 18290 taskq_destroy(dtrace_taskq); 18291 dtrace_taskq = NULL; 18292 18293 return (DDI_SUCCESS); 18294 } 18295 #endif 18296 18297 #ifdef illumos 18298 /*ARGSUSED*/ 18299 static int 18300 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 18301 { 18302 int error; 18303 18304 switch (infocmd) { 18305 case DDI_INFO_DEVT2DEVINFO: 18306 *result = (void *)dtrace_devi; 18307 error = DDI_SUCCESS; 18308 break; 18309 case DDI_INFO_DEVT2INSTANCE: 18310 *result = (void *)0; 18311 error = DDI_SUCCESS; 18312 break; 18313 default: 18314 error = DDI_FAILURE; 18315 } 18316 return (error); 18317 } 18318 #endif 18319 18320 #ifdef illumos 18321 static struct cb_ops dtrace_cb_ops = { 18322 dtrace_open, /* open */ 18323 dtrace_close, /* close */ 18324 nulldev, /* strategy */ 18325 nulldev, /* print */ 18326 nodev, /* dump */ 18327 nodev, /* read */ 18328 nodev, /* write */ 18329 dtrace_ioctl, /* ioctl */ 18330 nodev, /* devmap */ 18331 nodev, /* mmap */ 18332 nodev, /* segmap */ 18333 nochpoll, /* poll */ 18334 ddi_prop_op, /* cb_prop_op */ 18335 0, /* streamtab */ 18336 D_NEW | D_MP /* Driver compatibility flag */ 18337 }; 18338 18339 static struct dev_ops dtrace_ops = { 18340 DEVO_REV, /* devo_rev */ 18341 0, /* refcnt */ 18342 dtrace_info, /* get_dev_info */ 18343 nulldev, /* identify */ 18344 nulldev, /* probe */ 18345 dtrace_attach, /* attach */ 18346 dtrace_detach, /* detach */ 18347 nodev, /* reset */ 18348 &dtrace_cb_ops, /* driver operations */ 18349 NULL, /* bus operations */ 18350 nodev /* dev power */ 18351 }; 18352 18353 static struct modldrv modldrv = { 18354 &mod_driverops, /* module type (this is a pseudo driver) */ 18355 "Dynamic Tracing", /* name of module */ 18356 &dtrace_ops, /* driver ops */ 18357 }; 18358 18359 static struct modlinkage modlinkage = { 18360 MODREV_1, 18361 (void *)&modldrv, 18362 NULL 18363 }; 18364 18365 int 18366 _init(void) 18367 { 18368 return (mod_install(&modlinkage)); 18369 } 18370 18371 int 18372 _info(struct modinfo *modinfop) 18373 { 18374 return (mod_info(&modlinkage, modinfop)); 18375 } 18376 18377 int 18378 _fini(void) 18379 { 18380 return (mod_remove(&modlinkage)); 18381 } 18382 #else 18383 18384 static d_ioctl_t dtrace_ioctl; 18385 static d_ioctl_t dtrace_ioctl_helper; 18386 static void dtrace_load(void *); 18387 static int dtrace_unload(void); 18388 static struct cdev *dtrace_dev; 18389 static struct cdev *helper_dev; 18390 18391 void dtrace_invop_init(void); 18392 void dtrace_invop_uninit(void); 18393 18394 static struct cdevsw dtrace_cdevsw = { 18395 .d_version = D_VERSION, 18396 .d_ioctl = dtrace_ioctl, 18397 .d_open = dtrace_open, 18398 .d_name = "dtrace", 18399 }; 18400 18401 static struct cdevsw helper_cdevsw = { 18402 .d_version = D_VERSION, 18403 .d_ioctl = dtrace_ioctl_helper, 18404 .d_name = "helper", 18405 }; 18406 18407 #include <dtrace_anon.c> 18408 #include <dtrace_ioctl.c> 18409 #include <dtrace_load.c> 18410 #include <dtrace_modevent.c> 18411 #include <dtrace_sysctl.c> 18412 #include <dtrace_unload.c> 18413 #include <dtrace_vtime.c> 18414 #include <dtrace_hacks.c> 18415 #include <dtrace_isa.c> 18416 18417 SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL); 18418 SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL); 18419 SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL); 18420 18421 DEV_MODULE(dtrace, dtrace_modevent, NULL); 18422 MODULE_VERSION(dtrace, 1); 18423 MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1); 18424 #endif 18425