/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/types.h>
#ifndef illumos
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/endian.h>
#ifdef illumos
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#ifdef illumos
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#ifdef illumos
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#ifdef illumos
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#ifdef illumos
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#ifdef illumos
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "strtolctype.h"

/* FreeBSD includes: */
#ifndef illumos
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/kdb.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/mount.h>
#undef AT_UID
#undef AT_GID
#include <sys/vnode.h>
#include <sys/cred.h>

#include <sys/dtrace_bsd.h>

#include <netinet/in.h>

#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

#include "dtrace_xoroshiro128_plus.h"

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
#ifndef illumos
/* Positive logic version of dtrace_destructive_disallow for loader tunable */
int		dtrace_allow_destructive = 1;
#endif
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (8 * 1024 * 1024);
size_t		dtrace_statvar_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = MSEC2NSEC(500);		/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
#ifndef illumos
int		dtrace_memstr_max = 4096;
#endif

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#ifdef illumos
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#ifdef illumos
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static int		dtrace_getf;		/* number of unpriv getf()s */
#ifdef illumos
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_genid_t	dtrace_retained_gen;	/* current retained enab gen */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
static int		dtrace_dynvar_failclean; /* dynvars failed to clean */
#ifndef illumos
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
static eventhandler_tag	dtrace_kld_load_tag;
static eventhandler_tag	dtrace_kld_unload_try_tag;
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#ifndef illumos
/* XXX FreeBSD hacks. */
#define	cr_suid		cr_svuid
#define	cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define	mod_modname	pathname
#define	vuprintf	vprintf
#ifndef crgetzoneid
#define	crgetzoneid(_a)	0
#endif
#define	ttoproc(_a)	((_a)->td_proc)
#define	SNOCD		0
#define	CPU_ON_INTR(_a)	0

#define	PRIV_EFFECTIVE		(1 << 0)
#define	PRIV_DTRACE_KERNEL	(1 << 1)
#define	PRIV_DTRACE_PROC	(1 << 2)
#define	PRIV_DTRACE_USER	(1 << 3)
#define	PRIV_PROC_OWNER		(1 << 4)
#define	PRIV_PROC_ZONE		(1 << 5)
#define	PRIV_ALL		~0

SYSCTL_DECL(_debug_dtrace);
SYSCTL_DECL(_kern_dtrace);
#endif

#ifdef illumos
#define	curcpu	CPU->cpu_id
#endif

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t dtrace_provider_ops = {
	.dtps_provide =	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	.dtps_provide_module =	(void (*)(void *, modctl_t *))dtrace_nullop,
	.dtps_enable =	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	.dtps_disable =	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	.dtps_suspend =	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	.dtps_resume =	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	.dtps_getargdesc =	NULL,
	.dtps_getargval =	NULL,
	.dtps_usermode =	NULL,
	.dtps_destroy =	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 *
 * These variables should be set dynamically to enable helper tracing.  The
 * only variables that should be set are dtrace_helptrace_enable (which should
 * be set to a non-zero value to allocate helper tracing buffers on the next
 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
 * non-zero value to deallocate helper tracing buffers on the next close of
 * /dev/dtrace).  When (and only when) helper tracing is disabled, the
 * buffer size may also be set via dtrace_helptrace_bufsize.
 */
int			dtrace_helptrace_enable = 0;
int			dtrace_helptrace_disable = 0;
int			dtrace_helptrace_bufsize = 16 * 1024 * 1024;
uint32_t		dtrace_helptrace_nlocals;
static dtrace_helptrace_t *dtrace_helptrace_buffer;
static uint32_t		dtrace_helptrace_next = 0;
static int		dtrace_helptrace_wrapped = 0;

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#ifdef illumos
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif
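/*
 * Illustrative sketch (not part of the implementation, disabled from the
 * build): how the DTRACE_TLS_THRKEY packing behaves, extracted into a plain
 * userland function.  The low 61 bits hold the thread ID biased by
 * DIF_VARIABLE_MAX; the high 3 bits hold the level of the highest active
 * interrupt.  EX_DIF_VARIABLE_MAX below is a hypothetical stand-in for the
 * real DIF_VARIABLE_MAX constant.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define	EX_DIF_VARIABLE_MAX	0x4000	/* hypothetical bias value */

static uint64_t
example_tls_thrkey(uint64_t tid, uint64_t intr_actv)
{
	uint64_t intr = 0;

	/*
	 * As in the macro above, count the shifts needed to clear the
	 * active-interrupt mask; this yields the highest active level.
	 */
	for (; intr_actv != 0; intr_actv >>= 1)
		intr++;

	return (((tid + EX_DIF_VARIABLE_MAX) & (((uint64_t)1 << 61) - 1)) |
	    (intr << 61));
}

int
main(void)
{
	/* Thread ID 99 with interrupt levels 0 and 1 active: intr == 2. */
	printf("%#llx\n", (unsigned long long)example_tls_thrkey(99, 0x3));
	return (0);
}
#endif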
#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)			\
	if (addr & (size - 1)) {				\
		*flags |= CPU_DTRACE_BADALIGN;			\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;	\
		return (0);					\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz)		\
	((testaddr) - (uintptr_t)(baseaddr) < (basesz) &&		\
	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) &&	\
	(testaddr) + (testsz) >= (testaddr))

#define	DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz)		\
do {									\
	if ((remp) != NULL) {						\
		*(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr);	\
	}								\
} while (0)

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)					\
/*CSTYLED*/							\
uint##bits##_t							\
dtrace_load##bits(uintptr_t addr)				\
{								\
	size_t size = bits / NBBY;				\
	/*CSTYLED*/						\
	uint##bits##_t rval;					\
	int i;							\
	volatile uint16_t *flags = (volatile uint16_t *)	\
	    &cpu_core[curcpu].cpuc_dtrace_flags;		\
								\
	DTRACE_ALIGNCHECK(addr, size, flags);			\
								\
	for (i = 0; i < dtrace_toxranges; i++) {		\
		if (addr >= dtrace_toxrange[i].dtt_limit)	\
			continue;				\
								\
		if (addr + size <= dtrace_toxrange[i].dtt_base)	\
			continue;				\
								\
		/*						\
		 * This address falls within a toxic region;	\
		 * return 0.					\
		 */						\
		*flags |= CPU_DTRACE_BADADDR;			\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;	\
		return (0);					\
	}							\
								\
	*flags |= CPU_DTRACE_NOFAULT;				\
	/*CSTYLED*/						\
	rval = *((volatile uint##bits##_t *)addr);		\
	*flags &= ~CPU_DTRACE_NOFAULT;				\
								\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);	\
}
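/*
 * Illustrative sketch (not part of the implementation, disabled from the
 * build): the overflow-safe containment test that DTRACE_INRANGE encodes.
 * A naive "base <= addr && addr + size <= base + basesz" can wrap on either
 * side; the macro instead compares unsigned differences against basesz and
 * explicitly rejects a wrapped "addr + size".
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static int
example_inrange(uintptr_t testaddr, size_t testsz, uintptr_t baseaddr,
    size_t basesz)
{
	return (testaddr - baseaddr < basesz &&
	    testaddr + testsz - baseaddr <= basesz &&
	    testaddr + testsz >= testaddr);
}

int
main(void)
{
	assert(example_inrange(0x1000, 0x10, 0x1000, 0x100));	/* inside */
	assert(!example_inrange(0x0fff, 0x10, 0x1000, 0x100));	/* below */
	assert(example_inrange(0x1000, 0, 0x1000, 0x100));	/* size 0 OK */
	/* End of the tested range wraps past zero: rejected. */
	assert(!example_inrange(UINTPTR_MAX - 4, 16,
	    UINTPTR_MAX - 0x10, 0x100));
	return (0);
}
#endif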
#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_priv_proc(dtrace_state_t *);
static void dtrace_getf_barrier(void);
static int dtrace_canload_remains(uint64_t, size_t, size_t *,
    dtrace_mstate_t *, dtrace_vstate_t *);
static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
    dtrace_mstate_t *, dtrace_vstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
#ifdef __FreeBSD__
	vpanic(format, alist);
#else
	dtrace_vpanic(format, alist);
#endif
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}
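/*
 * Illustrative sketch (not part of the implementation, disabled from the
 * build): the lock-free increment pattern used by dtrace_error() above,
 * rewritten with C11 atomics in place of dtrace_cas32().  The counter
 * saturates through 1 rather than 0 on wrap, so a non-zero value always
 * means "at least one error was seen".
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static void
example_error(_Atomic uint32_t *counter)
{
	uint32_t oval, nval;

	do {
		oval = atomic_load(counter);
		nval = oval + 1;
		if (nval == 0)
			nval = 1;	/* never wrap back to zero */
	} while (!atomic_compare_exchange_weak(counter, &oval, nval));
}
#endif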
/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
/* BEGIN CSTYLED */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)
/* END CSTYLED */

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;
	size_t maxglobalsize, maxlocalsize;

	if (nsvars == 0)
		return (0);

	maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
	maxlocalsize = maxglobalsize * NCPU;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];
		uint8_t scope;
		size_t size;

		if (svar == NULL || (size = svar->dtsv_size) == 0)
			continue;

		scope = svar->dtsv_var.dtdv_scope;

		/*
		 * We verify that our size is valid in the spirit of providing
		 * defense in depth: we want to prevent attackers from using
		 * DTrace to escalate an orthogonal kernel heap corruption bug
		 * into the ability to store to arbitrary locations in memory.
		 */
		VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
		    (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data,
		    svar->dtsv_size)) {
			DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
			    svar->dtsv_size);
			return (1);
		}
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
}

/*
 * Implementation of dtrace_canstore which communicates the upper bound of the
 * allowed memory region.
 */
static int
dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size)) {
		DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
		    mstate->dtms_scratch_size);
		return (1);
	}

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;
		dtrace_dynvar_t *dvar;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 *	(4) Not be in the tuple space of a dynamic variable
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);

		if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
			return (0);

		if (chunkoffs < sizeof (dtrace_dynvar_t) +
		    ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
			return (0);

		DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz, remain,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz, remain,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
}

/*
 * Implementation of dtrace_canload which communicates the upper bound of the
 * allowed memory region.
 */
static int
dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
	file_t *fp;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
		return (1);
	}

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen)) {
		DTRACE_RANGE_REMAIN(remain, addr,
		    mstate->dtms_difo->dtdo_strtab,
		    mstate->dtms_difo->dtdo_strlen);
		return (1);
	}

	if (vstate->dtvs_state != NULL &&
	    dtrace_priv_proc(vstate->dtvs_state)) {
		proc_t *p;

		/*
		 * When we have privileges to the current process, there are
		 * several context-related kernel structures that are safe to
		 * read, even absent the privilege to read from kernel memory.
		 * These reads are safe because these structures contain only
		 * state that (1) we're permitted to read, (2) is harmless or
		 * (3) contains pointers to additional kernel state that we're
		 * not permitted to read (and as such, do not present an
		 * opportunity for privilege escalation).  Finally (and
		 * critically), because of the nature of their relation with
		 * the current thread context, the memory associated with these
		 * structures cannot change over the duration of probe context,
		 * and it is therefore impossible for this memory to be
		 * deallocated and reallocated as something else while it's
		 * being operated upon.
		 */
		if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread,
			    sizeof (kthread_t));
			return (1);
		}

		if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
		    sz, curthread->t_procp, sizeof (proc_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
			    sizeof (proc_t));
			return (1);
		}

		if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cred, sizeof (cred_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
			    sizeof (cred_t));
			return (1);
		}

#ifdef illumos
		if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
		    &(p->p_pidp->pid_id), sizeof (pid_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
			    sizeof (pid_t));
			return (1);
		}

		if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
			    offsetof(cpu_t, cpu_pause_thread));
			return (1);
		}
#endif
	}

	if ((fp = mstate->dtms_getf) != NULL) {
		uintptr_t psz = sizeof (void *);
		vnode_t *vp;
		vnodeops_t *op;

		/*
		 * When getf() returns a file_t, the enabling is implicitly
		 * granted the (transient) right to read the returned file_t
		 * as well as the v_path and v_op->vnop_name of the underlying
		 * vnode.  These accesses are allowed after a successful
		 * getf() because the members that they refer to cannot change
		 * once set -- and the barrier logic in the kernel's closef()
		 * path assures that the file_t and its referenced vnode_t
		 * cannot themselves be stale (that is, it is impossible for
		 * either dtms_getf itself or its f_vnode member to reference
		 * freed memory).
		 */
		if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
			return (1);
		}

		if ((vp = fp->f_vnode) != NULL) {
			size_t slen;
#ifdef illumos
			if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
				DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
				    psz);
				return (1);
			}
			slen = strlen(vp->v_path) + 1;
			if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
				DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
				    slen);
				return (1);
			}
#endif

			if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
				DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
				    psz);
				return (1);
			}

#ifdef illumos
			if ((op = vp->v_op) != NULL &&
			    DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
				DTRACE_RANGE_REMAIN(remain, addr,
				    &op->vnop_name, psz);
				return (1);
			}

			if (op != NULL && op->vnop_name != NULL &&
			    DTRACE_INRANGE(addr, sz, op->vnop_name,
			    (slen = strlen(op->vnop_name) + 1))) {
				DTRACE_RANGE_REMAIN(remain, addr,
				    op->vnop_name, slen);
				return (1);
			}
#endif
		}
	}

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	size_t rsize;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
		return (1);
	}

	/*
	 * Even if the caller is uninterested in querying the remaining valid
	 * range, it is required to ensure that the access is allowed.
	 */
	if (remain == NULL) {
		remain = &rsize;
	}
	if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
		size_t strsz;
		/*
		 * Perform the strlen after determining the length of the
		 * memory region which is accessible.  This prevents timing
		 * information from being used to find NULs in memory which is
		 * not accessible to the caller.
		 */
		strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
		    MIN(sz, *remain));
		if (strsz <= *remain) {
			return (1);
		}
	}

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * Calculate the max size before performing any checks since even
	 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
	 * return the max length via 'remain'.
	 */
	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_state_t *state = vstate->dtvs_state;

		if (state != NULL) {
			sz = state->dts_options[DTRACEOPT_STRSIZE];
		} else {
			/*
			 * In helper context, we have a NULL state; fall back
			 * to using the system-wide default for the string size
			 * in this case.
			 */
			sz = dtrace_strsize_default;
		}
	} else {
		sz = type->dtdt_size;
	}

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
		DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
		return (1);
	}

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
		    vstate));
	}
	return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
	    vstate));
}

/*
 * Convert a string to a signed integer using safe loads.
 *
 * NOTE:  This function uses various macros from strtolctype.h to manipulate
 *	  digit values, etc -- these have all been checked to ensure they make
 *	  no additional function calls.
 */
static int64_t
dtrace_strtoll(char *input, int base, size_t limit)
{
	uintptr_t pos = (uintptr_t)input;
	int64_t val = 0;
	int x;
	boolean_t neg = B_FALSE;
	char c, cc, ccc;
	uintptr_t end = pos + limit;

	/*
	 * Consume any whitespace preceding digits.
	 */
	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
		pos++;

	/*
	 * Handle an explicit sign if one is present.
	 */
	if (c == '-' || c == '+') {
		if (c == '-')
			neg = B_TRUE;
		c = dtrace_load8(++pos);
	}

	/*
	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
	 * if present.
	 */
	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
		pos += 2;
		c = ccc;
	}

	/*
	 * Read in contiguous digits until the first non-digit character.
	 */
	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
	    c = dtrace_load8(++pos))
		val = val * base + x;

	return (neg ? -val : val);
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}
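/*
 * Illustrative sketch (not part of the implementation, disabled from the
 * build): the parsing behavior of dtrace_strtoll() for base-16 input, using
 * ordinary loads in place of dtrace_load8().  Leading whitespace is skipped,
 * an optional sign is consumed, and an optional "0x"/"0X" prefix is accepted
 * only when followed by a valid hex digit -- mirroring the logic above.
 */
#if 0
#include <assert.h>
#include <ctype.h>
#include <stddef.h>
#include <stdint.h>

static int64_t
example_strtoll16(const char *s, size_t limit)
{
	const char *end = s + limit;
	int64_t val = 0;
	int neg = 0;

	while (s < end && (*s == ' ' || *s == '\t'))
		s++;
	if (s < end && (*s == '-' || *s == '+'))
		neg = (*s++ == '-');
	if (end - s > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') &&
	    isxdigit((unsigned char)s[2]))
		s += 2;	/* skip the hex prefix */
	for (; s < end && isxdigit((unsigned char)*s); s++)
		val = val * 16 + (isdigit((unsigned char)*s) ?
		    *s - '0' : (tolower((unsigned char)*s) - 'a') + 10);
	return (neg ? -val : val);
}

int
main(void)
{
	assert(example_strtoll16("  0x1f", 6) == 31);
	assert(example_strtoll16("-ff", 3) == -255);
	return (0);
}
#endif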
/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
	} else {
		dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
	}
}
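/*
 * Illustrative sketch (not part of the implementation, disabled from the
 * build): the overlap handling in dtrace_bcopy() above, with plain loads.
 * When the destination starts at or below the source, copying forward is
 * safe; otherwise the copy must run backward so that source bytes are
 * consumed before they are overwritten.  The result is cross-checked against
 * memmove(), which has the same overlap semantics.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
example_bcopy(const void *src, void *dst, size_t len)
{
	uint8_t *s1 = dst;
	const uint8_t *s2 = src;

	if (len == 0)
		return;

	if (s1 <= s2) {
		do {
			*s1++ = *s2++;		/* forward copy */
		} while (--len != 0);
	} else {
		s2 += len;
		s1 += len;
		do {
			*--s1 = *--s2;		/* backward copy */
		} while (--len != 0);
	}
}

int
main(void)
{
	char a[8] = "abcdef", b[8] = "abcdef";

	example_bcopy(a, a + 1, 6);	/* overlapping shift right by one */
	memmove(b + 1, b, 6);		/* reference behavior */
	assert(memcmp(a, b, 8) == 0);
	return (0);
}
#endif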
/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
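/*
 * Illustrative sketch (not part of the implementation, disabled from the
 * build): checking the 32-bit decomposition used by dtrace_multiply_128()
 * against a compiler-provided 128-bit type, on toolchains (GCC/Clang) that
 * offer __int128.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t f1 = 0xdeadbeefcafef00dULL, f2 = 0x123456789abcdef0ULL;
	uint64_t hi1 = f1 >> 32, lo1 = f1 & 0xffffffffULL;
	uint64_t hi2 = f2 >> 32, lo2 = f2 & 0xffffffffULL;
	unsigned __int128 p;

	/* hi1*hi2 << 64 + hi1*lo2 << 32 + hi2*lo1 << 32 + lo1*lo2 */
	p = (unsigned __int128)hi1 * hi2 << 64;
	p += (unsigned __int128)hi1 * lo2 << 32;
	p += (unsigned __int128)hi2 * lo1 << 32;
	p += (unsigned __int128)lo1 * lo2;

	assert(p == (unsigned __int128)f1 * f2);
	return (0);
}
#endif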
/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#ifdef illumos
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Determine if the dte_cond of the specified ECB allows for processing of
 * the current probe to continue.  Note that this routine may allow continued
 * processing, but with access(es) stripped from the mstate's dtms_access
 * field.
 */
static int
dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
    dtrace_ecb_t *ecb)
{
	dtrace_probe_t *probe = ecb->dte_probe;
	dtrace_provider_t *prov = probe->dtpr_provider;
	dtrace_pops_t *pops = &prov->dtpv_pops;
	int mode = DTRACE_MODE_NOPRIV_DROP;

	ASSERT(ecb->dte_cond);

#ifdef illumos
	if (pops->dtps_mode != NULL) {
		mode = pops->dtps_mode(prov->dtpv_arg,
		    probe->dtpr_id, probe->dtpr_arg);

		ASSERT((mode & DTRACE_MODE_USER) ||
		    (mode & DTRACE_MODE_KERNEL));
		ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) ||
		    (mode & DTRACE_MODE_NOPRIV_DROP));
	}

	/*
	 * If the dte_cond bits indicate that this consumer is only allowed to
	 * see user-mode firings of this probe, call the provider's dtps_mode()
	 * entry point to check that the probe was fired while in a user
	 * context.  If that's not the case, use the policy specified by the
	 * provider to determine if we drop the probe or merely restrict
	 * operation.
	 */
	if (ecb->dte_cond & DTRACE_COND_USERMODE) {
		ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);

		if (!(mode & DTRACE_MODE_USER)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
		}
	}
#endif

	/*
	 * This is more subtle than it looks.  We have to be absolutely certain
	 * that CRED() isn't going to change out from under us so it's only
	 * legit to examine that structure if we're in constrained situations.
	 * Currently, the only times we'll do this check is if a non-super-user
	 * has enabled the profile or syscall providers -- providers that
	 * allow visibility of all processes.  For the profile case, the check
	 * above will ensure that we're examining a user context.
	 */
	if (ecb->dte_cond & DTRACE_COND_OWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;
		proc_t *proc;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_uid != cr->cr_uid ||
		    s_cr->cr_uid != cr->cr_ruid ||
		    s_cr->cr_uid != cr->cr_suid ||
		    s_cr->cr_gid != cr->cr_gid ||
		    s_cr->cr_gid != cr->cr_rgid ||
		    s_cr->cr_gid != cr->cr_sgid ||
		    (proc = ttoproc(curthread)) == NULL ||
		    (proc->p_flag & SNOCD)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

#ifdef illumos
			mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
#endif
		}
	}

#ifdef illumos
	/*
	 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
	 * in our zone, check to see if our mode policy is to restrict rather
	 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
	 * and DTRACE_ACCESS_ARGS.
	 */
	if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &=
			    ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
		}
	}
#endif

	return (1);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
1734 */
1735 void
1736 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1737 {
1738 dtrace_dynvar_t *dirty;
1739 dtrace_dstate_percpu_t *dcpu;
1740 dtrace_dynvar_t **rinsep;
1741 int i, j, work = 0;
1742
1743 for (i = 0; i < NCPU; i++) {
1744 dcpu = &dstate->dtds_percpu[i];
1745 rinsep = &dcpu->dtdsc_rinsing;
1746
1747 /*
1748 * If the dirty list is NULL, there is no dirty work to do.
1749 */
1750 if (dcpu->dtdsc_dirty == NULL)
1751 continue;
1752
1753 if (dcpu->dtdsc_rinsing != NULL) {
1754 /*
1755 * If the rinsing list is non-NULL, then it is because
1756 * this CPU was selected to accept another CPU's
1757 * dirty list -- and since that time, dirty buffers
1758 * have accumulated. This is a highly unlikely
1759 * condition, but we choose to ignore the dirty
1760 * buffers -- they'll be picked up in a future cleanse.
1761 */
1762 continue;
1763 }
1764
1765 if (dcpu->dtdsc_clean != NULL) {
1766 /*
1767 * If the clean list is non-NULL, then we're in a
1768 * situation where a CPU has done deallocations (we
1769 * have a non-NULL dirty list) but no allocations (we
1770 * also have a non-NULL clean list). We can't simply
1771 * move the dirty list into the clean list on this
1772 * CPU, yet we also don't want to allow this condition
1773 * to persist, lest a short clean list prevent a
1774 * massive dirty list from being cleaned (which in
1775 * turn could lead to otherwise avoidable dynamic
1776 * drops). To deal with this, we look for some CPU
1777 * with a NULL clean list, NULL dirty list, and NULL
1778 * rinsing list -- and then we borrow this CPU to
1779 * rinse our dirty list.
1780 */
1781 for (j = 0; j < NCPU; j++) {
1782 dtrace_dstate_percpu_t *rinser;
1783
1784 rinser = &dstate->dtds_percpu[j];
1785
1786 if (rinser->dtdsc_rinsing != NULL)
1787 continue;
1788
1789 if (rinser->dtdsc_dirty != NULL)
1790 continue;
1791
1792 if (rinser->dtdsc_clean != NULL)
1793 continue;
1794
1795 rinsep = &rinser->dtdsc_rinsing;
1796 break;
1797 }
1798
1799 if (j == NCPU) {
1800 /*
1801 * We were unable to find another CPU that
1802 * could accept this dirty list -- we are
1803 * therefore unable to clean it now.
1804 */
1805 dtrace_dynvar_failclean++;
1806 continue;
1807 }
1808 }
1809
1810 work = 1;
1811
1812 /*
1813 * Atomically move the dirty list aside.
1814 */
1815 do {
1816 dirty = dcpu->dtdsc_dirty;
1817
1818 /*
1819 * Before we zap the dirty list, set the rinsing list.
1820 * (This allows for a potential assertion in
1821 * dtrace_dynvar(): if a free dynamic variable appears
1822 * on a hash chain, either the dirty list or the
1823 * rinsing list for some CPU must be non-NULL.)
1824 */
1825 *rinsep = dirty;
1826 dtrace_membar_producer();
1827 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1828 dirty, NULL) != dirty);
1829 }
1830
1831 if (!work) {
1832 /*
1833 * We have no work to do; we can simply return.
1834 */
1835 return;
1836 }
1837
1838 dtrace_sync();
1839
1840 for (i = 0; i < NCPU; i++) {
1841 dcpu = &dstate->dtds_percpu[i];
1842
1843 if (dcpu->dtdsc_rinsing == NULL)
1844 continue;
1845
1846 /*
1847 * We are now guaranteed that no hash chain contains a pointer
1848 * into this dirty list; we can make it clean.
1849 */
1850 ASSERT(dcpu->dtdsc_clean == NULL);
1851 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1852 dcpu->dtdsc_rinsing = NULL;
1853 }
1854
1855 /*
1856 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1857 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1858 * This prevents a race whereby a CPU incorrectly decides that
1859 * the state should be something other than DTRACE_DSTATE_CLEAN
1860 * after dtrace_dynvar_clean() has completed.
1861 */
1862 dtrace_sync();
1863
1864 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1865 }
1866
1867 /*
1868 * Depending on the value of the op parameter, this function looks up,
1869 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1870 * allocation is requested, this function will return a pointer to a
1871 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1872 * variable can be allocated. If NULL is returned, the appropriate counter
1873 * will be incremented.
1874 */
1875 dtrace_dynvar_t *
1876 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1877 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1878 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1879 {
1880 uint64_t hashval = DTRACE_DYNHASH_VALID;
1881 dtrace_dynhash_t *hash = dstate->dtds_hash;
1882 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1883 processorid_t me = curcpu, cpu = me;
1884 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1885 size_t bucket, ksize;
1886 size_t chunksize = dstate->dtds_chunksize;
1887 uintptr_t kdata, lock, nstate;
1888 uint_t i;
1889
1890 ASSERT(nkeys != 0);
1891
1892 /*
1893 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1894 * algorithm. For the by-value portions, we perform the algorithm in
1895 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1896 * bit, and seems to have only a minute effect on distribution. For
1897 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1898 * over each referenced byte. It's painful to do this, but it's much
1899 * better than pathological hash distribution. The efficacy of the
1900 * hashing algorithm (and a comparison with other algorithms) may be
1901 * found by running the ::dtrace_dynstat MDB dcmd.
1902 */
1903 for (i = 0; i < nkeys; i++) {
1904 if (key[i].dttk_size == 0) {
1905 uint64_t val = key[i].dttk_value;
1906
1907 hashval += (val >> 48) & 0xffff;
1908 hashval += (hashval << 10);
1909 hashval ^= (hashval >> 6);
1910
1911 hashval += (val >> 32) & 0xffff;
1912 hashval += (hashval << 10);
1913 hashval ^= (hashval >> 6);
1914
1915 hashval += (val >> 16) & 0xffff;
1916 hashval += (hashval << 10);
1917 hashval ^= (hashval >> 6);
1918
1919 hashval += val & 0xffff;
1920 hashval += (hashval << 10);
1921 hashval ^= (hashval >> 6);
1922 } else {
1923 /*
1924 * This is incredibly painful, but it beats the hell
1925 * out of the alternative.
1926 */
1927 uint64_t j, size = key[i].dttk_size;
1928 uintptr_t base = (uintptr_t)key[i].dttk_value;
1929
1930 if (!dtrace_canload(base, size, mstate, vstate))
1931 break;
1932
1933 for (j = 0; j < size; j++) {
1934 hashval += dtrace_load8(base + j);
1935 hashval += (hashval << 10);
1936 hashval ^= (hashval >> 6);
1937 }
1938 }
1939 }
1940
1941 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1942 return (NULL);
1943
1944 hashval += (hashval << 3);
1945 hashval ^= (hashval >> 11);
1946 hashval += (hashval << 15);
1947
1948 /*
1949 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1950 * comes out to be one of our two sentinel hash values. If this
1951 * actually happens, we set the hashval to be a value known to be a
1952 * non-sentinel value.
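 * (The 1-in-2^31 figure follows from there being two sentinel values
 * among 2^32 possible hashvals: 2 / 2^32 == 1 / 2^31.)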
1953 */ 1954 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1955 hashval = DTRACE_DYNHASH_VALID; 1956 1957 /* 1958 * Yes, it's painful to do a divide here. If the cycle count becomes 1959 * important here, tricks can be pulled to reduce it. (However, it's 1960 * critical that hash collisions be kept to an absolute minimum; 1961 * they're much more painful than a divide.) It's better to have a 1962 * solution that generates few collisions and still keeps things 1963 * relatively simple. 1964 */ 1965 bucket = hashval % dstate->dtds_hashsize; 1966 1967 if (op == DTRACE_DYNVAR_DEALLOC) { 1968 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1969 1970 for (;;) { 1971 while ((lock = *lockp) & 1) 1972 continue; 1973 1974 if (dtrace_casptr((volatile void *)lockp, 1975 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1976 break; 1977 } 1978 1979 dtrace_membar_producer(); 1980 } 1981 1982 top: 1983 prev = NULL; 1984 lock = hash[bucket].dtdh_lock; 1985 1986 dtrace_membar_consumer(); 1987 1988 start = hash[bucket].dtdh_chain; 1989 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1990 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1991 op != DTRACE_DYNVAR_DEALLOC)); 1992 1993 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1994 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1995 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1996 1997 if (dvar->dtdv_hashval != hashval) { 1998 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1999 /* 2000 * We've reached the sink, and therefore the 2001 * end of the hash chain; we can kick out of 2002 * the loop knowing that we have seen a valid 2003 * snapshot of state. 2004 */ 2005 ASSERT(dvar->dtdv_next == NULL); 2006 ASSERT(dvar == &dtrace_dynhash_sink); 2007 break; 2008 } 2009 2010 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 2011 /* 2012 * We've gone off the rails: somewhere along 2013 * the line, one of the members of this hash 2014 * chain was deleted. Note that we could also 2015 * detect this by simply letting this loop run 2016 * to completion, as we would eventually hit 2017 * the end of the dirty list. However, we 2018 * want to avoid running the length of the 2019 * dirty list unnecessarily (it might be quite 2020 * long), so we catch this as early as 2021 * possible by detecting the hash marker. In 2022 * this case, we simply set dvar to NULL and 2023 * break; the conditional after the loop will 2024 * send us back to top. 
2025 */ 2026 dvar = NULL; 2027 break; 2028 } 2029 2030 goto next; 2031 } 2032 2033 if (dtuple->dtt_nkeys != nkeys) 2034 goto next; 2035 2036 for (i = 0; i < nkeys; i++, dkey++) { 2037 if (dkey->dttk_size != key[i].dttk_size) 2038 goto next; /* size or type mismatch */ 2039 2040 if (dkey->dttk_size != 0) { 2041 if (dtrace_bcmp( 2042 (void *)(uintptr_t)key[i].dttk_value, 2043 (void *)(uintptr_t)dkey->dttk_value, 2044 dkey->dttk_size)) 2045 goto next; 2046 } else { 2047 if (dkey->dttk_value != key[i].dttk_value) 2048 goto next; 2049 } 2050 } 2051 2052 if (op != DTRACE_DYNVAR_DEALLOC) 2053 return (dvar); 2054 2055 ASSERT(dvar->dtdv_next == NULL || 2056 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 2057 2058 if (prev != NULL) { 2059 ASSERT(hash[bucket].dtdh_chain != dvar); 2060 ASSERT(start != dvar); 2061 ASSERT(prev->dtdv_next == dvar); 2062 prev->dtdv_next = dvar->dtdv_next; 2063 } else { 2064 if (dtrace_casptr(&hash[bucket].dtdh_chain, 2065 start, dvar->dtdv_next) != start) { 2066 /* 2067 * We have failed to atomically swing the 2068 * hash table head pointer, presumably because 2069 * of a conflicting allocation on another CPU. 2070 * We need to reread the hash chain and try 2071 * again. 2072 */ 2073 goto top; 2074 } 2075 } 2076 2077 dtrace_membar_producer(); 2078 2079 /* 2080 * Now set the hash value to indicate that it's free. 2081 */ 2082 ASSERT(hash[bucket].dtdh_chain != dvar); 2083 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2084 2085 dtrace_membar_producer(); 2086 2087 /* 2088 * Set the next pointer to point at the dirty list, and 2089 * atomically swing the dirty pointer to the newly freed dvar. 2090 */ 2091 do { 2092 next = dcpu->dtdsc_dirty; 2093 dvar->dtdv_next = next; 2094 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 2095 2096 /* 2097 * Finally, unlock this hash bucket. 2098 */ 2099 ASSERT(hash[bucket].dtdh_lock == lock); 2100 ASSERT(lock & 1); 2101 hash[bucket].dtdh_lock++; 2102 2103 return (NULL); 2104 next: 2105 prev = dvar; 2106 continue; 2107 } 2108 2109 if (dvar == NULL) { 2110 /* 2111 * If dvar is NULL, it is because we went off the rails: 2112 * one of the elements that we traversed in the hash chain 2113 * was deleted while we were traversing it. In this case, 2114 * we assert that we aren't doing a dealloc (deallocs lock 2115 * the hash bucket to prevent themselves from racing with 2116 * one another), and retry the hash chain traversal. 2117 */ 2118 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 2119 goto top; 2120 } 2121 2122 if (op != DTRACE_DYNVAR_ALLOC) { 2123 /* 2124 * If we are not to allocate a new variable, we want to 2125 * return NULL now. Before we return, check that the value 2126 * of the lock word hasn't changed. If it has, we may have 2127 * seen an inconsistent snapshot. 2128 */ 2129 if (op == DTRACE_DYNVAR_NOALLOC) { 2130 if (hash[bucket].dtdh_lock != lock) 2131 goto top; 2132 } else { 2133 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 2134 ASSERT(hash[bucket].dtdh_lock == lock); 2135 ASSERT(lock & 1); 2136 hash[bucket].dtdh_lock++; 2137 } 2138 2139 return (NULL); 2140 } 2141 2142 /* 2143 * We need to allocate a new dynamic variable. The size we need is the 2144 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 2145 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 2146 * the size of any referred-to data (dsize). We then round the final 2147 * size up to the chunksize for allocation. 
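 * For example (illustrative values): with nkeys == 2, one by-value key
 * (dttk_size == 0) and one 13-byte by-reference key, ksize is computed
 * below as P2ROUNDUP(0, 8) + P2ROUNDUP(13, 8) == 16, and the allocation
 * fits only if sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) + 16 +
 * dsize does not exceed the chunksize.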
2148 */ 2149 for (ksize = 0, i = 0; i < nkeys; i++) 2150 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 2151 2152 /* 2153 * This should be pretty much impossible, but could happen if, say, 2154 * strange DIF specified the tuple. Ideally, this should be an 2155 * assertion and not an error condition -- but that requires that the 2156 * chunksize calculation in dtrace_difo_chunksize() be absolutely 2157 * bullet-proof. (That is, it must not be able to be fooled by 2158 * malicious DIF.) Given the lack of backwards branches in DIF, 2159 * solving this would presumably not amount to solving the Halting 2160 * Problem -- but it still seems awfully hard. 2161 */ 2162 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 2163 ksize + dsize > chunksize) { 2164 dcpu->dtdsc_drops++; 2165 return (NULL); 2166 } 2167 2168 nstate = DTRACE_DSTATE_EMPTY; 2169 2170 do { 2171 retry: 2172 free = dcpu->dtdsc_free; 2173 2174 if (free == NULL) { 2175 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 2176 void *rval; 2177 2178 if (clean == NULL) { 2179 /* 2180 * We're out of dynamic variable space on 2181 * this CPU. Unless we have tried all CPUs, 2182 * we'll try to allocate from a different 2183 * CPU. 2184 */ 2185 switch (dstate->dtds_state) { 2186 case DTRACE_DSTATE_CLEAN: { 2187 void *sp = &dstate->dtds_state; 2188 2189 if (++cpu >= NCPU) 2190 cpu = 0; 2191 2192 if (dcpu->dtdsc_dirty != NULL && 2193 nstate == DTRACE_DSTATE_EMPTY) 2194 nstate = DTRACE_DSTATE_DIRTY; 2195 2196 if (dcpu->dtdsc_rinsing != NULL) 2197 nstate = DTRACE_DSTATE_RINSING; 2198 2199 dcpu = &dstate->dtds_percpu[cpu]; 2200 2201 if (cpu != me) 2202 goto retry; 2203 2204 (void) dtrace_cas32(sp, 2205 DTRACE_DSTATE_CLEAN, nstate); 2206 2207 /* 2208 * To increment the correct bean 2209 * counter, take another lap. 2210 */ 2211 goto retry; 2212 } 2213 2214 case DTRACE_DSTATE_DIRTY: 2215 dcpu->dtdsc_dirty_drops++; 2216 break; 2217 2218 case DTRACE_DSTATE_RINSING: 2219 dcpu->dtdsc_rinsing_drops++; 2220 break; 2221 2222 case DTRACE_DSTATE_EMPTY: 2223 dcpu->dtdsc_drops++; 2224 break; 2225 } 2226 2227 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 2228 return (NULL); 2229 } 2230 2231 /* 2232 * The clean list appears to be non-empty. We want to 2233 * move the clean list to the free list; we start by 2234 * moving the clean pointer aside. 2235 */ 2236 if (dtrace_casptr(&dcpu->dtdsc_clean, 2237 clean, NULL) != clean) { 2238 /* 2239 * We are in one of two situations: 2240 * 2241 * (a) The clean list was switched to the 2242 * free list by another CPU. 2243 * 2244 * (b) The clean list was added to by the 2245 * cleansing cyclic. 2246 * 2247 * In either of these situations, we can 2248 * just reattempt the free list allocation. 2249 */ 2250 goto retry; 2251 } 2252 2253 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 2254 2255 /* 2256 * Now we'll move the clean list to our free list. 2257 * It's impossible for this to fail: the only way 2258 * the free list can be updated is through this 2259 * code path, and only one CPU can own the clean list. 2260 * Thus, it would only be possible for this to fail if 2261 * this code were racing with dtrace_dynvar_clean(). 2262 * (That is, if dtrace_dynvar_clean() updated the clean 2263 * list, and we ended up racing to update the free 2264 * list.) This race is prevented by the dtrace_sync() 2265 * in dtrace_dynvar_clean() -- which flushes the 2266 * owners of the clean lists out before resetting 2267 * the clean lists. 
2268 */ 2269 dcpu = &dstate->dtds_percpu[me]; 2270 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 2271 ASSERT(rval == NULL); 2272 goto retry; 2273 } 2274 2275 dvar = free; 2276 new_free = dvar->dtdv_next; 2277 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 2278 2279 /* 2280 * We have now allocated a new chunk. We copy the tuple keys into the 2281 * tuple array and copy any referenced key data into the data space 2282 * following the tuple array. As we do this, we relocate dttk_value 2283 * in the final tuple to point to the key data address in the chunk. 2284 */ 2285 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 2286 dvar->dtdv_data = (void *)(kdata + ksize); 2287 dvar->dtdv_tuple.dtt_nkeys = nkeys; 2288 2289 for (i = 0; i < nkeys; i++) { 2290 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 2291 size_t kesize = key[i].dttk_size; 2292 2293 if (kesize != 0) { 2294 dtrace_bcopy( 2295 (const void *)(uintptr_t)key[i].dttk_value, 2296 (void *)kdata, kesize); 2297 dkey->dttk_value = kdata; 2298 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 2299 } else { 2300 dkey->dttk_value = key[i].dttk_value; 2301 } 2302 2303 dkey->dttk_size = kesize; 2304 } 2305 2306 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 2307 dvar->dtdv_hashval = hashval; 2308 dvar->dtdv_next = start; 2309 2310 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 2311 return (dvar); 2312 2313 /* 2314 * The cas has failed. Either another CPU is adding an element to 2315 * this hash chain, or another CPU is deleting an element from this 2316 * hash chain. The simplest way to deal with both of these cases 2317 * (though not necessarily the most efficient) is to free our 2318 * allocated block and re-attempt it all. Note that the free is 2319 * to the dirty list and _not_ to the free list. This is to prevent 2320 * races with allocators, above. 
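 * (In particular, the clean-to-free move above asserts that its casptr
 * onto dtdsc_free cannot fail -- an invariant that holds only because
 * nothing ever pushes onto a free list directly. Freeing to the dirty
 * list preserves that invariant; the chunk will migrate back to a free
 * list through the normal cleansing cycle.)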
2321 */ 2322 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2323 2324 dtrace_membar_producer(); 2325 2326 do { 2327 free = dcpu->dtdsc_dirty; 2328 dvar->dtdv_next = free; 2329 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 2330 2331 goto top; 2332 } 2333 2334 /*ARGSUSED*/ 2335 static void 2336 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2337 { 2338 if ((int64_t)nval < (int64_t)*oval) 2339 *oval = nval; 2340 } 2341 2342 /*ARGSUSED*/ 2343 static void 2344 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2345 { 2346 if ((int64_t)nval > (int64_t)*oval) 2347 *oval = nval; 2348 } 2349 2350 static void 2351 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2352 { 2353 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2354 int64_t val = (int64_t)nval; 2355 2356 if (val < 0) { 2357 for (i = 0; i < zero; i++) { 2358 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2359 quanta[i] += incr; 2360 return; 2361 } 2362 } 2363 } else { 2364 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2365 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2366 quanta[i - 1] += incr; 2367 return; 2368 } 2369 } 2370 2371 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2372 return; 2373 } 2374 2375 ASSERT(0); 2376 } 2377 2378 static void 2379 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2380 { 2381 uint64_t arg = *lquanta++; 2382 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2383 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2384 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2385 int32_t val = (int32_t)nval, level; 2386 2387 ASSERT(step != 0); 2388 ASSERT(levels != 0); 2389 2390 if (val < base) { 2391 /* 2392 * This is an underflow. 2393 */ 2394 lquanta[0] += incr; 2395 return; 2396 } 2397 2398 level = (val - base) / step; 2399 2400 if (level < levels) { 2401 lquanta[level + 1] += incr; 2402 return; 2403 } 2404 2405 /* 2406 * This is an overflow. 2407 */ 2408 lquanta[levels + 1] += incr; 2409 } 2410 2411 static int 2412 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2413 uint16_t high, uint16_t nsteps, int64_t value) 2414 { 2415 int64_t this = 1, last, next; 2416 int base = 1, order; 2417 2418 ASSERT(factor <= nsteps); 2419 ASSERT(nsteps % factor == 0); 2420 2421 for (order = 0; order < low; order++) 2422 this *= factor; 2423 2424 /* 2425 * If our value is less than our factor taken to the power of the 2426 * low order of magnitude, it goes into the zeroth bucket. 2427 */ 2428 if (value < (last = this)) 2429 return (0); 2430 2431 for (this *= factor; order <= high; order++) { 2432 int nbuckets = this > nsteps ? nsteps : this; 2433 2434 if ((next = this * factor) < this) { 2435 /* 2436 * We should not generally get log/linear quantizations 2437 * with a high magnitude that allows 64-bits to 2438 * overflow, but we nonetheless protect against this 2439 * by explicitly checking for overflow, and clamping 2440 * our value accordingly. 2441 */ 2442 value = this - 1; 2443 } 2444 2445 if (value < this) { 2446 /* 2447 * If our value lies within this order of magnitude, 2448 * determine its position by taking the offset within 2449 * the order of magnitude, dividing by the bucket 2450 * width, and adding to our (accumulated) base. 
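 * For example (illustrative values): with factor 10, low 0, high 2 and
 * nsteps 10, the value 42 falls within the order of magnitude [10, 100);
 * at that point last is 10, this is 100, the bucket width is
 * 100 / 10 == 10, and the accumulated base is 10 -- so the value lands
 * in bucket 10 + (42 - 10) / 10 == 13, the bucket covering [40, 50).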
2451 */ 2452 return (base + (value - last) / (this / nbuckets)); 2453 } 2454 2455 base += nbuckets - (nbuckets / factor); 2456 last = this; 2457 this = next; 2458 } 2459 2460 /* 2461 * Our value is greater than or equal to our factor taken to the 2462 * power of one plus the high magnitude -- return the top bucket. 2463 */ 2464 return (base); 2465 } 2466 2467 static void 2468 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2469 { 2470 uint64_t arg = *llquanta++; 2471 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2472 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2473 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2474 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2475 2476 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2477 low, high, nsteps, nval)] += incr; 2478 } 2479 2480 /*ARGSUSED*/ 2481 static void 2482 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2483 { 2484 data[0]++; 2485 data[1] += nval; 2486 } 2487 2488 /*ARGSUSED*/ 2489 static void 2490 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2491 { 2492 int64_t snval = (int64_t)nval; 2493 uint64_t tmp[2]; 2494 2495 data[0]++; 2496 data[1] += nval; 2497 2498 /* 2499 * What we want to say here is: 2500 * 2501 * data[2] += nval * nval; 2502 * 2503 * But given that nval is 64-bit, we could easily overflow, so 2504 * we do this as 128-bit arithmetic. 2505 */ 2506 if (snval < 0) 2507 snval = -snval; 2508 2509 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2510 dtrace_add_128(data + 2, tmp, data + 2); 2511 } 2512 2513 /*ARGSUSED*/ 2514 static void 2515 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2516 { 2517 *oval = *oval + 1; 2518 } 2519 2520 /*ARGSUSED*/ 2521 static void 2522 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2523 { 2524 *oval += nval; 2525 } 2526 2527 /* 2528 * Aggregate given the tuple in the principal data buffer, and the aggregating 2529 * action denoted by the specified dtrace_aggregation_t. The aggregation 2530 * buffer is specified as the buf parameter. This routine does not return 2531 * failure; if there is no space in the aggregation buffer, the data will be 2532 * dropped, and a corresponding counter incremented. 2533 */ 2534 static void 2535 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2536 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2537 { 2538 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2539 uint32_t i, ndx, size, fsize; 2540 uint32_t align = sizeof (uint64_t) - 1; 2541 dtrace_aggbuffer_t *agb; 2542 dtrace_aggkey_t *key; 2543 uint32_t hashval = 0, limit, isstr; 2544 caddr_t tomax, data, kdata; 2545 dtrace_actkind_t action; 2546 dtrace_action_t *act; 2547 uintptr_t offs; 2548 2549 if (buf == NULL) 2550 return; 2551 2552 if (!agg->dtag_hasarg) { 2553 /* 2554 * Currently, only quantize() and lquantize() take additional 2555 * arguments, and they have the same semantics: an increment 2556 * value that defaults to 1 when not present. If additional 2557 * aggregating actions take arguments, the setting of the 2558 * default argument value will presumably have to become more 2559 * sophisticated... 
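 * For example (illustrative D): "@ = quantize(arg0, 5);" passes an
 * explicit increment, bumping the matching bucket by 5 rather than the
 * default of 1.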
2560 */
2561 arg = 1;
2562 }
2563
2564 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2565 size = rec->dtrd_offset - agg->dtag_base;
2566 fsize = size + rec->dtrd_size;
2567
2568 ASSERT(dbuf->dtb_tomax != NULL);
2569 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2570
2571 if ((tomax = buf->dtb_tomax) == NULL) {
2572 dtrace_buffer_drop(buf);
2573 return;
2574 }
2575
2576 /*
2577 * The metastructure is always at the bottom of the buffer.
2578 */
2579 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2580 sizeof (dtrace_aggbuffer_t));
2581
2582 if (buf->dtb_offset == 0) {
2583 /*
2584 * We just kludge up approximately 1/8th of the size to be
2585 * buckets. If this guess ends up being routinely
2586 * off-the-mark, we may need to dynamically readjust this
2587 * based on past performance.
2588 */
2589 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2590
2591 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2592 (uintptr_t)tomax || hashsize == 0) {
2593 /*
2594 * We've been given a ludicrously small buffer;
2595 * increment our drop count and leave.
2596 */
2597 dtrace_buffer_drop(buf);
2598 return;
2599 }
2600
2601 /*
2602 * And now, a pathetic attempt to try to get an odd (or
2603 * perchance, a prime) hash size for better hash distribution.
2604 */
2605 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2606 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2607
2608 agb->dtagb_hashsize = hashsize;
2609 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2610 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2611 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2612
2613 for (i = 0; i < agb->dtagb_hashsize; i++)
2614 agb->dtagb_hash[i] = NULL;
2615 }
2616
2617 ASSERT(agg->dtag_first != NULL);
2618 ASSERT(agg->dtag_first->dta_intuple);
2619
2620 /*
2621 * Calculate the hash value based on the key. Note that we _don't_
2622 * include the aggid in the hashing (but we will store it as part of
2623 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2624 * algorithm: a simple, quick algorithm that has no known funnels, and
2625 * gets good distribution in practice. The efficacy of the hashing
2626 * algorithm (and a comparison with other algorithms) may be found by
2627 * running the ::dtrace_aggstat MDB dcmd.
2628 */
2629 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2630 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2631 limit = i + act->dta_rec.dtrd_size;
2632 ASSERT(limit <= size);
2633 isstr = DTRACEACT_ISSTRING(act);
2634
2635 for (; i < limit; i++) {
2636 hashval += data[i];
2637 hashval += (hashval << 10);
2638 hashval ^= (hashval >> 6);
2639
2640 if (isstr && data[i] == '\0')
2641 break;
2642 }
2643 }
2644
2645 hashval += (hashval << 3);
2646 hashval ^= (hashval >> 11);
2647 hashval += (hashval << 15);
2648
2649 /*
2650 * Yes, the divide here is expensive -- but it's generally the least
2651 * of the performance issues given the amount of data that we iterate
2652 * over to compute hash values, compare data, etc.
2653 */ 2654 ndx = hashval % agb->dtagb_hashsize; 2655 2656 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2657 ASSERT((caddr_t)key >= tomax); 2658 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2659 2660 if (hashval != key->dtak_hashval || key->dtak_size != size) 2661 continue; 2662 2663 kdata = key->dtak_data; 2664 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2665 2666 for (act = agg->dtag_first; act->dta_intuple; 2667 act = act->dta_next) { 2668 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2669 limit = i + act->dta_rec.dtrd_size; 2670 ASSERT(limit <= size); 2671 isstr = DTRACEACT_ISSTRING(act); 2672 2673 for (; i < limit; i++) { 2674 if (kdata[i] != data[i]) 2675 goto next; 2676 2677 if (isstr && data[i] == '\0') 2678 break; 2679 } 2680 } 2681 2682 if (action != key->dtak_action) { 2683 /* 2684 * We are aggregating on the same value in the same 2685 * aggregation with two different aggregating actions. 2686 * (This should have been picked up in the compiler, 2687 * so we may be dealing with errant or devious DIF.) 2688 * This is an error condition; we indicate as much, 2689 * and return. 2690 */ 2691 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2692 return; 2693 } 2694 2695 /* 2696 * This is a hit: we need to apply the aggregator to 2697 * the value at this key. 2698 */ 2699 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2700 return; 2701 next: 2702 continue; 2703 } 2704 2705 /* 2706 * We didn't find it. We need to allocate some zero-filled space, 2707 * link it into the hash table appropriately, and apply the aggregator 2708 * to the (zero-filled) value. 2709 */ 2710 offs = buf->dtb_offset; 2711 while (offs & (align - 1)) 2712 offs += sizeof (uint32_t); 2713 2714 /* 2715 * If we don't have enough room to both allocate a new key _and_ 2716 * its associated data, increment the drop count and return. 2717 */ 2718 if ((uintptr_t)tomax + offs + fsize > 2719 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2720 dtrace_buffer_drop(buf); 2721 return; 2722 } 2723 2724 /*CONSTCOND*/ 2725 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2726 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2727 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2728 2729 key->dtak_data = kdata = tomax + offs; 2730 buf->dtb_offset = offs + fsize; 2731 2732 /* 2733 * Now copy the data across. 2734 */ 2735 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2736 2737 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2738 kdata[i] = data[i]; 2739 2740 /* 2741 * Because strings are not zeroed out by default, we need to iterate 2742 * looking for actions that store strings, and we need to explicitly 2743 * pad these strings out with zeroes. 2744 */ 2745 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2746 int nul; 2747 2748 if (!DTRACEACT_ISSTRING(act)) 2749 continue; 2750 2751 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2752 limit = i + act->dta_rec.dtrd_size; 2753 ASSERT(limit <= size); 2754 2755 for (nul = 0; i < limit; i++) { 2756 if (nul) { 2757 kdata[i] = '\0'; 2758 continue; 2759 } 2760 2761 if (data[i] != '\0') 2762 continue; 2763 2764 nul = 1; 2765 } 2766 } 2767 2768 for (i = size; i < fsize; i++) 2769 kdata[i] = 0; 2770 2771 key->dtak_hashval = hashval; 2772 key->dtak_size = size; 2773 key->dtak_action = action; 2774 key->dtak_next = agb->dtagb_hash[ndx]; 2775 agb->dtagb_hash[ndx] = key; 2776 2777 /* 2778 * Finally, apply the aggregator. 
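 * (The value is first seeded below with dtag_initial -- the identity
 * for the aggregating function, e.g. INT64_MAX for min() -- and the
 * aggregator is then applied with the expression value and increment.)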
2779 */ 2780 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2781 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2782 } 2783 2784 /* 2785 * Given consumer state, this routine finds a speculation in the INACTIVE 2786 * state and transitions it into the ACTIVE state. If there is no speculation 2787 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2788 * incremented -- it is up to the caller to take appropriate action. 2789 */ 2790 static int 2791 dtrace_speculation(dtrace_state_t *state) 2792 { 2793 int i = 0; 2794 dtrace_speculation_state_t curstate; 2795 uint32_t *stat = &state->dts_speculations_unavail, count; 2796 2797 while (i < state->dts_nspeculations) { 2798 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2799 2800 curstate = spec->dtsp_state; 2801 2802 if (curstate != DTRACESPEC_INACTIVE) { 2803 if (curstate == DTRACESPEC_COMMITTINGMANY || 2804 curstate == DTRACESPEC_COMMITTING || 2805 curstate == DTRACESPEC_DISCARDING) 2806 stat = &state->dts_speculations_busy; 2807 i++; 2808 continue; 2809 } 2810 2811 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2812 curstate, DTRACESPEC_ACTIVE) == curstate) 2813 return (i + 1); 2814 } 2815 2816 /* 2817 * We couldn't find a speculation. If we found as much as a single 2818 * busy speculation buffer, we'll attribute this failure as "busy" 2819 * instead of "unavail". 2820 */ 2821 do { 2822 count = *stat; 2823 } while (dtrace_cas32(stat, count, count + 1) != count); 2824 2825 return (0); 2826 } 2827 2828 /* 2829 * This routine commits an active speculation. If the specified speculation 2830 * is not in a valid state to perform a commit(), this routine will silently do 2831 * nothing. The state of the specified speculation is transitioned according 2832 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2833 */ 2834 static void 2835 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2836 dtrace_specid_t which) 2837 { 2838 dtrace_speculation_t *spec; 2839 dtrace_buffer_t *src, *dest; 2840 uintptr_t daddr, saddr, dlimit, slimit; 2841 dtrace_speculation_state_t curstate, new = 0; 2842 intptr_t offs; 2843 uint64_t timestamp; 2844 2845 if (which == 0) 2846 return; 2847 2848 if (which > state->dts_nspeculations) { 2849 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2850 return; 2851 } 2852 2853 spec = &state->dts_speculations[which - 1]; 2854 src = &spec->dtsp_buffer[cpu]; 2855 dest = &state->dts_buffer[cpu]; 2856 2857 do { 2858 curstate = spec->dtsp_state; 2859 2860 if (curstate == DTRACESPEC_COMMITTINGMANY) 2861 break; 2862 2863 switch (curstate) { 2864 case DTRACESPEC_INACTIVE: 2865 case DTRACESPEC_DISCARDING: 2866 return; 2867 2868 case DTRACESPEC_COMMITTING: 2869 /* 2870 * This is only possible if we are (a) commit()'ing 2871 * without having done a prior speculate() on this CPU 2872 * and (b) racing with another commit() on a different 2873 * CPU. There's nothing to do -- we just assert that 2874 * our offset is 0. 2875 */ 2876 ASSERT(src->dtb_offset == 0); 2877 return; 2878 2879 case DTRACESPEC_ACTIVE: 2880 new = DTRACESPEC_COMMITTING; 2881 break; 2882 2883 case DTRACESPEC_ACTIVEONE: 2884 /* 2885 * This speculation is active on one CPU. If our 2886 * buffer offset is non-zero, we know that the one CPU 2887 * must be us. Otherwise, we are committing on a 2888 * different CPU from the speculate(), and we must 2889 * rely on being asynchronously cleaned. 
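 * (In the latter case the state becomes COMMITTINGMANY, and the
 * asynchronous cleaner will eventually cross call each CPU to commit
 * its portion of the speculative buffer.)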
2890 */
2891 if (src->dtb_offset != 0) {
2892 new = DTRACESPEC_COMMITTING;
2893 break;
2894 }
2895 /*FALLTHROUGH*/
2896
2897 case DTRACESPEC_ACTIVEMANY:
2898 new = DTRACESPEC_COMMITTINGMANY;
2899 break;
2900
2901 default:
2902 ASSERT(0);
2903 }
2904 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2905 curstate, new) != curstate);
2906
2907 /*
2908 * We have set the state to indicate that we are committing this
2909 * speculation. Now reserve the necessary space in the destination
2910 * buffer.
2911 */
2912 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2913 sizeof (uint64_t), state, NULL)) < 0) {
2914 dtrace_buffer_drop(dest);
2915 goto out;
2916 }
2917
2918 /*
2919 * We have sufficient space to copy the speculative buffer into the
2920 * primary buffer. First, modify the speculative buffer, filling
2921 * in the timestamp of all entries with the current time. The data
2922 * must have the commit() time rather than the time it was traced,
2923 * so that all entries in the primary buffer are in timestamp order.
2924 */
2925 timestamp = dtrace_gethrtime();
2926 saddr = (uintptr_t)src->dtb_tomax;
2927 slimit = saddr + src->dtb_offset;
2928 while (saddr < slimit) {
2929 size_t size;
2930 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2931
2932 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2933 saddr += sizeof (dtrace_epid_t);
2934 continue;
2935 }
2936 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2937 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2938
2939 ASSERT3U(saddr + size, <=, slimit);
2940 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2941 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2942
2943 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2944
2945 saddr += size;
2946 }
2947
2948 /*
2949 * Copy the buffer across. (Note that this is a
2950 * highly suboptimal bcopy(); in the unlikely event that this becomes
2951 * a serious performance issue, a high-performance DTrace-specific
2952 * bcopy() should obviously be invented.)
2953 */
2954 daddr = (uintptr_t)dest->dtb_tomax + offs;
2955 dlimit = daddr + src->dtb_offset;
2956 saddr = (uintptr_t)src->dtb_tomax;
2957
2958 /*
2959 * First, the aligned portion.
2960 */
2961 while (dlimit - daddr >= sizeof (uint64_t)) {
2962 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2963
2964 daddr += sizeof (uint64_t);
2965 saddr += sizeof (uint64_t);
2966 }
2967
2968 /*
2969 * Now any left-over bit...
2970 */
2971 while (dlimit - daddr)
2972 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2973
2974 /*
2975 * Finally, commit the reserved space in the destination buffer.
2976 */
2977 dest->dtb_offset = offs + src->dtb_offset;
2978
2979 out:
2980 /*
2981 * If we're lucky enough to be the only active CPU on this speculation
2982 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2983 */
2984 if (curstate == DTRACESPEC_ACTIVE ||
2985 (curstate == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2986 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2987 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2988
2989 ASSERT(rval == DTRACESPEC_COMMITTING);
2990 }
2991
2992 src->dtb_offset = 0;
2993 src->dtb_xamot_drops += src->dtb_drops;
2994 src->dtb_drops = 0;
2995 }
2996
2997 /*
2998 * This routine discards an active speculation. If the specified speculation
2999 * is not in a valid state to perform a discard(), this routine will silently
3000 * do nothing.
The state of the specified speculation is transitioned 3001 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 3002 */ 3003 static void 3004 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 3005 dtrace_specid_t which) 3006 { 3007 dtrace_speculation_t *spec; 3008 dtrace_speculation_state_t curstate, new = 0; 3009 dtrace_buffer_t *buf; 3010 3011 if (which == 0) 3012 return; 3013 3014 if (which > state->dts_nspeculations) { 3015 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3016 return; 3017 } 3018 3019 spec = &state->dts_speculations[which - 1]; 3020 buf = &spec->dtsp_buffer[cpu]; 3021 3022 do { 3023 curstate = spec->dtsp_state; 3024 3025 switch (curstate) { 3026 case DTRACESPEC_INACTIVE: 3027 case DTRACESPEC_COMMITTINGMANY: 3028 case DTRACESPEC_COMMITTING: 3029 case DTRACESPEC_DISCARDING: 3030 return; 3031 3032 case DTRACESPEC_ACTIVE: 3033 case DTRACESPEC_ACTIVEMANY: 3034 new = DTRACESPEC_DISCARDING; 3035 break; 3036 3037 case DTRACESPEC_ACTIVEONE: 3038 if (buf->dtb_offset != 0) { 3039 new = DTRACESPEC_INACTIVE; 3040 } else { 3041 new = DTRACESPEC_DISCARDING; 3042 } 3043 break; 3044 3045 default: 3046 ASSERT(0); 3047 } 3048 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3049 curstate, new) != curstate); 3050 3051 buf->dtb_offset = 0; 3052 buf->dtb_drops = 0; 3053 } 3054 3055 /* 3056 * Note: not called from probe context. This function is called 3057 * asynchronously from cross call context to clean any speculations that are 3058 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 3059 * transitioned back to the INACTIVE state until all CPUs have cleaned the 3060 * speculation. 3061 */ 3062 static void 3063 dtrace_speculation_clean_here(dtrace_state_t *state) 3064 { 3065 dtrace_icookie_t cookie; 3066 processorid_t cpu = curcpu; 3067 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 3068 dtrace_specid_t i; 3069 3070 cookie = dtrace_interrupt_disable(); 3071 3072 if (dest->dtb_tomax == NULL) { 3073 dtrace_interrupt_enable(cookie); 3074 return; 3075 } 3076 3077 for (i = 0; i < state->dts_nspeculations; i++) { 3078 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3079 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 3080 3081 if (src->dtb_tomax == NULL) 3082 continue; 3083 3084 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 3085 src->dtb_offset = 0; 3086 continue; 3087 } 3088 3089 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 3090 continue; 3091 3092 if (src->dtb_offset == 0) 3093 continue; 3094 3095 dtrace_speculation_commit(state, cpu, i + 1); 3096 } 3097 3098 dtrace_interrupt_enable(cookie); 3099 } 3100 3101 /* 3102 * Note: not called from probe context. This function is called 3103 * asynchronously (and at a regular interval) to clean any speculations that 3104 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 3105 * is work to be done, it cross calls all CPUs to perform that work; 3106 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 3107 * INACTIVE state until they have been cleaned by all CPUs. 
3108 */ 3109 static void 3110 dtrace_speculation_clean(dtrace_state_t *state) 3111 { 3112 int work = 0, rv; 3113 dtrace_specid_t i; 3114 3115 for (i = 0; i < state->dts_nspeculations; i++) { 3116 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3117 3118 ASSERT(!spec->dtsp_cleaning); 3119 3120 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 3121 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 3122 continue; 3123 3124 work++; 3125 spec->dtsp_cleaning = 1; 3126 } 3127 3128 if (!work) 3129 return; 3130 3131 dtrace_xcall(DTRACE_CPUALL, 3132 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 3133 3134 /* 3135 * We now know that all CPUs have committed or discarded their 3136 * speculation buffers, as appropriate. We can now set the state 3137 * to inactive. 3138 */ 3139 for (i = 0; i < state->dts_nspeculations; i++) { 3140 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3141 dtrace_speculation_state_t curstate, new; 3142 3143 if (!spec->dtsp_cleaning) 3144 continue; 3145 3146 curstate = spec->dtsp_state; 3147 ASSERT(curstate == DTRACESPEC_DISCARDING || 3148 curstate == DTRACESPEC_COMMITTINGMANY); 3149 3150 new = DTRACESPEC_INACTIVE; 3151 3152 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, curstate, new); 3153 ASSERT(rv == curstate); 3154 spec->dtsp_cleaning = 0; 3155 } 3156 } 3157 3158 /* 3159 * Called as part of a speculate() to get the speculative buffer associated 3160 * with a given speculation. Returns NULL if the specified speculation is not 3161 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 3162 * the active CPU is not the specified CPU -- the speculation will be 3163 * atomically transitioned into the ACTIVEMANY state. 3164 */ 3165 static dtrace_buffer_t * 3166 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 3167 dtrace_specid_t which) 3168 { 3169 dtrace_speculation_t *spec; 3170 dtrace_speculation_state_t curstate, new = 0; 3171 dtrace_buffer_t *buf; 3172 3173 if (which == 0) 3174 return (NULL); 3175 3176 if (which > state->dts_nspeculations) { 3177 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3178 return (NULL); 3179 } 3180 3181 spec = &state->dts_speculations[which - 1]; 3182 buf = &spec->dtsp_buffer[cpuid]; 3183 3184 do { 3185 curstate = spec->dtsp_state; 3186 3187 switch (curstate) { 3188 case DTRACESPEC_INACTIVE: 3189 case DTRACESPEC_COMMITTINGMANY: 3190 case DTRACESPEC_DISCARDING: 3191 return (NULL); 3192 3193 case DTRACESPEC_COMMITTING: 3194 ASSERT(buf->dtb_offset == 0); 3195 return (NULL); 3196 3197 case DTRACESPEC_ACTIVEONE: 3198 /* 3199 * This speculation is currently active on one CPU. 3200 * Check the offset in the buffer; if it's non-zero, 3201 * that CPU must be us (and we leave the state alone). 3202 * If it's zero, assume that we're starting on a new 3203 * CPU -- and change the state to indicate that the 3204 * speculation is active on more than one CPU. 3205 */ 3206 if (buf->dtb_offset != 0) 3207 return (buf); 3208 3209 new = DTRACESPEC_ACTIVEMANY; 3210 break; 3211 3212 case DTRACESPEC_ACTIVEMANY: 3213 return (buf); 3214 3215 case DTRACESPEC_ACTIVE: 3216 new = DTRACESPEC_ACTIVEONE; 3217 break; 3218 3219 default: 3220 ASSERT(0); 3221 } 3222 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3223 curstate, new) != curstate); 3224 3225 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 3226 return (buf); 3227 } 3228 3229 /* 3230 * Return a string. 
In the event that the user lacks the privilege to access
3231 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3232 * don't fail access checking.
3233 *
3234 * dtrace_dif_variable() uses this routine as a helper for various
3235 * builtin values such as 'execname' and 'probefunc.'
3236 */
3237 uintptr_t
3238 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3239 dtrace_mstate_t *mstate)
3240 {
3241 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3242 uintptr_t ret;
3243 size_t strsz;
3244
3245 /*
3246 * The easy case: this probe is allowed to read all of memory, so
3247 * we can just return this as a vanilla pointer.
3248 */
3249 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3250 return (addr);
3251
3252 /*
3253 * This is the tougher case: we copy the string in question from
3254 * kernel memory into scratch memory and return it that way: this
3255 * ensures that we won't trip up when access checking tests the
3256 * BYREF return value.
3257 */
3258 strsz = dtrace_strlen((char *)addr, size) + 1;
3259
3260 if (mstate->dtms_scratch_ptr + strsz >
3261 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3262 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3263 return (0);
3264 }
3265
3266 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3267 strsz);
3268 ret = mstate->dtms_scratch_ptr;
3269 mstate->dtms_scratch_ptr += strsz;
3270 return (ret);
3271 }
3272
3273 /*
3274 * Return a string from a memory address which is known to have one or
3275 * more concatenated, individually zero-terminated sub-strings.
3276 * In the event that the user lacks the privilege to access
3277 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3278 * don't fail access checking.
3279 *
3280 * dtrace_dif_variable() uses this routine as a helper for various
3281 * builtin values such as 'execargs'.
3282 */
3283 static uintptr_t
3284 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
3285 dtrace_mstate_t *mstate)
3286 {
3287 char *p;
3288 size_t i;
3289 uintptr_t ret;
3290
3291 if (mstate->dtms_scratch_ptr + strsz >
3292 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3293 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3294 return (0);
3295 }
3296
3297 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3298 strsz);
3299
3300 /* Replace sub-string termination characters with a space. */
3301 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
3302 p++, i++)
3303 if (*p == '\0')
3304 *p = ' ';
3305
3306 ret = mstate->dtms_scratch_ptr;
3307 mstate->dtms_scratch_ptr += strsz;
3308 return (ret);
3309 }
3310
3311 /*
3312 * This function implements the DIF emulator's variable lookups. The emulator
3313 * passes a reserved variable identifier and optional built-in array index.
3314 */
3315 static uint64_t
3316 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3317 uint64_t ndx)
3318 {
3319 /*
3320 * If we're accessing one of the uncached arguments, we'll turn this
3321 * into a reference in the args array.
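 * For example, a reference to DIF_VAR_ARG0 + 3 (that is, the built-in
 * arg3) is rewritten here as DIF_VAR_ARGS with ndx == 3.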
3322 */
3323 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3324 ndx = v - DIF_VAR_ARG0;
3325 v = DIF_VAR_ARGS;
3326 }
3327
3328 switch (v) {
3329 case DIF_VAR_ARGS:
3330 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3331 if (ndx >= sizeof (mstate->dtms_arg) /
3332 sizeof (mstate->dtms_arg[0])) {
3333 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3334 dtrace_provider_t *pv;
3335 uint64_t val;
3336
3337 pv = mstate->dtms_probe->dtpr_provider;
3338 if (pv->dtpv_pops.dtps_getargval != NULL)
3339 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3340 mstate->dtms_probe->dtpr_id,
3341 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3342 else
3343 val = dtrace_getarg(ndx, aframes);
3344
3345 /*
3346 * This is regrettably required to keep the compiler
3347 * from tail-optimizing the call to dtrace_getarg().
3348 * The condition always evaluates to true, but the
3349 * compiler has no way of figuring that out a priori.
3350 * (None of this would be necessary if the compiler
3351 * could be relied upon to _always_ tail-optimize
3352 * the call to dtrace_getarg() -- but it can't.)
3353 */
3354 if (mstate->dtms_probe != NULL)
3355 return (val);
3356
3357 ASSERT(0);
3358 }
3359
3360 return (mstate->dtms_arg[ndx]);
3361
3362 #ifdef illumos
3363 case DIF_VAR_UREGS: {
3364 klwp_t *lwp;
3365
3366 if (!dtrace_priv_proc(state))
3367 return (0);
3368
3369 if ((lwp = curthread->t_lwp) == NULL) {
3370 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3371 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
3372 return (0);
3373 }
3374
3375 return (dtrace_getreg(lwp->lwp_regs, ndx));
3377 }
3378 #else
3379 case DIF_VAR_UREGS: {
3380 struct trapframe *tframe;
3381
3382 if (!dtrace_priv_proc(state))
3383 return (0);
3384
3385 if ((tframe = curthread->td_frame) == NULL) {
3386 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3387 cpu_core[curcpu].cpuc_dtrace_illval = 0;
3388 return (0);
3389 }
3390
3391 return (dtrace_getreg(tframe, ndx));
3392 }
3393 #endif
3394
3395 case DIF_VAR_CURTHREAD:
3396 if (!dtrace_priv_proc(state))
3397 return (0);
3398 return ((uint64_t)(uintptr_t)curthread);
3399
3400 case DIF_VAR_TIMESTAMP:
3401 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3402 mstate->dtms_timestamp = dtrace_gethrtime();
3403 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3404 }
3405 return (mstate->dtms_timestamp);
3406
3407 case DIF_VAR_VTIMESTAMP:
3408 ASSERT(dtrace_vtime_references != 0);
3409 return (curthread->t_dtrace_vtime);
3410
3411 case DIF_VAR_WALLTIMESTAMP:
3412 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3413 mstate->dtms_walltimestamp = dtrace_gethrestime();
3414 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3415 }
3416 return (mstate->dtms_walltimestamp);
3417
3418 #ifdef illumos
3419 case DIF_VAR_IPL:
3420 if (!dtrace_priv_kernel(state))
3421 return (0);
3422 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3423 mstate->dtms_ipl = dtrace_getipl();
3424 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3425 }
3426 return (mstate->dtms_ipl);
3427 #endif
3428
3429 case DIF_VAR_EPID:
3430 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3431 return (mstate->dtms_epid);
3432
3433 case DIF_VAR_ID:
3434 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3435 return (mstate->dtms_probe->dtpr_id);
3436
3437 case DIF_VAR_STACKDEPTH:
3438 if (!dtrace_priv_kernel(state))
3439 return (0);
3440 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3441 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3442
3443 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3444
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3445 } 3446 return (mstate->dtms_stackdepth); 3447 3448 case DIF_VAR_USTACKDEPTH: 3449 if (!dtrace_priv_proc(state)) 3450 return (0); 3451 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3452 /* 3453 * See comment in DIF_VAR_PID. 3454 */ 3455 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3456 CPU_ON_INTR(CPU)) { 3457 mstate->dtms_ustackdepth = 0; 3458 } else { 3459 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3460 mstate->dtms_ustackdepth = 3461 dtrace_getustackdepth(); 3462 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3463 } 3464 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3465 } 3466 return (mstate->dtms_ustackdepth); 3467 3468 case DIF_VAR_CALLER: 3469 if (!dtrace_priv_kernel(state)) 3470 return (0); 3471 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3472 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3473 3474 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3475 /* 3476 * If this is an unanchored probe, we are 3477 * required to go through the slow path: 3478 * dtrace_caller() only guarantees correct 3479 * results for anchored probes. 3480 */ 3481 pc_t caller[2] = {0, 0}; 3482 3483 dtrace_getpcstack(caller, 2, aframes, 3484 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3485 mstate->dtms_caller = caller[1]; 3486 } else if ((mstate->dtms_caller = 3487 dtrace_caller(aframes)) == -1) { 3488 /* 3489 * We have failed to do this the quick way; 3490 * we must resort to the slower approach of 3491 * calling dtrace_getpcstack(). 3492 */ 3493 pc_t caller = 0; 3494 3495 dtrace_getpcstack(&caller, 1, aframes, NULL); 3496 mstate->dtms_caller = caller; 3497 } 3498 3499 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3500 } 3501 return (mstate->dtms_caller); 3502 3503 case DIF_VAR_UCALLER: 3504 if (!dtrace_priv_proc(state)) 3505 return (0); 3506 3507 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3508 uint64_t ustack[3]; 3509 3510 /* 3511 * dtrace_getupcstack() fills in the first uint64_t 3512 * with the current PID. The second uint64_t will 3513 * be the program counter at user-level. The third 3514 * uint64_t will contain the caller, which is what 3515 * we're after. 3516 */ 3517 ustack[2] = 0; 3518 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3519 dtrace_getupcstack(ustack, 3); 3520 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3521 mstate->dtms_ucaller = ustack[2]; 3522 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3523 } 3524 3525 return (mstate->dtms_ucaller); 3526 3527 case DIF_VAR_PROBEPROV: 3528 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3529 return (dtrace_dif_varstr( 3530 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3531 state, mstate)); 3532 3533 case DIF_VAR_PROBEMOD: 3534 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3535 return (dtrace_dif_varstr( 3536 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3537 state, mstate)); 3538 3539 case DIF_VAR_PROBEFUNC: 3540 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3541 return (dtrace_dif_varstr( 3542 (uintptr_t)mstate->dtms_probe->dtpr_func, 3543 state, mstate)); 3544 3545 case DIF_VAR_PROBENAME: 3546 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3547 return (dtrace_dif_varstr( 3548 (uintptr_t)mstate->dtms_probe->dtpr_name, 3549 state, mstate)); 3550 3551 case DIF_VAR_PID: 3552 if (!dtrace_priv_proc(state)) 3553 return (0); 3554 3555 #ifdef illumos 3556 /* 3557 * Note that we are assuming that an unanchored probe is 3558 * always due to a high-level interrupt. (And we're assuming 3559 * that there is only a single high level interrupt.) 
3560 */
3561 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3562 return (pid0.pid_id);
3563
3564 /*
3565 * It is always safe to dereference one's own t_procp pointer:
3566 * it always points to a valid, allocated proc structure.
3567 * Further, it is always safe to dereference the p_pidp member
3568 * of one's own proc structure. (These are truisms because
3569 * threads and processes don't clean up their own state --
3570 * they leave that task to whomever reaps them.)
3571 */
3572 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3573 #else
3574 return ((uint64_t)curproc->p_pid);
3575 #endif
3576
3577 case DIF_VAR_PPID:
3578 if (!dtrace_priv_proc(state))
3579 return (0);
3580
3581 #ifdef illumos
3582 /*
3583 * See comment in DIF_VAR_PID.
3584 */
3585 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3586 return (pid0.pid_id);
3587
3588 /*
3589 * It is always safe to dereference one's own t_procp pointer:
3590 * it always points to a valid, allocated proc structure.
3591 * (This is true because threads don't clean up their own
3592 * state -- they leave that task to whomever reaps them.)
3593 */
3594 return ((uint64_t)curthread->t_procp->p_ppid);
3595 #else
3596 if (curproc->p_pid == proc0.p_pid)
3597 return (curproc->p_pid);
3598 else
3599 return (curproc->p_pptr->p_pid);
3600 #endif
3601
3602 case DIF_VAR_TID:
3603 #ifdef illumos
3604 /*
3605 * See comment in DIF_VAR_PID.
3606 */
3607 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3608 return (0);
3609 #endif
3610
3611 return ((uint64_t)curthread->t_tid);
3612
3613 case DIF_VAR_EXECARGS: {
3614 struct pargs *p_args = curthread->td_proc->p_args;
3615
3616 if (p_args == NULL)
3617 return (0);
3618
3619 return (dtrace_dif_varstrz(
3620 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3621 }
3622
3623 case DIF_VAR_EXECNAME:
3624 #ifdef illumos
3625 if (!dtrace_priv_proc(state))
3626 return (0);
3627
3628 /*
3629 * See comment in DIF_VAR_PID.
3630 */
3631 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3632 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3633
3634 /*
3635 * It is always safe to dereference one's own t_procp pointer:
3636 * it always points to a valid, allocated proc structure.
3637 * (This is true because threads don't clean up their own
3638 * state -- they leave that task to whomever reaps them.)
3639 */
3640 return (dtrace_dif_varstr(
3641 (uintptr_t)curthread->t_procp->p_user.u_comm,
3642 state, mstate));
3643 #else
3644 return (dtrace_dif_varstr(
3645 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3646 #endif
3647
3648 case DIF_VAR_ZONENAME:
3649 #ifdef illumos
3650 if (!dtrace_priv_proc(state))
3651 return (0);
3652
3653 /*
3654 * See comment in DIF_VAR_PID.
3655 */
3656 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3657 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3658
3659 /*
3660 * It is always safe to dereference one's own t_procp pointer:
3661 * it always points to a valid, allocated proc structure.
3662 * (This is true because threads don't clean up their own
3663 * state -- they leave that task to whomever reaps them.)
3664 */
3665 return (dtrace_dif_varstr(
3666 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3667 state, mstate));
3668 #elif defined(__FreeBSD__)
3669 /*
3670 * On FreeBSD, we provide compatibility with zonename by falling
3671 * through into jailname.
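 * (That is, a D script reading `zonename' on FreeBSD receives the name
 * of the current thread's jail, td_ucred->cr_prison->pr_name, just as
 * the jailname variable below does.)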
3672 */ 3673 case DIF_VAR_JAILNAME: 3674 if (!dtrace_priv_kernel(state)) 3675 return (0); 3676 3677 return (dtrace_dif_varstr( 3678 (uintptr_t)curthread->td_ucred->cr_prison->pr_name, 3679 state, mstate)); 3680 3681 case DIF_VAR_JID: 3682 if (!dtrace_priv_kernel(state)) 3683 return (0); 3684 3685 return ((uint64_t)curthread->td_ucred->cr_prison->pr_id); 3686 #else 3687 return (0); 3688 #endif 3689 3690 case DIF_VAR_UID: 3691 if (!dtrace_priv_proc(state)) 3692 return (0); 3693 3694 #ifdef illumos 3695 /* 3696 * See comment in DIF_VAR_PID. 3697 */ 3698 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3699 return ((uint64_t)p0.p_cred->cr_uid); 3700 3701 /* 3702 * It is always safe to dereference one's own t_procp pointer: 3703 * it always points to a valid, allocated proc structure. 3704 * (This is true because threads don't clean up their own 3705 * state -- they leave that task to whomever reaps them.) 3706 * 3707 * Additionally, it is safe to dereference one's own process 3708 * credential, since this is never NULL after process birth. 3709 */ 3710 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3711 #else 3712 return ((uint64_t)curthread->td_ucred->cr_uid); 3713 #endif 3714 3715 case DIF_VAR_GID: 3716 if (!dtrace_priv_proc(state)) 3717 return (0); 3718 3719 #ifdef illumos 3720 /* 3721 * See comment in DIF_VAR_PID. 3722 */ 3723 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3724 return ((uint64_t)p0.p_cred->cr_gid); 3725 3726 /* 3727 * It is always safe to dereference one's own t_procp pointer: 3728 * it always points to a valid, allocated proc structure. 3729 * (This is true because threads don't clean up their own 3730 * state -- they leave that task to whomever reaps them.) 3731 * 3732 * Additionally, it is safe to dereference one's own process 3733 * credential, since this is never NULL after process birth. 3734 */ 3735 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3736 #else 3737 return ((uint64_t)curthread->td_ucred->cr_gid); 3738 #endif 3739 3740 case DIF_VAR_ERRNO: { 3741 #ifdef illumos 3742 klwp_t *lwp; 3743 if (!dtrace_priv_proc(state)) 3744 return (0); 3745 3746 /* 3747 * See comment in DIF_VAR_PID. 3748 */ 3749 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3750 return (0); 3751 3752 /* 3753 * It is always safe to dereference one's own t_lwp pointer in 3754 * the event that this pointer is non-NULL. (This is true 3755 * because threads and lwps don't clean up their own state -- 3756 * they leave that task to whomever reaps them.) 3757 */ 3758 if ((lwp = curthread->t_lwp) == NULL) 3759 return (0); 3760 3761 return ((uint64_t)lwp->lwp_errno); 3762 #else 3763 return (curthread->td_errno); 3764 #endif 3765 } 3766 #ifndef illumos 3767 case DIF_VAR_CPU: { 3768 return curcpu; 3769 } 3770 #endif 3771 default: 3772 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3773 return (0); 3774 } 3775 } 3776 3777 3778 typedef enum dtrace_json_state { 3779 DTRACE_JSON_REST = 1, 3780 DTRACE_JSON_OBJECT, 3781 DTRACE_JSON_STRING, 3782 DTRACE_JSON_STRING_ESCAPE, 3783 DTRACE_JSON_STRING_ESCAPE_UNICODE, 3784 DTRACE_JSON_COLON, 3785 DTRACE_JSON_COMMA, 3786 DTRACE_JSON_VALUE, 3787 DTRACE_JSON_IDENTIFIER, 3788 DTRACE_JSON_NUMBER, 3789 DTRACE_JSON_NUMBER_FRAC, 3790 DTRACE_JSON_NUMBER_EXP, 3791 DTRACE_JSON_COLLECT_OBJECT 3792 } dtrace_json_state_t; 3793 3794 /* 3795 * This function possesses just enough knowledge about JSON to extract a single 3796 * value from a JSON string and store it in the scratch buffer. 
It is able 3797 * to extract nested object values, and members of arrays by index. 3798 * 3799 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to 3800 * be looked up as we descend into the object tree. e.g. 3801 * 3802 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL 3803 * with nelems = 5. 3804 * 3805 * The run time of this function must be bounded above by strsize to limit the 3806 * amount of work done in probe context. As such, it is implemented as a 3807 * simple state machine, reading one character at a time using safe loads 3808 * until we find the requested element, hit a parsing error or run off the 3809 * end of the object or string. 3810 * 3811 * As there is no way for a subroutine to return an error without interrupting 3812 * clause execution, we simply return NULL in the event of a missing key or any 3813 * other error condition. Each NULL return in this function is commented with 3814 * the error condition it represents -- parsing or otherwise. 3815 * 3816 * The set of states for the state machine closely matches the JSON 3817 * specification (http://json.org/). Briefly: 3818 * 3819 * DTRACE_JSON_REST: 3820 * Skip whitespace until we find either a top-level Object, moving 3821 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. 3822 * 3823 * DTRACE_JSON_OBJECT: 3824 * Locate the next key String in an Object. Sets a flag to denote 3825 * the next String as a key string and moves to DTRACE_JSON_STRING. 3826 * 3827 * DTRACE_JSON_COLON: 3828 * Skip whitespace until we find the colon that separates key Strings 3829 * from their values. Once found, move to DTRACE_JSON_VALUE. 3830 * 3831 * DTRACE_JSON_VALUE: 3832 * Detects the type of the next value (String, Number, Identifier, Object 3833 * or Array) and routes to the states that process that type. Here we also 3834 * deal with the element selector list if we are requested to traverse down 3835 * into the object tree. 3836 * 3837 * DTRACE_JSON_COMMA: 3838 * Skip whitespace until we find the comma that separates key-value pairs 3839 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays 3840 * (similarly DTRACE_JSON_VALUE). All following literal value processing 3841 * states return to this state at the end of their value, unless otherwise 3842 * noted. 3843 * 3844 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: 3845 * Processes a Number literal from the JSON, including any exponent 3846 * component that may be present. Numbers are returned as strings, which 3847 * may be passed to strtoll() if an integer is required. 3848 * 3849 * DTRACE_JSON_IDENTIFIER: 3850 * Processes a "true", "false" or "null" literal in the JSON. 3851 * 3852 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, 3853 * DTRACE_JSON_STRING_ESCAPE_UNICODE: 3854 * Processes a String literal from the JSON, whether the String denotes 3855 * a key, a value or part of a larger Object. Handles all escape sequences 3856 * present in the specification, including four-digit unicode characters, 3857 * but merely includes the escape sequence without converting it to the 3858 * actual escaped character. If the String is flagged as a key, we 3859 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. 3860 * 3861 * DTRACE_JSON_COLLECT_OBJECT: 3862 * This state collects an entire Object (or Array), correctly handling 3863 * embedded strings. If the full element selector list matches this nested 3864 * object, we return the Object in full as a string. 
If not, we use this
3865 * state to skip to the next value at this level and continue processing.
3866 *
3867 * NOTE: This function uses various macros from strtolctype.h to manipulate
3868 * digit values, etc. -- these have all been checked to ensure they make
3869 * no additional function calls.
3870 */
3871 static char *
3872 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3873 char *dest)
3874 {
3875 dtrace_json_state_t state = DTRACE_JSON_REST;
3876 int64_t array_elem = INT64_MIN;
3877 int64_t array_pos = 0;
3878 uint8_t escape_unicount = 0;
3879 boolean_t string_is_key = B_FALSE;
3880 boolean_t collect_object = B_FALSE;
3881 boolean_t found_key = B_FALSE;
3882 boolean_t in_array = B_FALSE;
3883 uint32_t braces = 0, brackets = 0;
3884 char *elem = elemlist;
3885 char *dd = dest;
3886 uintptr_t cur;
3887
3888 for (cur = json; cur < json + size; cur++) {
3889 char cc = dtrace_load8(cur);
3890 if (cc == '\0')
3891 return (NULL);
3892
3893 switch (state) {
3894 case DTRACE_JSON_REST:
3895 if (isspace(cc))
3896 break;
3897
3898 if (cc == '{') {
3899 state = DTRACE_JSON_OBJECT;
3900 break;
3901 }
3902
3903 if (cc == '[') {
3904 in_array = B_TRUE;
3905 array_pos = 0;
3906 array_elem = dtrace_strtoll(elem, 10, size);
3907 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3908 state = DTRACE_JSON_VALUE;
3909 break;
3910 }
3911
3912 /*
3913 * ERROR: expected to find a top-level object or array.
3914 */
3915 return (NULL);
3916 case DTRACE_JSON_OBJECT:
3917 if (isspace(cc))
3918 break;
3919
3920 if (cc == '"') {
3921 state = DTRACE_JSON_STRING;
3922 string_is_key = B_TRUE;
3923 break;
3924 }
3925
3926 /*
3927 * ERROR: either the object did not start with a key
3928 * string, or we've run off the end of the object
3929 * without finding the requested key.
3930 */
3931 return (NULL);
3932 case DTRACE_JSON_STRING:
3933 if (cc == '\\') {
3934 *dd++ = '\\';
3935 state = DTRACE_JSON_STRING_ESCAPE;
3936 break;
3937 }
3938
3939 if (cc == '"') {
3940 if (collect_object) {
3941 /*
3942 * We don't reset the dest here, as
3943 * the string is part of a larger
3944 * object being collected.
3945 */
3946 *dd++ = cc;
3947 collect_object = B_FALSE;
3948 state = DTRACE_JSON_COLLECT_OBJECT;
3949 break;
3950 }
3951 *dd = '\0';
3952 dd = dest; /* reset string buffer */
3953 if (string_is_key) {
3954 if (dtrace_strncmp(dest, elem,
3955 size) == 0)
3956 found_key = B_TRUE;
3957 } else if (found_key) {
3958 if (nelems > 1) {
3959 /*
3960 * We expected an object, not
3961 * this string.
3962 */
3963 return (NULL);
3964 }
3965 return (dest);
3966 }
3967 state = string_is_key ? DTRACE_JSON_COLON :
3968 DTRACE_JSON_COMMA;
3969 string_is_key = B_FALSE;
3970 break;
3971 }
3972
3973 *dd++ = cc;
3974 break;
3975 case DTRACE_JSON_STRING_ESCAPE:
3976 *dd++ = cc;
3977 if (cc == 'u') {
3978 escape_unicount = 0;
3979 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3980 } else {
3981 state = DTRACE_JSON_STRING;
3982 }
3983 break;
3984 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3985 if (!isxdigit(cc)) {
3986 /*
3987 * ERROR: invalid unicode escape, expected
3988 * four valid hexadecimal digits.
3989 */
3990 return (NULL);
3991 }
3992
3993 *dd++ = cc;
3994 if (++escape_unicount == 4)
3995 state = DTRACE_JSON_STRING;
3996 break;
3997 case DTRACE_JSON_COLON:
3998 if (isspace(cc))
3999 break;
4000
4001 if (cc == ':') {
4002 state = DTRACE_JSON_VALUE;
4003 break;
4004 }
4005
4006 /*
4007 * ERROR: expected a colon.
4008 */
4009 return (NULL);
4010 case DTRACE_JSON_COMMA:
4011 if (isspace(cc))
4012 break;
4013
4014 if (cc == ',') {
4015 if (in_array) {
4016 state = DTRACE_JSON_VALUE;
4017 if (++array_pos == array_elem)
4018 found_key = B_TRUE;
4019 } else {
4020 state = DTRACE_JSON_OBJECT;
4021 }
4022 break;
4023 }
4024
4025 /*
4026 * ERROR: either we hit an unexpected character, or
4027 * we reached the end of the object or array without
4028 * finding the requested key.
4029 */
4030 return (NULL);
4031 case DTRACE_JSON_IDENTIFIER:
4032 if (islower(cc)) {
4033 *dd++ = cc;
4034 break;
4035 }
4036
4037 *dd = '\0';
4038 dd = dest; /* reset string buffer */
4039
4040 if (dtrace_strncmp(dest, "true", 5) == 0 ||
4041 dtrace_strncmp(dest, "false", 6) == 0 ||
4042 dtrace_strncmp(dest, "null", 5) == 0) {
4043 if (found_key) {
4044 if (nelems > 1) {
4045 /*
4046 * ERROR: We expected an object,
4047 * not this identifier.
4048 */
4049 return (NULL);
4050 }
4051 return (dest);
4052 } else {
4053 cur--;
4054 state = DTRACE_JSON_COMMA;
4055 break;
4056 }
4057 }
4058
4059 /*
4060 * ERROR: we did not recognize the identifier as one
4061 * of those in the JSON specification.
4062 */
4063 return (NULL);
4064 case DTRACE_JSON_NUMBER:
4065 if (cc == '.') {
4066 *dd++ = cc;
4067 state = DTRACE_JSON_NUMBER_FRAC;
4068 break;
4069 }
4070
4071 if (cc == 'x' || cc == 'X') {
4072 /*
4073 * ERROR: specification explicitly excludes
4074 * hexadecimal or octal numbers.
4075 */
4076 return (NULL);
4077 }
4078
4079 /* FALLTHRU */
4080 case DTRACE_JSON_NUMBER_FRAC:
4081 if (cc == 'e' || cc == 'E') {
4082 *dd++ = cc;
4083 state = DTRACE_JSON_NUMBER_EXP;
4084 break;
4085 }
4086
4087 if (cc == '+' || cc == '-') {
4088 /*
4089 * ERROR: expect sign as part of exponent only.
4090 */
4091 return (NULL);
4092 }
4093 /* FALLTHRU */
4094 case DTRACE_JSON_NUMBER_EXP:
4095 if (isdigit(cc) || cc == '+' || cc == '-') {
4096 *dd++ = cc;
4097 break;
4098 }
4099
4100 *dd = '\0';
4101 dd = dest; /* reset string buffer */
4102 if (found_key) {
4103 if (nelems > 1) {
4104 /*
4105 * ERROR: We expected an object, not
4106 * this number.
4107 */
4108 return (NULL);
4109 }
4110 return (dest);
4111 }
4112
4113 cur--;
4114 state = DTRACE_JSON_COMMA;
4115 break;
4116 case DTRACE_JSON_VALUE:
4117 if (isspace(cc))
4118 break;
4119
4120 if (cc == '{' || cc == '[') {
4121 if (nelems > 1 && found_key) {
4122 in_array = cc == '[' ? B_TRUE : B_FALSE;
4123 /*
4124 * If our element selector directs us
4125 * to descend into this nested object,
4126 * then move to the next selector
4127 * element in the list and restart the
4128 * state machine.
4129 */
4130 while (*elem != '\0')
4131 elem++;
4132 elem++; /* skip the inter-element NUL */
4133 nelems--;
4134 dd = dest;
4135 if (in_array) {
4136 state = DTRACE_JSON_VALUE;
4137 array_pos = 0;
4138 array_elem = dtrace_strtoll(
4139 elem, 10, size);
4140 found_key = array_elem == 0 ?
4141 B_TRUE : B_FALSE;
4142 } else {
4143 found_key = B_FALSE;
4144 state = DTRACE_JSON_OBJECT;
4145 }
4146 break;
4147 }
4148
4149 /*
4150 * Otherwise, we wish to either skip this
4151 * nested object or return it in full.
4152 */
4153 if (cc == '[')
4154 brackets = 1;
4155 else
4156 braces = 1;
4157 *dd++ = cc;
4158 state = DTRACE_JSON_COLLECT_OBJECT;
4159 break;
4160 }
4161
4162 if (cc == '"') {
4163 state = DTRACE_JSON_STRING;
4164 break;
4165 }
4166
4167 if (islower(cc)) {
4168 /*
4169 * Here we deal with true, false and null.
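 * (For example, given the illustrative input {"a": true}, the
 * 't' is stored here and the remaining characters are gathered
 * in DTRACE_JSON_IDENTIFIER.)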
4170 */ 4171 *dd++ = cc; 4172 state = DTRACE_JSON_IDENTIFIER; 4173 break; 4174 } 4175 4176 if (cc == '-' || isdigit(cc)) { 4177 *dd++ = cc; 4178 state = DTRACE_JSON_NUMBER; 4179 break; 4180 } 4181 4182 /* 4183 * ERROR: unexpected character at start of value. 4184 */ 4185 return (NULL); 4186 case DTRACE_JSON_COLLECT_OBJECT: 4187 if (cc == '\0') 4188 /* 4189 * ERROR: unexpected end of input. 4190 */ 4191 return (NULL); 4192 4193 *dd++ = cc; 4194 if (cc == '"') { 4195 collect_object = B_TRUE; 4196 state = DTRACE_JSON_STRING; 4197 break; 4198 } 4199 4200 if (cc == ']') { 4201 if (brackets-- == 0) { 4202 /* 4203 * ERROR: unbalanced brackets. 4204 */ 4205 return (NULL); 4206 } 4207 } else if (cc == '}') { 4208 if (braces-- == 0) { 4209 /* 4210 * ERROR: unbalanced braces. 4211 */ 4212 return (NULL); 4213 } 4214 } else if (cc == '{') { 4215 braces++; 4216 } else if (cc == '[') { 4217 brackets++; 4218 } 4219 4220 if (brackets == 0 && braces == 0) { 4221 if (found_key) { 4222 *dd = '\0'; 4223 return (dest); 4224 } 4225 dd = dest; /* reset string buffer */ 4226 state = DTRACE_JSON_COMMA; 4227 } 4228 break; 4229 } 4230 } 4231 return (NULL); 4232 } 4233 4234 /* 4235 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 4236 * Notice that we don't bother validating the proper number of arguments or 4237 * their types in the tuple stack. This isn't needed because all argument 4238 * interpretation is safe because of our load safety -- the worst that can 4239 * happen is that a bogus program can obtain bogus results. 4240 */ 4241 static void 4242 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 4243 dtrace_key_t *tupregs, int nargs, 4244 dtrace_mstate_t *mstate, dtrace_state_t *state) 4245 { 4246 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4247 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4248 dtrace_vstate_t *vstate = &state->dts_vstate; 4249 4250 #ifdef illumos 4251 union { 4252 mutex_impl_t mi; 4253 uint64_t mx; 4254 } m; 4255 4256 union { 4257 krwlock_t ri; 4258 uintptr_t rw; 4259 } r; 4260 #else 4261 struct thread *lowner; 4262 union { 4263 struct lock_object *li; 4264 uintptr_t lx; 4265 } l; 4266 #endif 4267 4268 switch (subr) { 4269 case DIF_SUBR_RAND: 4270 regs[rd] = dtrace_xoroshiro128_plus_next( 4271 state->dts_rstate[curcpu]); 4272 break; 4273 4274 #ifdef illumos 4275 case DIF_SUBR_MUTEX_OWNED: 4276 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4277 mstate, vstate)) { 4278 regs[rd] = 0; 4279 break; 4280 } 4281 4282 m.mx = dtrace_load64(tupregs[0].dttk_value); 4283 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 4284 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 4285 else 4286 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 4287 break; 4288 4289 case DIF_SUBR_MUTEX_OWNER: 4290 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4291 mstate, vstate)) { 4292 regs[rd] = 0; 4293 break; 4294 } 4295 4296 m.mx = dtrace_load64(tupregs[0].dttk_value); 4297 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 4298 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 4299 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 4300 else 4301 regs[rd] = 0; 4302 break; 4303 4304 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4305 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4306 mstate, vstate)) { 4307 regs[rd] = 0; 4308 break; 4309 } 4310 4311 m.mx = dtrace_load64(tupregs[0].dttk_value); 4312 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 4313 break; 4314 4315 case DIF_SUBR_MUTEX_TYPE_SPIN: 4316 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4317 mstate, 
vstate)) { 4318 regs[rd] = 0; 4319 break; 4320 } 4321 4322 m.mx = dtrace_load64(tupregs[0].dttk_value); 4323 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 4324 break; 4325 4326 case DIF_SUBR_RW_READ_HELD: { 4327 uintptr_t tmp; 4328 4329 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4330 mstate, vstate)) { 4331 regs[rd] = 0; 4332 break; 4333 } 4334 4335 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4336 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 4337 break; 4338 } 4339 4340 case DIF_SUBR_RW_WRITE_HELD: 4341 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4342 mstate, vstate)) { 4343 regs[rd] = 0; 4344 break; 4345 } 4346 4347 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4348 regs[rd] = _RW_WRITE_HELD(&r.ri); 4349 break; 4350 4351 case DIF_SUBR_RW_ISWRITER: 4352 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4353 mstate, vstate)) { 4354 regs[rd] = 0; 4355 break; 4356 } 4357 4358 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4359 regs[rd] = _RW_ISWRITER(&r.ri); 4360 break; 4361 4362 #else /* !illumos */ 4363 case DIF_SUBR_MUTEX_OWNED: 4364 if (!dtrace_canload(tupregs[0].dttk_value, 4365 sizeof (struct lock_object), mstate, vstate)) { 4366 regs[rd] = 0; 4367 break; 4368 } 4369 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4370 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4371 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4372 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4373 break; 4374 4375 case DIF_SUBR_MUTEX_OWNER: 4376 if (!dtrace_canload(tupregs[0].dttk_value, 4377 sizeof (struct lock_object), mstate, vstate)) { 4378 regs[rd] = 0; 4379 break; 4380 } 4381 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4382 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4383 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4384 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4385 regs[rd] = (uintptr_t)lowner; 4386 break; 4387 4388 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4389 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4390 mstate, vstate)) { 4391 regs[rd] = 0; 4392 break; 4393 } 4394 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4395 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4396 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SLEEPLOCK) != 0; 4397 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4398 break; 4399 4400 case DIF_SUBR_MUTEX_TYPE_SPIN: 4401 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4402 mstate, vstate)) { 4403 regs[rd] = 0; 4404 break; 4405 } 4406 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4407 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4408 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 4409 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4410 break; 4411 4412 case DIF_SUBR_RW_READ_HELD: 4413 case DIF_SUBR_SX_SHARED_HELD: 4414 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4415 mstate, vstate)) { 4416 regs[rd] = 0; 4417 break; 4418 } 4419 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4420 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4421 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4422 lowner == NULL; 4423 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4424 break; 4425 4426 case DIF_SUBR_RW_WRITE_HELD: 4427 case DIF_SUBR_SX_EXCLUSIVE_HELD: 4428 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4429 mstate, vstate)) { 4430 regs[rd] = 0; 4431 break; 4432 } 4433 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4434 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4435 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4436 lowner != NULL; 4437 
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4438 break; 4439 4440 case DIF_SUBR_RW_ISWRITER: 4441 case DIF_SUBR_SX_ISEXCLUSIVE: 4442 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4443 mstate, vstate)) { 4444 regs[rd] = 0; 4445 break; 4446 } 4447 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4448 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4449 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4450 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4451 regs[rd] = (lowner == curthread); 4452 break; 4453 #endif /* illumos */ 4454 4455 case DIF_SUBR_BCOPY: { 4456 /* 4457 * We need to be sure that the destination is in the scratch 4458 * region -- no other region is allowed. 4459 */ 4460 uintptr_t src = tupregs[0].dttk_value; 4461 uintptr_t dest = tupregs[1].dttk_value; 4462 size_t size = tupregs[2].dttk_value; 4463 4464 if (!dtrace_inscratch(dest, size, mstate)) { 4465 *flags |= CPU_DTRACE_BADADDR; 4466 *illval = regs[rd]; 4467 break; 4468 } 4469 4470 if (!dtrace_canload(src, size, mstate, vstate)) { 4471 regs[rd] = 0; 4472 break; 4473 } 4474 4475 dtrace_bcopy((void *)src, (void *)dest, size); 4476 break; 4477 } 4478 4479 case DIF_SUBR_ALLOCA: 4480 case DIF_SUBR_COPYIN: { 4481 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4482 uint64_t size = 4483 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 4484 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 4485 4486 /* 4487 * This action doesn't require any credential checks since 4488 * probes will not activate in user contexts to which the 4489 * enabling user does not have permissions. 4490 */ 4491 4492 /* 4493 * Rounding up the user allocation size could have overflowed 4494 * a large, bogus allocation (like -1ULL) to 0. 4495 */ 4496 if (scratch_size < size || 4497 !DTRACE_INSCRATCH(mstate, scratch_size)) { 4498 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4499 regs[rd] = 0; 4500 break; 4501 } 4502 4503 if (subr == DIF_SUBR_COPYIN) { 4504 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4505 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4506 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4507 } 4508 4509 mstate->dtms_scratch_ptr += scratch_size; 4510 regs[rd] = dest; 4511 break; 4512 } 4513 4514 case DIF_SUBR_COPYINTO: { 4515 uint64_t size = tupregs[1].dttk_value; 4516 uintptr_t dest = tupregs[2].dttk_value; 4517 4518 /* 4519 * This action doesn't require any credential checks since 4520 * probes will not activate in user contexts to which the 4521 * enabling user does not have permissions. 4522 */ 4523 if (!dtrace_inscratch(dest, size, mstate)) { 4524 *flags |= CPU_DTRACE_BADADDR; 4525 *illval = regs[rd]; 4526 break; 4527 } 4528 4529 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4530 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4531 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4532 break; 4533 } 4534 4535 case DIF_SUBR_COPYINSTR: { 4536 uintptr_t dest = mstate->dtms_scratch_ptr; 4537 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4538 4539 if (nargs > 1 && tupregs[1].dttk_value < size) 4540 size = tupregs[1].dttk_value + 1; 4541 4542 /* 4543 * This action doesn't require any credential checks since 4544 * probes will not activate in user contexts to which the 4545 * enabling user does not have permissions. 
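 *
 * A typical use from D -- an illustrative sketch only, assuming
 * the syscall provider -- copies a user string at system call
 * entry:
 *
 *	syscall::open:entry { printf("%s", copyinstr(arg0)); }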
4546 */
4547 if (!DTRACE_INSCRATCH(mstate, size)) {
4548 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4549 regs[rd] = 0;
4550 break;
4551 }
4552
4553 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4554 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4555 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4556
4557 ((char *)dest)[size - 1] = '\0';
4558 mstate->dtms_scratch_ptr += size;
4559 regs[rd] = dest;
4560 break;
4561 }
4562
4563 #ifdef illumos
4564 case DIF_SUBR_MSGSIZE:
4565 case DIF_SUBR_MSGDSIZE: {
4566 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4567 uintptr_t wptr, rptr;
4568 size_t count = 0;
4569 int cont = 0;
4570
4571 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
4572
4573 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4574 vstate)) {
4575 regs[rd] = 0;
4576 break;
4577 }
4578
4579 wptr = dtrace_loadptr(baddr +
4580 offsetof(mblk_t, b_wptr));
4581
4582 rptr = dtrace_loadptr(baddr +
4583 offsetof(mblk_t, b_rptr));
4584
4585 if (wptr < rptr) {
4586 *flags |= CPU_DTRACE_BADADDR;
4587 *illval = tupregs[0].dttk_value;
4588 break;
4589 }
4590
4591 daddr = dtrace_loadptr(baddr +
4592 offsetof(mblk_t, b_datap));
4593
4594 baddr = dtrace_loadptr(baddr +
4595 offsetof(mblk_t, b_cont));
4596
4597 /*
4598 * We want to guard against denial-of-service here,
4599 * so we're only going to search the list for
4600 * dtrace_msgdsize_max mblks.
4601 */
4602 if (cont++ > dtrace_msgdsize_max) {
4603 *flags |= CPU_DTRACE_ILLOP;
4604 break;
4605 }
4606
4607 if (subr == DIF_SUBR_MSGDSIZE) {
4608 if (dtrace_load8(daddr +
4609 offsetof(dblk_t, db_type)) != M_DATA)
4610 continue;
4611 }
4612
4613 count += wptr - rptr;
4614 }
4615
4616 if (!(*flags & CPU_DTRACE_FAULT))
4617 regs[rd] = count;
4618
4619 break;
4620 }
4621 #endif
4622
4623 case DIF_SUBR_PROGENYOF: {
4624 pid_t pid = tupregs[0].dttk_value;
4625 proc_t *p;
4626 int rval = 0;
4627
4628 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4629
4630 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4631 #ifdef illumos
4632 if (p->p_pidp->pid_id == pid) {
4633 #else
4634 if (p->p_pid == pid) {
4635 #endif
4636 rval = 1;
4637 break;
4638 }
4639 }
4640
4641 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4642
4643 regs[rd] = rval;
4644 break;
4645 }
4646
4647 case DIF_SUBR_SPECULATION:
4648 regs[rd] = dtrace_speculation(state);
4649 break;
4650
4651 case DIF_SUBR_COPYOUT: {
4652 uintptr_t kaddr = tupregs[0].dttk_value;
4653 uintptr_t uaddr = tupregs[1].dttk_value;
4654 uint64_t size = tupregs[2].dttk_value;
4655
4656 if (!dtrace_destructive_disallow &&
4657 dtrace_priv_proc_control(state) &&
4658 !dtrace_istoxic(kaddr, size) &&
4659 dtrace_canload(kaddr, size, mstate, vstate)) {
4660 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4661 dtrace_copyout(kaddr, uaddr, size, flags);
4662 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4663 }
4664 break;
4665 }
4666
4667 case DIF_SUBR_COPYOUTSTR: {
4668 uintptr_t kaddr = tupregs[0].dttk_value;
4669 uintptr_t uaddr = tupregs[1].dttk_value;
4670 uint64_t size = tupregs[2].dttk_value;
4671 size_t lim;
4672
4673 if (!dtrace_destructive_disallow &&
4674 dtrace_priv_proc_control(state) &&
4675 !dtrace_istoxic(kaddr, size) &&
4676 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4677 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4678 dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4679 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4680 }
4681 break;
4682 }
4683
4684 case DIF_SUBR_STRLEN: {
4685 size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4686 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4687 size_t lim;
4688
4689
if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { 4690 regs[rd] = 0; 4691 break; 4692 } 4693 4694 regs[rd] = dtrace_strlen((char *)addr, lim); 4695 break; 4696 } 4697 4698 case DIF_SUBR_STRCHR: 4699 case DIF_SUBR_STRRCHR: { 4700 /* 4701 * We're going to iterate over the string looking for the 4702 * specified character. We will iterate until we have reached 4703 * the string length or we have found the character. If this 4704 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 4705 * of the specified character instead of the first. 4706 */ 4707 uintptr_t addr = tupregs[0].dttk_value; 4708 uintptr_t addr_limit; 4709 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4710 size_t lim; 4711 char c, target = (char)tupregs[1].dttk_value; 4712 4713 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { 4714 regs[rd] = 0; 4715 break; 4716 } 4717 addr_limit = addr + lim; 4718 4719 for (regs[rd] = 0; addr < addr_limit; addr++) { 4720 if ((c = dtrace_load8(addr)) == target) { 4721 regs[rd] = addr; 4722 4723 if (subr == DIF_SUBR_STRCHR) 4724 break; 4725 } 4726 4727 if (c == '\0') 4728 break; 4729 } 4730 break; 4731 } 4732 4733 case DIF_SUBR_STRSTR: 4734 case DIF_SUBR_INDEX: 4735 case DIF_SUBR_RINDEX: { 4736 /* 4737 * We're going to iterate over the string looking for the 4738 * specified string. We will iterate until we have reached 4739 * the string length or we have found the string. (Yes, this 4740 * is done in the most naive way possible -- but considering 4741 * that the string we're searching for is likely to be 4742 * relatively short, the complexity of Rabin-Karp or similar 4743 * hardly seems merited.) 4744 */ 4745 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 4746 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 4747 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4748 size_t len = dtrace_strlen(addr, size); 4749 size_t sublen = dtrace_strlen(substr, size); 4750 char *limit = addr + len, *orig = addr; 4751 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 4752 int inc = 1; 4753 4754 regs[rd] = notfound; 4755 4756 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 4757 regs[rd] = 0; 4758 break; 4759 } 4760 4761 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 4762 vstate)) { 4763 regs[rd] = 0; 4764 break; 4765 } 4766 4767 /* 4768 * strstr() and index()/rindex() have similar semantics if 4769 * both strings are the empty string: strstr() returns a 4770 * pointer to the (empty) string, and index() and rindex() 4771 * both return index 0 (regardless of any position argument). 4772 */ 4773 if (sublen == 0 && len == 0) { 4774 if (subr == DIF_SUBR_STRSTR) 4775 regs[rd] = (uintptr_t)addr; 4776 else 4777 regs[rd] = 0; 4778 break; 4779 } 4780 4781 if (subr != DIF_SUBR_STRSTR) { 4782 if (subr == DIF_SUBR_RINDEX) { 4783 limit = orig - 1; 4784 addr += len; 4785 inc = -1; 4786 } 4787 4788 /* 4789 * Both index() and rindex() take an optional position 4790 * argument that denotes the starting position. 4791 */ 4792 if (nargs == 3) { 4793 int64_t pos = (int64_t)tupregs[2].dttk_value; 4794 4795 /* 4796 * If the position argument to index() is 4797 * negative, Perl implicitly clamps it at 4798 * zero. This semantic is a little surprising 4799 * given the special meaning of negative 4800 * positions to similar Perl functions like 4801 * substr(), but it appears to reflect a 4802 * notion that index() can start from a 4803 * negative index and increment its way up to 4804 * the string. 
Given this notion, Perl's 4805 * rindex() is at least self-consistent in 4806 * that it implicitly clamps positions greater 4807 * than the string length to be the string 4808 * length. Where Perl completely loses 4809 * coherence, however, is when the specified 4810 * substring is the empty string (""). In 4811 * this case, even if the position is 4812 * negative, rindex() returns 0 -- and even if 4813 * the position is greater than the length, 4814 * index() returns the string length. These 4815 * semantics violate the notion that index() 4816 * should never return a value less than the 4817 * specified position and that rindex() should 4818 * never return a value greater than the 4819 * specified position. (One assumes that 4820 * these semantics are artifacts of Perl's 4821 * implementation and not the results of 4822 * deliberate design -- it beggars belief that 4823 * even Larry Wall could desire such oddness.) 4824 * While in the abstract one would wish for 4825 * consistent position semantics across 4826 * substr(), index() and rindex() -- or at the 4827 * very least self-consistent position 4828 * semantics for index() and rindex() -- we 4829 * instead opt to keep with the extant Perl 4830 * semantics, in all their broken glory. (Do 4831 * we have more desire to maintain Perl's 4832 * semantics than Perl does? Probably.) 4833 */ 4834 if (subr == DIF_SUBR_RINDEX) { 4835 if (pos < 0) { 4836 if (sublen == 0) 4837 regs[rd] = 0; 4838 break; 4839 } 4840 4841 if (pos > len) 4842 pos = len; 4843 } else { 4844 if (pos < 0) 4845 pos = 0; 4846 4847 if (pos >= len) { 4848 if (sublen == 0) 4849 regs[rd] = len; 4850 break; 4851 } 4852 } 4853 4854 addr = orig + pos; 4855 } 4856 } 4857 4858 for (regs[rd] = notfound; addr != limit; addr += inc) { 4859 if (dtrace_strncmp(addr, substr, sublen) == 0) { 4860 if (subr != DIF_SUBR_STRSTR) { 4861 /* 4862 * As D index() and rindex() are 4863 * modeled on Perl (and not on awk), 4864 * we return a zero-based (and not a 4865 * one-based) index. (For you Perl 4866 * weenies: no, we're not going to add 4867 * $[ -- and shouldn't you be at a con 4868 * or something?) 4869 */ 4870 regs[rd] = (uintptr_t)(addr - orig); 4871 break; 4872 } 4873 4874 ASSERT(subr == DIF_SUBR_STRSTR); 4875 regs[rd] = (uintptr_t)addr; 4876 break; 4877 } 4878 } 4879 4880 break; 4881 } 4882 4883 case DIF_SUBR_STRTOK: { 4884 uintptr_t addr = tupregs[0].dttk_value; 4885 uintptr_t tokaddr = tupregs[1].dttk_value; 4886 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4887 uintptr_t limit, toklimit; 4888 size_t clim; 4889 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 4890 char *dest = (char *)mstate->dtms_scratch_ptr; 4891 int i; 4892 4893 /* 4894 * Check both the token buffer and (later) the input buffer, 4895 * since both could be non-scratch addresses. 4896 */ 4897 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) { 4898 regs[rd] = 0; 4899 break; 4900 } 4901 toklimit = tokaddr + clim; 4902 4903 if (!DTRACE_INSCRATCH(mstate, size)) { 4904 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4905 regs[rd] = 0; 4906 break; 4907 } 4908 4909 if (addr == 0) { 4910 /* 4911 * If the address specified is NULL, we use our saved 4912 * strtok pointer from the mstate. Note that this 4913 * means that the saved strtok pointer is _only_ 4914 * valid within multiple enablings of the same probe -- 4915 * it behaves like an implicit clause-local variable. 
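 *
 * For example, an illustrative clause that peels the first two
 * components off of a path:
 *
 *	this->a = strtok(this->path, "/");
 *	this->b = strtok(NULL, "/");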
4916 */ 4917 addr = mstate->dtms_strtok; 4918 limit = mstate->dtms_strtok_limit; 4919 } else { 4920 /* 4921 * If the user-specified address is non-NULL we must 4922 * access check it. This is the only time we have 4923 * a chance to do so, since this address may reside 4924 * in the string table of this clause-- future calls 4925 * (when we fetch addr from mstate->dtms_strtok) 4926 * would fail this access check. 4927 */ 4928 if (!dtrace_strcanload(addr, size, &clim, mstate, 4929 vstate)) { 4930 regs[rd] = 0; 4931 break; 4932 } 4933 limit = addr + clim; 4934 } 4935 4936 /* 4937 * First, zero the token map, and then process the token 4938 * string -- setting a bit in the map for every character 4939 * found in the token string. 4940 */ 4941 for (i = 0; i < sizeof (tokmap); i++) 4942 tokmap[i] = 0; 4943 4944 for (; tokaddr < toklimit; tokaddr++) { 4945 if ((c = dtrace_load8(tokaddr)) == '\0') 4946 break; 4947 4948 ASSERT((c >> 3) < sizeof (tokmap)); 4949 tokmap[c >> 3] |= (1 << (c & 0x7)); 4950 } 4951 4952 for (; addr < limit; addr++) { 4953 /* 4954 * We're looking for a character that is _not_ 4955 * contained in the token string. 4956 */ 4957 if ((c = dtrace_load8(addr)) == '\0') 4958 break; 4959 4960 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 4961 break; 4962 } 4963 4964 if (c == '\0') { 4965 /* 4966 * We reached the end of the string without finding 4967 * any character that was not in the token string. 4968 * We return NULL in this case, and we set the saved 4969 * address to NULL as well. 4970 */ 4971 regs[rd] = 0; 4972 mstate->dtms_strtok = 0; 4973 mstate->dtms_strtok_limit = 0; 4974 break; 4975 } 4976 4977 /* 4978 * From here on, we're copying into the destination string. 4979 */ 4980 for (i = 0; addr < limit && i < size - 1; addr++) { 4981 if ((c = dtrace_load8(addr)) == '\0') 4982 break; 4983 4984 if (tokmap[c >> 3] & (1 << (c & 0x7))) 4985 break; 4986 4987 ASSERT(i < size); 4988 dest[i++] = c; 4989 } 4990 4991 ASSERT(i < size); 4992 dest[i] = '\0'; 4993 regs[rd] = (uintptr_t)dest; 4994 mstate->dtms_scratch_ptr += size; 4995 mstate->dtms_strtok = addr; 4996 mstate->dtms_strtok_limit = limit; 4997 break; 4998 } 4999 5000 case DIF_SUBR_SUBSTR: { 5001 uintptr_t s = tupregs[0].dttk_value; 5002 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5003 char *d = (char *)mstate->dtms_scratch_ptr; 5004 int64_t index = (int64_t)tupregs[1].dttk_value; 5005 int64_t remaining = (int64_t)tupregs[2].dttk_value; 5006 size_t len = dtrace_strlen((char *)s, size); 5007 int64_t i; 5008 5009 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 5010 regs[rd] = 0; 5011 break; 5012 } 5013 5014 if (!DTRACE_INSCRATCH(mstate, size)) { 5015 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5016 regs[rd] = 0; 5017 break; 5018 } 5019 5020 if (nargs <= 2) 5021 remaining = (int64_t)size; 5022 5023 if (index < 0) { 5024 index += len; 5025 5026 if (index < 0 && index + remaining > 0) { 5027 remaining += index; 5028 index = 0; 5029 } 5030 } 5031 5032 if (index >= len || index < 0) { 5033 remaining = 0; 5034 } else if (remaining < 0) { 5035 remaining += len - index; 5036 } else if (index + remaining > size) { 5037 remaining = size - index; 5038 } 5039 5040 for (i = 0; i < remaining; i++) { 5041 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 5042 break; 5043 } 5044 5045 d[i] = '\0'; 5046 5047 mstate->dtms_scratch_ptr += size; 5048 regs[rd] = (uintptr_t)d; 5049 break; 5050 } 5051 5052 case DIF_SUBR_JSON: { 5053 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5054 uintptr_t json = tupregs[0].dttk_value; 5055 size_t 
jsonlen = dtrace_strlen((char *)json, size); 5056 uintptr_t elem = tupregs[1].dttk_value; 5057 size_t elemlen = dtrace_strlen((char *)elem, size); 5058 5059 char *dest = (char *)mstate->dtms_scratch_ptr; 5060 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; 5061 char *ee = elemlist; 5062 int nelems = 1; 5063 uintptr_t cur; 5064 5065 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || 5066 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { 5067 regs[rd] = 0; 5068 break; 5069 } 5070 5071 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { 5072 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5073 regs[rd] = 0; 5074 break; 5075 } 5076 5077 /* 5078 * Read the element selector and split it up into a packed list 5079 * of strings. 5080 */ 5081 for (cur = elem; cur < elem + elemlen; cur++) { 5082 char cc = dtrace_load8(cur); 5083 5084 if (cur == elem && cc == '[') { 5085 /* 5086 * If the first element selector key is 5087 * actually an array index then ignore the 5088 * bracket. 5089 */ 5090 continue; 5091 } 5092 5093 if (cc == ']') 5094 continue; 5095 5096 if (cc == '.' || cc == '[') { 5097 nelems++; 5098 cc = '\0'; 5099 } 5100 5101 *ee++ = cc; 5102 } 5103 *ee++ = '\0'; 5104 5105 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, 5106 nelems, dest)) != 0) 5107 mstate->dtms_scratch_ptr += jsonlen + 1; 5108 break; 5109 } 5110 5111 case DIF_SUBR_TOUPPER: 5112 case DIF_SUBR_TOLOWER: { 5113 uintptr_t s = tupregs[0].dttk_value; 5114 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5115 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5116 size_t len = dtrace_strlen((char *)s, size); 5117 char lower, upper, convert; 5118 int64_t i; 5119 5120 if (subr == DIF_SUBR_TOUPPER) { 5121 lower = 'a'; 5122 upper = 'z'; 5123 convert = 'A'; 5124 } else { 5125 lower = 'A'; 5126 upper = 'Z'; 5127 convert = 'a'; 5128 } 5129 5130 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 5131 regs[rd] = 0; 5132 break; 5133 } 5134 5135 if (!DTRACE_INSCRATCH(mstate, size)) { 5136 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5137 regs[rd] = 0; 5138 break; 5139 } 5140 5141 for (i = 0; i < size - 1; i++) { 5142 if ((c = dtrace_load8(s + i)) == '\0') 5143 break; 5144 5145 if (c >= lower && c <= upper) 5146 c = convert + (c - lower); 5147 5148 dest[i] = c; 5149 } 5150 5151 ASSERT(i < size); 5152 dest[i] = '\0'; 5153 regs[rd] = (uintptr_t)dest; 5154 mstate->dtms_scratch_ptr += size; 5155 break; 5156 } 5157 5158 #ifdef illumos 5159 case DIF_SUBR_GETMAJOR: 5160 #ifdef _LP64 5161 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 5162 #else 5163 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 5164 #endif 5165 break; 5166 5167 case DIF_SUBR_GETMINOR: 5168 #ifdef _LP64 5169 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 5170 #else 5171 regs[rd] = tupregs[0].dttk_value & MAXMIN; 5172 #endif 5173 break; 5174 5175 case DIF_SUBR_DDI_PATHNAME: { 5176 /* 5177 * This one is a galactic mess. We are going to roughly 5178 * emulate ddi_pathname(), but it's made more complicated 5179 * by the fact that we (a) want to include the minor name and 5180 * (b) must proceed iteratively instead of recursively. 
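 *
 * The result is a path of the illustrative form
 *
 *	/devices/pci@0,0/pci1022,7450@2/pci1000,3060@3:devctl
 *
 * where the ":minor" suffix is appended only when a minor number
 * is passed in.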
5181 */ 5182 uintptr_t dest = mstate->dtms_scratch_ptr; 5183 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5184 char *start = (char *)dest, *end = start + size - 1; 5185 uintptr_t daddr = tupregs[0].dttk_value; 5186 int64_t minor = (int64_t)tupregs[1].dttk_value; 5187 char *s; 5188 int i, len, depth = 0; 5189 5190 /* 5191 * Due to all the pointer jumping we do and context we must 5192 * rely upon, we just mandate that the user must have kernel 5193 * read privileges to use this routine. 5194 */ 5195 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 5196 *flags |= CPU_DTRACE_KPRIV; 5197 *illval = daddr; 5198 regs[rd] = 0; 5199 } 5200 5201 if (!DTRACE_INSCRATCH(mstate, size)) { 5202 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5203 regs[rd] = 0; 5204 break; 5205 } 5206 5207 *end = '\0'; 5208 5209 /* 5210 * We want to have a name for the minor. In order to do this, 5211 * we need to walk the minor list from the devinfo. We want 5212 * to be sure that we don't infinitely walk a circular list, 5213 * so we check for circularity by sending a scout pointer 5214 * ahead two elements for every element that we iterate over; 5215 * if the list is circular, these will ultimately point to the 5216 * same element. You may recognize this little trick as the 5217 * answer to a stupid interview question -- one that always 5218 * seems to be asked by those who had to have it laboriously 5219 * explained to them, and who can't even concisely describe 5220 * the conditions under which one would be forced to resort to 5221 * this technique. Needless to say, those conditions are 5222 * found here -- and probably only here. Is this the only use 5223 * of this infamous trick in shipping, production code? If it 5224 * isn't, it probably should be... 5225 */ 5226 if (minor != -1) { 5227 uintptr_t maddr = dtrace_loadptr(daddr + 5228 offsetof(struct dev_info, devi_minor)); 5229 5230 uintptr_t next = offsetof(struct ddi_minor_data, next); 5231 uintptr_t name = offsetof(struct ddi_minor_data, 5232 d_minor) + offsetof(struct ddi_minor, name); 5233 uintptr_t dev = offsetof(struct ddi_minor_data, 5234 d_minor) + offsetof(struct ddi_minor, dev); 5235 uintptr_t scout; 5236 5237 if (maddr != NULL) 5238 scout = dtrace_loadptr(maddr + next); 5239 5240 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5241 uint64_t m; 5242 #ifdef _LP64 5243 m = dtrace_load64(maddr + dev) & MAXMIN64; 5244 #else 5245 m = dtrace_load32(maddr + dev) & MAXMIN; 5246 #endif 5247 if (m != minor) { 5248 maddr = dtrace_loadptr(maddr + next); 5249 5250 if (scout == NULL) 5251 continue; 5252 5253 scout = dtrace_loadptr(scout + next); 5254 5255 if (scout == NULL) 5256 continue; 5257 5258 scout = dtrace_loadptr(scout + next); 5259 5260 if (scout == NULL) 5261 continue; 5262 5263 if (scout == maddr) { 5264 *flags |= CPU_DTRACE_ILLOP; 5265 break; 5266 } 5267 5268 continue; 5269 } 5270 5271 /* 5272 * We have the minor data. Now we need to 5273 * copy the minor's name into the end of the 5274 * pathname. 
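 * (The name lands immediately before the rest of the path:
 * end is first moved down by len + 1 bytes, the ':' separator
 * is written at *end, and the name is copied into end[1]
 * through end[len].)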
5275 */ 5276 s = (char *)dtrace_loadptr(maddr + name); 5277 len = dtrace_strlen(s, size); 5278 5279 if (*flags & CPU_DTRACE_FAULT) 5280 break; 5281 5282 if (len != 0) { 5283 if ((end -= (len + 1)) < start) 5284 break; 5285 5286 *end = ':'; 5287 } 5288 5289 for (i = 1; i <= len; i++) 5290 end[i] = dtrace_load8((uintptr_t)s++); 5291 break; 5292 } 5293 } 5294 5295 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5296 ddi_node_state_t devi_state; 5297 5298 devi_state = dtrace_load32(daddr + 5299 offsetof(struct dev_info, devi_node_state)); 5300 5301 if (*flags & CPU_DTRACE_FAULT) 5302 break; 5303 5304 if (devi_state >= DS_INITIALIZED) { 5305 s = (char *)dtrace_loadptr(daddr + 5306 offsetof(struct dev_info, devi_addr)); 5307 len = dtrace_strlen(s, size); 5308 5309 if (*flags & CPU_DTRACE_FAULT) 5310 break; 5311 5312 if (len != 0) { 5313 if ((end -= (len + 1)) < start) 5314 break; 5315 5316 *end = '@'; 5317 } 5318 5319 for (i = 1; i <= len; i++) 5320 end[i] = dtrace_load8((uintptr_t)s++); 5321 } 5322 5323 /* 5324 * Now for the node name... 5325 */ 5326 s = (char *)dtrace_loadptr(daddr + 5327 offsetof(struct dev_info, devi_node_name)); 5328 5329 daddr = dtrace_loadptr(daddr + 5330 offsetof(struct dev_info, devi_parent)); 5331 5332 /* 5333 * If our parent is NULL (that is, if we're the root 5334 * node), we're going to use the special path 5335 * "devices". 5336 */ 5337 if (daddr == 0) 5338 s = "devices"; 5339 5340 len = dtrace_strlen(s, size); 5341 if (*flags & CPU_DTRACE_FAULT) 5342 break; 5343 5344 if ((end -= (len + 1)) < start) 5345 break; 5346 5347 for (i = 1; i <= len; i++) 5348 end[i] = dtrace_load8((uintptr_t)s++); 5349 *end = '/'; 5350 5351 if (depth++ > dtrace_devdepth_max) { 5352 *flags |= CPU_DTRACE_ILLOP; 5353 break; 5354 } 5355 } 5356 5357 if (end < start) 5358 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5359 5360 if (daddr == 0) { 5361 regs[rd] = (uintptr_t)end; 5362 mstate->dtms_scratch_ptr += size; 5363 } 5364 5365 break; 5366 } 5367 #endif 5368 5369 case DIF_SUBR_STRJOIN: { 5370 char *d = (char *)mstate->dtms_scratch_ptr; 5371 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5372 uintptr_t s1 = tupregs[0].dttk_value; 5373 uintptr_t s2 = tupregs[1].dttk_value; 5374 int i = 0, j = 0; 5375 size_t lim1, lim2; 5376 char c; 5377 5378 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) || 5379 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) { 5380 regs[rd] = 0; 5381 break; 5382 } 5383 5384 if (!DTRACE_INSCRATCH(mstate, size)) { 5385 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5386 regs[rd] = 0; 5387 break; 5388 } 5389 5390 for (;;) { 5391 if (i >= size) { 5392 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5393 regs[rd] = 0; 5394 break; 5395 } 5396 c = (i >= lim1) ? '\0' : dtrace_load8(s1++); 5397 if ((d[i++] = c) == '\0') { 5398 i--; 5399 break; 5400 } 5401 } 5402 5403 for (;;) { 5404 if (i >= size) { 5405 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5406 regs[rd] = 0; 5407 break; 5408 } 5409 5410 c = (j++ >= lim2) ? 
'\0' : dtrace_load8(s2++);
5411 if ((d[i++] = c) == '\0')
5412 break;
5413 }
5414
5415 if (i < size) {
5416 mstate->dtms_scratch_ptr += i;
5417 regs[rd] = (uintptr_t)d;
5418 }
5419
5420 break;
5421 }
5422
5423 case DIF_SUBR_STRTOLL: {
5424 uintptr_t s = tupregs[0].dttk_value;
5425 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5426 size_t lim;
5427 int base = 10;
5428
5429 if (nargs > 1) {
5430 if ((base = tupregs[1].dttk_value) <= 1 ||
5431 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5432 *flags |= CPU_DTRACE_ILLOP;
5433 break;
5434 }
5435 }
5436
5437 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
5438 regs[rd] = INT64_MIN;
5439 break;
5440 }
5441
5442 regs[rd] = dtrace_strtoll((char *)s, base, lim);
5443 break;
5444 }
5445
5446 case DIF_SUBR_LLTOSTR: {
5447 int64_t i = (int64_t)tupregs[0].dttk_value;
5448 uint64_t val, digit;
5449 uint64_t size = 65; /* enough room for 2^64 in binary */
5450 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5451 int base = 10;
5452
5453 if (nargs > 1) {
5454 if ((base = tupregs[1].dttk_value) <= 1 ||
5455 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5456 *flags |= CPU_DTRACE_ILLOP;
5457 break;
5458 }
5459 }
5460
5461 val = (base == 10 && i < 0) ? i * -1 : i;
5462
5463 if (!DTRACE_INSCRATCH(mstate, size)) {
5464 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5465 regs[rd] = 0;
5466 break;
5467 }
5468
5469 for (*end-- = '\0'; val; val /= base) {
5470 if ((digit = val % base) <= '9' - '0') {
5471 *end-- = '0' + digit;
5472 } else {
5473 *end-- = 'a' + (digit - ('9' - '0') - 1);
5474 }
5475 }
5476
5477 if (i == 0 && base == 16)
5478 *end-- = '0';
5479
5480 if (base == 16)
5481 *end-- = 'x';
5482
5483 if (i == 0 || base == 8 || base == 16)
5484 *end-- = '0';
5485
5486 if (i < 0 && base == 10)
5487 *end-- = '-';
5488
5489 regs[rd] = (uintptr_t)end + 1;
5490 mstate->dtms_scratch_ptr += size;
5491 break;
5492 }
5493
5494 case DIF_SUBR_HTONS:
5495 case DIF_SUBR_NTOHS:
5496 #if BYTE_ORDER == BIG_ENDIAN
5497 regs[rd] = (uint16_t)tupregs[0].dttk_value;
5498 #else
5499 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5500 #endif
5501 break;
5502
5503
5504 case DIF_SUBR_HTONL:
5505 case DIF_SUBR_NTOHL:
5506 #if BYTE_ORDER == BIG_ENDIAN
5507 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5508 #else
5509 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5510 #endif
5511 break;
5512
5513
5514 case DIF_SUBR_HTONLL:
5515 case DIF_SUBR_NTOHLL:
5516 #if BYTE_ORDER == BIG_ENDIAN
5517 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5518 #else
5519 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5520 #endif
5521 break;
5522
5523
5524 case DIF_SUBR_DIRNAME:
5525 case DIF_SUBR_BASENAME: {
5526 char *dest = (char *)mstate->dtms_scratch_ptr;
5527 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5528 uintptr_t src = tupregs[0].dttk_value;
5529 int i, j, len = dtrace_strlen((char *)src, size);
5530 int lastbase = -1, firstbase = -1, lastdir = -1;
5531 int start, end;
5532
5533 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5534 regs[rd] = 0;
5535 break;
5536 }
5537
5538 if (!DTRACE_INSCRATCH(mstate, size)) {
5539 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5540 regs[rd] = 0;
5541 break;
5542 }
5543
5544 /*
5545 * The basename and dirname of a zero-length string are
5546 * both defined to be "."
5547 */
5548 if (len == 0) {
5549 len = 1;
5550 src = (uintptr_t)".";
5551 }
5552
5553 /*
5554 * Start from the back of the string, moving back toward the
5555 * front until we see a character that isn't a slash.
That 5556 * character is the last character in the basename. 5557 */ 5558 for (i = len - 1; i >= 0; i--) { 5559 if (dtrace_load8(src + i) != '/') 5560 break; 5561 } 5562 5563 if (i >= 0) 5564 lastbase = i; 5565 5566 /* 5567 * Starting from the last character in the basename, move 5568 * towards the front until we find a slash. The character 5569 * that we processed immediately before that is the first 5570 * character in the basename. 5571 */ 5572 for (; i >= 0; i--) { 5573 if (dtrace_load8(src + i) == '/') 5574 break; 5575 } 5576 5577 if (i >= 0) 5578 firstbase = i + 1; 5579 5580 /* 5581 * Now keep going until we find a non-slash character. That 5582 * character is the last character in the dirname. 5583 */ 5584 for (; i >= 0; i--) { 5585 if (dtrace_load8(src + i) != '/') 5586 break; 5587 } 5588 5589 if (i >= 0) 5590 lastdir = i; 5591 5592 ASSERT(!(lastbase == -1 && firstbase != -1)); 5593 ASSERT(!(firstbase == -1 && lastdir != -1)); 5594 5595 if (lastbase == -1) { 5596 /* 5597 * We didn't find a non-slash character. We know that 5598 * the length is non-zero, so the whole string must be 5599 * slashes. In either the dirname or the basename 5600 * case, we return '/'. 5601 */ 5602 ASSERT(firstbase == -1); 5603 firstbase = lastbase = lastdir = 0; 5604 } 5605 5606 if (firstbase == -1) { 5607 /* 5608 * The entire string consists only of a basename 5609 * component. If we're looking for dirname, we need 5610 * to change our string to be just "."; if we're 5611 * looking for a basename, we'll just set the first 5612 * character of the basename to be 0. 5613 */ 5614 if (subr == DIF_SUBR_DIRNAME) { 5615 ASSERT(lastdir == -1); 5616 src = (uintptr_t)"."; 5617 lastdir = 0; 5618 } else { 5619 firstbase = 0; 5620 } 5621 } 5622 5623 if (subr == DIF_SUBR_DIRNAME) { 5624 if (lastdir == -1) { 5625 /* 5626 * We know that we have a slash in the name -- 5627 * or lastdir would be set to 0, above. And 5628 * because lastdir is -1, we know that this 5629 * slash must be the first character. (That 5630 * is, the full string must be of the form 5631 * "/basename".) In this case, the last 5632 * character of the directory name is 0. 
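 * (For example, for the illustrative input "/foo", the only
 * slash is character 0, and dirname() yields "/".)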
5633 */ 5634 lastdir = 0; 5635 } 5636 5637 start = 0; 5638 end = lastdir; 5639 } else { 5640 ASSERT(subr == DIF_SUBR_BASENAME); 5641 ASSERT(firstbase != -1 && lastbase != -1); 5642 start = firstbase; 5643 end = lastbase; 5644 } 5645 5646 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 5647 dest[j] = dtrace_load8(src + i); 5648 5649 dest[j] = '\0'; 5650 regs[rd] = (uintptr_t)dest; 5651 mstate->dtms_scratch_ptr += size; 5652 break; 5653 } 5654 5655 case DIF_SUBR_GETF: { 5656 uintptr_t fd = tupregs[0].dttk_value; 5657 struct filedesc *fdp; 5658 file_t *fp; 5659 5660 if (!dtrace_priv_proc(state)) { 5661 regs[rd] = 0; 5662 break; 5663 } 5664 fdp = curproc->p_fd; 5665 FILEDESC_SLOCK(fdp); 5666 fp = fget_locked(fdp, fd); 5667 mstate->dtms_getf = fp; 5668 regs[rd] = (uintptr_t)fp; 5669 FILEDESC_SUNLOCK(fdp); 5670 break; 5671 } 5672 5673 case DIF_SUBR_CLEANPATH: { 5674 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5675 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5676 uintptr_t src = tupregs[0].dttk_value; 5677 size_t lim; 5678 int i = 0, j = 0; 5679 #ifdef illumos 5680 zone_t *z; 5681 #endif 5682 5683 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) { 5684 regs[rd] = 0; 5685 break; 5686 } 5687 5688 if (!DTRACE_INSCRATCH(mstate, size)) { 5689 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5690 regs[rd] = 0; 5691 break; 5692 } 5693 5694 /* 5695 * Move forward, loading each character. 5696 */ 5697 do { 5698 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5699 next: 5700 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 5701 break; 5702 5703 if (c != '/') { 5704 dest[j++] = c; 5705 continue; 5706 } 5707 5708 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5709 5710 if (c == '/') { 5711 /* 5712 * We have two slashes -- we can just advance 5713 * to the next character. 5714 */ 5715 goto next; 5716 } 5717 5718 if (c != '.') { 5719 /* 5720 * This is not "." and it's not ".." -- we can 5721 * just store the "/" and this character and 5722 * drive on. 5723 */ 5724 dest[j++] = '/'; 5725 dest[j++] = c; 5726 continue; 5727 } 5728 5729 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5730 5731 if (c == '/') { 5732 /* 5733 * This is a "/./" component. We're not going 5734 * to store anything in the destination buffer; 5735 * we're just going to go to the next component. 5736 */ 5737 goto next; 5738 } 5739 5740 if (c != '.') { 5741 /* 5742 * This is not ".." -- we can just store the 5743 * "/." and this character and continue 5744 * processing. 5745 */ 5746 dest[j++] = '/'; 5747 dest[j++] = '.'; 5748 dest[j++] = c; 5749 continue; 5750 } 5751 5752 c = (i >= lim) ? '\0' : dtrace_load8(src + i++); 5753 5754 if (c != '/' && c != '\0') { 5755 /* 5756 * This is not ".." -- it's "..[mumble]". 5757 * We'll store the "/.." and this character 5758 * and continue processing. 5759 */ 5760 dest[j++] = '/'; 5761 dest[j++] = '.'; 5762 dest[j++] = '.'; 5763 dest[j++] = c; 5764 continue; 5765 } 5766 5767 /* 5768 * This is "/../" or "/..\0". We need to back up 5769 * our destination pointer until we find a "/". 
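 * (For example, cleaning the illustrative input
 * "/foo/bar/../baz" backs up over "bar" so that the result
 * becomes "/foo/baz".)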
5770 */ 5771 i--; 5772 while (j != 0 && dest[--j] != '/') 5773 continue; 5774 5775 if (c == '\0') 5776 dest[++j] = '/'; 5777 } while (c != '\0'); 5778 5779 dest[j] = '\0'; 5780 5781 #ifdef illumos 5782 if (mstate->dtms_getf != NULL && 5783 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 5784 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 5785 /* 5786 * If we've done a getf() as a part of this ECB and we 5787 * don't have kernel access (and we're not in the global 5788 * zone), check if the path we cleaned up begins with 5789 * the zone's root path, and trim it off if so. Note 5790 * that this is an output cleanliness issue, not a 5791 * security issue: knowing one's zone root path does 5792 * not enable privilege escalation. 5793 */ 5794 if (strstr(dest, z->zone_rootpath) == dest) 5795 dest += strlen(z->zone_rootpath) - 1; 5796 } 5797 #endif 5798 5799 regs[rd] = (uintptr_t)dest; 5800 mstate->dtms_scratch_ptr += size; 5801 break; 5802 } 5803 5804 case DIF_SUBR_INET_NTOA: 5805 case DIF_SUBR_INET_NTOA6: 5806 case DIF_SUBR_INET_NTOP: { 5807 size_t size; 5808 int af, argi, i; 5809 char *base, *end; 5810 5811 if (subr == DIF_SUBR_INET_NTOP) { 5812 af = (int)tupregs[0].dttk_value; 5813 argi = 1; 5814 } else { 5815 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 5816 argi = 0; 5817 } 5818 5819 if (af == AF_INET) { 5820 ipaddr_t ip4; 5821 uint8_t *ptr8, val; 5822 5823 if (!dtrace_canload(tupregs[argi].dttk_value, 5824 sizeof (ipaddr_t), mstate, vstate)) { 5825 regs[rd] = 0; 5826 break; 5827 } 5828 5829 /* 5830 * Safely load the IPv4 address. 5831 */ 5832 ip4 = dtrace_load32(tupregs[argi].dttk_value); 5833 5834 /* 5835 * Check an IPv4 string will fit in scratch. 5836 */ 5837 size = INET_ADDRSTRLEN; 5838 if (!DTRACE_INSCRATCH(mstate, size)) { 5839 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5840 regs[rd] = 0; 5841 break; 5842 } 5843 base = (char *)mstate->dtms_scratch_ptr; 5844 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5845 5846 /* 5847 * Stringify as a dotted decimal quad. 5848 */ 5849 *end-- = '\0'; 5850 ptr8 = (uint8_t *)&ip4; 5851 for (i = 3; i >= 0; i--) { 5852 val = ptr8[i]; 5853 5854 if (val == 0) { 5855 *end-- = '0'; 5856 } else { 5857 for (; val; val /= 10) { 5858 *end-- = '0' + (val % 10); 5859 } 5860 } 5861 5862 if (i > 0) 5863 *end-- = '.'; 5864 } 5865 ASSERT(end + 1 >= base); 5866 5867 } else if (af == AF_INET6) { 5868 struct in6_addr ip6; 5869 int firstzero, tryzero, numzero, v6end; 5870 uint16_t val; 5871 const char digits[] = "0123456789abcdef"; 5872 5873 /* 5874 * Stringify using RFC 1884 convention 2 - 16 bit 5875 * hexadecimal values with a zero-run compression. 5876 * Lower case hexadecimal digits are used. 5877 * eg, fe80::214:4fff:fe0b:76c8. 5878 * The IPv4 embedded form is returned for inet_ntop, 5879 * just the IPv4 string is returned for inet_ntoa6. 5880 */ 5881 5882 if (!dtrace_canload(tupregs[argi].dttk_value, 5883 sizeof (struct in6_addr), mstate, vstate)) { 5884 regs[rd] = 0; 5885 break; 5886 } 5887 5888 /* 5889 * Safely load the IPv6 address. 5890 */ 5891 dtrace_bcopy( 5892 (void *)(uintptr_t)tupregs[argi].dttk_value, 5893 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 5894 5895 /* 5896 * Check an IPv6 string will fit in scratch. 
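 * (INET6_ADDRSTRLEN bytes -- 46 on both illumos and FreeBSD --
 * cover the worst-case string plus the terminating NUL.)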
5897 */ 5898 size = INET6_ADDRSTRLEN; 5899 if (!DTRACE_INSCRATCH(mstate, size)) { 5900 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5901 regs[rd] = 0; 5902 break; 5903 } 5904 base = (char *)mstate->dtms_scratch_ptr; 5905 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5906 *end-- = '\0'; 5907 5908 /* 5909 * Find the longest run of 16 bit zero values 5910 * for the single allowed zero compression - "::". 5911 */ 5912 firstzero = -1; 5913 tryzero = -1; 5914 numzero = 1; 5915 for (i = 0; i < sizeof (struct in6_addr); i++) { 5916 #ifdef illumos 5917 if (ip6._S6_un._S6_u8[i] == 0 && 5918 #else 5919 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5920 #endif 5921 tryzero == -1 && i % 2 == 0) { 5922 tryzero = i; 5923 continue; 5924 } 5925 5926 if (tryzero != -1 && 5927 #ifdef illumos 5928 (ip6._S6_un._S6_u8[i] != 0 || 5929 #else 5930 (ip6.__u6_addr.__u6_addr8[i] != 0 || 5931 #endif 5932 i == sizeof (struct in6_addr) - 1)) { 5933 5934 if (i - tryzero <= numzero) { 5935 tryzero = -1; 5936 continue; 5937 } 5938 5939 firstzero = tryzero; 5940 numzero = i - i % 2 - tryzero; 5941 tryzero = -1; 5942 5943 #ifdef illumos 5944 if (ip6._S6_un._S6_u8[i] == 0 && 5945 #else 5946 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5947 #endif 5948 i == sizeof (struct in6_addr) - 1) 5949 numzero += 2; 5950 } 5951 } 5952 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 5953 5954 /* 5955 * Check for an IPv4 embedded address. 5956 */ 5957 v6end = sizeof (struct in6_addr) - 2; 5958 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 5959 IN6_IS_ADDR_V4COMPAT(&ip6)) { 5960 for (i = sizeof (struct in6_addr) - 1; 5961 i >= DTRACE_V4MAPPED_OFFSET; i--) { 5962 ASSERT(end >= base); 5963 5964 #ifdef illumos 5965 val = ip6._S6_un._S6_u8[i]; 5966 #else 5967 val = ip6.__u6_addr.__u6_addr8[i]; 5968 #endif 5969 5970 if (val == 0) { 5971 *end-- = '0'; 5972 } else { 5973 for (; val; val /= 10) { 5974 *end-- = '0' + val % 10; 5975 } 5976 } 5977 5978 if (i > DTRACE_V4MAPPED_OFFSET) 5979 *end-- = '.'; 5980 } 5981 5982 if (subr == DIF_SUBR_INET_NTOA6) 5983 goto inetout; 5984 5985 /* 5986 * Set v6end to skip the IPv4 address that 5987 * we have already stringified. 5988 */ 5989 v6end = 10; 5990 } 5991 5992 /* 5993 * Build the IPv6 string by working through the 5994 * address in reverse. 5995 */ 5996 for (i = v6end; i >= 0; i -= 2) { 5997 ASSERT(end >= base); 5998 5999 if (i == firstzero + numzero - 2) { 6000 *end-- = ':'; 6001 *end-- = ':'; 6002 i -= numzero - 2; 6003 continue; 6004 } 6005 6006 if (i < 14 && i != firstzero - 2) 6007 *end-- = ':'; 6008 6009 #ifdef illumos 6010 val = (ip6._S6_un._S6_u8[i] << 8) + 6011 ip6._S6_un._S6_u8[i + 1]; 6012 #else 6013 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 6014 ip6.__u6_addr.__u6_addr8[i + 1]; 6015 #endif 6016 6017 if (val == 0) { 6018 *end-- = '0'; 6019 } else { 6020 for (; val; val /= 16) { 6021 *end-- = digits[val % 16]; 6022 } 6023 } 6024 } 6025 ASSERT(end + 1 >= base); 6026 6027 } else { 6028 /* 6029 * The user didn't use AH_INET or AH_INET6. 
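 * (That is, the af argument was neither AF_INET nor
 * AF_INET6, so we flag the operation as illegal.)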
6030 */ 6031 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6032 regs[rd] = 0; 6033 break; 6034 } 6035 6036 inetout: regs[rd] = (uintptr_t)end + 1; 6037 mstate->dtms_scratch_ptr += size; 6038 break; 6039 } 6040 6041 case DIF_SUBR_MEMREF: { 6042 uintptr_t size = 2 * sizeof(uintptr_t); 6043 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 6044 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 6045 6046 /* address and length */ 6047 memref[0] = tupregs[0].dttk_value; 6048 memref[1] = tupregs[1].dttk_value; 6049 6050 regs[rd] = (uintptr_t) memref; 6051 mstate->dtms_scratch_ptr += scratch_size; 6052 break; 6053 } 6054 6055 #ifndef illumos 6056 case DIF_SUBR_MEMSTR: { 6057 char *str = (char *)mstate->dtms_scratch_ptr; 6058 uintptr_t mem = tupregs[0].dttk_value; 6059 char c = tupregs[1].dttk_value; 6060 size_t size = tupregs[2].dttk_value; 6061 uint8_t n; 6062 int i; 6063 6064 regs[rd] = 0; 6065 6066 if (size == 0) 6067 break; 6068 6069 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 6070 break; 6071 6072 if (!DTRACE_INSCRATCH(mstate, size)) { 6073 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6074 break; 6075 } 6076 6077 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 6078 *flags |= CPU_DTRACE_ILLOP; 6079 break; 6080 } 6081 6082 for (i = 0; i < size - 1; i++) { 6083 n = dtrace_load8(mem++); 6084 str[i] = (n == 0) ? c : n; 6085 } 6086 str[size - 1] = 0; 6087 6088 regs[rd] = (uintptr_t)str; 6089 mstate->dtms_scratch_ptr += size; 6090 break; 6091 } 6092 #endif 6093 } 6094 } 6095 6096 /* 6097 * Emulate the execution of DTrace IR instructions specified by the given 6098 * DIF object. This function is deliberately void of assertions as all of 6099 * the necessary checks are handled by a call to dtrace_difo_validate(). 6100 */ 6101 static uint64_t 6102 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 6103 dtrace_vstate_t *vstate, dtrace_state_t *state) 6104 { 6105 const dif_instr_t *text = difo->dtdo_buf; 6106 const uint_t textlen = difo->dtdo_len; 6107 const char *strtab = difo->dtdo_strtab; 6108 const uint64_t *inttab = difo->dtdo_inttab; 6109 6110 uint64_t rval = 0; 6111 dtrace_statvar_t *svar; 6112 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 6113 dtrace_difv_t *v; 6114 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 6115 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 6116 6117 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 6118 uint64_t regs[DIF_DIR_NREGS]; 6119 uint64_t *tmp; 6120 6121 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 6122 int64_t cc_r; 6123 uint_t pc = 0, id, opc = 0; 6124 uint8_t ttop = 0; 6125 dif_instr_t instr; 6126 uint_t r1, r2, rd; 6127 6128 /* 6129 * We stash the current DIF object into the machine state: we need it 6130 * for subsequent access checking. 
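 * (In particular, dtrace_canload() permits by-ref loads from
 * the DIF object's own string table, which it locates through
 * mstate->dtms_difo.)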
6131 */ 6132 mstate->dtms_difo = difo; 6133 6134 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 6135 6136 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 6137 opc = pc; 6138 6139 instr = text[pc++]; 6140 r1 = DIF_INSTR_R1(instr); 6141 r2 = DIF_INSTR_R2(instr); 6142 rd = DIF_INSTR_RD(instr); 6143 6144 switch (DIF_INSTR_OP(instr)) { 6145 case DIF_OP_OR: 6146 regs[rd] = regs[r1] | regs[r2]; 6147 break; 6148 case DIF_OP_XOR: 6149 regs[rd] = regs[r1] ^ regs[r2]; 6150 break; 6151 case DIF_OP_AND: 6152 regs[rd] = regs[r1] & regs[r2]; 6153 break; 6154 case DIF_OP_SLL: 6155 regs[rd] = regs[r1] << regs[r2]; 6156 break; 6157 case DIF_OP_SRL: 6158 regs[rd] = regs[r1] >> regs[r2]; 6159 break; 6160 case DIF_OP_SUB: 6161 regs[rd] = regs[r1] - regs[r2]; 6162 break; 6163 case DIF_OP_ADD: 6164 regs[rd] = regs[r1] + regs[r2]; 6165 break; 6166 case DIF_OP_MUL: 6167 regs[rd] = regs[r1] * regs[r2]; 6168 break; 6169 case DIF_OP_SDIV: 6170 if (regs[r2] == 0) { 6171 regs[rd] = 0; 6172 *flags |= CPU_DTRACE_DIVZERO; 6173 } else { 6174 regs[rd] = (int64_t)regs[r1] / 6175 (int64_t)regs[r2]; 6176 } 6177 break; 6178 6179 case DIF_OP_UDIV: 6180 if (regs[r2] == 0) { 6181 regs[rd] = 0; 6182 *flags |= CPU_DTRACE_DIVZERO; 6183 } else { 6184 regs[rd] = regs[r1] / regs[r2]; 6185 } 6186 break; 6187 6188 case DIF_OP_SREM: 6189 if (regs[r2] == 0) { 6190 regs[rd] = 0; 6191 *flags |= CPU_DTRACE_DIVZERO; 6192 } else { 6193 regs[rd] = (int64_t)regs[r1] % 6194 (int64_t)regs[r2]; 6195 } 6196 break; 6197 6198 case DIF_OP_UREM: 6199 if (regs[r2] == 0) { 6200 regs[rd] = 0; 6201 *flags |= CPU_DTRACE_DIVZERO; 6202 } else { 6203 regs[rd] = regs[r1] % regs[r2]; 6204 } 6205 break; 6206 6207 case DIF_OP_NOT: 6208 regs[rd] = ~regs[r1]; 6209 break; 6210 case DIF_OP_MOV: 6211 regs[rd] = regs[r1]; 6212 break; 6213 case DIF_OP_CMP: 6214 cc_r = regs[r1] - regs[r2]; 6215 cc_n = cc_r < 0; 6216 cc_z = cc_r == 0; 6217 cc_v = 0; 6218 cc_c = regs[r1] < regs[r2]; 6219 break; 6220 case DIF_OP_TST: 6221 cc_n = cc_v = cc_c = 0; 6222 cc_z = regs[r1] == 0; 6223 break; 6224 case DIF_OP_BA: 6225 pc = DIF_INSTR_LABEL(instr); 6226 break; 6227 case DIF_OP_BE: 6228 if (cc_z) 6229 pc = DIF_INSTR_LABEL(instr); 6230 break; 6231 case DIF_OP_BNE: 6232 if (cc_z == 0) 6233 pc = DIF_INSTR_LABEL(instr); 6234 break; 6235 case DIF_OP_BG: 6236 if ((cc_z | (cc_n ^ cc_v)) == 0) 6237 pc = DIF_INSTR_LABEL(instr); 6238 break; 6239 case DIF_OP_BGU: 6240 if ((cc_c | cc_z) == 0) 6241 pc = DIF_INSTR_LABEL(instr); 6242 break; 6243 case DIF_OP_BGE: 6244 if ((cc_n ^ cc_v) == 0) 6245 pc = DIF_INSTR_LABEL(instr); 6246 break; 6247 case DIF_OP_BGEU: 6248 if (cc_c == 0) 6249 pc = DIF_INSTR_LABEL(instr); 6250 break; 6251 case DIF_OP_BL: 6252 if (cc_n ^ cc_v) 6253 pc = DIF_INSTR_LABEL(instr); 6254 break; 6255 case DIF_OP_BLU: 6256 if (cc_c) 6257 pc = DIF_INSTR_LABEL(instr); 6258 break; 6259 case DIF_OP_BLE: 6260 if (cc_z | (cc_n ^ cc_v)) 6261 pc = DIF_INSTR_LABEL(instr); 6262 break; 6263 case DIF_OP_BLEU: 6264 if (cc_c | cc_z) 6265 pc = DIF_INSTR_LABEL(instr); 6266 break; 6267 case DIF_OP_RLDSB: 6268 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6269 break; 6270 /*FALLTHROUGH*/ 6271 case DIF_OP_LDSB: 6272 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 6273 break; 6274 case DIF_OP_RLDSH: 6275 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6276 break; 6277 /*FALLTHROUGH*/ 6278 case DIF_OP_LDSH: 6279 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 6280 break; 6281 case DIF_OP_RLDSW: 6282 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6283 break; 6284 /*FALLTHROUGH*/ 6285 case DIF_OP_LDSW: 
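/* Load a signed 32-bit value and sign-extend it into the destination register. */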
6286 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 6287 break; 6288 case DIF_OP_RLDUB: 6289 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6290 break; 6291 /*FALLTHROUGH*/ 6292 case DIF_OP_LDUB: 6293 regs[rd] = dtrace_load8(regs[r1]); 6294 break; 6295 case DIF_OP_RLDUH: 6296 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6297 break; 6298 /*FALLTHROUGH*/ 6299 case DIF_OP_LDUH: 6300 regs[rd] = dtrace_load16(regs[r1]); 6301 break; 6302 case DIF_OP_RLDUW: 6303 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6304 break; 6305 /*FALLTHROUGH*/ 6306 case DIF_OP_LDUW: 6307 regs[rd] = dtrace_load32(regs[r1]); 6308 break; 6309 case DIF_OP_RLDX: 6310 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 6311 break; 6312 /*FALLTHROUGH*/ 6313 case DIF_OP_LDX: 6314 regs[rd] = dtrace_load64(regs[r1]); 6315 break; 6316 case DIF_OP_ULDSB: 6317 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6318 regs[rd] = (int8_t) 6319 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6320 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6321 break; 6322 case DIF_OP_ULDSH: 6323 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6324 regs[rd] = (int16_t) 6325 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6326 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6327 break; 6328 case DIF_OP_ULDSW: 6329 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6330 regs[rd] = (int32_t) 6331 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6332 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6333 break; 6334 case DIF_OP_ULDUB: 6335 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6336 regs[rd] = 6337 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6338 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6339 break; 6340 case DIF_OP_ULDUH: 6341 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6342 regs[rd] = 6343 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6344 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6345 break; 6346 case DIF_OP_ULDUW: 6347 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6348 regs[rd] = 6349 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6350 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6351 break; 6352 case DIF_OP_ULDX: 6353 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6354 regs[rd] = 6355 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 6356 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6357 break; 6358 case DIF_OP_RET: 6359 rval = regs[rd]; 6360 pc = textlen; 6361 break; 6362 case DIF_OP_NOP: 6363 break; 6364 case DIF_OP_SETX: 6365 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 6366 break; 6367 case DIF_OP_SETS: 6368 regs[rd] = (uint64_t)(uintptr_t) 6369 (strtab + DIF_INSTR_STRING(instr)); 6370 break; 6371 case DIF_OP_SCMP: { 6372 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 6373 uintptr_t s1 = regs[r1]; 6374 uintptr_t s2 = regs[r2]; 6375 size_t lim1, lim2; 6376 6377 if (s1 != 0 && 6378 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate)) 6379 break; 6380 if (s2 != 0 && 6381 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate)) 6382 break; 6383 6384 cc_r = dtrace_strncmp((char *)s1, (char *)s2, 6385 MIN(lim1, lim2)); 6386 6387 cc_n = cc_r < 0; 6388 cc_z = cc_r == 0; 6389 cc_v = cc_c = 0; 6390 break; 6391 } 6392 case DIF_OP_LDGA: 6393 regs[rd] = dtrace_dif_variable(mstate, state, 6394 r1, regs[r2]); 6395 break; 6396 case DIF_OP_LDGS: 6397 id = DIF_INSTR_VAR(instr); 6398 6399 if (id >= DIF_VAR_OTHER_UBASE) { 6400 uintptr_t a; 6401 6402 id -= DIF_VAR_OTHER_UBASE; 6403 svar = vstate->dtvs_globals[id]; 6404 ASSERT(svar != NULL); 6405 v = &svar->dtsv_var; 6406 6407 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 6408 regs[rd] = svar->dtsv_data; 6409 break; 6410 } 6411 6412 a = (uintptr_t)svar->dtsv_data; 6413 6414 if (*(uint8_t 
*)a == UINT8_MAX) { 6415 /* 6416 * If the 0th byte is set to UINT8_MAX 6417 * then this is to be treated as a 6418 * reference to a NULL variable. 6419 */ 6420 regs[rd] = 0; 6421 } else { 6422 regs[rd] = a + sizeof (uint64_t); 6423 } 6424 6425 break; 6426 } 6427 6428 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 6429 break; 6430 6431 case DIF_OP_STGS: 6432 id = DIF_INSTR_VAR(instr); 6433 6434 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6435 id -= DIF_VAR_OTHER_UBASE; 6436 6437 VERIFY(id < vstate->dtvs_nglobals); 6438 svar = vstate->dtvs_globals[id]; 6439 ASSERT(svar != NULL); 6440 v = &svar->dtsv_var; 6441 6442 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6443 uintptr_t a = (uintptr_t)svar->dtsv_data; 6444 size_t lim; 6445 6446 ASSERT(a != 0); 6447 ASSERT(svar->dtsv_size != 0); 6448 6449 if (regs[rd] == 0) { 6450 *(uint8_t *)a = UINT8_MAX; 6451 break; 6452 } else { 6453 *(uint8_t *)a = 0; 6454 a += sizeof (uint64_t); 6455 } 6456 if (!dtrace_vcanload( 6457 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6458 &lim, mstate, vstate)) 6459 break; 6460 6461 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6462 (void *)a, &v->dtdv_type, lim); 6463 break; 6464 } 6465 6466 svar->dtsv_data = regs[rd]; 6467 break; 6468 6469 case DIF_OP_LDTA: 6470 /* 6471 * There are no DTrace built-in thread-local arrays at 6472 * present. This opcode is saved for future work. 6473 */ 6474 *flags |= CPU_DTRACE_ILLOP; 6475 regs[rd] = 0; 6476 break; 6477 6478 case DIF_OP_LDLS: 6479 id = DIF_INSTR_VAR(instr); 6480 6481 if (id < DIF_VAR_OTHER_UBASE) { 6482 /* 6483 * For now, this has no meaning. 6484 */ 6485 regs[rd] = 0; 6486 break; 6487 } 6488 6489 id -= DIF_VAR_OTHER_UBASE; 6490 6491 ASSERT(id < vstate->dtvs_nlocals); 6492 ASSERT(vstate->dtvs_locals != NULL); 6493 6494 svar = vstate->dtvs_locals[id]; 6495 ASSERT(svar != NULL); 6496 v = &svar->dtsv_var; 6497 6498 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6499 uintptr_t a = (uintptr_t)svar->dtsv_data; 6500 size_t sz = v->dtdv_type.dtdt_size; 6501 size_t lim; 6502 6503 sz += sizeof (uint64_t); 6504 ASSERT(svar->dtsv_size == NCPU * sz); 6505 a += curcpu * sz; 6506 6507 if (*(uint8_t *)a == UINT8_MAX) { 6508 /* 6509 * If the 0th byte is set to UINT8_MAX 6510 * then this is to be treated as a 6511 * reference to a NULL variable. 
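 * (The store side of this convention is DIF_OP_STLS, below,
 * which writes UINT8_MAX into the flag byte when a NULL is
 * assigned to the variable.)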
6512 */ 6513 regs[rd] = 0; 6514 } else { 6515 regs[rd] = a + sizeof (uint64_t); 6516 } 6517 6518 break; 6519 } 6520 6521 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6522 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6523 regs[rd] = tmp[curcpu]; 6524 break; 6525 6526 case DIF_OP_STLS: 6527 id = DIF_INSTR_VAR(instr); 6528 6529 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6530 id -= DIF_VAR_OTHER_UBASE; 6531 VERIFY(id < vstate->dtvs_nlocals); 6532 6533 ASSERT(vstate->dtvs_locals != NULL); 6534 svar = vstate->dtvs_locals[id]; 6535 ASSERT(svar != NULL); 6536 v = &svar->dtsv_var; 6537 6538 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6539 uintptr_t a = (uintptr_t)svar->dtsv_data; 6540 size_t sz = v->dtdv_type.dtdt_size; 6541 size_t lim; 6542 6543 sz += sizeof (uint64_t); 6544 ASSERT(svar->dtsv_size == NCPU * sz); 6545 a += curcpu * sz; 6546 6547 if (regs[rd] == 0) { 6548 *(uint8_t *)a = UINT8_MAX; 6549 break; 6550 } else { 6551 *(uint8_t *)a = 0; 6552 a += sizeof (uint64_t); 6553 } 6554 6555 if (!dtrace_vcanload( 6556 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6557 &lim, mstate, vstate)) 6558 break; 6559 6560 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6561 (void *)a, &v->dtdv_type, lim); 6562 break; 6563 } 6564 6565 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6566 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6567 tmp[curcpu] = regs[rd]; 6568 break; 6569 6570 case DIF_OP_LDTS: { 6571 dtrace_dynvar_t *dvar; 6572 dtrace_key_t *key; 6573 6574 id = DIF_INSTR_VAR(instr); 6575 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6576 id -= DIF_VAR_OTHER_UBASE; 6577 v = &vstate->dtvs_tlocals[id]; 6578 6579 key = &tupregs[DIF_DTR_NREGS]; 6580 key[0].dttk_value = (uint64_t)id; 6581 key[0].dttk_size = 0; 6582 DTRACE_TLS_THRKEY(key[1].dttk_value); 6583 key[1].dttk_size = 0; 6584 6585 dvar = dtrace_dynvar(dstate, 2, key, 6586 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 6587 mstate, vstate); 6588 6589 if (dvar == NULL) { 6590 regs[rd] = 0; 6591 break; 6592 } 6593 6594 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6595 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6596 } else { 6597 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6598 } 6599 6600 break; 6601 } 6602 6603 case DIF_OP_STTS: { 6604 dtrace_dynvar_t *dvar; 6605 dtrace_key_t *key; 6606 6607 id = DIF_INSTR_VAR(instr); 6608 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6609 id -= DIF_VAR_OTHER_UBASE; 6610 VERIFY(id < vstate->dtvs_ntlocals); 6611 6612 key = &tupregs[DIF_DTR_NREGS]; 6613 key[0].dttk_value = (uint64_t)id; 6614 key[0].dttk_size = 0; 6615 DTRACE_TLS_THRKEY(key[1].dttk_value); 6616 key[1].dttk_size = 0; 6617 v = &vstate->dtvs_tlocals[id]; 6618 6619 dvar = dtrace_dynvar(dstate, 2, key, 6620 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6621 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6622 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6623 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6624 6625 /* 6626 * Given that we're storing to thread-local data, 6627 * we need to flush our predicate cache. 
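 * (A cached predicate result may depend on the thread-local
 * variable that we are about to modify; clearing t_predcache
 * forces the predicate to be re-evaluated on the next firing.)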
6628 */ 6629 curthread->t_predcache = 0; 6630 6631 if (dvar == NULL) 6632 break; 6633 6634 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6635 size_t lim; 6636 6637 if (!dtrace_vcanload( 6638 (void *)(uintptr_t)regs[rd], 6639 &v->dtdv_type, &lim, mstate, vstate)) 6640 break; 6641 6642 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6643 dvar->dtdv_data, &v->dtdv_type, lim); 6644 } else { 6645 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6646 } 6647 6648 break; 6649 } 6650 6651 case DIF_OP_SRA: 6652 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 6653 break; 6654 6655 case DIF_OP_CALL: 6656 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 6657 regs, tupregs, ttop, mstate, state); 6658 break; 6659 6660 case DIF_OP_PUSHTR: 6661 if (ttop == DIF_DTR_NREGS) { 6662 *flags |= CPU_DTRACE_TUPOFLOW; 6663 break; 6664 } 6665 6666 if (r1 == DIF_TYPE_STRING) { 6667 /* 6668 * If this is a string type and the size is 0, 6669 * we'll use the system-wide default string 6670 * size. Note that we are _not_ looking at 6671 * the value of the DTRACEOPT_STRSIZE option; 6672 * had this been set, we would expect to have 6673 * a non-zero size value in the "pushtr". 6674 */ 6675 tupregs[ttop].dttk_size = 6676 dtrace_strlen((char *)(uintptr_t)regs[rd], 6677 regs[r2] ? regs[r2] : 6678 dtrace_strsize_default) + 1; 6679 } else { 6680 if (regs[r2] > LONG_MAX) { 6681 *flags |= CPU_DTRACE_ILLOP; 6682 break; 6683 } 6684 6685 tupregs[ttop].dttk_size = regs[r2]; 6686 } 6687 6688 tupregs[ttop++].dttk_value = regs[rd]; 6689 break; 6690 6691 case DIF_OP_PUSHTV: 6692 if (ttop == DIF_DTR_NREGS) { 6693 *flags |= CPU_DTRACE_TUPOFLOW; 6694 break; 6695 } 6696 6697 tupregs[ttop].dttk_value = regs[rd]; 6698 tupregs[ttop++].dttk_size = 0; 6699 break; 6700 6701 case DIF_OP_POPTS: 6702 if (ttop != 0) 6703 ttop--; 6704 break; 6705 6706 case DIF_OP_FLUSHTS: 6707 ttop = 0; 6708 break; 6709 6710 case DIF_OP_LDGAA: 6711 case DIF_OP_LDTAA: { 6712 dtrace_dynvar_t *dvar; 6713 dtrace_key_t *key = tupregs; 6714 uint_t nkeys = ttop; 6715 6716 id = DIF_INSTR_VAR(instr); 6717 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6718 id -= DIF_VAR_OTHER_UBASE; 6719 6720 key[nkeys].dttk_value = (uint64_t)id; 6721 key[nkeys++].dttk_size = 0; 6722 6723 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 6724 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6725 key[nkeys++].dttk_size = 0; 6726 VERIFY(id < vstate->dtvs_ntlocals); 6727 v = &vstate->dtvs_tlocals[id]; 6728 } else { 6729 VERIFY(id < vstate->dtvs_nglobals); 6730 v = &vstate->dtvs_globals[id]->dtsv_var; 6731 } 6732 6733 dvar = dtrace_dynvar(dstate, nkeys, key, 6734 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 
6735 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6736 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 6737 6738 if (dvar == NULL) { 6739 regs[rd] = 0; 6740 break; 6741 } 6742 6743 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6744 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6745 } else { 6746 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6747 } 6748 6749 break; 6750 } 6751 6752 case DIF_OP_STGAA: 6753 case DIF_OP_STTAA: { 6754 dtrace_dynvar_t *dvar; 6755 dtrace_key_t *key = tupregs; 6756 uint_t nkeys = ttop; 6757 6758 id = DIF_INSTR_VAR(instr); 6759 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6760 id -= DIF_VAR_OTHER_UBASE; 6761 6762 key[nkeys].dttk_value = (uint64_t)id; 6763 key[nkeys++].dttk_size = 0; 6764 6765 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 6766 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6767 key[nkeys++].dttk_size = 0; 6768 VERIFY(id < vstate->dtvs_ntlocals); 6769 v = &vstate->dtvs_tlocals[id]; 6770 } else { 6771 VERIFY(id < vstate->dtvs_nglobals); 6772 v = &vstate->dtvs_globals[id]->dtsv_var; 6773 } 6774 6775 dvar = dtrace_dynvar(dstate, nkeys, key, 6776 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6777 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6778 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6779 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6780 6781 if (dvar == NULL) 6782 break; 6783 6784 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6785 size_t lim; 6786 6787 if (!dtrace_vcanload( 6788 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6789 &lim, mstate, vstate)) 6790 break; 6791 6792 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6793 dvar->dtdv_data, &v->dtdv_type, lim); 6794 } else { 6795 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6796 } 6797 6798 break; 6799 } 6800 6801 case DIF_OP_ALLOCS: { 6802 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6803 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 6804 6805 /* 6806 * Rounding up the user allocation size could have 6807 * overflowed large, bogus allocations (like -1ULL) to 6808 * 0. 
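 * Comparing the rounded size against the original request,
 * below, catches exactly that wraparound.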
6809 */ 6810 if (size < regs[r1] || 6811 !DTRACE_INSCRATCH(mstate, size)) { 6812 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6813 regs[rd] = 0; 6814 break; 6815 } 6816 6817 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 6818 mstate->dtms_scratch_ptr += size; 6819 regs[rd] = ptr; 6820 break; 6821 } 6822 6823 case DIF_OP_COPYS: 6824 if (!dtrace_canstore(regs[rd], regs[r2], 6825 mstate, vstate)) { 6826 *flags |= CPU_DTRACE_BADADDR; 6827 *illval = regs[rd]; 6828 break; 6829 } 6830 6831 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 6832 break; 6833 6834 dtrace_bcopy((void *)(uintptr_t)regs[r1], 6835 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 6836 break; 6837 6838 case DIF_OP_STB: 6839 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 6840 *flags |= CPU_DTRACE_BADADDR; 6841 *illval = regs[rd]; 6842 break; 6843 } 6844 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 6845 break; 6846 6847 case DIF_OP_STH: 6848 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 6849 *flags |= CPU_DTRACE_BADADDR; 6850 *illval = regs[rd]; 6851 break; 6852 } 6853 if (regs[rd] & 1) { 6854 *flags |= CPU_DTRACE_BADALIGN; 6855 *illval = regs[rd]; 6856 break; 6857 } 6858 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 6859 break; 6860 6861 case DIF_OP_STW: 6862 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 6863 *flags |= CPU_DTRACE_BADADDR; 6864 *illval = regs[rd]; 6865 break; 6866 } 6867 if (regs[rd] & 3) { 6868 *flags |= CPU_DTRACE_BADALIGN; 6869 *illval = regs[rd]; 6870 break; 6871 } 6872 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 6873 break; 6874 6875 case DIF_OP_STX: 6876 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 6877 *flags |= CPU_DTRACE_BADADDR; 6878 *illval = regs[rd]; 6879 break; 6880 } 6881 if (regs[rd] & 7) { 6882 *flags |= CPU_DTRACE_BADALIGN; 6883 *illval = regs[rd]; 6884 break; 6885 } 6886 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 6887 break; 6888 } 6889 } 6890 6891 if (!(*flags & CPU_DTRACE_FAULT)) 6892 return (rval); 6893 6894 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 6895 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 6896 6897 return (0); 6898 } 6899 6900 static void 6901 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 6902 { 6903 dtrace_probe_t *probe = ecb->dte_probe; 6904 dtrace_provider_t *prov = probe->dtpr_provider; 6905 char c[DTRACE_FULLNAMELEN + 80], *str; 6906 char *msg = "dtrace: breakpoint action at probe "; 6907 char *ecbmsg = " (ecb "; 6908 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 6909 uintptr_t val = (uintptr_t)ecb; 6910 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 6911 6912 if (dtrace_destructive_disallow) 6913 return; 6914 6915 /* 6916 * It's impossible to be taking action on the NULL probe. 6917 */ 6918 ASSERT(probe != NULL); 6919 6920 /* 6921 * This is a poor man's (destitute man's?) sprintf(): we want to 6922 * print the provider name, module name, function name and name of 6923 * the probe, along with the hex address of the ECB with the breakpoint 6924 * action -- all of which we must place in the character buffer by 6925 * hand. 
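 * (A real snprintf() is not an option: we may be in probe
 * context here, where only lock-free, fault-tolerant
 * operations are permitted.)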
6926 */ 6927 while (*msg != '\0') 6928 c[i++] = *msg++; 6929 6930 for (str = prov->dtpv_name; *str != '\0'; str++) 6931 c[i++] = *str; 6932 c[i++] = ':'; 6933 6934 for (str = probe->dtpr_mod; *str != '\0'; str++) 6935 c[i++] = *str; 6936 c[i++] = ':'; 6937 6938 for (str = probe->dtpr_func; *str != '\0'; str++) 6939 c[i++] = *str; 6940 c[i++] = ':'; 6941 6942 for (str = probe->dtpr_name; *str != '\0'; str++) 6943 c[i++] = *str; 6944 6945 while (*ecbmsg != '\0') 6946 c[i++] = *ecbmsg++; 6947 6948 while (shift >= 0) { 6949 mask = (uintptr_t)0xf << shift; 6950 6951 if (val >= ((uintptr_t)1 << shift)) 6952 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 6953 shift -= 4; 6954 } 6955 6956 c[i++] = ')'; 6957 c[i] = '\0'; 6958 6959 #ifdef illumos 6960 debug_enter(c); 6961 #else 6962 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 6963 #endif 6964 } 6965 6966 static void 6967 dtrace_action_panic(dtrace_ecb_t *ecb) 6968 { 6969 dtrace_probe_t *probe = ecb->dte_probe; 6970 6971 /* 6972 * It's impossible to be taking action on the NULL probe. 6973 */ 6974 ASSERT(probe != NULL); 6975 6976 if (dtrace_destructive_disallow) 6977 return; 6978 6979 if (dtrace_panicked != NULL) 6980 return; 6981 6982 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 6983 return; 6984 6985 /* 6986 * We won the right to panic. (We want to be sure that only one 6987 * thread calls panic() from dtrace_probe(), and that panic() is 6988 * called exactly once.) 6989 */ 6990 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 6991 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 6992 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 6993 } 6994 6995 static void 6996 dtrace_action_raise(uint64_t sig) 6997 { 6998 if (dtrace_destructive_disallow) 6999 return; 7000 7001 if (sig >= NSIG) { 7002 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 7003 return; 7004 } 7005 7006 #ifdef illumos 7007 /* 7008 * raise() has a queue depth of 1 -- we ignore all subsequent 7009 * invocations of the raise() action. 7010 */ 7011 if (curthread->t_dtrace_sig == 0) 7012 curthread->t_dtrace_sig = (uint8_t)sig; 7013 7014 curthread->t_sig_check = 1; 7015 aston(curthread); 7016 #else 7017 struct proc *p = curproc; 7018 PROC_LOCK(p); 7019 kern_psignal(p, sig); 7020 PROC_UNLOCK(p); 7021 #endif 7022 } 7023 7024 static void 7025 dtrace_action_stop(void) 7026 { 7027 if (dtrace_destructive_disallow) 7028 return; 7029 7030 #ifdef illumos 7031 if (!curthread->t_dtrace_stop) { 7032 curthread->t_dtrace_stop = 1; 7033 curthread->t_sig_check = 1; 7034 aston(curthread); 7035 } 7036 #else 7037 struct proc *p = curproc; 7038 PROC_LOCK(p); 7039 kern_psignal(p, SIGSTOP); 7040 PROC_UNLOCK(p); 7041 #endif 7042 } 7043 7044 static void 7045 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 7046 { 7047 hrtime_t now; 7048 volatile uint16_t *flags; 7049 #ifdef illumos 7050 cpu_t *cpu = CPU; 7051 #else 7052 cpu_t *cpu = &solaris_cpu[curcpu]; 7053 #endif 7054 7055 if (dtrace_destructive_disallow) 7056 return; 7057 7058 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7059 7060 now = dtrace_gethrtime(); 7061 7062 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 7063 /* 7064 * We need to advance the mark to the current time. 7065 */ 7066 cpu->cpu_dtrace_chillmark = now; 7067 cpu->cpu_dtrace_chilled = 0; 7068 } 7069 7070 /* 7071 * Now check to see if the requested chill time would take us over 7072 * the maximum amount of time allowed in the chill interval. (Or 7073 * worse, if the calculation itself induces overflow.) 
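 * (The second comparison below detects the overflow case: if
 * adding val wraps around, the sum compares less than the
 * chill time already accumulated.)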
7074 */ 7075 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 7076 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 7077 *flags |= CPU_DTRACE_ILLOP; 7078 return; 7079 } 7080 7081 while (dtrace_gethrtime() - now < val) 7082 continue; 7083 7084 /* 7085 * Normally, we assure that the value of the variable "timestamp" does 7086 * not change within an ECB. The presence of chill() represents an 7087 * exception to this rule, however. 7088 */ 7089 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 7090 cpu->cpu_dtrace_chilled += val; 7091 } 7092 7093 static void 7094 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 7095 uint64_t *buf, uint64_t arg) 7096 { 7097 int nframes = DTRACE_USTACK_NFRAMES(arg); 7098 int strsize = DTRACE_USTACK_STRSIZE(arg); 7099 uint64_t *pcs = &buf[1], *fps; 7100 char *str = (char *)&pcs[nframes]; 7101 int size, offs = 0, i, j; 7102 size_t rem; 7103 uintptr_t old = mstate->dtms_scratch_ptr, saved; 7104 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 7105 char *sym; 7106 7107 /* 7108 * Should be taking a faster path if string space has not been 7109 * allocated. 7110 */ 7111 ASSERT(strsize != 0); 7112 7113 /* 7114 * We will first allocate some temporary space for the frame pointers. 7115 */ 7116 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 7117 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 7118 (nframes * sizeof (uint64_t)); 7119 7120 if (!DTRACE_INSCRATCH(mstate, size)) { 7121 /* 7122 * Not enough room for our frame pointers -- need to indicate 7123 * that we ran out of scratch space. 7124 */ 7125 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 7126 return; 7127 } 7128 7129 mstate->dtms_scratch_ptr += size; 7130 saved = mstate->dtms_scratch_ptr; 7131 7132 /* 7133 * Now get a stack with both program counters and frame pointers. 7134 */ 7135 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7136 dtrace_getufpstack(buf, fps, nframes + 1); 7137 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7138 7139 /* 7140 * If that faulted, we're cooked. 7141 */ 7142 if (*flags & CPU_DTRACE_FAULT) 7143 goto out; 7144 7145 /* 7146 * Now we want to walk up the stack, calling the USTACK helper. For 7147 * each iteration, we restore the scratch pointer. 7148 */ 7149 for (i = 0; i < nframes; i++) { 7150 mstate->dtms_scratch_ptr = saved; 7151 7152 if (offs >= strsize) 7153 break; 7154 7155 sym = (char *)(uintptr_t)dtrace_helper( 7156 DTRACE_HELPER_ACTION_USTACK, 7157 mstate, state, pcs[i], fps[i]); 7158 7159 /* 7160 * If we faulted while running the helper, we're going to 7161 * clear the fault and null out the corresponding string. 7162 */ 7163 if (*flags & CPU_DTRACE_FAULT) { 7164 *flags &= ~CPU_DTRACE_FAULT; 7165 str[offs++] = '\0'; 7166 continue; 7167 } 7168 7169 if (sym == NULL) { 7170 str[offs++] = '\0'; 7171 continue; 7172 } 7173 7174 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate, 7175 &(state->dts_vstate))) { 7176 str[offs++] = '\0'; 7177 continue; 7178 } 7179 7180 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7181 7182 /* 7183 * Now copy in the string that the helper returned to us. 
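 * (We copy at most rem bytes -- the limit established by the
 * dtrace_strcanload() check above -- stopping early at the
 * terminating NUL byte.)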
7184 */ 7185 for (j = 0; offs + j < strsize && j < rem; j++) { 7186 if ((str[offs + j] = sym[j]) == '\0') 7187 break; 7188 } 7189 7190 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7191 7192 offs += j + 1; 7193 } 7194 7195 if (offs >= strsize) { 7196 /* 7197 * If we didn't have room for all of the strings, we don't 7198 * abort processing -- this needn't be a fatal error -- but we 7199 * still want to increment a counter (dts_stkstroverflows) to 7200 * allow this condition to be warned about. (If this is from 7201 * a jstack() action, it is easily tuned via jstackstrsize.) 7202 */ 7203 dtrace_error(&state->dts_stkstroverflows); 7204 } 7205 7206 while (offs < strsize) 7207 str[offs++] = '\0'; 7208 7209 out: 7210 mstate->dtms_scratch_ptr = old; 7211 } 7212 7213 static void 7214 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, 7215 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) 7216 { 7217 volatile uint16_t *flags; 7218 uint64_t val = *valp; 7219 size_t valoffs = *valoffsp; 7220 7221 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7222 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); 7223 7224 /* 7225 * If this is a string, we're going to only load until we find the zero 7226 * byte -- after which we'll store zero bytes. 7227 */ 7228 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 7229 char c = '\0' + 1; 7230 size_t s; 7231 7232 for (s = 0; s < size; s++) { 7233 if (c != '\0' && dtkind == DIF_TF_BYREF) { 7234 c = dtrace_load8(val++); 7235 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { 7236 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7237 c = dtrace_fuword8((void *)(uintptr_t)val++); 7238 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7239 if (*flags & CPU_DTRACE_FAULT) 7240 break; 7241 } 7242 7243 DTRACE_STORE(uint8_t, tomax, valoffs++, c); 7244 7245 if (c == '\0' && intuple) 7246 break; 7247 } 7248 } else { 7249 uint8_t c; 7250 while (valoffs < end) { 7251 if (dtkind == DIF_TF_BYREF) { 7252 c = dtrace_load8(val++); 7253 } else if (dtkind == DIF_TF_BYUREF) { 7254 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7255 c = dtrace_fuword8((void *)(uintptr_t)val++); 7256 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7257 if (*flags & CPU_DTRACE_FAULT) 7258 break; 7259 } 7260 7261 DTRACE_STORE(uint8_t, tomax, 7262 valoffs++, c); 7263 } 7264 } 7265 7266 *valp = val; 7267 *valoffsp = valoffs; 7268 } 7269 7270 /* 7271 * Disables interrupts and sets the per-thread inprobe flag. When DEBUG is 7272 * defined, we also assert that we are not recursing unless the probe ID is an 7273 * error probe. 7274 */ 7275 static dtrace_icookie_t 7276 dtrace_probe_enter(dtrace_id_t id) 7277 { 7278 dtrace_icookie_t cookie; 7279 7280 cookie = dtrace_interrupt_disable(); 7281 7282 /* 7283 * Unless this is an ERROR probe, we are not allowed to recurse in 7284 * dtrace_probe(). Recursing into DTrace probe usually means that a 7285 * function is instrumented that should not have been instrumented or 7286 * that the ordering guarantee of the records will be violated, 7287 * resulting in unexpected output. If there is an exception to this 7288 * assertion, a new case should be added. 7289 */ 7290 ASSERT(curthread->t_dtrace_inprobe == 0 || 7291 id == dtrace_probeid_error); 7292 curthread->t_dtrace_inprobe = 1; 7293 7294 return (cookie); 7295 } 7296 7297 /* 7298 * Clears the per-thread inprobe flag and enables interrupts. 
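 * This is the counterpart to dtrace_probe_enter(), and must
 * be passed the cookie that function returned.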
7299 */ 7300 static void 7301 dtrace_probe_exit(dtrace_icookie_t cookie) 7302 { 7303 7304 curthread->t_dtrace_inprobe = 0; 7305 dtrace_interrupt_enable(cookie); 7306 } 7307 7308 /* 7309 * If you're looking for the epicenter of DTrace, you just found it. This 7310 * is the function called by the provider to fire a probe -- from which all 7311 * subsequent probe-context DTrace activity emanates. 7312 */ 7313 void 7314 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 7315 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 7316 { 7317 processorid_t cpuid; 7318 dtrace_icookie_t cookie; 7319 dtrace_probe_t *probe; 7320 dtrace_mstate_t mstate; 7321 dtrace_ecb_t *ecb; 7322 dtrace_action_t *act; 7323 intptr_t offs; 7324 size_t size; 7325 int vtime, onintr; 7326 volatile uint16_t *flags; 7327 hrtime_t now; 7328 7329 if (panicstr != NULL) 7330 return; 7331 7332 #ifdef illumos 7333 /* 7334 * Kick out immediately if this CPU is still being born (in which case 7335 * curthread will be set to -1) or the current thread can't allow 7336 * probes in its current context. 7337 */ 7338 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 7339 return; 7340 #endif 7341 7342 cookie = dtrace_probe_enter(id); 7343 probe = dtrace_probes[id - 1]; 7344 cpuid = curcpu; 7345 onintr = CPU_ON_INTR(CPU); 7346 7347 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 7348 probe->dtpr_predcache == curthread->t_predcache) { 7349 /* 7350 * We have hit in the predicate cache; we know that 7351 * this predicate would evaluate to be false. 7352 */ 7353 dtrace_probe_exit(cookie); 7354 return; 7355 } 7356 7357 #ifdef illumos 7358 if (panic_quiesce) { 7359 #else 7360 if (panicstr != NULL) { 7361 #endif 7362 /* 7363 * We don't trace anything if we're panicking. 7364 */ 7365 dtrace_probe_exit(cookie); 7366 return; 7367 } 7368 7369 now = mstate.dtms_timestamp = dtrace_gethrtime(); 7370 mstate.dtms_present = DTRACE_MSTATE_TIMESTAMP; 7371 vtime = dtrace_vtime_references != 0; 7372 7373 if (vtime && curthread->t_dtrace_start) 7374 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 7375 7376 mstate.dtms_difo = NULL; 7377 mstate.dtms_probe = probe; 7378 mstate.dtms_strtok = 0; 7379 mstate.dtms_arg[0] = arg0; 7380 mstate.dtms_arg[1] = arg1; 7381 mstate.dtms_arg[2] = arg2; 7382 mstate.dtms_arg[3] = arg3; 7383 mstate.dtms_arg[4] = arg4; 7384 7385 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 7386 7387 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 7388 dtrace_predicate_t *pred = ecb->dte_predicate; 7389 dtrace_state_t *state = ecb->dte_state; 7390 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 7391 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 7392 dtrace_vstate_t *vstate = &state->dts_vstate; 7393 dtrace_provider_t *prov = probe->dtpr_provider; 7394 uint64_t tracememsize = 0; 7395 int committed = 0; 7396 caddr_t tomax; 7397 7398 /* 7399 * A little subtlety with the following (seemingly innocuous) 7400 * declaration of the automatic 'val': by looking at the 7401 * code, you might think that it could be declared in the 7402 * action processing loop, below. (That is, it's only used in 7403 * the action processing loop.) However, it must be declared 7404 * out of that scope because in the case of DIF expression 7405 * arguments to aggregating actions, one iteration of the 7406 * action loop will use the last iteration's value. 
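 * (Whether an aggregating action actually consumes this
 * carried-over value is indicated by its dtag_hasarg field;
 * see the dtrace_aggregate() call below.)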
7407 */ 7408 uint64_t val = 0; 7409 7410 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 7411 mstate.dtms_getf = NULL; 7412 7413 *flags &= ~CPU_DTRACE_ERROR; 7414 7415 if (prov == dtrace_provider) { 7416 /* 7417 * If dtrace itself is the provider of this probe, 7418 * we're only going to continue processing the ECB if 7419 * arg0 (the dtrace_state_t) is equal to the ECB's 7420 * creating state. (This prevents disjoint consumers 7421 * from seeing one another's metaprobes.) 7422 */ 7423 if (arg0 != (uint64_t)(uintptr_t)state) 7424 continue; 7425 } 7426 7427 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 7428 /* 7429 * We're not currently active. If our provider isn't 7430 * the dtrace pseudo provider, we're not interested. 7431 */ 7432 if (prov != dtrace_provider) 7433 continue; 7434 7435 /* 7436 * Now we must further check if we are in the BEGIN 7437 * probe. If we are, we will only continue processing 7438 * if we're still in WARMUP -- if one BEGIN enabling 7439 * has invoked the exit() action, we don't want to 7440 * evaluate subsequent BEGIN enablings. 7441 */ 7442 if (probe->dtpr_id == dtrace_probeid_begin && 7443 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 7444 ASSERT(state->dts_activity == 7445 DTRACE_ACTIVITY_DRAINING); 7446 continue; 7447 } 7448 } 7449 7450 if (ecb->dte_cond) { 7451 /* 7452 * If the dte_cond bits indicate that this 7453 * consumer is only allowed to see user-mode firings 7454 * of this probe, call the provider's dtps_usermode() 7455 * entry point to check that the probe was fired 7456 * while in a user context. Skip this ECB if that's 7457 * not the case. 7458 */ 7459 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 7460 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 7461 probe->dtpr_id, probe->dtpr_arg) == 0) 7462 continue; 7463 7464 #ifdef illumos 7465 /* 7466 * This is more subtle than it looks. We have to be 7467 * absolutely certain that CRED() isn't going to 7468 * change out from under us so it's only legit to 7469 * examine that structure if we're in constrained 7470 * situations. Currently, the only times we'll this 7471 * check is if a non-super-user has enabled the 7472 * profile or syscall providers -- providers that 7473 * allow visibility of all processes. For the 7474 * profile case, the check above will ensure that 7475 * we're examining a user context. 7476 */ 7477 if (ecb->dte_cond & DTRACE_COND_OWNER) { 7478 cred_t *cr; 7479 cred_t *s_cr = 7480 ecb->dte_state->dts_cred.dcr_cred; 7481 proc_t *proc; 7482 7483 ASSERT(s_cr != NULL); 7484 7485 if ((cr = CRED()) == NULL || 7486 s_cr->cr_uid != cr->cr_uid || 7487 s_cr->cr_uid != cr->cr_ruid || 7488 s_cr->cr_uid != cr->cr_suid || 7489 s_cr->cr_gid != cr->cr_gid || 7490 s_cr->cr_gid != cr->cr_rgid || 7491 s_cr->cr_gid != cr->cr_sgid || 7492 (proc = ttoproc(curthread)) == NULL || 7493 (proc->p_flag & SNOCD)) 7494 continue; 7495 } 7496 7497 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 7498 cred_t *cr; 7499 cred_t *s_cr = 7500 ecb->dte_state->dts_cred.dcr_cred; 7501 7502 ASSERT(s_cr != NULL); 7503 7504 if ((cr = CRED()) == NULL || 7505 s_cr->cr_zone->zone_id != 7506 cr->cr_zone->zone_id) 7507 continue; 7508 } 7509 #endif 7510 } 7511 7512 if (now - state->dts_alive > dtrace_deadman_timeout) { 7513 /* 7514 * We seem to be dead. 
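 * (The deadman has fired: this state's dts_alive timestamp
 * has not advanced within dtrace_deadman_timeout.)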
Unless we (a) have kernel 7515 * destructive permissions (b) have explicitly enabled 7516 * destructive actions and (c) destructive actions have 7517 * not been disabled, we're going to transition into 7518 * the KILLED state, from which no further processing 7519 * on this state will be performed. 7520 */ 7521 if (!dtrace_priv_kernel_destructive(state) || 7522 !state->dts_cred.dcr_destructive || 7523 dtrace_destructive_disallow) { 7524 void *activity = &state->dts_activity; 7525 dtrace_activity_t curstate; 7526 7527 do { 7528 curstate = state->dts_activity; 7529 } while (dtrace_cas32(activity, curstate, 7530 DTRACE_ACTIVITY_KILLED) != curstate); 7531 7532 continue; 7533 } 7534 } 7535 7536 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 7537 ecb->dte_alignment, state, &mstate)) < 0) 7538 continue; 7539 7540 tomax = buf->dtb_tomax; 7541 ASSERT(tomax != NULL); 7542 7543 if (ecb->dte_size != 0) { 7544 dtrace_rechdr_t dtrh; 7545 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 7546 mstate.dtms_timestamp = dtrace_gethrtime(); 7547 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7548 } 7549 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 7550 dtrh.dtrh_epid = ecb->dte_epid; 7551 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 7552 mstate.dtms_timestamp); 7553 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 7554 } 7555 7556 mstate.dtms_epid = ecb->dte_epid; 7557 mstate.dtms_present |= DTRACE_MSTATE_EPID; 7558 7559 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 7560 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 7561 else 7562 mstate.dtms_access = 0; 7563 7564 if (pred != NULL) { 7565 dtrace_difo_t *dp = pred->dtp_difo; 7566 uint64_t rval; 7567 7568 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 7569 7570 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 7571 dtrace_cacheid_t cid = probe->dtpr_predcache; 7572 7573 if (cid != DTRACE_CACHEIDNONE && !onintr) { 7574 /* 7575 * Update the predicate cache... 7576 */ 7577 ASSERT(cid == pred->dtp_cacheid); 7578 curthread->t_predcache = cid; 7579 } 7580 7581 continue; 7582 } 7583 } 7584 7585 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 7586 act != NULL; act = act->dta_next) { 7587 size_t valoffs; 7588 dtrace_difo_t *dp; 7589 dtrace_recdesc_t *rec = &act->dta_rec; 7590 7591 size = rec->dtrd_size; 7592 valoffs = offs + rec->dtrd_offset; 7593 7594 if (DTRACEACT_ISAGG(act->dta_kind)) { 7595 uint64_t v = 0xbad; 7596 dtrace_aggregation_t *agg; 7597 7598 agg = (dtrace_aggregation_t *)act; 7599 7600 if ((dp = act->dta_difo) != NULL) 7601 v = dtrace_dif_emulate(dp, 7602 &mstate, vstate, state); 7603 7604 if (*flags & CPU_DTRACE_ERROR) 7605 continue; 7606 7607 /* 7608 * Note that we always pass the expression 7609 * value from the previous iteration of the 7610 * action loop. This value will only be used 7611 * if there is an expression argument to the 7612 * aggregating action, denoted by the 7613 * dtag_hasarg field. 
7614 */ 7615 dtrace_aggregate(agg, buf, 7616 offs, aggbuf, v, val); 7617 continue; 7618 } 7619 7620 switch (act->dta_kind) { 7621 case DTRACEACT_STOP: 7622 if (dtrace_priv_proc_destructive(state)) 7623 dtrace_action_stop(); 7624 continue; 7625 7626 case DTRACEACT_BREAKPOINT: 7627 if (dtrace_priv_kernel_destructive(state)) 7628 dtrace_action_breakpoint(ecb); 7629 continue; 7630 7631 case DTRACEACT_PANIC: 7632 if (dtrace_priv_kernel_destructive(state)) 7633 dtrace_action_panic(ecb); 7634 continue; 7635 7636 case DTRACEACT_STACK: 7637 if (!dtrace_priv_kernel(state)) 7638 continue; 7639 7640 dtrace_getpcstack((pc_t *)(tomax + valoffs), 7641 size / sizeof (pc_t), probe->dtpr_aframes, 7642 DTRACE_ANCHORED(probe) ? NULL : 7643 (uint32_t *)arg0); 7644 continue; 7645 7646 case DTRACEACT_JSTACK: 7647 case DTRACEACT_USTACK: 7648 if (!dtrace_priv_proc(state)) 7649 continue; 7650 7651 /* 7652 * See comment in DIF_VAR_PID. 7653 */ 7654 if (DTRACE_ANCHORED(mstate.dtms_probe) && 7655 CPU_ON_INTR(CPU)) { 7656 int depth = DTRACE_USTACK_NFRAMES( 7657 rec->dtrd_arg) + 1; 7658 7659 dtrace_bzero((void *)(tomax + valoffs), 7660 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 7661 + depth * sizeof (uint64_t)); 7662 7663 continue; 7664 } 7665 7666 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 7667 curproc->p_dtrace_helpers != NULL) { 7668 /* 7669 * This is the slow path -- we have 7670 * allocated string space, and we're 7671 * getting the stack of a process that 7672 * has helpers. Call into a separate 7673 * routine to perform this processing. 7674 */ 7675 dtrace_action_ustack(&mstate, state, 7676 (uint64_t *)(tomax + valoffs), 7677 rec->dtrd_arg); 7678 continue; 7679 } 7680 7681 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7682 dtrace_getupcstack((uint64_t *) 7683 (tomax + valoffs), 7684 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 7685 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7686 continue; 7687 7688 default: 7689 break; 7690 } 7691 7692 dp = act->dta_difo; 7693 ASSERT(dp != NULL); 7694 7695 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 7696 7697 if (*flags & CPU_DTRACE_ERROR) 7698 continue; 7699 7700 switch (act->dta_kind) { 7701 case DTRACEACT_SPECULATE: { 7702 dtrace_rechdr_t *dtrh; 7703 7704 ASSERT(buf == &state->dts_buffer[cpuid]); 7705 buf = dtrace_speculation_buffer(state, 7706 cpuid, val); 7707 7708 if (buf == NULL) { 7709 *flags |= CPU_DTRACE_DROP; 7710 continue; 7711 } 7712 7713 offs = dtrace_buffer_reserve(buf, 7714 ecb->dte_needed, ecb->dte_alignment, 7715 state, NULL); 7716 7717 if (offs < 0) { 7718 *flags |= CPU_DTRACE_DROP; 7719 continue; 7720 } 7721 7722 tomax = buf->dtb_tomax; 7723 ASSERT(tomax != NULL); 7724 7725 if (ecb->dte_size == 0) 7726 continue; 7727 7728 ASSERT3U(ecb->dte_size, >=, 7729 sizeof (dtrace_rechdr_t)); 7730 dtrh = ((void *)(tomax + offs)); 7731 dtrh->dtrh_epid = ecb->dte_epid; 7732 /* 7733 * When the speculation is committed, all of 7734 * the records in the speculative buffer will 7735 * have their timestamps set to the commit 7736 * time. Until then, it is set to a sentinel 7737 * value, for debugability. 7738 */ 7739 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 7740 continue; 7741 } 7742 7743 case DTRACEACT_PRINTM: { 7744 /* The DIF returns a 'memref'. */ 7745 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 7746 7747 /* Get the size from the memref. */ 7748 size = memref[1]; 7749 7750 /* 7751 * Check if the size exceeds the allocated 7752 * buffer size. 7753 */ 7754 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7755 /* Flag a drop! 
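 * The memref describes more data than this record was
 * sized for, so we drop it rather than overrun the buffer.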
*/ 7756 *flags |= CPU_DTRACE_DROP; 7757 continue; 7758 } 7759 7760 /* Store the size in the buffer first. */ 7761 DTRACE_STORE(uintptr_t, tomax, 7762 valoffs, size); 7763 7764 /* 7765 * Offset the buffer address to the start 7766 * of the data. 7767 */ 7768 valoffs += sizeof(uintptr_t); 7769 7770 /* 7771 * Reset to the memory address rather than 7772 * the memref array, then let the BYREF 7773 * code below do the work to store the 7774 * memory data in the buffer. 7775 */ 7776 val = memref[0]; 7777 break; 7778 } 7779 7780 case DTRACEACT_CHILL: 7781 if (dtrace_priv_kernel_destructive(state)) 7782 dtrace_action_chill(&mstate, val); 7783 continue; 7784 7785 case DTRACEACT_RAISE: 7786 if (dtrace_priv_proc_destructive(state)) 7787 dtrace_action_raise(val); 7788 continue; 7789 7790 case DTRACEACT_COMMIT: 7791 ASSERT(!committed); 7792 7793 /* 7794 * We need to commit our buffer state. 7795 */ 7796 if (ecb->dte_size) 7797 buf->dtb_offset = offs + ecb->dte_size; 7798 buf = &state->dts_buffer[cpuid]; 7799 dtrace_speculation_commit(state, cpuid, val); 7800 committed = 1; 7801 continue; 7802 7803 case DTRACEACT_DISCARD: 7804 dtrace_speculation_discard(state, cpuid, val); 7805 continue; 7806 7807 case DTRACEACT_DIFEXPR: 7808 case DTRACEACT_LIBACT: 7809 case DTRACEACT_PRINTF: 7810 case DTRACEACT_PRINTA: 7811 case DTRACEACT_SYSTEM: 7812 case DTRACEACT_FREOPEN: 7813 case DTRACEACT_TRACEMEM: 7814 break; 7815 7816 case DTRACEACT_TRACEMEM_DYNSIZE: 7817 tracememsize = val; 7818 break; 7819 7820 case DTRACEACT_SYM: 7821 case DTRACEACT_MOD: 7822 if (!dtrace_priv_kernel(state)) 7823 continue; 7824 break; 7825 7826 case DTRACEACT_USYM: 7827 case DTRACEACT_UMOD: 7828 case DTRACEACT_UADDR: { 7829 #ifdef illumos 7830 struct pid *pid = curthread->t_procp->p_pidp; 7831 #endif 7832 7833 if (!dtrace_priv_proc(state)) 7834 continue; 7835 7836 DTRACE_STORE(uint64_t, tomax, 7837 #ifdef illumos 7838 valoffs, (uint64_t)pid->pid_id); 7839 #else 7840 valoffs, (uint64_t) curproc->p_pid); 7841 #endif 7842 DTRACE_STORE(uint64_t, tomax, 7843 valoffs + sizeof (uint64_t), val); 7844 7845 continue; 7846 } 7847 7848 case DTRACEACT_EXIT: { 7849 /* 7850 * For the exit action, we are going to attempt 7851 * to atomically set our activity to be 7852 * draining. If this fails (either because 7853 * another CPU has beat us to the exit action, 7854 * or because our current activity is something 7855 * other than ACTIVE or WARMUP), we will 7856 * continue. This assures that the exit action 7857 * can be successfully recorded at most once 7858 * when we're in the ACTIVE state. If we're 7859 * encountering the exit() action while in 7860 * COOLDOWN, however, we want to honor the new 7861 * status code. (We know that we're the only 7862 * thread in COOLDOWN, so there is no race.) 
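 * (The atomic dtrace_cas32() below enforces this: we only
 * transition to DRAINING if the activity is still the value
 * that we observed.)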
7863 */ 7864 void *activity = &state->dts_activity; 7865 dtrace_activity_t curstate = state->dts_activity; 7866 7867 if (curstate == DTRACE_ACTIVITY_COOLDOWN) 7868 break; 7869 7870 if (curstate != DTRACE_ACTIVITY_WARMUP) 7871 curstate = DTRACE_ACTIVITY_ACTIVE; 7872 7873 if (dtrace_cas32(activity, curstate, 7874 DTRACE_ACTIVITY_DRAINING) != curstate) { 7875 *flags |= CPU_DTRACE_DROP; 7876 continue; 7877 } 7878 7879 break; 7880 } 7881 7882 default: 7883 ASSERT(0); 7884 } 7885 7886 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || 7887 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { 7888 uintptr_t end = valoffs + size; 7889 7890 if (tracememsize != 0 && 7891 valoffs + tracememsize < end) { 7892 end = valoffs + tracememsize; 7893 tracememsize = 0; 7894 } 7895 7896 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && 7897 !dtrace_vcanload((void *)(uintptr_t)val, 7898 &dp->dtdo_rtype, NULL, &mstate, vstate)) 7899 continue; 7900 7901 dtrace_store_by_ref(dp, tomax, size, &valoffs, 7902 &val, end, act->dta_intuple, 7903 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? 7904 DIF_TF_BYREF: DIF_TF_BYUREF); 7905 continue; 7906 } 7907 7908 switch (size) { 7909 case 0: 7910 break; 7911 7912 case sizeof (uint8_t): 7913 DTRACE_STORE(uint8_t, tomax, valoffs, val); 7914 break; 7915 case sizeof (uint16_t): 7916 DTRACE_STORE(uint16_t, tomax, valoffs, val); 7917 break; 7918 case sizeof (uint32_t): 7919 DTRACE_STORE(uint32_t, tomax, valoffs, val); 7920 break; 7921 case sizeof (uint64_t): 7922 DTRACE_STORE(uint64_t, tomax, valoffs, val); 7923 break; 7924 default: 7925 /* 7926 * Any other size should have been returned by 7927 * reference, not by value. 7928 */ 7929 ASSERT(0); 7930 break; 7931 } 7932 } 7933 7934 if (*flags & CPU_DTRACE_DROP) 7935 continue; 7936 7937 if (*flags & CPU_DTRACE_FAULT) { 7938 int ndx; 7939 dtrace_action_t *err; 7940 7941 buf->dtb_errors++; 7942 7943 if (probe->dtpr_id == dtrace_probeid_error) { 7944 /* 7945 * There's nothing we can do -- we had an 7946 * error on the error probe. We bump an 7947 * error counter to at least indicate that 7948 * this condition happened. 7949 */ 7950 dtrace_error(&state->dts_dblerrors); 7951 continue; 7952 } 7953 7954 if (vtime) { 7955 /* 7956 * Before recursing on dtrace_probe(), we 7957 * need to explicitly clear out our start 7958 * time to prevent it from being accumulated 7959 * into t_dtrace_vtime. 7960 */ 7961 curthread->t_dtrace_start = 0; 7962 } 7963 7964 /* 7965 * Iterate over the actions to figure out which action 7966 * we were processing when we experienced the error. 7967 * Note that act points _past_ the faulting action; if 7968 * act is ecb->dte_action, the fault was in the 7969 * predicate, if it's ecb->dte_action->dta_next it's 7970 * in action #1, and so on. 7971 */ 7972 for (err = ecb->dte_action, ndx = 0; 7973 err != act; err = err->dta_next, ndx++) 7974 continue; 7975 7976 dtrace_probe_error(state, ecb->dte_epid, ndx, 7977 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 7978 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 7979 cpu_core[cpuid].cpuc_dtrace_illval); 7980 7981 continue; 7982 } 7983 7984 if (!committed) 7985 buf->dtb_offset = offs + ecb->dte_size; 7986 } 7987 7988 if (vtime) 7989 curthread->t_dtrace_start = dtrace_gethrtime(); 7990 7991 dtrace_probe_exit(cookie); 7992 } 7993 7994 /* 7995 * DTrace Probe Hashing Functions 7996 * 7997 * The functions in this section (and indeed, the functions in remaining 7998 * sections) are not _called_ from probe context. (Any exceptions to this are 7999 * marked with a "Note:".) 
Rather, they are called from elsewhere in the 8000 * DTrace framework to look-up probes in, add probes to and remove probes from 8001 * the DTrace probe hashes. (Each probe is hashed by each element of the 8002 * probe tuple -- allowing for fast lookups, regardless of what was 8003 * specified.) 8004 */ 8005 static uint_t 8006 dtrace_hash_str(const char *p) 8007 { 8008 unsigned int g; 8009 uint_t hval = 0; 8010 8011 while (*p) { 8012 hval = (hval << 4) + *p++; 8013 if ((g = (hval & 0xf0000000)) != 0) 8014 hval ^= g >> 24; 8015 hval &= ~g; 8016 } 8017 return (hval); 8018 } 8019 8020 static dtrace_hash_t * 8021 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 8022 { 8023 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 8024 8025 hash->dth_stroffs = stroffs; 8026 hash->dth_nextoffs = nextoffs; 8027 hash->dth_prevoffs = prevoffs; 8028 8029 hash->dth_size = 1; 8030 hash->dth_mask = hash->dth_size - 1; 8031 8032 hash->dth_tab = kmem_zalloc(hash->dth_size * 8033 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 8034 8035 return (hash); 8036 } 8037 8038 static void 8039 dtrace_hash_destroy(dtrace_hash_t *hash) 8040 { 8041 #ifdef DEBUG 8042 int i; 8043 8044 for (i = 0; i < hash->dth_size; i++) 8045 ASSERT(hash->dth_tab[i] == NULL); 8046 #endif 8047 8048 kmem_free(hash->dth_tab, 8049 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 8050 kmem_free(hash, sizeof (dtrace_hash_t)); 8051 } 8052 8053 static void 8054 dtrace_hash_resize(dtrace_hash_t *hash) 8055 { 8056 int size = hash->dth_size, i, ndx; 8057 int new_size = hash->dth_size << 1; 8058 int new_mask = new_size - 1; 8059 dtrace_hashbucket_t **new_tab, *bucket, *next; 8060 8061 ASSERT((new_size & new_mask) == 0); 8062 8063 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 8064 8065 for (i = 0; i < size; i++) { 8066 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 8067 dtrace_probe_t *probe = bucket->dthb_chain; 8068 8069 ASSERT(probe != NULL); 8070 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 8071 8072 next = bucket->dthb_next; 8073 bucket->dthb_next = new_tab[ndx]; 8074 new_tab[ndx] = bucket; 8075 } 8076 } 8077 8078 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 8079 hash->dth_tab = new_tab; 8080 hash->dth_size = new_size; 8081 hash->dth_mask = new_mask; 8082 } 8083 8084 static void 8085 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 8086 { 8087 int hashval = DTRACE_HASHSTR(hash, new); 8088 int ndx = hashval & hash->dth_mask; 8089 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8090 dtrace_probe_t **nextp, **prevp; 8091 8092 for (; bucket != NULL; bucket = bucket->dthb_next) { 8093 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 8094 goto add; 8095 } 8096 8097 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 8098 dtrace_hash_resize(hash); 8099 dtrace_hash_add(hash, new); 8100 return; 8101 } 8102 8103 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 8104 bucket->dthb_next = hash->dth_tab[ndx]; 8105 hash->dth_tab[ndx] = bucket; 8106 hash->dth_nbuckets++; 8107 8108 add: 8109 nextp = DTRACE_HASHNEXT(hash, new); 8110 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 8111 *nextp = bucket->dthb_chain; 8112 8113 if (bucket->dthb_chain != NULL) { 8114 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 8115 ASSERT(*prevp == NULL); 8116 *prevp = new; 8117 } 8118 8119 bucket->dthb_chain = new; 8120 bucket->dthb_len++; 8121 } 8122 8123 static dtrace_probe_t * 8124 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 8125 
{ 8126 int hashval = DTRACE_HASHSTR(hash, template); 8127 int ndx = hashval & hash->dth_mask; 8128 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8129 8130 for (; bucket != NULL; bucket = bucket->dthb_next) { 8131 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 8132 return (bucket->dthb_chain); 8133 } 8134 8135 return (NULL); 8136 } 8137 8138 static int 8139 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 8140 { 8141 int hashval = DTRACE_HASHSTR(hash, template); 8142 int ndx = hashval & hash->dth_mask; 8143 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8144 8145 for (; bucket != NULL; bucket = bucket->dthb_next) { 8146 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 8147 return (bucket->dthb_len); 8148 } 8149 8150 return (0); 8151 } 8152 8153 static void 8154 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 8155 { 8156 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 8157 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8158 8159 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 8160 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 8161 8162 /* 8163 * Find the bucket that we're removing this probe from. 8164 */ 8165 for (; bucket != NULL; bucket = bucket->dthb_next) { 8166 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 8167 break; 8168 } 8169 8170 ASSERT(bucket != NULL); 8171 8172 if (*prevp == NULL) { 8173 if (*nextp == NULL) { 8174 /* 8175 * The removed probe was the only probe on this 8176 * bucket; we need to remove the bucket. 8177 */ 8178 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 8179 8180 ASSERT(bucket->dthb_chain == probe); 8181 ASSERT(b != NULL); 8182 8183 if (b == bucket) { 8184 hash->dth_tab[ndx] = bucket->dthb_next; 8185 } else { 8186 while (b->dthb_next != bucket) 8187 b = b->dthb_next; 8188 b->dthb_next = bucket->dthb_next; 8189 } 8190 8191 ASSERT(hash->dth_nbuckets > 0); 8192 hash->dth_nbuckets--; 8193 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 8194 return; 8195 } 8196 8197 bucket->dthb_chain = *nextp; 8198 } else { 8199 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 8200 } 8201 8202 if (*nextp != NULL) 8203 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 8204 } 8205 8206 /* 8207 * DTrace Utility Functions 8208 * 8209 * These are random utility functions that are _not_ called from probe context. 8210 */ 8211 static int 8212 dtrace_badattr(const dtrace_attribute_t *a) 8213 { 8214 return (a->dtat_name > DTRACE_STABILITY_MAX || 8215 a->dtat_data > DTRACE_STABILITY_MAX || 8216 a->dtat_class > DTRACE_CLASS_MAX); 8217 } 8218 8219 /* 8220 * Return a duplicate copy of a string. If the specified string is NULL, 8221 * this function returns a zero-length string. 8222 */ 8223 static char * 8224 dtrace_strdup(const char *str) 8225 { 8226 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 8227 8228 if (str != NULL) 8229 (void) strcpy(new, str); 8230 8231 return (new); 8232 } 8233 8234 #define DTRACE_ISALPHA(c) \ 8235 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 8236 8237 static int 8238 dtrace_badname(const char *s) 8239 { 8240 char c; 8241 8242 if (s == NULL || (c = *s++) == '\0') 8243 return (0); 8244 8245 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 8246 return (1); 8247 8248 while ((c = *s++) != '\0') { 8249 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 8250 c != '-' && c != '_' && c != '.' 
&& c != '`') 8251 return (1); 8252 } 8253 8254 return (0); 8255 } 8256 8257 static void 8258 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 8259 { 8260 uint32_t priv; 8261 8262 #ifdef illumos 8263 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 8264 /* 8265 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 8266 */ 8267 priv = DTRACE_PRIV_ALL; 8268 } else { 8269 *uidp = crgetuid(cr); 8270 *zoneidp = crgetzoneid(cr); 8271 8272 priv = 0; 8273 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 8274 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 8275 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 8276 priv |= DTRACE_PRIV_USER; 8277 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 8278 priv |= DTRACE_PRIV_PROC; 8279 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 8280 priv |= DTRACE_PRIV_OWNER; 8281 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 8282 priv |= DTRACE_PRIV_ZONEOWNER; 8283 } 8284 #else 8285 priv = DTRACE_PRIV_ALL; 8286 #endif 8287 8288 *privp = priv; 8289 } 8290 8291 #ifdef DTRACE_ERRDEBUG 8292 static void 8293 dtrace_errdebug(const char *str) 8294 { 8295 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 8296 int occupied = 0; 8297 8298 mutex_enter(&dtrace_errlock); 8299 dtrace_errlast = str; 8300 dtrace_errthread = curthread; 8301 8302 while (occupied++ < DTRACE_ERRHASHSZ) { 8303 if (dtrace_errhash[hval].dter_msg == str) { 8304 dtrace_errhash[hval].dter_count++; 8305 goto out; 8306 } 8307 8308 if (dtrace_errhash[hval].dter_msg != NULL) { 8309 hval = (hval + 1) % DTRACE_ERRHASHSZ; 8310 continue; 8311 } 8312 8313 dtrace_errhash[hval].dter_msg = str; 8314 dtrace_errhash[hval].dter_count = 1; 8315 goto out; 8316 } 8317 8318 panic("dtrace: undersized error hash"); 8319 out: 8320 mutex_exit(&dtrace_errlock); 8321 } 8322 #endif 8323 8324 /* 8325 * DTrace Matching Functions 8326 * 8327 * These functions are used to match groups of probes, given some elements of 8328 * a probe tuple, or some globbed expressions for elements of a probe tuple. 8329 */ 8330 static int 8331 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 8332 zoneid_t zoneid) 8333 { 8334 if (priv != DTRACE_PRIV_ALL) { 8335 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 8336 uint32_t match = priv & ppriv; 8337 8338 /* 8339 * No PRIV_DTRACE_* privileges... 8340 */ 8341 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 8342 DTRACE_PRIV_KERNEL)) == 0) 8343 return (0); 8344 8345 /* 8346 * No matching bits, but there were bits to match... 8347 */ 8348 if (match == 0 && ppriv != 0) 8349 return (0); 8350 8351 /* 8352 * Need to have permissions to the process, but don't... 8353 */ 8354 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 8355 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 8356 return (0); 8357 } 8358 8359 /* 8360 * Need to be in the same zone unless we possess the 8361 * privilege to examine all zones. 8362 */ 8363 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 8364 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 8365 return (0); 8366 } 8367 } 8368 8369 return (1); 8370 } 8371 8372 /* 8373 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 8374 * consists of input pattern strings and an ops-vector to evaluate them. 8375 * This function returns >0 for match, 0 for no match, and <0 for error. 
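 * The key's match functions are applied in provider, module, function,
 * name order, short-circuiting at the first element that fails to match;
 * the privilege check (dtrace_match_priv()) is applied only after all
 * four elements have matched.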
8376 */ 8377 static int 8378 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 8379 uint32_t priv, uid_t uid, zoneid_t zoneid) 8380 { 8381 dtrace_provider_t *pvp = prp->dtpr_provider; 8382 int rv; 8383 8384 if (pvp->dtpv_defunct) 8385 return (0); 8386 8387 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 8388 return (rv); 8389 8390 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 8391 return (rv); 8392 8393 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 8394 return (rv); 8395 8396 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 8397 return (rv); 8398 8399 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 8400 return (0); 8401 8402 return (rv); 8403 } 8404 8405 /* 8406 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 8407 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 8408 * libc's version, the kernel version only applies to 8-bit ASCII strings. 8409 * In addition, all of the recursion cases except for '*' matching have been 8410 * unwound. For '*', we still implement recursive evaluation, but a depth 8411 * counter is maintained and matching is aborted if we recurse too deep. 8412 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 8413 */ 8414 static int 8415 dtrace_match_glob(const char *s, const char *p, int depth) 8416 { 8417 const char *olds; 8418 char s1, c; 8419 int gs; 8420 8421 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 8422 return (-1); 8423 8424 if (s == NULL) 8425 s = ""; /* treat NULL as empty string */ 8426 8427 top: 8428 olds = s; 8429 s1 = *s++; 8430 8431 if (p == NULL) 8432 return (0); 8433 8434 if ((c = *p++) == '\0') 8435 return (s1 == '\0'); 8436 8437 switch (c) { 8438 case '[': { 8439 int ok = 0, notflag = 0; 8440 char lc = '\0'; 8441 8442 if (s1 == '\0') 8443 return (0); 8444 8445 if (*p == '!') { 8446 notflag = 1; 8447 p++; 8448 } 8449 8450 if ((c = *p++) == '\0') 8451 return (0); 8452 8453 do { 8454 if (c == '-' && lc != '\0' && *p != ']') { 8455 if ((c = *p++) == '\0') 8456 return (0); 8457 if (c == '\\' && (c = *p++) == '\0') 8458 return (0); 8459 8460 if (notflag) { 8461 if (s1 < lc || s1 > c) 8462 ok++; 8463 else 8464 return (0); 8465 } else if (lc <= s1 && s1 <= c) 8466 ok++; 8467 8468 } else if (c == '\\' && (c = *p++) == '\0') 8469 return (0); 8470 8471 lc = c; /* save left-hand 'c' for next iteration */ 8472 8473 if (notflag) { 8474 if (s1 != c) 8475 ok++; 8476 else 8477 return (0); 8478 } else if (s1 == c) 8479 ok++; 8480 8481 if ((c = *p++) == '\0') 8482 return (0); 8483 8484 } while (c != ']'); 8485 8486 if (ok) 8487 goto top; 8488 8489 return (0); 8490 } 8491 8492 case '\\': 8493 if ((c = *p++) == '\0') 8494 return (0); 8495 /*FALLTHRU*/ 8496 8497 default: 8498 if (c != s1) 8499 return (0); 8500 /*FALLTHRU*/ 8501 8502 case '?': 8503 if (s1 != '\0') 8504 goto top; 8505 return (0); 8506 8507 case '*': 8508 while (*p == '*') 8509 p++; /* consecutive *'s are identical to a single one */ 8510 8511 if (*p == '\0') 8512 return (1); 8513 8514 for (s = olds; *s != '\0'; s++) { 8515 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 8516 return (gs); 8517 } 8518 8519 return (0); 8520 } 8521 } 8522 8523 /*ARGSUSED*/ 8524 static int 8525 dtrace_match_string(const char *s, const char *p, int depth) 8526 { 8527 return (s != NULL && strcmp(s, p) == 0); 8528 } 8529 8530 /*ARGSUSED*/ 8531 static int 8532 dtrace_match_nul(const char *s, const char *p, int depth) 8533 { 8534 return (1); /* 
always match the empty pattern */ 8535 } 8536 8537 /*ARGSUSED*/ 8538 static int 8539 dtrace_match_nonzero(const char *s, const char *p, int depth) 8540 { 8541 return (s != NULL && s[0] != '\0'); 8542 } 8543 8544 static int 8545 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 8546 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 8547 { 8548 dtrace_probe_t template, *probe; 8549 dtrace_hash_t *hash = NULL; 8550 int len, best = INT_MAX, nmatched = 0; 8551 dtrace_id_t i; 8552 8553 ASSERT(MUTEX_HELD(&dtrace_lock)); 8554 8555 /* 8556 * If the probe ID is specified in the key, just lookup by ID and 8557 * invoke the match callback once if a matching probe is found. 8558 */ 8559 if (pkp->dtpk_id != DTRACE_IDNONE) { 8560 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 8561 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 8562 (void) (*matched)(probe, arg); 8563 nmatched++; 8564 } 8565 return (nmatched); 8566 } 8567 8568 template.dtpr_mod = (char *)pkp->dtpk_mod; 8569 template.dtpr_func = (char *)pkp->dtpk_func; 8570 template.dtpr_name = (char *)pkp->dtpk_name; 8571 8572 /* 8573 * We want to find the most distinct of the module name, function 8574 * name, and name. So for each one that is not a glob pattern or 8575 * empty string, we perform a lookup in the corresponding hash and 8576 * use the hash table with the fewest collisions to do our search. 8577 */ 8578 if (pkp->dtpk_mmatch == &dtrace_match_string && 8579 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 8580 best = len; 8581 hash = dtrace_bymod; 8582 } 8583 8584 if (pkp->dtpk_fmatch == &dtrace_match_string && 8585 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 8586 best = len; 8587 hash = dtrace_byfunc; 8588 } 8589 8590 if (pkp->dtpk_nmatch == &dtrace_match_string && 8591 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 8592 best = len; 8593 hash = dtrace_byname; 8594 } 8595 8596 /* 8597 * If we did not select a hash table, iterate over every probe and 8598 * invoke our callback for each one that matches our input probe key. 8599 */ 8600 if (hash == NULL) { 8601 for (i = 0; i < dtrace_nprobes; i++) { 8602 if ((probe = dtrace_probes[i]) == NULL || 8603 dtrace_match_probe(probe, pkp, priv, uid, 8604 zoneid) <= 0) 8605 continue; 8606 8607 nmatched++; 8608 8609 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8610 break; 8611 } 8612 8613 return (nmatched); 8614 } 8615 8616 /* 8617 * If we selected a hash table, iterate over each probe of the same key 8618 * name and invoke the callback for every probe that matches the other 8619 * attributes of our input probe key. 8620 */ 8621 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 8622 probe = *(DTRACE_HASHNEXT(hash, probe))) { 8623 8624 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 8625 continue; 8626 8627 nmatched++; 8628 8629 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8630 break; 8631 } 8632 8633 return (nmatched); 8634 } 8635 8636 /* 8637 * Return the function pointer dtrace_probecmp() should use to compare the 8638 * specified pattern with a string. For NULL or empty patterns, we select 8639 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 8640 * For non-empty non-glob strings, we use dtrace_match_string(). 
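 * For example, given the (purely illustrative) description
 * "fbt::alloc*:entry": the provider "fbt" and the name "entry" select
 * dtrace_match_string(), the empty module selects dtrace_match_nul(),
 * and the globbed function "alloc*" selects dtrace_match_glob().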
8641 */ 8642 static dtrace_probekey_f * 8643 dtrace_probekey_func(const char *p) 8644 { 8645 char c; 8646 8647 if (p == NULL || *p == '\0') 8648 return (&dtrace_match_nul); 8649 8650 while ((c = *p++) != '\0') { 8651 if (c == '[' || c == '?' || c == '*' || c == '\\') 8652 return (&dtrace_match_glob); 8653 } 8654 8655 return (&dtrace_match_string); 8656 } 8657 8658 /* 8659 * Build a probe comparison key for use with dtrace_match_probe() from the 8660 * given probe description. By convention, a null key only matches anchored 8661 * probes: if each field is the empty string, reset dtpk_fmatch to 8662 * dtrace_match_nonzero(). 8663 */ 8664 static void 8665 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 8666 { 8667 pkp->dtpk_prov = pdp->dtpd_provider; 8668 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 8669 8670 pkp->dtpk_mod = pdp->dtpd_mod; 8671 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 8672 8673 pkp->dtpk_func = pdp->dtpd_func; 8674 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 8675 8676 pkp->dtpk_name = pdp->dtpd_name; 8677 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 8678 8679 pkp->dtpk_id = pdp->dtpd_id; 8680 8681 if (pkp->dtpk_id == DTRACE_IDNONE && 8682 pkp->dtpk_pmatch == &dtrace_match_nul && 8683 pkp->dtpk_mmatch == &dtrace_match_nul && 8684 pkp->dtpk_fmatch == &dtrace_match_nul && 8685 pkp->dtpk_nmatch == &dtrace_match_nul) 8686 pkp->dtpk_fmatch = &dtrace_match_nonzero; 8687 } 8688 8689 /* 8690 * DTrace Provider-to-Framework API Functions 8691 * 8692 * These functions implement much of the Provider-to-Framework API, as 8693 * described in <sys/dtrace.h>. The parts of the API not in this section are 8694 * the functions in the API for probe management (found below), and 8695 * dtrace_probe() itself (found above). 8696 */ 8697 8698 /* 8699 * Register the calling provider with the DTrace framework. This should 8700 * generally be called by DTrace providers in their attach(9E) entry point. 8701 */ 8702 int 8703 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 8704 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 8705 { 8706 dtrace_provider_t *provider; 8707 8708 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 8709 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8710 "arguments", name ? 
name : "<NULL>"); 8711 return (EINVAL); 8712 } 8713 8714 if (name[0] == '\0' || dtrace_badname(name)) { 8715 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8716 "provider name", name); 8717 return (EINVAL); 8718 } 8719 8720 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 8721 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 8722 pops->dtps_destroy == NULL || 8723 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 8724 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8725 "provider ops", name); 8726 return (EINVAL); 8727 } 8728 8729 if (dtrace_badattr(&pap->dtpa_provider) || 8730 dtrace_badattr(&pap->dtpa_mod) || 8731 dtrace_badattr(&pap->dtpa_func) || 8732 dtrace_badattr(&pap->dtpa_name) || 8733 dtrace_badattr(&pap->dtpa_args)) { 8734 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8735 "provider attributes", name); 8736 return (EINVAL); 8737 } 8738 8739 if (priv & ~DTRACE_PRIV_ALL) { 8740 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8741 "privilege attributes", name); 8742 return (EINVAL); 8743 } 8744 8745 if ((priv & DTRACE_PRIV_KERNEL) && 8746 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 8747 pops->dtps_usermode == NULL) { 8748 cmn_err(CE_WARN, "failed to register provider '%s': need " 8749 "dtps_usermode() op for given privilege attributes", name); 8750 return (EINVAL); 8751 } 8752 8753 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 8754 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8755 (void) strcpy(provider->dtpv_name, name); 8756 8757 provider->dtpv_attr = *pap; 8758 provider->dtpv_priv.dtpp_flags = priv; 8759 if (cr != NULL) { 8760 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 8761 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 8762 } 8763 provider->dtpv_pops = *pops; 8764 8765 if (pops->dtps_provide == NULL) { 8766 ASSERT(pops->dtps_provide_module != NULL); 8767 provider->dtpv_pops.dtps_provide = 8768 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 8769 } 8770 8771 if (pops->dtps_provide_module == NULL) { 8772 ASSERT(pops->dtps_provide != NULL); 8773 provider->dtpv_pops.dtps_provide_module = 8774 (void (*)(void *, modctl_t *))dtrace_nullop; 8775 } 8776 8777 if (pops->dtps_suspend == NULL) { 8778 ASSERT(pops->dtps_resume == NULL); 8779 provider->dtpv_pops.dtps_suspend = 8780 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8781 provider->dtpv_pops.dtps_resume = 8782 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8783 } 8784 8785 provider->dtpv_arg = arg; 8786 *idp = (dtrace_provider_id_t)provider; 8787 8788 if (pops == &dtrace_provider_ops) { 8789 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8790 ASSERT(MUTEX_HELD(&dtrace_lock)); 8791 ASSERT(dtrace_anon.dta_enabling == NULL); 8792 8793 /* 8794 * We make sure that the DTrace provider is at the head of 8795 * the provider chain. 8796 */ 8797 provider->dtpv_next = dtrace_provider; 8798 dtrace_provider = provider; 8799 return (0); 8800 } 8801 8802 mutex_enter(&dtrace_provider_lock); 8803 mutex_enter(&dtrace_lock); 8804 8805 /* 8806 * If there is at least one provider registered, we'll add this 8807 * provider after the first provider. 
8808 */ 8809 if (dtrace_provider != NULL) { 8810 provider->dtpv_next = dtrace_provider->dtpv_next; 8811 dtrace_provider->dtpv_next = provider; 8812 } else { 8813 dtrace_provider = provider; 8814 } 8815 8816 if (dtrace_retained != NULL) { 8817 dtrace_enabling_provide(provider); 8818 8819 /* 8820 * Now we need to call dtrace_enabling_matchall() -- which 8821 * will acquire cpu_lock and dtrace_lock. We therefore need 8822 * to drop all of our locks before calling into it... 8823 */ 8824 mutex_exit(&dtrace_lock); 8825 mutex_exit(&dtrace_provider_lock); 8826 dtrace_enabling_matchall(); 8827 8828 return (0); 8829 } 8830 8831 mutex_exit(&dtrace_lock); 8832 mutex_exit(&dtrace_provider_lock); 8833 8834 return (0); 8835 } 8836 8837 /* 8838 * Unregister the specified provider from the DTrace framework. This should 8839 * generally be called by DTrace providers in their detach(9E) entry point. 8840 */ 8841 int 8842 dtrace_unregister(dtrace_provider_id_t id) 8843 { 8844 dtrace_provider_t *old = (dtrace_provider_t *)id; 8845 dtrace_provider_t *prev = NULL; 8846 int i, self = 0, noreap = 0; 8847 dtrace_probe_t *probe, *first = NULL; 8848 8849 if (old->dtpv_pops.dtps_enable == 8850 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 8851 /* 8852 * If DTrace itself is the provider, we're called with locks 8853 * already held. 8854 */ 8855 ASSERT(old == dtrace_provider); 8856 #ifdef illumos 8857 ASSERT(dtrace_devi != NULL); 8858 #endif 8859 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8860 ASSERT(MUTEX_HELD(&dtrace_lock)); 8861 self = 1; 8862 8863 if (dtrace_provider->dtpv_next != NULL) { 8864 /* 8865 * There's another provider here; return failure. 8866 */ 8867 return (EBUSY); 8868 } 8869 } else { 8870 mutex_enter(&dtrace_provider_lock); 8871 #ifdef illumos 8872 mutex_enter(&mod_lock); 8873 #endif 8874 mutex_enter(&dtrace_lock); 8875 } 8876 8877 /* 8878 * If anyone has /dev/dtrace open, or if there are anonymous enabled 8879 * probes, we refuse to let providers slither away, unless this 8880 * provider has already been explicitly invalidated. 8881 */ 8882 if (!old->dtpv_defunct && 8883 (dtrace_opens || (dtrace_anon.dta_state != NULL && 8884 dtrace_anon.dta_state->dts_necbs > 0))) { 8885 if (!self) { 8886 mutex_exit(&dtrace_lock); 8887 #ifdef illumos 8888 mutex_exit(&mod_lock); 8889 #endif 8890 mutex_exit(&dtrace_provider_lock); 8891 } 8892 return (EBUSY); 8893 } 8894 8895 /* 8896 * Attempt to destroy the probes associated with this provider. 8897 */ 8898 for (i = 0; i < dtrace_nprobes; i++) { 8899 if ((probe = dtrace_probes[i]) == NULL) 8900 continue; 8901 8902 if (probe->dtpr_provider != old) 8903 continue; 8904 8905 if (probe->dtpr_ecb == NULL) 8906 continue; 8907 8908 /* 8909 * If we are trying to unregister a defunct provider, and the 8910 * provider was made defunct within the interval dictated by 8911 * dtrace_unregister_defunct_reap, we'll (asynchronously) 8912 * attempt to reap our enablings. To denote that the provider 8913 * should reattempt to unregister itself at some point in the 8914 * future, we will return a differentiable error code (EAGAIN 8915 * instead of EBUSY) in this case. 
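		 * As an illustrative sketch (hypothetical provider code;
		 * "foo_id" is not part of this file), a detach(9E) routine
		 * can simply fail on either return value and allow the
		 * detach to be retried later:
		 *
		 *	rv = dtrace_unregister(foo_id);
		 *	if (rv == EAGAIN || rv == EBUSY)
		 *		return (DDI_FAILURE);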
8916 */ 8917 if (dtrace_gethrtime() - old->dtpv_defunct > 8918 dtrace_unregister_defunct_reap) 8919 noreap = 1; 8920 8921 if (!self) { 8922 mutex_exit(&dtrace_lock); 8923 #ifdef illumos 8924 mutex_exit(&mod_lock); 8925 #endif 8926 mutex_exit(&dtrace_provider_lock); 8927 } 8928 8929 if (noreap) 8930 return (EBUSY); 8931 8932 (void) taskq_dispatch(dtrace_taskq, 8933 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 8934 8935 return (EAGAIN); 8936 } 8937 8938 /* 8939 * All of the probes for this provider are disabled; we can safely 8940 * remove all of them from their hash chains and from the probe array. 8941 */ 8942 for (i = 0; i < dtrace_nprobes; i++) { 8943 if ((probe = dtrace_probes[i]) == NULL) 8944 continue; 8945 8946 if (probe->dtpr_provider != old) 8947 continue; 8948 8949 dtrace_probes[i] = NULL; 8950 8951 dtrace_hash_remove(dtrace_bymod, probe); 8952 dtrace_hash_remove(dtrace_byfunc, probe); 8953 dtrace_hash_remove(dtrace_byname, probe); 8954 8955 if (first == NULL) { 8956 first = probe; 8957 probe->dtpr_nextmod = NULL; 8958 } else { 8959 probe->dtpr_nextmod = first; 8960 first = probe; 8961 } 8962 } 8963 8964 /* 8965 * The provider's probes have been removed from the hash chains and 8966 * from the probe array. Now issue a dtrace_sync() to be sure that 8967 * everyone has cleared out from any probe array processing. 8968 */ 8969 dtrace_sync(); 8970 8971 for (probe = first; probe != NULL; probe = first) { 8972 first = probe->dtpr_nextmod; 8973 8974 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 8975 probe->dtpr_arg); 8976 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8977 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8978 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8979 #ifdef illumos 8980 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 8981 #else 8982 free_unr(dtrace_arena, probe->dtpr_id); 8983 #endif 8984 kmem_free(probe, sizeof (dtrace_probe_t)); 8985 } 8986 8987 if ((prev = dtrace_provider) == old) { 8988 #ifdef illumos 8989 ASSERT(self || dtrace_devi == NULL); 8990 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 8991 #endif 8992 dtrace_provider = old->dtpv_next; 8993 } else { 8994 while (prev != NULL && prev->dtpv_next != old) 8995 prev = prev->dtpv_next; 8996 8997 if (prev == NULL) { 8998 panic("attempt to unregister non-existent " 8999 "dtrace provider %p\n", (void *)id); 9000 } 9001 9002 prev->dtpv_next = old->dtpv_next; 9003 } 9004 9005 if (!self) { 9006 mutex_exit(&dtrace_lock); 9007 #ifdef illumos 9008 mutex_exit(&mod_lock); 9009 #endif 9010 mutex_exit(&dtrace_provider_lock); 9011 } 9012 9013 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 9014 kmem_free(old, sizeof (dtrace_provider_t)); 9015 9016 return (0); 9017 } 9018 9019 /* 9020 * Invalidate the specified provider. All subsequent probe lookups for the 9021 * specified provider will fail, but its probes will not be removed. 9022 */ 9023 void 9024 dtrace_invalidate(dtrace_provider_id_t id) 9025 { 9026 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 9027 9028 ASSERT(pvp->dtpv_pops.dtps_enable != 9029 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 9030 9031 mutex_enter(&dtrace_provider_lock); 9032 mutex_enter(&dtrace_lock); 9033 9034 pvp->dtpv_defunct = dtrace_gethrtime(); 9035 9036 mutex_exit(&dtrace_lock); 9037 mutex_exit(&dtrace_provider_lock); 9038 } 9039 9040 /* 9041 * Indicate whether or not DTrace has attached. 
9042 */ 9043 int 9044 dtrace_attached(void) 9045 { 9046 /* 9047 * dtrace_provider will be non-NULL iff the DTrace driver has 9048 * attached. (It's non-NULL because DTrace is always itself a 9049 * provider.) 9050 */ 9051 return (dtrace_provider != NULL); 9052 } 9053 9054 /* 9055 * Remove all the unenabled probes for the given provider. This function is 9056 * not unlike dtrace_unregister(), except that it doesn't remove the provider 9057 * -- just as many of its associated probes as it can. 9058 */ 9059 int 9060 dtrace_condense(dtrace_provider_id_t id) 9061 { 9062 dtrace_provider_t *prov = (dtrace_provider_t *)id; 9063 int i; 9064 dtrace_probe_t *probe; 9065 9066 /* 9067 * Make sure this isn't the dtrace provider itself. 9068 */ 9069 ASSERT(prov->dtpv_pops.dtps_enable != 9070 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 9071 9072 mutex_enter(&dtrace_provider_lock); 9073 mutex_enter(&dtrace_lock); 9074 9075 /* 9076 * Attempt to destroy the probes associated with this provider. 9077 */ 9078 for (i = 0; i < dtrace_nprobes; i++) { 9079 if ((probe = dtrace_probes[i]) == NULL) 9080 continue; 9081 9082 if (probe->dtpr_provider != prov) 9083 continue; 9084 9085 if (probe->dtpr_ecb != NULL) 9086 continue; 9087 9088 dtrace_probes[i] = NULL; 9089 9090 dtrace_hash_remove(dtrace_bymod, probe); 9091 dtrace_hash_remove(dtrace_byfunc, probe); 9092 dtrace_hash_remove(dtrace_byname, probe); 9093 9094 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 9095 probe->dtpr_arg); 9096 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 9097 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 9098 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 9099 kmem_free(probe, sizeof (dtrace_probe_t)); 9100 #ifdef illumos 9101 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 9102 #else 9103 free_unr(dtrace_arena, i + 1); 9104 #endif 9105 } 9106 9107 mutex_exit(&dtrace_lock); 9108 mutex_exit(&dtrace_provider_lock); 9109 9110 return (0); 9111 } 9112 9113 /* 9114 * DTrace Probe Management Functions 9115 * 9116 * The functions in this section perform the DTrace probe management, 9117 * including functions to create probes, look-up probes, and call into the 9118 * providers to request that probes be provided. Some of these functions are 9119 * in the Provider-to-Framework API; these functions can be identified by the 9120 * fact that they are not declared "static". 9121 */ 9122 9123 /* 9124 * Create a probe with the specified module name, function name, and name. 
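 * As an illustrative sketch (hypothetical provider code; "foo_id" and the
 * probe tuple are not part of this file), a provider's dtps_provide()
 * entry point might create a probe as:
 *
 *	(void) dtrace_probe_create(foo_id, "foomod", "foo_read",
 *	    "entry", 0, NULL);
 *
 * The returned identifier is the one subsequently passed to dtrace_probe()
 * when the probe fires.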
9125 */ 9126 dtrace_id_t 9127 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 9128 const char *func, const char *name, int aframes, void *arg) 9129 { 9130 dtrace_probe_t *probe, **probes; 9131 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 9132 dtrace_id_t id; 9133 9134 if (provider == dtrace_provider) { 9135 ASSERT(MUTEX_HELD(&dtrace_lock)); 9136 } else { 9137 mutex_enter(&dtrace_lock); 9138 } 9139 9140 #ifdef illumos 9141 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 9142 VM_BESTFIT | VM_SLEEP); 9143 #else 9144 id = alloc_unr(dtrace_arena); 9145 #endif 9146 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 9147 9148 probe->dtpr_id = id; 9149 probe->dtpr_gen = dtrace_probegen++; 9150 probe->dtpr_mod = dtrace_strdup(mod); 9151 probe->dtpr_func = dtrace_strdup(func); 9152 probe->dtpr_name = dtrace_strdup(name); 9153 probe->dtpr_arg = arg; 9154 probe->dtpr_aframes = aframes; 9155 probe->dtpr_provider = provider; 9156 9157 dtrace_hash_add(dtrace_bymod, probe); 9158 dtrace_hash_add(dtrace_byfunc, probe); 9159 dtrace_hash_add(dtrace_byname, probe); 9160 9161 if (id - 1 >= dtrace_nprobes) { 9162 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 9163 size_t nsize = osize << 1; 9164 9165 if (nsize == 0) { 9166 ASSERT(osize == 0); 9167 ASSERT(dtrace_probes == NULL); 9168 nsize = sizeof (dtrace_probe_t *); 9169 } 9170 9171 probes = kmem_zalloc(nsize, KM_SLEEP); 9172 9173 if (dtrace_probes == NULL) { 9174 ASSERT(osize == 0); 9175 dtrace_probes = probes; 9176 dtrace_nprobes = 1; 9177 } else { 9178 dtrace_probe_t **oprobes = dtrace_probes; 9179 9180 bcopy(oprobes, probes, osize); 9181 dtrace_membar_producer(); 9182 dtrace_probes = probes; 9183 9184 dtrace_sync(); 9185 9186 /* 9187 * All CPUs are now seeing the new probes array; we can 9188 * safely free the old array. 9189 */ 9190 kmem_free(oprobes, osize); 9191 dtrace_nprobes <<= 1; 9192 } 9193 9194 ASSERT(id - 1 < dtrace_nprobes); 9195 } 9196 9197 ASSERT(dtrace_probes[id - 1] == NULL); 9198 dtrace_probes[id - 1] = probe; 9199 9200 if (provider != dtrace_provider) 9201 mutex_exit(&dtrace_lock); 9202 9203 return (id); 9204 } 9205 9206 static dtrace_probe_t * 9207 dtrace_probe_lookup_id(dtrace_id_t id) 9208 { 9209 ASSERT(MUTEX_HELD(&dtrace_lock)); 9210 9211 if (id == 0 || id > dtrace_nprobes) 9212 return (NULL); 9213 9214 return (dtrace_probes[id - 1]); 9215 } 9216 9217 static int 9218 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 9219 { 9220 *((dtrace_id_t *)arg) = probe->dtpr_id; 9221 9222 return (DTRACE_MATCH_DONE); 9223 } 9224 9225 /* 9226 * Look up a probe based on provider and one or more of module name, function 9227 * name and probe name. 9228 */ 9229 dtrace_id_t 9230 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 9231 char *func, char *name) 9232 { 9233 dtrace_probekey_t pkey; 9234 dtrace_id_t id; 9235 int match; 9236 9237 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 9238 pkey.dtpk_pmatch = &dtrace_match_string; 9239 pkey.dtpk_mod = mod; 9240 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 9241 pkey.dtpk_func = func; 9242 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 9243 pkey.dtpk_name = name; 9244 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul; 9245 pkey.dtpk_id = DTRACE_IDNONE; 9246 9247 mutex_enter(&dtrace_lock); 9248 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 9249 dtrace_probe_lookup_match, &id); 9250 mutex_exit(&dtrace_lock); 9251 9252 ASSERT(match == 1 || match == 0); 9253 return (match ? id : 0); 9254 } 9255 9256 /* 9257 * Returns the probe argument associated with the specified probe. 9258 */ 9259 void * 9260 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 9261 { 9262 dtrace_probe_t *probe; 9263 void *rval = NULL; 9264 9265 mutex_enter(&dtrace_lock); 9266 9267 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 9268 probe->dtpr_provider == (dtrace_provider_t *)id) 9269 rval = probe->dtpr_arg; 9270 9271 mutex_exit(&dtrace_lock); 9272 9273 return (rval); 9274 } 9275 9276 /* 9277 * Copy a probe into a probe description. 9278 */ 9279 static void 9280 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 9281 { 9282 bzero(pdp, sizeof (dtrace_probedesc_t)); 9283 pdp->dtpd_id = prp->dtpr_id; 9284 9285 (void) strncpy(pdp->dtpd_provider, 9286 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 9287 9288 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 9289 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 9290 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 9291 } 9292 9293 /* 9294 * Called to indicate that a probe -- or probes -- should be provided by a 9295 * specified provider. If the specified description is NULL, the provider will 9296 * be told to provide all of its probes. (This is done whenever a new 9297 * consumer comes along, or whenever a retained enabling is to be matched.) If 9298 * the specified description is non-NULL, the provider is given the 9299 * opportunity to dynamically provide the specified probe, allowing providers 9300 * to support the creation of probes on-the-fly. (So-called _autocreated_ 9301 * probes.) If the provider is NULL, the operations will be applied to all 9302 * providers; if the provider is non-NULL the operations will only be applied 9303 * to the specified provider. The dtrace_provider_lock must be held, and the 9304 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 9305 * will need to grab the dtrace_lock when it reenters the framework through 9306 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 9307 */ 9308 static void 9309 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 9310 { 9311 #ifdef illumos 9312 modctl_t *ctl; 9313 #endif 9314 int all = 0; 9315 9316 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9317 9318 if (prv == NULL) { 9319 all = 1; 9320 prv = dtrace_provider; 9321 } 9322 9323 do { 9324 /* 9325 * First, call the blanket provide operation. 9326 */ 9327 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 9328 9329 #ifdef illumos 9330 /* 9331 * Now call the per-module provide operation. We will grab 9332 * mod_lock to prevent the list from being modified. Note 9333 * that this also prevents the mod_busy bits from changing. 9334 * (mod_busy can only be changed with mod_lock held.)
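		 * (Note the implied lock ordering: dtrace_provider_lock is
		 * held on entry, mod_lock is acquired here, and the
		 * provider's dtps_provide_module() may itself acquire
		 * dtrace_lock -- the same dtrace_provider_lock, mod_lock,
		 * dtrace_lock order used in dtrace_unregister() above.)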
9335 */ 9336 mutex_enter(&mod_lock); 9337 9338 ctl = &modules; 9339 do { 9340 if (ctl->mod_busy || ctl->mod_mp == NULL) 9341 continue; 9342 9343 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 9344 9345 } while ((ctl = ctl->mod_next) != &modules); 9346 9347 mutex_exit(&mod_lock); 9348 #endif 9349 } while (all && (prv = prv->dtpv_next) != NULL); 9350 } 9351 9352 #ifdef illumos 9353 /* 9354 * Iterate over each probe, and call the Framework-to-Provider API function 9355 * denoted by offs. 9356 */ 9357 static void 9358 dtrace_probe_foreach(uintptr_t offs) 9359 { 9360 dtrace_provider_t *prov; 9361 void (*func)(void *, dtrace_id_t, void *); 9362 dtrace_probe_t *probe; 9363 dtrace_icookie_t cookie; 9364 int i; 9365 9366 /* 9367 * We disable interrupts to walk through the probe array. This is 9368 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 9369 * won't see stale data. 9370 */ 9371 cookie = dtrace_interrupt_disable(); 9372 9373 for (i = 0; i < dtrace_nprobes; i++) { 9374 if ((probe = dtrace_probes[i]) == NULL) 9375 continue; 9376 9377 if (probe->dtpr_ecb == NULL) { 9378 /* 9379 * This probe isn't enabled -- don't call the function. 9380 */ 9381 continue; 9382 } 9383 9384 prov = probe->dtpr_provider; 9385 func = *((void(**)(void *, dtrace_id_t, void *)) 9386 ((uintptr_t)&prov->dtpv_pops + offs)); 9387 9388 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 9389 } 9390 9391 dtrace_interrupt_enable(cookie); 9392 } 9393 #endif 9394 9395 static int 9396 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 9397 { 9398 dtrace_probekey_t pkey; 9399 uint32_t priv; 9400 uid_t uid; 9401 zoneid_t zoneid; 9402 9403 ASSERT(MUTEX_HELD(&dtrace_lock)); 9404 dtrace_ecb_create_cache = NULL; 9405 9406 if (desc == NULL) { 9407 /* 9408 * If we're passed a NULL description, we're being asked to 9409 * create an ECB with a NULL probe. 
9410 */ 9411 (void) dtrace_ecb_create_enable(NULL, enab); 9412 return (0); 9413 } 9414 9415 dtrace_probekey(desc, &pkey); 9416 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 9417 &priv, &uid, &zoneid); 9418 9419 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 9420 enab)); 9421 } 9422 9423 /* 9424 * DTrace Helper Provider Functions 9425 */ 9426 static void 9427 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 9428 { 9429 attr->dtat_name = DOF_ATTR_NAME(dofattr); 9430 attr->dtat_data = DOF_ATTR_DATA(dofattr); 9431 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 9432 } 9433 9434 static void 9435 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 9436 const dof_provider_t *dofprov, char *strtab) 9437 { 9438 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 9439 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 9440 dofprov->dofpv_provattr); 9441 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 9442 dofprov->dofpv_modattr); 9443 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 9444 dofprov->dofpv_funcattr); 9445 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 9446 dofprov->dofpv_nameattr); 9447 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 9448 dofprov->dofpv_argsattr); 9449 } 9450 9451 static void 9452 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9453 { 9454 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9455 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9456 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 9457 dof_provider_t *provider; 9458 dof_probe_t *probe; 9459 uint32_t *off, *enoff; 9460 uint8_t *arg; 9461 char *strtab; 9462 uint_t i, nprobes; 9463 dtrace_helper_provdesc_t dhpv; 9464 dtrace_helper_probedesc_t dhpb; 9465 dtrace_meta_t *meta = dtrace_meta_pid; 9466 dtrace_mops_t *mops = &meta->dtm_mops; 9467 void *parg; 9468 9469 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9470 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9471 provider->dofpv_strtab * dof->dofh_secsize); 9472 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9473 provider->dofpv_probes * dof->dofh_secsize); 9474 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9475 provider->dofpv_prargs * dof->dofh_secsize); 9476 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9477 provider->dofpv_proffs * dof->dofh_secsize); 9478 9479 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9480 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 9481 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 9482 enoff = NULL; 9483 9484 /* 9485 * See dtrace_helper_provider_validate(). 9486 */ 9487 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 9488 provider->dofpv_prenoffs != DOF_SECT_NONE) { 9489 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9490 provider->dofpv_prenoffs * dof->dofh_secsize); 9491 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 9492 } 9493 9494 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 9495 9496 /* 9497 * Create the provider. 9498 */ 9499 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9500 9501 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 9502 return; 9503 9504 meta->dtm_count++; 9505 9506 /* 9507 * Create the probes. 9508 */ 9509 for (i = 0; i < nprobes; i++) { 9510 probe = (dof_probe_t *)(uintptr_t)(daddr + 9511 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 9512 9513 /* See the check in dtrace_helper_provider_validate(). 
*/ 9514 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) 9515 continue; 9516 9517 dhpb.dthpb_mod = dhp->dofhp_mod; 9518 dhpb.dthpb_func = strtab + probe->dofpr_func; 9519 dhpb.dthpb_name = strtab + probe->dofpr_name; 9520 dhpb.dthpb_base = probe->dofpr_addr; 9521 dhpb.dthpb_offs = off + probe->dofpr_offidx; 9522 dhpb.dthpb_noffs = probe->dofpr_noffs; 9523 if (enoff != NULL) { 9524 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 9525 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 9526 } else { 9527 dhpb.dthpb_enoffs = NULL; 9528 dhpb.dthpb_nenoffs = 0; 9529 } 9530 dhpb.dthpb_args = arg + probe->dofpr_argidx; 9531 dhpb.dthpb_nargc = probe->dofpr_nargc; 9532 dhpb.dthpb_xargc = probe->dofpr_xargc; 9533 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 9534 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 9535 9536 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 9537 } 9538 } 9539 9540 static void 9541 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 9542 { 9543 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9544 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9545 int i; 9546 9547 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9548 9549 for (i = 0; i < dof->dofh_secnum; i++) { 9550 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9551 dof->dofh_secoff + i * dof->dofh_secsize); 9552 9553 if (sec->dofs_type != DOF_SECT_PROVIDER) 9554 continue; 9555 9556 dtrace_helper_provide_one(dhp, sec, pid); 9557 } 9558 9559 /* 9560 * We may have just created probes, so we must now rematch against 9561 * any retained enablings. Note that this call will acquire both 9562 * cpu_lock and dtrace_lock; the fact that we are holding 9563 * dtrace_meta_lock now is what defines the ordering with respect to 9564 * these three locks. 9565 */ 9566 dtrace_enabling_matchall(); 9567 } 9568 9569 static void 9570 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9571 { 9572 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9573 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9574 dof_sec_t *str_sec; 9575 dof_provider_t *provider; 9576 char *strtab; 9577 dtrace_helper_provdesc_t dhpv; 9578 dtrace_meta_t *meta = dtrace_meta_pid; 9579 dtrace_mops_t *mops = &meta->dtm_mops; 9580 9581 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9582 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9583 provider->dofpv_strtab * dof->dofh_secsize); 9584 9585 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9586 9587 /* 9588 * Create the provider. 9589 */ 9590 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9591 9592 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 9593 9594 meta->dtm_count--; 9595 } 9596 9597 static void 9598 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 9599 { 9600 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9601 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9602 int i; 9603 9604 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9605 9606 for (i = 0; i < dof->dofh_secnum; i++) { 9607 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9608 dof->dofh_secoff + i * dof->dofh_secsize); 9609 9610 if (sec->dofs_type != DOF_SECT_PROVIDER) 9611 continue; 9612 9613 dtrace_helper_provider_remove_one(dhp, sec, pid); 9614 } 9615 } 9616 9617 /* 9618 * DTrace Meta Provider-to-Framework API Functions 9619 * 9620 * These functions implement the Meta Provider-to-Framework API, as described 9621 * in <sys/dtrace.h>. 
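 * As an illustrative sketch (hypothetical meta-provider code; the "foo"
 * names are not part of this file), a meta-provider supplies a
 * dtrace_mops_t with dtms_create_probe, dtms_provide_pid and
 * dtms_remove_pid entry points and registers it with:
 *
 *	if ((rv = dtrace_meta_register("foo", &foo_mops, NULL,
 *	    &foo_meta_id)) != 0)
 *		return (rv);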
9622 */ 9623 int 9624 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 9625 dtrace_meta_provider_id_t *idp) 9626 { 9627 dtrace_meta_t *meta; 9628 dtrace_helpers_t *help, *next; 9629 int i; 9630 9631 *idp = DTRACE_METAPROVNONE; 9632 9633 /* 9634 * We strictly don't need the name, but we hold onto it for 9635 * debuggability. All hail error queues! 9636 */ 9637 if (name == NULL) { 9638 cmn_err(CE_WARN, "failed to register meta-provider: " 9639 "invalid name"); 9640 return (EINVAL); 9641 } 9642 9643 if (mops == NULL || 9644 mops->dtms_create_probe == NULL || 9645 mops->dtms_provide_pid == NULL || 9646 mops->dtms_remove_pid == NULL) { 9647 cmn_err(CE_WARN, "failed to register meta-provider %s: " 9648 "invalid ops", name); 9649 return (EINVAL); 9650 } 9651 9652 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 9653 meta->dtm_mops = *mops; 9654 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 9655 (void) strcpy(meta->dtm_name, name); 9656 meta->dtm_arg = arg; 9657 9658 mutex_enter(&dtrace_meta_lock); 9659 mutex_enter(&dtrace_lock); 9660 9661 if (dtrace_meta_pid != NULL) { 9662 mutex_exit(&dtrace_lock); 9663 mutex_exit(&dtrace_meta_lock); 9664 cmn_err(CE_WARN, "failed to register meta-provider %s: " 9665 "user-land meta-provider exists", name); 9666 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 9667 kmem_free(meta, sizeof (dtrace_meta_t)); 9668 return (EINVAL); 9669 } 9670 9671 dtrace_meta_pid = meta; 9672 *idp = (dtrace_meta_provider_id_t)meta; 9673 9674 /* 9675 * If there are providers and probes ready to go, pass them 9676 * off to the new meta provider now. 9677 */ 9678 9679 help = dtrace_deferred_pid; 9680 dtrace_deferred_pid = NULL; 9681 9682 mutex_exit(&dtrace_lock); 9683 9684 while (help != NULL) { 9685 for (i = 0; i < help->dthps_nprovs; i++) { 9686 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 9687 help->dthps_pid); 9688 } 9689 9690 next = help->dthps_next; 9691 help->dthps_next = NULL; 9692 help->dthps_prev = NULL; 9693 help->dthps_deferred = 0; 9694 help = next; 9695 } 9696 9697 mutex_exit(&dtrace_meta_lock); 9698 9699 return (0); 9700 } 9701 9702 int 9703 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 9704 { 9705 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 9706 9707 mutex_enter(&dtrace_meta_lock); 9708 mutex_enter(&dtrace_lock); 9709 9710 if (old == dtrace_meta_pid) { 9711 pp = &dtrace_meta_pid; 9712 } else { 9713 panic("attempt to unregister non-existent " 9714 "dtrace meta-provider %p\n", (void *)old); 9715 } 9716 9717 if (old->dtm_count != 0) { 9718 mutex_exit(&dtrace_lock); 9719 mutex_exit(&dtrace_meta_lock); 9720 return (EBUSY); 9721 } 9722 9723 *pp = NULL; 9724 9725 mutex_exit(&dtrace_lock); 9726 mutex_exit(&dtrace_meta_lock); 9727 9728 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 9729 kmem_free(old, sizeof (dtrace_meta_t)); 9730 9731 return (0); 9732 } 9733 9734 9735 /* 9736 * DTrace DIF Object Functions 9737 */ 9738 static int 9739 dtrace_difo_err(uint_t pc, const char *format, ...) 9740 { 9741 if (dtrace_err_verbose) { 9742 va_list alist; 9743 9744 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 9745 va_start(alist, format); 9746 (void) vuprintf(format, alist); 9747 va_end(alist); 9748 } 9749 9750 #ifdef DTRACE_ERRDEBUG 9751 dtrace_errdebug(format); 9752 #endif 9753 return (1); 9754 } 9755 9756 /* 9757 * Validate a DTrace DIF object by checking the IR instructions. The following 9758 * rules are currently enforced by dtrace_difo_validate(): 9759 * 9760 * 1.
Each instruction must have a valid opcode 9761 * 2. Each register, string, variable, or subroutine reference must be valid 9762 * 3. No instruction can modify register %r0 (must be zero) 9763 * 4. All instruction reserved bits must be set to zero 9764 * 5. The last instruction must be a "ret" instruction 9765 * 6. All branch targets must reference a valid instruction _after_ the branch 9766 */ 9767 static int 9768 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 9769 cred_t *cr) 9770 { 9771 int err = 0, i; 9772 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 9773 int kcheckload; 9774 uint_t pc; 9775 int maxglobal = -1, maxlocal = -1, maxtlocal = -1; 9776 9777 kcheckload = cr == NULL || 9778 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 9779 9780 dp->dtdo_destructive = 0; 9781 9782 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 9783 dif_instr_t instr = dp->dtdo_buf[pc]; 9784 9785 uint_t r1 = DIF_INSTR_R1(instr); 9786 uint_t r2 = DIF_INSTR_R2(instr); 9787 uint_t rd = DIF_INSTR_RD(instr); 9788 uint_t rs = DIF_INSTR_RS(instr); 9789 uint_t label = DIF_INSTR_LABEL(instr); 9790 uint_t v = DIF_INSTR_VAR(instr); 9791 uint_t subr = DIF_INSTR_SUBR(instr); 9792 uint_t type = DIF_INSTR_TYPE(instr); 9793 uint_t op = DIF_INSTR_OP(instr); 9794 9795 switch (op) { 9796 case DIF_OP_OR: 9797 case DIF_OP_XOR: 9798 case DIF_OP_AND: 9799 case DIF_OP_SLL: 9800 case DIF_OP_SRL: 9801 case DIF_OP_SRA: 9802 case DIF_OP_SUB: 9803 case DIF_OP_ADD: 9804 case DIF_OP_MUL: 9805 case DIF_OP_SDIV: 9806 case DIF_OP_UDIV: 9807 case DIF_OP_SREM: 9808 case DIF_OP_UREM: 9809 case DIF_OP_COPYS: 9810 if (r1 >= nregs) 9811 err += efunc(pc, "invalid register %u\n", r1); 9812 if (r2 >= nregs) 9813 err += efunc(pc, "invalid register %u\n", r2); 9814 if (rd >= nregs) 9815 err += efunc(pc, "invalid register %u\n", rd); 9816 if (rd == 0) 9817 err += efunc(pc, "cannot write to %r0\n"); 9818 break; 9819 case DIF_OP_NOT: 9820 case DIF_OP_MOV: 9821 case DIF_OP_ALLOCS: 9822 if (r1 >= nregs) 9823 err += efunc(pc, "invalid register %u\n", r1); 9824 if (r2 != 0) 9825 err += efunc(pc, "non-zero reserved bits\n"); 9826 if (rd >= nregs) 9827 err += efunc(pc, "invalid register %u\n", rd); 9828 if (rd == 0) 9829 err += efunc(pc, "cannot write to %r0\n"); 9830 break; 9831 case DIF_OP_LDSB: 9832 case DIF_OP_LDSH: 9833 case DIF_OP_LDSW: 9834 case DIF_OP_LDUB: 9835 case DIF_OP_LDUH: 9836 case DIF_OP_LDUW: 9837 case DIF_OP_LDX: 9838 if (r1 >= nregs) 9839 err += efunc(pc, "invalid register %u\n", r1); 9840 if (r2 != 0) 9841 err += efunc(pc, "non-zero reserved bits\n"); 9842 if (rd >= nregs) 9843 err += efunc(pc, "invalid register %u\n", rd); 9844 if (rd == 0) 9845 err += efunc(pc, "cannot write to %r0\n"); 9846 if (kcheckload) 9847 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 9848 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 9849 break; 9850 case DIF_OP_RLDSB: 9851 case DIF_OP_RLDSH: 9852 case DIF_OP_RLDSW: 9853 case DIF_OP_RLDUB: 9854 case DIF_OP_RLDUH: 9855 case DIF_OP_RLDUW: 9856 case DIF_OP_RLDX: 9857 if (r1 >= nregs) 9858 err += efunc(pc, "invalid register %u\n", r1); 9859 if (r2 != 0) 9860 err += efunc(pc, "non-zero reserved bits\n"); 9861 if (rd >= nregs) 9862 err += efunc(pc, "invalid register %u\n", rd); 9863 if (rd == 0) 9864 err += efunc(pc, "cannot write to %r0\n"); 9865 break; 9866 case DIF_OP_ULDSB: 9867 case DIF_OP_ULDSH: 9868 case DIF_OP_ULDSW: 9869 case DIF_OP_ULDUB: 9870 case DIF_OP_ULDUH: 9871 case DIF_OP_ULDUW: 9872 case DIF_OP_ULDX: 9873 if (r1 >= nregs) 9874 err += efunc(pc, 
"invalid register %u\n", r1); 9875 if (r2 != 0) 9876 err += efunc(pc, "non-zero reserved bits\n"); 9877 if (rd >= nregs) 9878 err += efunc(pc, "invalid register %u\n", rd); 9879 if (rd == 0) 9880 err += efunc(pc, "cannot write to %r0\n"); 9881 break; 9882 case DIF_OP_STB: 9883 case DIF_OP_STH: 9884 case DIF_OP_STW: 9885 case DIF_OP_STX: 9886 if (r1 >= nregs) 9887 err += efunc(pc, "invalid register %u\n", r1); 9888 if (r2 != 0) 9889 err += efunc(pc, "non-zero reserved bits\n"); 9890 if (rd >= nregs) 9891 err += efunc(pc, "invalid register %u\n", rd); 9892 if (rd == 0) 9893 err += efunc(pc, "cannot write to 0 address\n"); 9894 break; 9895 case DIF_OP_CMP: 9896 case DIF_OP_SCMP: 9897 if (r1 >= nregs) 9898 err += efunc(pc, "invalid register %u\n", r1); 9899 if (r2 >= nregs) 9900 err += efunc(pc, "invalid register %u\n", r2); 9901 if (rd != 0) 9902 err += efunc(pc, "non-zero reserved bits\n"); 9903 break; 9904 case DIF_OP_TST: 9905 if (r1 >= nregs) 9906 err += efunc(pc, "invalid register %u\n", r1); 9907 if (r2 != 0 || rd != 0) 9908 err += efunc(pc, "non-zero reserved bits\n"); 9909 break; 9910 case DIF_OP_BA: 9911 case DIF_OP_BE: 9912 case DIF_OP_BNE: 9913 case DIF_OP_BG: 9914 case DIF_OP_BGU: 9915 case DIF_OP_BGE: 9916 case DIF_OP_BGEU: 9917 case DIF_OP_BL: 9918 case DIF_OP_BLU: 9919 case DIF_OP_BLE: 9920 case DIF_OP_BLEU: 9921 if (label >= dp->dtdo_len) { 9922 err += efunc(pc, "invalid branch target %u\n", 9923 label); 9924 } 9925 if (label <= pc) { 9926 err += efunc(pc, "backward branch to %u\n", 9927 label); 9928 } 9929 break; 9930 case DIF_OP_RET: 9931 if (r1 != 0 || r2 != 0) 9932 err += efunc(pc, "non-zero reserved bits\n"); 9933 if (rd >= nregs) 9934 err += efunc(pc, "invalid register %u\n", rd); 9935 break; 9936 case DIF_OP_NOP: 9937 case DIF_OP_POPTS: 9938 case DIF_OP_FLUSHTS: 9939 if (r1 != 0 || r2 != 0 || rd != 0) 9940 err += efunc(pc, "non-zero reserved bits\n"); 9941 break; 9942 case DIF_OP_SETX: 9943 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 9944 err += efunc(pc, "invalid integer ref %u\n", 9945 DIF_INSTR_INTEGER(instr)); 9946 } 9947 if (rd >= nregs) 9948 err += efunc(pc, "invalid register %u\n", rd); 9949 if (rd == 0) 9950 err += efunc(pc, "cannot write to %r0\n"); 9951 break; 9952 case DIF_OP_SETS: 9953 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 9954 err += efunc(pc, "invalid string ref %u\n", 9955 DIF_INSTR_STRING(instr)); 9956 } 9957 if (rd >= nregs) 9958 err += efunc(pc, "invalid register %u\n", rd); 9959 if (rd == 0) 9960 err += efunc(pc, "cannot write to %r0\n"); 9961 break; 9962 case DIF_OP_LDGA: 9963 case DIF_OP_LDTA: 9964 if (r1 > DIF_VAR_ARRAY_MAX) 9965 err += efunc(pc, "invalid array %u\n", r1); 9966 if (r2 >= nregs) 9967 err += efunc(pc, "invalid register %u\n", r2); 9968 if (rd >= nregs) 9969 err += efunc(pc, "invalid register %u\n", rd); 9970 if (rd == 0) 9971 err += efunc(pc, "cannot write to %r0\n"); 9972 break; 9973 case DIF_OP_LDGS: 9974 case DIF_OP_LDTS: 9975 case DIF_OP_LDLS: 9976 case DIF_OP_LDGAA: 9977 case DIF_OP_LDTAA: 9978 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 9979 err += efunc(pc, "invalid variable %u\n", v); 9980 if (rd >= nregs) 9981 err += efunc(pc, "invalid register %u\n", rd); 9982 if (rd == 0) 9983 err += efunc(pc, "cannot write to %r0\n"); 9984 break; 9985 case DIF_OP_STGS: 9986 case DIF_OP_STTS: 9987 case DIF_OP_STLS: 9988 case DIF_OP_STGAA: 9989 case DIF_OP_STTAA: 9990 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 9991 err += efunc(pc, "invalid variable %u\n", v); 9992 if (rs >= nregs) 9993 err += efunc(pc, 
"invalid register %u\n", rd); 9994 break; 9995 case DIF_OP_CALL: 9996 if (subr > DIF_SUBR_MAX) 9997 err += efunc(pc, "invalid subr %u\n", subr); 9998 if (rd >= nregs) 9999 err += efunc(pc, "invalid register %u\n", rd); 10000 if (rd == 0) 10001 err += efunc(pc, "cannot write to %r0\n"); 10002 10003 if (subr == DIF_SUBR_COPYOUT || 10004 subr == DIF_SUBR_COPYOUTSTR) { 10005 dp->dtdo_destructive = 1; 10006 } 10007 10008 if (subr == DIF_SUBR_GETF) { 10009 /* 10010 * If we have a getf() we need to record that 10011 * in our state. Note that our state can be 10012 * NULL if this is a helper -- but in that 10013 * case, the call to getf() is itself illegal, 10014 * and will be caught (slightly later) when 10015 * the helper is validated. 10016 */ 10017 if (vstate->dtvs_state != NULL) 10018 vstate->dtvs_state->dts_getf++; 10019 } 10020 10021 break; 10022 case DIF_OP_PUSHTR: 10023 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 10024 err += efunc(pc, "invalid ref type %u\n", type); 10025 if (r2 >= nregs) 10026 err += efunc(pc, "invalid register %u\n", r2); 10027 if (rs >= nregs) 10028 err += efunc(pc, "invalid register %u\n", rs); 10029 break; 10030 case DIF_OP_PUSHTV: 10031 if (type != DIF_TYPE_CTF) 10032 err += efunc(pc, "invalid val type %u\n", type); 10033 if (r2 >= nregs) 10034 err += efunc(pc, "invalid register %u\n", r2); 10035 if (rs >= nregs) 10036 err += efunc(pc, "invalid register %u\n", rs); 10037 break; 10038 default: 10039 err += efunc(pc, "invalid opcode %u\n", 10040 DIF_INSTR_OP(instr)); 10041 } 10042 } 10043 10044 if (dp->dtdo_len != 0 && 10045 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 10046 err += efunc(dp->dtdo_len - 1, 10047 "expected 'ret' as last DIF instruction\n"); 10048 } 10049 10050 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { 10051 /* 10052 * If we're not returning by reference, the size must be either 10053 * 0 or the size of one of the base types. 10054 */ 10055 switch (dp->dtdo_rtype.dtdt_size) { 10056 case 0: 10057 case sizeof (uint8_t): 10058 case sizeof (uint16_t): 10059 case sizeof (uint32_t): 10060 case sizeof (uint64_t): 10061 break; 10062 10063 default: 10064 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 10065 } 10066 } 10067 10068 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 10069 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 10070 dtrace_diftype_t *vt, *et; 10071 uint_t id, ndx; 10072 10073 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 10074 v->dtdv_scope != DIFV_SCOPE_THREAD && 10075 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 10076 err += efunc(i, "unrecognized variable scope %d\n", 10077 v->dtdv_scope); 10078 break; 10079 } 10080 10081 if (v->dtdv_kind != DIFV_KIND_ARRAY && 10082 v->dtdv_kind != DIFV_KIND_SCALAR) { 10083 err += efunc(i, "unrecognized variable type %d\n", 10084 v->dtdv_kind); 10085 break; 10086 } 10087 10088 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 10089 err += efunc(i, "%d exceeds variable id limit\n", id); 10090 break; 10091 } 10092 10093 if (id < DIF_VAR_OTHER_UBASE) 10094 continue; 10095 10096 /* 10097 * For user-defined variables, we need to check that this 10098 * definition is identical to any previous definition that we 10099 * encountered. 
		 */
		ndx = id - DIF_VAR_OTHER_UBASE;

		switch (v->dtdv_scope) {
		case DIFV_SCOPE_GLOBAL:
			if (maxglobal == -1 || ndx > maxglobal)
				maxglobal = ndx;

			if (ndx < vstate->dtvs_nglobals) {
				dtrace_statvar_t *svar;

				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
					existing = &svar->dtsv_var;
			}

			break;

		case DIFV_SCOPE_THREAD:
			if (maxtlocal == -1 || ndx > maxtlocal)
				maxtlocal = ndx;

			if (ndx < vstate->dtvs_ntlocals)
				existing = &vstate->dtvs_tlocals[ndx];
			break;

		case DIFV_SCOPE_LOCAL:
			if (maxlocal == -1 || ndx > maxlocal)
				maxlocal = ndx;

			if (ndx < vstate->dtvs_nlocals) {
				dtrace_statvar_t *svar;

				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
					existing = &svar->dtsv_var;
			}

			break;
		}

		vt = &v->dtdv_type;

		if (vt->dtdt_flags & DIF_TF_BYREF) {
			if (vt->dtdt_size == 0) {
				err += efunc(i, "zero-sized variable\n");
				break;
			}

			if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
			    v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
			    vt->dtdt_size > dtrace_statvar_maxsize) {
				err += efunc(i, "oversized by-ref static\n");
				break;
			}
		}

		if (existing == NULL || existing->dtdv_id == 0)
			continue;

		ASSERT(existing->dtdv_id == v->dtdv_id);
		ASSERT(existing->dtdv_scope == v->dtdv_scope);

		if (existing->dtdv_kind != v->dtdv_kind)
			err += efunc(i, "%d changed variable kind\n", id);

		et = &existing->dtdv_type;

		if (vt->dtdt_flags != et->dtdt_flags) {
			err += efunc(i, "%d changed variable type flags\n", id);
			break;
		}

		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
			err += efunc(i, "%d changed variable type size\n", id);
			break;
		}
	}

	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
		dif_instr_t instr = dp->dtdo_buf[pc];

		uint_t v = DIF_INSTR_VAR(instr);
		uint_t op = DIF_INSTR_OP(instr);

		switch (op) {
		case DIF_OP_LDGS:
		case DIF_OP_LDGAA:
		case DIF_OP_STGS:
		case DIF_OP_STGAA:
			if (v > DIF_VAR_OTHER_UBASE + maxglobal)
				err += efunc(pc, "invalid variable %u\n", v);
			break;
		case DIF_OP_LDTS:
		case DIF_OP_LDTAA:
		case DIF_OP_STTS:
		case DIF_OP_STTAA:
			if (v > DIF_VAR_OTHER_UBASE + maxtlocal)
				err += efunc(pc, "invalid variable %u\n", v);
			break;
		case DIF_OP_LDLS:
		case DIF_OP_STLS:
			if (v > DIF_VAR_OTHER_UBASE + maxlocal)
				err += efunc(pc, "invalid variable %u\n", v);
			break;
		default:
			break;
		}
	}

	return (err);
}

/*
 * Validate a DTrace DIF object that is to be used as a helper.  Helpers
 * are much more constrained than normal DIFOs.  Specifically, they may
 * not:
 *
 * 1. Make calls to subroutines other than copyin(), copyinstr() or
 *    miscellaneous string routines.
 * 2. Access DTrace variables other than the args[] array, and the
 *    curthread, pid, ppid, tid, execargs, execname, zonename, uid and
 *    gid variables.
 * 3. Have thread-local variables.
 * 4. Have dynamic variables.
 */
static int
dtrace_difo_validate_helper(dtrace_difo_t *dp)
{
	int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 10227 int err = 0; 10228 uint_t pc; 10229 10230 for (pc = 0; pc < dp->dtdo_len; pc++) { 10231 dif_instr_t instr = dp->dtdo_buf[pc]; 10232 10233 uint_t v = DIF_INSTR_VAR(instr); 10234 uint_t subr = DIF_INSTR_SUBR(instr); 10235 uint_t op = DIF_INSTR_OP(instr); 10236 10237 switch (op) { 10238 case DIF_OP_OR: 10239 case DIF_OP_XOR: 10240 case DIF_OP_AND: 10241 case DIF_OP_SLL: 10242 case DIF_OP_SRL: 10243 case DIF_OP_SRA: 10244 case DIF_OP_SUB: 10245 case DIF_OP_ADD: 10246 case DIF_OP_MUL: 10247 case DIF_OP_SDIV: 10248 case DIF_OP_UDIV: 10249 case DIF_OP_SREM: 10250 case DIF_OP_UREM: 10251 case DIF_OP_COPYS: 10252 case DIF_OP_NOT: 10253 case DIF_OP_MOV: 10254 case DIF_OP_RLDSB: 10255 case DIF_OP_RLDSH: 10256 case DIF_OP_RLDSW: 10257 case DIF_OP_RLDUB: 10258 case DIF_OP_RLDUH: 10259 case DIF_OP_RLDUW: 10260 case DIF_OP_RLDX: 10261 case DIF_OP_ULDSB: 10262 case DIF_OP_ULDSH: 10263 case DIF_OP_ULDSW: 10264 case DIF_OP_ULDUB: 10265 case DIF_OP_ULDUH: 10266 case DIF_OP_ULDUW: 10267 case DIF_OP_ULDX: 10268 case DIF_OP_STB: 10269 case DIF_OP_STH: 10270 case DIF_OP_STW: 10271 case DIF_OP_STX: 10272 case DIF_OP_ALLOCS: 10273 case DIF_OP_CMP: 10274 case DIF_OP_SCMP: 10275 case DIF_OP_TST: 10276 case DIF_OP_BA: 10277 case DIF_OP_BE: 10278 case DIF_OP_BNE: 10279 case DIF_OP_BG: 10280 case DIF_OP_BGU: 10281 case DIF_OP_BGE: 10282 case DIF_OP_BGEU: 10283 case DIF_OP_BL: 10284 case DIF_OP_BLU: 10285 case DIF_OP_BLE: 10286 case DIF_OP_BLEU: 10287 case DIF_OP_RET: 10288 case DIF_OP_NOP: 10289 case DIF_OP_POPTS: 10290 case DIF_OP_FLUSHTS: 10291 case DIF_OP_SETX: 10292 case DIF_OP_SETS: 10293 case DIF_OP_LDGA: 10294 case DIF_OP_LDLS: 10295 case DIF_OP_STGS: 10296 case DIF_OP_STLS: 10297 case DIF_OP_PUSHTR: 10298 case DIF_OP_PUSHTV: 10299 break; 10300 10301 case DIF_OP_LDGS: 10302 if (v >= DIF_VAR_OTHER_UBASE) 10303 break; 10304 10305 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 10306 break; 10307 10308 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 10309 v == DIF_VAR_PPID || v == DIF_VAR_TID || 10310 v == DIF_VAR_EXECARGS || 10311 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 10312 v == DIF_VAR_UID || v == DIF_VAR_GID) 10313 break; 10314 10315 err += efunc(pc, "illegal variable %u\n", v); 10316 break; 10317 10318 case DIF_OP_LDTA: 10319 case DIF_OP_LDTS: 10320 case DIF_OP_LDGAA: 10321 case DIF_OP_LDTAA: 10322 err += efunc(pc, "illegal dynamic variable load\n"); 10323 break; 10324 10325 case DIF_OP_STTS: 10326 case DIF_OP_STGAA: 10327 case DIF_OP_STTAA: 10328 err += efunc(pc, "illegal dynamic variable store\n"); 10329 break; 10330 10331 case DIF_OP_CALL: 10332 if (subr == DIF_SUBR_ALLOCA || 10333 subr == DIF_SUBR_BCOPY || 10334 subr == DIF_SUBR_COPYIN || 10335 subr == DIF_SUBR_COPYINTO || 10336 subr == DIF_SUBR_COPYINSTR || 10337 subr == DIF_SUBR_INDEX || 10338 subr == DIF_SUBR_INET_NTOA || 10339 subr == DIF_SUBR_INET_NTOA6 || 10340 subr == DIF_SUBR_INET_NTOP || 10341 subr == DIF_SUBR_JSON || 10342 subr == DIF_SUBR_LLTOSTR || 10343 subr == DIF_SUBR_STRTOLL || 10344 subr == DIF_SUBR_RINDEX || 10345 subr == DIF_SUBR_STRCHR || 10346 subr == DIF_SUBR_STRJOIN || 10347 subr == DIF_SUBR_STRRCHR || 10348 subr == DIF_SUBR_STRSTR || 10349 subr == DIF_SUBR_HTONS || 10350 subr == DIF_SUBR_HTONL || 10351 subr == DIF_SUBR_HTONLL || 10352 subr == DIF_SUBR_NTOHS || 10353 subr == DIF_SUBR_NTOHL || 10354 subr == DIF_SUBR_NTOHLL || 10355 subr == DIF_SUBR_MEMREF) 10356 break; 10357 #ifdef __FreeBSD__ 10358 if (subr == DIF_SUBR_MEMSTR) 10359 break; 10360 #endif 10361 10362 err += efunc(pc, "invalid subr 
%u\n", subr); 10363 break; 10364 10365 default: 10366 err += efunc(pc, "invalid opcode %u\n", 10367 DIF_INSTR_OP(instr)); 10368 } 10369 } 10370 10371 return (err); 10372 } 10373 10374 /* 10375 * Returns 1 if the expression in the DIF object can be cached on a per-thread 10376 * basis; 0 if not. 10377 */ 10378 static int 10379 dtrace_difo_cacheable(dtrace_difo_t *dp) 10380 { 10381 int i; 10382 10383 if (dp == NULL) 10384 return (0); 10385 10386 for (i = 0; i < dp->dtdo_varlen; i++) { 10387 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10388 10389 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 10390 continue; 10391 10392 switch (v->dtdv_id) { 10393 case DIF_VAR_CURTHREAD: 10394 case DIF_VAR_PID: 10395 case DIF_VAR_TID: 10396 case DIF_VAR_EXECARGS: 10397 case DIF_VAR_EXECNAME: 10398 case DIF_VAR_ZONENAME: 10399 break; 10400 10401 default: 10402 return (0); 10403 } 10404 } 10405 10406 /* 10407 * This DIF object may be cacheable. Now we need to look for any 10408 * array loading instructions, any memory loading instructions, or 10409 * any stores to thread-local variables. 10410 */ 10411 for (i = 0; i < dp->dtdo_len; i++) { 10412 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 10413 10414 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 10415 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 10416 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 10417 op == DIF_OP_LDGA || op == DIF_OP_STTS) 10418 return (0); 10419 } 10420 10421 return (1); 10422 } 10423 10424 static void 10425 dtrace_difo_hold(dtrace_difo_t *dp) 10426 { 10427 int i; 10428 10429 ASSERT(MUTEX_HELD(&dtrace_lock)); 10430 10431 dp->dtdo_refcnt++; 10432 ASSERT(dp->dtdo_refcnt != 0); 10433 10434 /* 10435 * We need to check this DIF object for references to the variable 10436 * DIF_VAR_VTIMESTAMP. 10437 */ 10438 for (i = 0; i < dp->dtdo_varlen; i++) { 10439 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10440 10441 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10442 continue; 10443 10444 if (dtrace_vtime_references++ == 0) 10445 dtrace_vtime_enable(); 10446 } 10447 } 10448 10449 /* 10450 * This routine calculates the dynamic variable chunksize for a given DIF 10451 * object. The calculation is not fool-proof, and can probably be tricked by 10452 * malicious DIF -- but it works for all compiler-generated DIF. Because this 10453 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 10454 * if a dynamic variable size exceeds the chunksize. 
10455 */ 10456 static void 10457 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10458 { 10459 uint64_t sval = 0; 10460 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 10461 const dif_instr_t *text = dp->dtdo_buf; 10462 uint_t pc, srd = 0; 10463 uint_t ttop = 0; 10464 size_t size, ksize; 10465 uint_t id, i; 10466 10467 for (pc = 0; pc < dp->dtdo_len; pc++) { 10468 dif_instr_t instr = text[pc]; 10469 uint_t op = DIF_INSTR_OP(instr); 10470 uint_t rd = DIF_INSTR_RD(instr); 10471 uint_t r1 = DIF_INSTR_R1(instr); 10472 uint_t nkeys = 0; 10473 uchar_t scope = 0; 10474 10475 dtrace_key_t *key = tupregs; 10476 10477 switch (op) { 10478 case DIF_OP_SETX: 10479 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 10480 srd = rd; 10481 continue; 10482 10483 case DIF_OP_STTS: 10484 key = &tupregs[DIF_DTR_NREGS]; 10485 key[0].dttk_size = 0; 10486 key[1].dttk_size = 0; 10487 nkeys = 2; 10488 scope = DIFV_SCOPE_THREAD; 10489 break; 10490 10491 case DIF_OP_STGAA: 10492 case DIF_OP_STTAA: 10493 nkeys = ttop; 10494 10495 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 10496 key[nkeys++].dttk_size = 0; 10497 10498 key[nkeys++].dttk_size = 0; 10499 10500 if (op == DIF_OP_STTAA) { 10501 scope = DIFV_SCOPE_THREAD; 10502 } else { 10503 scope = DIFV_SCOPE_GLOBAL; 10504 } 10505 10506 break; 10507 10508 case DIF_OP_PUSHTR: 10509 if (ttop == DIF_DTR_NREGS) 10510 return; 10511 10512 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 10513 /* 10514 * If the register for the size of the "pushtr" 10515 * is %r0 (or the value is 0) and the type is 10516 * a string, we'll use the system-wide default 10517 * string size. 10518 */ 10519 tupregs[ttop++].dttk_size = 10520 dtrace_strsize_default; 10521 } else { 10522 if (srd == 0) 10523 return; 10524 10525 if (sval > LONG_MAX) 10526 return; 10527 10528 tupregs[ttop++].dttk_size = sval; 10529 } 10530 10531 break; 10532 10533 case DIF_OP_PUSHTV: 10534 if (ttop == DIF_DTR_NREGS) 10535 return; 10536 10537 tupregs[ttop++].dttk_size = 0; 10538 break; 10539 10540 case DIF_OP_FLUSHTS: 10541 ttop = 0; 10542 break; 10543 10544 case DIF_OP_POPTS: 10545 if (ttop != 0) 10546 ttop--; 10547 break; 10548 } 10549 10550 sval = 0; 10551 srd = 0; 10552 10553 if (nkeys == 0) 10554 continue; 10555 10556 /* 10557 * We have a dynamic variable allocation; calculate its size. 10558 */ 10559 for (ksize = 0, i = 0; i < nkeys; i++) 10560 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 10561 10562 size = sizeof (dtrace_dynvar_t); 10563 size += sizeof (dtrace_key_t) * (nkeys - 1); 10564 size += ksize; 10565 10566 /* 10567 * Now we need to determine the size of the stored data. 10568 */ 10569 id = DIF_INSTR_VAR(instr); 10570 10571 for (i = 0; i < dp->dtdo_varlen; i++) { 10572 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10573 10574 if (v->dtdv_id == id && v->dtdv_scope == scope) { 10575 size += v->dtdv_type.dtdt_size; 10576 break; 10577 } 10578 } 10579 10580 if (i == dp->dtdo_varlen) 10581 return; 10582 10583 /* 10584 * We have the size. If this is larger than the chunk size 10585 * for our dynamic variable state, reset the chunk size. 10586 */ 10587 size = P2ROUNDUP(size, sizeof (uint64_t)); 10588 10589 /* 10590 * Before setting the chunk size, check that we're not going 10591 * to set it to a negative value... 10592 */ 10593 if (size > LONG_MAX) 10594 return; 10595 10596 /* 10597 * ...and make certain that we didn't badly overflow. 
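		 * (For example, a malicious DIFO advertising a key or type
		 * size near SIZE_MAX could wrap the additions above; either
		 * of these checks catches that.)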
10598 */ 10599 if (size < ksize || size < sizeof (dtrace_dynvar_t)) 10600 return; 10601 10602 if (size > vstate->dtvs_dynvars.dtds_chunksize) 10603 vstate->dtvs_dynvars.dtds_chunksize = size; 10604 } 10605 } 10606 10607 static void 10608 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10609 { 10610 int i, oldsvars, osz, nsz, otlocals, ntlocals; 10611 uint_t id; 10612 10613 ASSERT(MUTEX_HELD(&dtrace_lock)); 10614 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 10615 10616 for (i = 0; i < dp->dtdo_varlen; i++) { 10617 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10618 dtrace_statvar_t *svar, ***svarp = NULL; 10619 size_t dsize = 0; 10620 uint8_t scope = v->dtdv_scope; 10621 int *np = NULL; 10622 10623 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10624 continue; 10625 10626 id -= DIF_VAR_OTHER_UBASE; 10627 10628 switch (scope) { 10629 case DIFV_SCOPE_THREAD: 10630 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 10631 dtrace_difv_t *tlocals; 10632 10633 if ((ntlocals = (otlocals << 1)) == 0) 10634 ntlocals = 1; 10635 10636 osz = otlocals * sizeof (dtrace_difv_t); 10637 nsz = ntlocals * sizeof (dtrace_difv_t); 10638 10639 tlocals = kmem_zalloc(nsz, KM_SLEEP); 10640 10641 if (osz != 0) { 10642 bcopy(vstate->dtvs_tlocals, 10643 tlocals, osz); 10644 kmem_free(vstate->dtvs_tlocals, osz); 10645 } 10646 10647 vstate->dtvs_tlocals = tlocals; 10648 vstate->dtvs_ntlocals = ntlocals; 10649 } 10650 10651 vstate->dtvs_tlocals[id] = *v; 10652 continue; 10653 10654 case DIFV_SCOPE_LOCAL: 10655 np = &vstate->dtvs_nlocals; 10656 svarp = &vstate->dtvs_locals; 10657 10658 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10659 dsize = NCPU * (v->dtdv_type.dtdt_size + 10660 sizeof (uint64_t)); 10661 else 10662 dsize = NCPU * sizeof (uint64_t); 10663 10664 break; 10665 10666 case DIFV_SCOPE_GLOBAL: 10667 np = &vstate->dtvs_nglobals; 10668 svarp = &vstate->dtvs_globals; 10669 10670 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10671 dsize = v->dtdv_type.dtdt_size + 10672 sizeof (uint64_t); 10673 10674 break; 10675 10676 default: 10677 ASSERT(0); 10678 } 10679 10680 while (id >= (oldsvars = *np)) { 10681 dtrace_statvar_t **statics; 10682 int newsvars, oldsize, newsize; 10683 10684 if ((newsvars = (oldsvars << 1)) == 0) 10685 newsvars = 1; 10686 10687 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 10688 newsize = newsvars * sizeof (dtrace_statvar_t *); 10689 10690 statics = kmem_zalloc(newsize, KM_SLEEP); 10691 10692 if (oldsize != 0) { 10693 bcopy(*svarp, statics, oldsize); 10694 kmem_free(*svarp, oldsize); 10695 } 10696 10697 *svarp = statics; 10698 *np = newsvars; 10699 } 10700 10701 if ((svar = (*svarp)[id]) == NULL) { 10702 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 10703 svar->dtsv_var = *v; 10704 10705 if ((svar->dtsv_size = dsize) != 0) { 10706 svar->dtsv_data = (uint64_t)(uintptr_t) 10707 kmem_zalloc(dsize, KM_SLEEP); 10708 } 10709 10710 (*svarp)[id] = svar; 10711 } 10712 10713 svar->dtsv_refcnt++; 10714 } 10715 10716 dtrace_difo_chunksize(dp, vstate); 10717 dtrace_difo_hold(dp); 10718 } 10719 10720 static dtrace_difo_t * 10721 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10722 { 10723 dtrace_difo_t *new; 10724 size_t sz; 10725 10726 ASSERT(dp->dtdo_buf != NULL); 10727 ASSERT(dp->dtdo_refcnt != 0); 10728 10729 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10730 10731 ASSERT(dp->dtdo_buf != NULL); 10732 sz = dp->dtdo_len * sizeof (dif_instr_t); 10733 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 10734 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 10735 new->dtdo_len = 
dp->dtdo_len; 10736 10737 if (dp->dtdo_strtab != NULL) { 10738 ASSERT(dp->dtdo_strlen != 0); 10739 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 10740 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 10741 new->dtdo_strlen = dp->dtdo_strlen; 10742 } 10743 10744 if (dp->dtdo_inttab != NULL) { 10745 ASSERT(dp->dtdo_intlen != 0); 10746 sz = dp->dtdo_intlen * sizeof (uint64_t); 10747 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 10748 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 10749 new->dtdo_intlen = dp->dtdo_intlen; 10750 } 10751 10752 if (dp->dtdo_vartab != NULL) { 10753 ASSERT(dp->dtdo_varlen != 0); 10754 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 10755 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 10756 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 10757 new->dtdo_varlen = dp->dtdo_varlen; 10758 } 10759 10760 dtrace_difo_init(new, vstate); 10761 return (new); 10762 } 10763 10764 static void 10765 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10766 { 10767 int i; 10768 10769 ASSERT(dp->dtdo_refcnt == 0); 10770 10771 for (i = 0; i < dp->dtdo_varlen; i++) { 10772 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10773 dtrace_statvar_t *svar, **svarp = NULL; 10774 uint_t id; 10775 uint8_t scope = v->dtdv_scope; 10776 int *np = NULL; 10777 10778 switch (scope) { 10779 case DIFV_SCOPE_THREAD: 10780 continue; 10781 10782 case DIFV_SCOPE_LOCAL: 10783 np = &vstate->dtvs_nlocals; 10784 svarp = vstate->dtvs_locals; 10785 break; 10786 10787 case DIFV_SCOPE_GLOBAL: 10788 np = &vstate->dtvs_nglobals; 10789 svarp = vstate->dtvs_globals; 10790 break; 10791 10792 default: 10793 ASSERT(0); 10794 } 10795 10796 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10797 continue; 10798 10799 id -= DIF_VAR_OTHER_UBASE; 10800 ASSERT(id < *np); 10801 10802 svar = svarp[id]; 10803 ASSERT(svar != NULL); 10804 ASSERT(svar->dtsv_refcnt > 0); 10805 10806 if (--svar->dtsv_refcnt > 0) 10807 continue; 10808 10809 if (svar->dtsv_size != 0) { 10810 ASSERT(svar->dtsv_data != 0); 10811 kmem_free((void *)(uintptr_t)svar->dtsv_data, 10812 svar->dtsv_size); 10813 } 10814 10815 kmem_free(svar, sizeof (dtrace_statvar_t)); 10816 svarp[id] = NULL; 10817 } 10818 10819 if (dp->dtdo_buf != NULL) 10820 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10821 if (dp->dtdo_inttab != NULL) 10822 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10823 if (dp->dtdo_strtab != NULL) 10824 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10825 if (dp->dtdo_vartab != NULL) 10826 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10827 10828 kmem_free(dp, sizeof (dtrace_difo_t)); 10829 } 10830 10831 static void 10832 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10833 { 10834 int i; 10835 10836 ASSERT(MUTEX_HELD(&dtrace_lock)); 10837 ASSERT(dp->dtdo_refcnt != 0); 10838 10839 for (i = 0; i < dp->dtdo_varlen; i++) { 10840 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10841 10842 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10843 continue; 10844 10845 ASSERT(dtrace_vtime_references > 0); 10846 if (--dtrace_vtime_references == 0) 10847 dtrace_vtime_disable(); 10848 } 10849 10850 if (--dp->dtdo_refcnt == 0) 10851 dtrace_difo_destroy(dp, vstate); 10852 } 10853 10854 /* 10855 * DTrace Format Functions 10856 */ 10857 static uint16_t 10858 dtrace_format_add(dtrace_state_t *state, char *str) 10859 { 10860 char *fmt, **new; 10861 uint16_t ndx, len = strlen(str) + 1; 10862 10863 fmt = kmem_zalloc(len, KM_SLEEP); 10864 bcopy(str, fmt, len); 10865 10866 for (ndx = 0; ndx < 
state->dts_nformats; ndx++) { 10867 if (state->dts_formats[ndx] == NULL) { 10868 state->dts_formats[ndx] = fmt; 10869 return (ndx + 1); 10870 } 10871 } 10872 10873 if (state->dts_nformats == USHRT_MAX) { 10874 /* 10875 * This is only likely if a denial-of-service attack is being 10876 * attempted. As such, it's okay to fail silently here. 10877 */ 10878 kmem_free(fmt, len); 10879 return (0); 10880 } 10881 10882 /* 10883 * For simplicity, we always resize the formats array to be exactly the 10884 * number of formats. 10885 */ 10886 ndx = state->dts_nformats++; 10887 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 10888 10889 if (state->dts_formats != NULL) { 10890 ASSERT(ndx != 0); 10891 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 10892 kmem_free(state->dts_formats, ndx * sizeof (char *)); 10893 } 10894 10895 state->dts_formats = new; 10896 state->dts_formats[ndx] = fmt; 10897 10898 return (ndx + 1); 10899 } 10900 10901 static void 10902 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 10903 { 10904 char *fmt; 10905 10906 ASSERT(state->dts_formats != NULL); 10907 ASSERT(format <= state->dts_nformats); 10908 ASSERT(state->dts_formats[format - 1] != NULL); 10909 10910 fmt = state->dts_formats[format - 1]; 10911 kmem_free(fmt, strlen(fmt) + 1); 10912 state->dts_formats[format - 1] = NULL; 10913 } 10914 10915 static void 10916 dtrace_format_destroy(dtrace_state_t *state) 10917 { 10918 int i; 10919 10920 if (state->dts_nformats == 0) { 10921 ASSERT(state->dts_formats == NULL); 10922 return; 10923 } 10924 10925 ASSERT(state->dts_formats != NULL); 10926 10927 for (i = 0; i < state->dts_nformats; i++) { 10928 char *fmt = state->dts_formats[i]; 10929 10930 if (fmt == NULL) 10931 continue; 10932 10933 kmem_free(fmt, strlen(fmt) + 1); 10934 } 10935 10936 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 10937 state->dts_nformats = 0; 10938 state->dts_formats = NULL; 10939 } 10940 10941 /* 10942 * DTrace Predicate Functions 10943 */ 10944 static dtrace_predicate_t * 10945 dtrace_predicate_create(dtrace_difo_t *dp) 10946 { 10947 dtrace_predicate_t *pred; 10948 10949 ASSERT(MUTEX_HELD(&dtrace_lock)); 10950 ASSERT(dp->dtdo_refcnt != 0); 10951 10952 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 10953 pred->dtp_difo = dp; 10954 pred->dtp_refcnt = 1; 10955 10956 if (!dtrace_difo_cacheable(dp)) 10957 return (pred); 10958 10959 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 10960 /* 10961 * This is only theoretically possible -- we have had 2^32 10962 * cacheable predicates on this machine. We cannot allow any 10963 * more predicates to become cacheable: as unlikely as it is, 10964 * there may be a thread caching a (now stale) predicate cache 10965 * ID. 
(N.B.: the temptation is being successfully resisted to 10966 * have this cmn_err() "Holy shit -- we executed this code!") 10967 */ 10968 return (pred); 10969 } 10970 10971 pred->dtp_cacheid = dtrace_predcache_id++; 10972 10973 return (pred); 10974 } 10975 10976 static void 10977 dtrace_predicate_hold(dtrace_predicate_t *pred) 10978 { 10979 ASSERT(MUTEX_HELD(&dtrace_lock)); 10980 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 10981 ASSERT(pred->dtp_refcnt > 0); 10982 10983 pred->dtp_refcnt++; 10984 } 10985 10986 static void 10987 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 10988 { 10989 dtrace_difo_t *dp = pred->dtp_difo; 10990 10991 ASSERT(MUTEX_HELD(&dtrace_lock)); 10992 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 10993 ASSERT(pred->dtp_refcnt > 0); 10994 10995 if (--pred->dtp_refcnt == 0) { 10996 dtrace_difo_release(pred->dtp_difo, vstate); 10997 kmem_free(pred, sizeof (dtrace_predicate_t)); 10998 } 10999 } 11000 11001 /* 11002 * DTrace Action Description Functions 11003 */ 11004 static dtrace_actdesc_t * 11005 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 11006 uint64_t uarg, uint64_t arg) 11007 { 11008 dtrace_actdesc_t *act; 11009 11010 #ifdef illumos 11011 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 11012 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 11013 #endif 11014 11015 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 11016 act->dtad_kind = kind; 11017 act->dtad_ntuple = ntuple; 11018 act->dtad_uarg = uarg; 11019 act->dtad_arg = arg; 11020 act->dtad_refcnt = 1; 11021 11022 return (act); 11023 } 11024 11025 static void 11026 dtrace_actdesc_hold(dtrace_actdesc_t *act) 11027 { 11028 ASSERT(act->dtad_refcnt >= 1); 11029 act->dtad_refcnt++; 11030 } 11031 11032 static void 11033 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 11034 { 11035 dtrace_actkind_t kind = act->dtad_kind; 11036 dtrace_difo_t *dp; 11037 11038 ASSERT(act->dtad_refcnt >= 1); 11039 11040 if (--act->dtad_refcnt != 0) 11041 return; 11042 11043 if ((dp = act->dtad_difo) != NULL) 11044 dtrace_difo_release(dp, vstate); 11045 11046 if (DTRACEACT_ISPRINTFLIKE(kind)) { 11047 char *str = (char *)(uintptr_t)act->dtad_arg; 11048 11049 #ifdef illumos 11050 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 11051 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 11052 #endif 11053 11054 if (str != NULL) 11055 kmem_free(str, strlen(str) + 1); 11056 } 11057 11058 kmem_free(act, sizeof (dtrace_actdesc_t)); 11059 } 11060 11061 /* 11062 * DTrace ECB Functions 11063 */ 11064 static dtrace_ecb_t * 11065 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 11066 { 11067 dtrace_ecb_t *ecb; 11068 dtrace_epid_t epid; 11069 11070 ASSERT(MUTEX_HELD(&dtrace_lock)); 11071 11072 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 11073 ecb->dte_predicate = NULL; 11074 ecb->dte_probe = probe; 11075 11076 /* 11077 * The default size is the size of the default action: recording 11078 * the header. 
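	 * (The header is a dtrace_rechdr_t, which holds the EPID and a
	 * 64-bit timestamp split into high and low halves; see
	 * <sys/dtrace.h>.)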
11079 */ 11080 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 11081 ecb->dte_alignment = sizeof (dtrace_epid_t); 11082 11083 epid = state->dts_epid++; 11084 11085 if (epid - 1 >= state->dts_necbs) { 11086 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 11087 int necbs = state->dts_necbs << 1; 11088 11089 ASSERT(epid == state->dts_necbs + 1); 11090 11091 if (necbs == 0) { 11092 ASSERT(oecbs == NULL); 11093 necbs = 1; 11094 } 11095 11096 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 11097 11098 if (oecbs != NULL) 11099 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 11100 11101 dtrace_membar_producer(); 11102 state->dts_ecbs = ecbs; 11103 11104 if (oecbs != NULL) { 11105 /* 11106 * If this state is active, we must dtrace_sync() 11107 * before we can free the old dts_ecbs array: we're 11108 * coming in hot, and there may be active ring 11109 * buffer processing (which indexes into the dts_ecbs 11110 * array) on another CPU. 11111 */ 11112 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11113 dtrace_sync(); 11114 11115 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 11116 } 11117 11118 dtrace_membar_producer(); 11119 state->dts_necbs = necbs; 11120 } 11121 11122 ecb->dte_state = state; 11123 11124 ASSERT(state->dts_ecbs[epid - 1] == NULL); 11125 dtrace_membar_producer(); 11126 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 11127 11128 return (ecb); 11129 } 11130 11131 static void 11132 dtrace_ecb_enable(dtrace_ecb_t *ecb) 11133 { 11134 dtrace_probe_t *probe = ecb->dte_probe; 11135 11136 ASSERT(MUTEX_HELD(&cpu_lock)); 11137 ASSERT(MUTEX_HELD(&dtrace_lock)); 11138 ASSERT(ecb->dte_next == NULL); 11139 11140 if (probe == NULL) { 11141 /* 11142 * This is the NULL probe -- there's nothing to do. 11143 */ 11144 return; 11145 } 11146 11147 if (probe->dtpr_ecb == NULL) { 11148 dtrace_provider_t *prov = probe->dtpr_provider; 11149 11150 /* 11151 * We're the first ECB on this probe. 11152 */ 11153 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 11154 11155 if (ecb->dte_predicate != NULL) 11156 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 11157 11158 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 11159 probe->dtpr_id, probe->dtpr_arg); 11160 } else { 11161 /* 11162 * This probe is already active. Swing the last pointer to 11163 * point to the new ECB, and issue a dtrace_sync() to assure 11164 * that all CPUs have seen the change. 11165 */ 11166 ASSERT(probe->dtpr_ecb_last != NULL); 11167 probe->dtpr_ecb_last->dte_next = ecb; 11168 probe->dtpr_ecb_last = ecb; 11169 probe->dtpr_predcache = 0; 11170 11171 dtrace_sync(); 11172 } 11173 } 11174 11175 static int 11176 dtrace_ecb_resize(dtrace_ecb_t *ecb) 11177 { 11178 dtrace_action_t *act; 11179 uint32_t curneeded = UINT32_MAX; 11180 uint32_t aggbase = UINT32_MAX; 11181 11182 /* 11183 * If we record anything, we always record the dtrace_rechdr_t. (And 11184 * we always record it first.) 
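	 *
	 * As a hypothetical illustration: for a clause containing
	 * trace(timestamp) followed by trace(execname), the loop below
	 * places the dtrace_rechdr_t first, then each record at its
	 * required alignment, growing dte_size and dte_needed as it goes.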
11185 */ 11186 ecb->dte_size = sizeof (dtrace_rechdr_t); 11187 ecb->dte_alignment = sizeof (dtrace_epid_t); 11188 11189 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11190 dtrace_recdesc_t *rec = &act->dta_rec; 11191 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 11192 11193 ecb->dte_alignment = MAX(ecb->dte_alignment, 11194 rec->dtrd_alignment); 11195 11196 if (DTRACEACT_ISAGG(act->dta_kind)) { 11197 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11198 11199 ASSERT(rec->dtrd_size != 0); 11200 ASSERT(agg->dtag_first != NULL); 11201 ASSERT(act->dta_prev->dta_intuple); 11202 ASSERT(aggbase != UINT32_MAX); 11203 ASSERT(curneeded != UINT32_MAX); 11204 11205 agg->dtag_base = aggbase; 11206 11207 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11208 rec->dtrd_offset = curneeded; 11209 if (curneeded + rec->dtrd_size < curneeded) 11210 return (EINVAL); 11211 curneeded += rec->dtrd_size; 11212 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 11213 11214 aggbase = UINT32_MAX; 11215 curneeded = UINT32_MAX; 11216 } else if (act->dta_intuple) { 11217 if (curneeded == UINT32_MAX) { 11218 /* 11219 * This is the first record in a tuple. Align 11220 * curneeded to be at offset 4 in an 8-byte 11221 * aligned block. 11222 */ 11223 ASSERT(act->dta_prev == NULL || 11224 !act->dta_prev->dta_intuple); 11225 ASSERT3U(aggbase, ==, UINT32_MAX); 11226 curneeded = P2PHASEUP(ecb->dte_size, 11227 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 11228 11229 aggbase = curneeded - sizeof (dtrace_aggid_t); 11230 ASSERT(IS_P2ALIGNED(aggbase, 11231 sizeof (uint64_t))); 11232 } 11233 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11234 rec->dtrd_offset = curneeded; 11235 if (curneeded + rec->dtrd_size < curneeded) 11236 return (EINVAL); 11237 curneeded += rec->dtrd_size; 11238 } else { 11239 /* tuples must be followed by an aggregation */ 11240 ASSERT(act->dta_prev == NULL || 11241 !act->dta_prev->dta_intuple); 11242 11243 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 11244 rec->dtrd_alignment); 11245 rec->dtrd_offset = ecb->dte_size; 11246 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size) 11247 return (EINVAL); 11248 ecb->dte_size += rec->dtrd_size; 11249 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 11250 } 11251 } 11252 11253 if ((act = ecb->dte_action) != NULL && 11254 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 11255 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 11256 /* 11257 * If the size is still sizeof (dtrace_rechdr_t), then all 11258 * actions store no data; set the size to 0. 
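		 * (A clause consisting solely of, say, stop() records
		 * nothing, so there is no reason to burn header space in
		 * the buffer each time it fires.)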
11259 */ 11260 ecb->dte_size = 0; 11261 } 11262 11263 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 11264 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 11265 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 11266 ecb->dte_needed); 11267 return (0); 11268 } 11269 11270 static dtrace_action_t * 11271 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11272 { 11273 dtrace_aggregation_t *agg; 11274 size_t size = sizeof (uint64_t); 11275 int ntuple = desc->dtad_ntuple; 11276 dtrace_action_t *act; 11277 dtrace_recdesc_t *frec; 11278 dtrace_aggid_t aggid; 11279 dtrace_state_t *state = ecb->dte_state; 11280 11281 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 11282 agg->dtag_ecb = ecb; 11283 11284 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 11285 11286 switch (desc->dtad_kind) { 11287 case DTRACEAGG_MIN: 11288 agg->dtag_initial = INT64_MAX; 11289 agg->dtag_aggregate = dtrace_aggregate_min; 11290 break; 11291 11292 case DTRACEAGG_MAX: 11293 agg->dtag_initial = INT64_MIN; 11294 agg->dtag_aggregate = dtrace_aggregate_max; 11295 break; 11296 11297 case DTRACEAGG_COUNT: 11298 agg->dtag_aggregate = dtrace_aggregate_count; 11299 break; 11300 11301 case DTRACEAGG_QUANTIZE: 11302 agg->dtag_aggregate = dtrace_aggregate_quantize; 11303 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 11304 sizeof (uint64_t); 11305 break; 11306 11307 case DTRACEAGG_LQUANTIZE: { 11308 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 11309 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 11310 11311 agg->dtag_initial = desc->dtad_arg; 11312 agg->dtag_aggregate = dtrace_aggregate_lquantize; 11313 11314 if (step == 0 || levels == 0) 11315 goto err; 11316 11317 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 11318 break; 11319 } 11320 11321 case DTRACEAGG_LLQUANTIZE: { 11322 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 11323 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 11324 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 11325 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 11326 int64_t v; 11327 11328 agg->dtag_initial = desc->dtad_arg; 11329 agg->dtag_aggregate = dtrace_aggregate_llquantize; 11330 11331 if (factor < 2 || low >= high || nsteps < factor) 11332 goto err; 11333 11334 /* 11335 * Now check that the number of steps evenly divides a power 11336 * of the factor. (This assures both integer bucket size and 11337 * linearity within each magnitude.) 11338 */ 11339 for (v = factor; v < nsteps; v *= factor) 11340 continue; 11341 11342 if ((v % nsteps) || (nsteps % factor)) 11343 goto err; 11344 11345 size = (dtrace_aggregate_llquantize_bucket(factor, 11346 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 11347 break; 11348 } 11349 11350 case DTRACEAGG_AVG: 11351 agg->dtag_aggregate = dtrace_aggregate_avg; 11352 size = sizeof (uint64_t) * 2; 11353 break; 11354 11355 case DTRACEAGG_STDDEV: 11356 agg->dtag_aggregate = dtrace_aggregate_stddev; 11357 size = sizeof (uint64_t) * 4; 11358 break; 11359 11360 case DTRACEAGG_SUM: 11361 agg->dtag_aggregate = dtrace_aggregate_sum; 11362 break; 11363 11364 default: 11365 goto err; 11366 } 11367 11368 agg->dtag_action.dta_rec.dtrd_size = size; 11369 11370 if (ntuple == 0) 11371 goto err; 11372 11373 /* 11374 * We must make sure that we have enough actions for the n-tuple. 
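	 *
	 * As a hypothetical illustration: for @a[pid, execname] = count(),
	 * desc->dtad_ntuple is 2, and the backward walk below must find
	 * the two preceding non-aggregating actions -- the key
	 * expressions -- before running into another aggregation.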
11375 */ 11376 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 11377 if (DTRACEACT_ISAGG(act->dta_kind)) 11378 break; 11379 11380 if (--ntuple == 0) { 11381 /* 11382 * This is the action with which our n-tuple begins. 11383 */ 11384 agg->dtag_first = act; 11385 goto success; 11386 } 11387 } 11388 11389 /* 11390 * This n-tuple is short by ntuple elements. Return failure. 11391 */ 11392 ASSERT(ntuple != 0); 11393 err: 11394 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11395 return (NULL); 11396 11397 success: 11398 /* 11399 * If the last action in the tuple has a size of zero, it's actually 11400 * an expression argument for the aggregating action. 11401 */ 11402 ASSERT(ecb->dte_action_last != NULL); 11403 act = ecb->dte_action_last; 11404 11405 if (act->dta_kind == DTRACEACT_DIFEXPR) { 11406 ASSERT(act->dta_difo != NULL); 11407 11408 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 11409 agg->dtag_hasarg = 1; 11410 } 11411 11412 /* 11413 * We need to allocate an id for this aggregation. 11414 */ 11415 #ifdef illumos 11416 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 11417 VM_BESTFIT | VM_SLEEP); 11418 #else 11419 aggid = alloc_unr(state->dts_aggid_arena); 11420 #endif 11421 11422 if (aggid - 1 >= state->dts_naggregations) { 11423 dtrace_aggregation_t **oaggs = state->dts_aggregations; 11424 dtrace_aggregation_t **aggs; 11425 int naggs = state->dts_naggregations << 1; 11426 int onaggs = state->dts_naggregations; 11427 11428 ASSERT(aggid == state->dts_naggregations + 1); 11429 11430 if (naggs == 0) { 11431 ASSERT(oaggs == NULL); 11432 naggs = 1; 11433 } 11434 11435 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 11436 11437 if (oaggs != NULL) { 11438 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 11439 kmem_free(oaggs, onaggs * sizeof (*aggs)); 11440 } 11441 11442 state->dts_aggregations = aggs; 11443 state->dts_naggregations = naggs; 11444 } 11445 11446 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 11447 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 11448 11449 frec = &agg->dtag_first->dta_rec; 11450 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 11451 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 11452 11453 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 11454 ASSERT(!act->dta_intuple); 11455 act->dta_intuple = 1; 11456 } 11457 11458 return (&agg->dtag_action); 11459 } 11460 11461 static void 11462 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 11463 { 11464 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11465 dtrace_state_t *state = ecb->dte_state; 11466 dtrace_aggid_t aggid = agg->dtag_id; 11467 11468 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 11469 #ifdef illumos 11470 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 11471 #else 11472 free_unr(state->dts_aggid_arena, aggid); 11473 #endif 11474 11475 ASSERT(state->dts_aggregations[aggid - 1] == agg); 11476 state->dts_aggregations[aggid - 1] = NULL; 11477 11478 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11479 } 11480 11481 static int 11482 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11483 { 11484 dtrace_action_t *action, *last; 11485 dtrace_difo_t *dp = desc->dtad_difo; 11486 uint32_t size = 0, align = sizeof (uint8_t), mask; 11487 uint16_t format = 0; 11488 dtrace_recdesc_t *rec; 11489 dtrace_state_t *state = ecb->dte_state; 11490 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 11491 uint64_t arg = desc->dtad_arg; 11492 11493 
ASSERT(MUTEX_HELD(&dtrace_lock)); 11494 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 11495 11496 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 11497 /* 11498 * If this is an aggregating action, there must be neither 11499 * a speculate nor a commit on the action chain. 11500 */ 11501 dtrace_action_t *act; 11502 11503 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11504 if (act->dta_kind == DTRACEACT_COMMIT) 11505 return (EINVAL); 11506 11507 if (act->dta_kind == DTRACEACT_SPECULATE) 11508 return (EINVAL); 11509 } 11510 11511 action = dtrace_ecb_aggregation_create(ecb, desc); 11512 11513 if (action == NULL) 11514 return (EINVAL); 11515 } else { 11516 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 11517 (desc->dtad_kind == DTRACEACT_DIFEXPR && 11518 dp != NULL && dp->dtdo_destructive)) { 11519 state->dts_destructive = 1; 11520 } 11521 11522 switch (desc->dtad_kind) { 11523 case DTRACEACT_PRINTF: 11524 case DTRACEACT_PRINTA: 11525 case DTRACEACT_SYSTEM: 11526 case DTRACEACT_FREOPEN: 11527 case DTRACEACT_DIFEXPR: 11528 /* 11529 * We know that our arg is a string -- turn it into a 11530 * format. 11531 */ 11532 if (arg == 0) { 11533 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 11534 desc->dtad_kind == DTRACEACT_DIFEXPR); 11535 format = 0; 11536 } else { 11537 ASSERT(arg != 0); 11538 #ifdef illumos 11539 ASSERT(arg > KERNELBASE); 11540 #endif 11541 format = dtrace_format_add(state, 11542 (char *)(uintptr_t)arg); 11543 } 11544 11545 /*FALLTHROUGH*/ 11546 case DTRACEACT_LIBACT: 11547 case DTRACEACT_TRACEMEM: 11548 case DTRACEACT_TRACEMEM_DYNSIZE: 11549 if (dp == NULL) 11550 return (EINVAL); 11551 11552 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 11553 break; 11554 11555 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 11556 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11557 return (EINVAL); 11558 11559 size = opt[DTRACEOPT_STRSIZE]; 11560 } 11561 11562 break; 11563 11564 case DTRACEACT_STACK: 11565 if ((nframes = arg) == 0) { 11566 nframes = opt[DTRACEOPT_STACKFRAMES]; 11567 ASSERT(nframes > 0); 11568 arg = nframes; 11569 } 11570 11571 size = nframes * sizeof (pc_t); 11572 break; 11573 11574 case DTRACEACT_JSTACK: 11575 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 11576 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 11577 11578 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 11579 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 11580 11581 arg = DTRACE_USTACK_ARG(nframes, strsize); 11582 11583 /*FALLTHROUGH*/ 11584 case DTRACEACT_USTACK: 11585 if (desc->dtad_kind != DTRACEACT_JSTACK && 11586 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 11587 strsize = DTRACE_USTACK_STRSIZE(arg); 11588 nframes = opt[DTRACEOPT_USTACKFRAMES]; 11589 ASSERT(nframes > 0); 11590 arg = DTRACE_USTACK_ARG(nframes, strsize); 11591 } 11592 11593 /* 11594 * Save a slot for the pid. 
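			 * (The record is thus laid out as the pid, then
			 * nframes program counter values, then any string
			 * space used for address-to-symbol translation.)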
			 */
			size = (nframes + 1) * sizeof (uint64_t);
			size += DTRACE_USTACK_STRSIZE(arg);
			size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));

			break;

		case DTRACEACT_SYM:
		case DTRACEACT_MOD:
			if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
			    sizeof (uint64_t)) ||
			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
				return (EINVAL);
			break;

		case DTRACEACT_USYM:
		case DTRACEACT_UMOD:
		case DTRACEACT_UADDR:
			if (dp == NULL ||
			    (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
				return (EINVAL);

			/*
			 * We have a slot for the pid, plus a slot for the
			 * argument.  To keep things simple (aligned with
			 * bitness-neutral sizing), we store each as a 64-bit
			 * quantity.
			 */
			size = 2 * sizeof (uint64_t);
			break;

		case DTRACEACT_STOP:
		case DTRACEACT_BREAKPOINT:
		case DTRACEACT_PANIC:
			break;

		case DTRACEACT_CHILL:
		case DTRACEACT_DISCARD:
		case DTRACEACT_RAISE:
			if (dp == NULL)
				return (EINVAL);
			break;

		case DTRACEACT_EXIT:
			if (dp == NULL ||
			    (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
				return (EINVAL);
			break;

		case DTRACEACT_SPECULATE:
			if (ecb->dte_size > sizeof (dtrace_rechdr_t))
				return (EINVAL);

			if (dp == NULL)
				return (EINVAL);

			state->dts_speculates = 1;
			break;

		case DTRACEACT_PRINTM:
			if (dp == NULL)
				return (EINVAL);
			size = dp->dtdo_rtype.dtdt_size;
			break;

		case DTRACEACT_COMMIT: {
			dtrace_action_t *act = ecb->dte_action;

			for (; act != NULL; act = act->dta_next) {
				if (act->dta_kind == DTRACEACT_COMMIT)
					return (EINVAL);
			}

			if (dp == NULL)
				return (EINVAL);
			break;
		}

		default:
			return (EINVAL);
		}

		if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
			/*
			 * If this is a data-storing action or a speculate,
			 * we must be sure that there isn't a commit on the
			 * action chain.
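			 * (D makes this a static error: a clause that
			 * commits a speculation may not also record new
			 * data, which keeps the ordering of buffer writes
			 * simple.)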
11682 */ 11683 dtrace_action_t *act = ecb->dte_action; 11684 11685 for (; act != NULL; act = act->dta_next) { 11686 if (act->dta_kind == DTRACEACT_COMMIT) 11687 return (EINVAL); 11688 } 11689 } 11690 11691 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 11692 action->dta_rec.dtrd_size = size; 11693 } 11694 11695 action->dta_refcnt = 1; 11696 rec = &action->dta_rec; 11697 size = rec->dtrd_size; 11698 11699 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 11700 if (!(size & mask)) { 11701 align = mask + 1; 11702 break; 11703 } 11704 } 11705 11706 action->dta_kind = desc->dtad_kind; 11707 11708 if ((action->dta_difo = dp) != NULL) 11709 dtrace_difo_hold(dp); 11710 11711 rec->dtrd_action = action->dta_kind; 11712 rec->dtrd_arg = arg; 11713 rec->dtrd_uarg = desc->dtad_uarg; 11714 rec->dtrd_alignment = (uint16_t)align; 11715 rec->dtrd_format = format; 11716 11717 if ((last = ecb->dte_action_last) != NULL) { 11718 ASSERT(ecb->dte_action != NULL); 11719 action->dta_prev = last; 11720 last->dta_next = action; 11721 } else { 11722 ASSERT(ecb->dte_action == NULL); 11723 ecb->dte_action = action; 11724 } 11725 11726 ecb->dte_action_last = action; 11727 11728 return (0); 11729 } 11730 11731 static void 11732 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 11733 { 11734 dtrace_action_t *act = ecb->dte_action, *next; 11735 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 11736 dtrace_difo_t *dp; 11737 uint16_t format; 11738 11739 if (act != NULL && act->dta_refcnt > 1) { 11740 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 11741 act->dta_refcnt--; 11742 } else { 11743 for (; act != NULL; act = next) { 11744 next = act->dta_next; 11745 ASSERT(next != NULL || act == ecb->dte_action_last); 11746 ASSERT(act->dta_refcnt == 1); 11747 11748 if ((format = act->dta_rec.dtrd_format) != 0) 11749 dtrace_format_remove(ecb->dte_state, format); 11750 11751 if ((dp = act->dta_difo) != NULL) 11752 dtrace_difo_release(dp, vstate); 11753 11754 if (DTRACEACT_ISAGG(act->dta_kind)) { 11755 dtrace_ecb_aggregation_destroy(ecb, act); 11756 } else { 11757 kmem_free(act, sizeof (dtrace_action_t)); 11758 } 11759 } 11760 } 11761 11762 ecb->dte_action = NULL; 11763 ecb->dte_action_last = NULL; 11764 ecb->dte_size = 0; 11765 } 11766 11767 static void 11768 dtrace_ecb_disable(dtrace_ecb_t *ecb) 11769 { 11770 /* 11771 * We disable the ECB by removing it from its probe. 11772 */ 11773 dtrace_ecb_t *pecb, *prev = NULL; 11774 dtrace_probe_t *probe = ecb->dte_probe; 11775 11776 ASSERT(MUTEX_HELD(&dtrace_lock)); 11777 11778 if (probe == NULL) { 11779 /* 11780 * This is the NULL probe; there is nothing to disable. 11781 */ 11782 return; 11783 } 11784 11785 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 11786 if (pecb == ecb) 11787 break; 11788 prev = pecb; 11789 } 11790 11791 ASSERT(pecb != NULL); 11792 11793 if (prev == NULL) { 11794 probe->dtpr_ecb = ecb->dte_next; 11795 } else { 11796 prev->dte_next = ecb->dte_next; 11797 } 11798 11799 if (ecb == probe->dtpr_ecb_last) { 11800 ASSERT(ecb->dte_next == NULL); 11801 probe->dtpr_ecb_last = prev; 11802 } 11803 11804 /* 11805 * The ECB has been disconnected from the probe; now sync to assure 11806 * that all CPUs have seen the change before returning. 11807 */ 11808 dtrace_sync(); 11809 11810 if (probe->dtpr_ecb == NULL) { 11811 /* 11812 * That was the last ECB on the probe; clear the predicate 11813 * cache ID for the probe, disable it and sync one more time 11814 * to assure that we'll never hit it again. 
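		 * (The second dtrace_sync() is what makes this safe: once
		 * it returns, no CPU is still executing dtrace_probe() with
		 * a stale view of the now-disabled probe.)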
11815 */ 11816 dtrace_provider_t *prov = probe->dtpr_provider; 11817 11818 ASSERT(ecb->dte_next == NULL); 11819 ASSERT(probe->dtpr_ecb_last == NULL); 11820 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 11821 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 11822 probe->dtpr_id, probe->dtpr_arg); 11823 dtrace_sync(); 11824 } else { 11825 /* 11826 * There is at least one ECB remaining on the probe. If there 11827 * is _exactly_ one, set the probe's predicate cache ID to be 11828 * the predicate cache ID of the remaining ECB. 11829 */ 11830 ASSERT(probe->dtpr_ecb_last != NULL); 11831 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 11832 11833 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 11834 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 11835 11836 ASSERT(probe->dtpr_ecb->dte_next == NULL); 11837 11838 if (p != NULL) 11839 probe->dtpr_predcache = p->dtp_cacheid; 11840 } 11841 11842 ecb->dte_next = NULL; 11843 } 11844 } 11845 11846 static void 11847 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 11848 { 11849 dtrace_state_t *state = ecb->dte_state; 11850 dtrace_vstate_t *vstate = &state->dts_vstate; 11851 dtrace_predicate_t *pred; 11852 dtrace_epid_t epid = ecb->dte_epid; 11853 11854 ASSERT(MUTEX_HELD(&dtrace_lock)); 11855 ASSERT(ecb->dte_next == NULL); 11856 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 11857 11858 if ((pred = ecb->dte_predicate) != NULL) 11859 dtrace_predicate_release(pred, vstate); 11860 11861 dtrace_ecb_action_remove(ecb); 11862 11863 ASSERT(state->dts_ecbs[epid - 1] == ecb); 11864 state->dts_ecbs[epid - 1] = NULL; 11865 11866 kmem_free(ecb, sizeof (dtrace_ecb_t)); 11867 } 11868 11869 static dtrace_ecb_t * 11870 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 11871 dtrace_enabling_t *enab) 11872 { 11873 dtrace_ecb_t *ecb; 11874 dtrace_predicate_t *pred; 11875 dtrace_actdesc_t *act; 11876 dtrace_provider_t *prov; 11877 dtrace_ecbdesc_t *desc = enab->dten_current; 11878 11879 ASSERT(MUTEX_HELD(&dtrace_lock)); 11880 ASSERT(state != NULL); 11881 11882 ecb = dtrace_ecb_add(state, probe); 11883 ecb->dte_uarg = desc->dted_uarg; 11884 11885 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 11886 dtrace_predicate_hold(pred); 11887 ecb->dte_predicate = pred; 11888 } 11889 11890 if (probe != NULL) { 11891 /* 11892 * If the provider shows more leg than the consumer is old 11893 * enough to see, we need to enable the appropriate implicit 11894 * predicate bits to prevent the ecb from activating at 11895 * revealing times. 11896 * 11897 * Providers specifying DTRACE_PRIV_USER at register time 11898 * are stating that they need the /proc-style privilege 11899 * model to be enforced, and this is what DTRACE_COND_OWNER 11900 * and DTRACE_COND_ZONEOWNER will then do at probe time. 11901 */ 11902 prov = probe->dtpr_provider; 11903 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 11904 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11905 ecb->dte_cond |= DTRACE_COND_OWNER; 11906 11907 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 11908 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11909 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 11910 11911 /* 11912 * If the provider shows us kernel innards and the user 11913 * is lacking sufficient privilege, enable the 11914 * DTRACE_COND_USERMODE implicit predicate. 
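		 * (Roughly: such a consumer may still enable the probe, but
		 * the ECB will only be processed for firings that occur
		 * while the CPU is executing in user mode.)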
11915 */ 11916 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 11917 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 11918 ecb->dte_cond |= DTRACE_COND_USERMODE; 11919 } 11920 11921 if (dtrace_ecb_create_cache != NULL) { 11922 /* 11923 * If we have a cached ecb, we'll use its action list instead 11924 * of creating our own (saving both time and space). 11925 */ 11926 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 11927 dtrace_action_t *act = cached->dte_action; 11928 11929 if (act != NULL) { 11930 ASSERT(act->dta_refcnt > 0); 11931 act->dta_refcnt++; 11932 ecb->dte_action = act; 11933 ecb->dte_action_last = cached->dte_action_last; 11934 ecb->dte_needed = cached->dte_needed; 11935 ecb->dte_size = cached->dte_size; 11936 ecb->dte_alignment = cached->dte_alignment; 11937 } 11938 11939 return (ecb); 11940 } 11941 11942 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 11943 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 11944 dtrace_ecb_destroy(ecb); 11945 return (NULL); 11946 } 11947 } 11948 11949 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) { 11950 dtrace_ecb_destroy(ecb); 11951 return (NULL); 11952 } 11953 11954 return (dtrace_ecb_create_cache = ecb); 11955 } 11956 11957 static int 11958 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 11959 { 11960 dtrace_ecb_t *ecb; 11961 dtrace_enabling_t *enab = arg; 11962 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 11963 11964 ASSERT(state != NULL); 11965 11966 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 11967 /* 11968 * This probe was created in a generation for which this 11969 * enabling has previously created ECBs; we don't want to 11970 * enable it again, so just kick out. 11971 */ 11972 return (DTRACE_MATCH_NEXT); 11973 } 11974 11975 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 11976 return (DTRACE_MATCH_DONE); 11977 11978 dtrace_ecb_enable(ecb); 11979 return (DTRACE_MATCH_NEXT); 11980 } 11981 11982 static dtrace_ecb_t * 11983 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 11984 { 11985 dtrace_ecb_t *ecb; 11986 11987 ASSERT(MUTEX_HELD(&dtrace_lock)); 11988 11989 if (id == 0 || id > state->dts_necbs) 11990 return (NULL); 11991 11992 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 11993 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 11994 11995 return (state->dts_ecbs[id - 1]); 11996 } 11997 11998 static dtrace_aggregation_t * 11999 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 12000 { 12001 dtrace_aggregation_t *agg; 12002 12003 ASSERT(MUTEX_HELD(&dtrace_lock)); 12004 12005 if (id == 0 || id > state->dts_naggregations) 12006 return (NULL); 12007 12008 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 12009 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 12010 agg->dtag_id == id); 12011 12012 return (state->dts_aggregations[id - 1]); 12013 } 12014 12015 /* 12016 * DTrace Buffer Functions 12017 * 12018 * The following functions manipulate DTrace buffers. Most of these functions 12019 * are called in the context of establishing or processing consumer state; 12020 * exceptions are explicitly noted. 12021 */ 12022 12023 /* 12024 * Note: called from cross call context. This function switches the two 12025 * buffers on a given CPU. 
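 * ("xamot" is "tomax" in the mirror: dtb_tomax is the buffer into which
 * records are currently written, dtb_xamot its inactive twin, and the
 * switch simply exchanges the two.)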
 * The atomicity of this operation is assured by
 * disabling interrupts while the actual switch takes place; the disabling of
 * interrupts serializes the execution with any execution of dtrace_probe() on
 * the same CPU.
 */
static void
dtrace_buffer_switch(dtrace_buffer_t *buf)
{
	caddr_t tomax = buf->dtb_tomax;
	caddr_t xamot = buf->dtb_xamot;
	dtrace_icookie_t cookie;
	hrtime_t now;

	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));

	cookie = dtrace_interrupt_disable();
	now = dtrace_gethrtime();
	buf->dtb_tomax = xamot;
	buf->dtb_xamot = tomax;
	buf->dtb_xamot_drops = buf->dtb_drops;
	buf->dtb_xamot_offset = buf->dtb_offset;
	buf->dtb_xamot_errors = buf->dtb_errors;
	buf->dtb_xamot_flags = buf->dtb_flags;
	buf->dtb_offset = 0;
	buf->dtb_drops = 0;
	buf->dtb_errors = 0;
	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
	buf->dtb_interval = now - buf->dtb_switched;
	buf->dtb_switched = now;
	dtrace_interrupt_enable(cookie);
}

/*
 * Note: called from cross call context.  This function activates a buffer
 * on a CPU.  As with dtrace_buffer_switch(), the atomicity of the operation
 * is guaranteed by the disabling of interrupts.
 */
static void
dtrace_buffer_activate(dtrace_state_t *state)
{
	dtrace_buffer_t *buf;
	dtrace_icookie_t cookie = dtrace_interrupt_disable();

	buf = &state->dts_buffer[curcpu];

	if (buf->dtb_tomax != NULL) {
		/*
		 * We might like to assert that the buffer is marked inactive,
		 * but this isn't necessarily true:  the buffer for the CPU
		 * that processes the BEGIN probe has its buffer activated
		 * manually.  In this case, we take the (harmless) action of
		 * re-clearing the INACTIVE bit.
		 */
		buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
	}

	dtrace_interrupt_enable(cookie);
}

#ifdef __FreeBSD__
/*
 * Activate the specified per-CPU buffer.  This is used instead of
 * dtrace_buffer_activate() when APs have not yet started, i.e. when
 * activating anonymous state.
 */
static void
dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu)
{

	if (state->dts_buffer[cpu].dtb_tomax != NULL)
		state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
}
#endif

static int
dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
    processorid_t cpu, int *factor)
{
#ifdef illumos
	cpu_t *cp;
#endif
	dtrace_buffer_t *buf;
	int allocated = 0, desired = 0;

#ifdef illumos
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&dtrace_lock));

	*factor = 1;

	if (size > dtrace_nonroot_maxsize &&
	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
		return (EFBIG);

	cp = cpu_list;

	do {
		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
			continue;

		buf = &bufs[cp->cpu_id];

		/*
		 * If there is already a buffer allocated for this CPU, it
		 * is only possible that this is a DR event.
		 * In this case, the buffer size must match our specified
		 * size.
		 */
		if (buf->dtb_tomax != NULL) {
			ASSERT(buf->dtb_size == size);
			continue;
		}

		ASSERT(buf->dtb_xamot == NULL);

		if ((buf->dtb_tomax = kmem_zalloc(size,
		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
			goto err;

		buf->dtb_size = size;
		buf->dtb_flags = flags;
		buf->dtb_offset = 0;
		buf->dtb_drops = 0;

		if (flags & DTRACEBUF_NOSWITCH)
			continue;

		if ((buf->dtb_xamot = kmem_zalloc(size,
		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
			goto err;
	} while ((cp = cp->cpu_next) != cpu_list);

	return (0);

err:
	cp = cpu_list;

	do {
		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
			continue;

		buf = &bufs[cp->cpu_id];
		desired += 2;

		if (buf->dtb_xamot != NULL) {
			ASSERT(buf->dtb_tomax != NULL);
			ASSERT(buf->dtb_size == size);
			kmem_free(buf->dtb_xamot, size);
			allocated++;
		}

		if (buf->dtb_tomax != NULL) {
			ASSERT(buf->dtb_size == size);
			kmem_free(buf->dtb_tomax, size);
			allocated++;
		}

		buf->dtb_tomax = NULL;
		buf->dtb_xamot = NULL;
		buf->dtb_size = 0;
	} while ((cp = cp->cpu_next) != cpu_list);
#else
	int i;

	*factor = 1;
#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
    defined(__mips__) || defined(__powerpc__) || defined(__riscv)
	/*
	 * FreeBSD isn't good at limiting the amount of memory we
	 * ask to malloc, so let's place a limit here before trying
	 * to do something that might well end in tears at bedtime.
	 */
	if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
		return (ENOMEM);
#endif

	ASSERT(MUTEX_HELD(&dtrace_lock));
	CPU_FOREACH(i) {
		if (cpu != DTRACE_CPUALL && cpu != i)
			continue;

		buf = &bufs[i];

		/*
		 * If there is already a buffer allocated for this CPU, it
		 * is only possible that this is a DR event.  In this case,
		 * the buffer size must match our specified size.
		 */
		if (buf->dtb_tomax != NULL) {
			ASSERT(buf->dtb_size == size);
			continue;
		}

		ASSERT(buf->dtb_xamot == NULL);

		if ((buf->dtb_tomax = kmem_zalloc(size,
		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
			goto err;

		buf->dtb_size = size;
		buf->dtb_flags = flags;
		buf->dtb_offset = 0;
		buf->dtb_drops = 0;

		if (flags & DTRACEBUF_NOSWITCH)
			continue;

		if ((buf->dtb_xamot = kmem_zalloc(size,
		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
			goto err;
	}

	return (0);

err:
	/*
	 * Error allocating memory, so free the buffers that were
	 * allocated before the failed allocation.
	 */
	CPU_FOREACH(i) {
		if (cpu != DTRACE_CPUALL && cpu != i)
			continue;

		buf = &bufs[i];
		desired += 2;

		if (buf->dtb_xamot != NULL) {
			ASSERT(buf->dtb_tomax != NULL);
			ASSERT(buf->dtb_size == size);
			kmem_free(buf->dtb_xamot, size);
			allocated++;
		}

		if (buf->dtb_tomax != NULL) {
			ASSERT(buf->dtb_size == size);
			kmem_free(buf->dtb_tomax, size);
			allocated++;
		}

		buf->dtb_tomax = NULL;
		buf->dtb_xamot = NULL;
		buf->dtb_size = 0;
	}
#endif
	*factor = desired / (allocated > 0 ?
allocated : 1); 12270 12271 return (ENOMEM); 12272 } 12273 12274 /* 12275 * Note: called from probe context. This function just increments the drop 12276 * count on a buffer. It has been made a function to allow for the 12277 * possibility of understanding the source of mysterious drop counts. (A 12278 * problem for which one may be particularly disappointed that DTrace cannot 12279 * be used to understand DTrace.) 12280 */ 12281 static void 12282 dtrace_buffer_drop(dtrace_buffer_t *buf) 12283 { 12284 buf->dtb_drops++; 12285 } 12286 12287 /* 12288 * Note: called from probe context. This function is called to reserve space 12289 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 12290 * mstate. Returns the new offset in the buffer, or a negative value if an 12291 * error has occurred. 12292 */ 12293 static intptr_t 12294 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 12295 dtrace_state_t *state, dtrace_mstate_t *mstate) 12296 { 12297 intptr_t offs = buf->dtb_offset, soffs; 12298 intptr_t woffs; 12299 caddr_t tomax; 12300 size_t total; 12301 12302 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 12303 return (-1); 12304 12305 if ((tomax = buf->dtb_tomax) == NULL) { 12306 dtrace_buffer_drop(buf); 12307 return (-1); 12308 } 12309 12310 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 12311 while (offs & (align - 1)) { 12312 /* 12313 * Assert that our alignment is off by a number which 12314 * is itself sizeof (uint32_t) aligned. 12315 */ 12316 ASSERT(!((align - (offs & (align - 1))) & 12317 (sizeof (uint32_t) - 1))); 12318 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12319 offs += sizeof (uint32_t); 12320 } 12321 12322 if ((soffs = offs + needed) > buf->dtb_size) { 12323 dtrace_buffer_drop(buf); 12324 return (-1); 12325 } 12326 12327 if (mstate == NULL) 12328 return (offs); 12329 12330 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 12331 mstate->dtms_scratch_size = buf->dtb_size - soffs; 12332 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12333 12334 return (offs); 12335 } 12336 12337 if (buf->dtb_flags & DTRACEBUF_FILL) { 12338 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 12339 (buf->dtb_flags & DTRACEBUF_FULL)) 12340 return (-1); 12341 goto out; 12342 } 12343 12344 total = needed + (offs & (align - 1)); 12345 12346 /* 12347 * For a ring buffer, life is quite a bit more complicated. Before 12348 * we can store any padding, we need to adjust our wrapping offset. 12349 * (If we've never before wrapped or we're not about to, no adjustment 12350 * is required.) 12351 */ 12352 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 12353 offs + total > buf->dtb_size) { 12354 woffs = buf->dtb_xamot_offset; 12355 12356 if (offs + total > buf->dtb_size) { 12357 /* 12358 * We can't fit in the end of the buffer. First, a 12359 * sanity check that we can fit in the buffer at all. 12360 */ 12361 if (total > buf->dtb_size) { 12362 dtrace_buffer_drop(buf); 12363 return (-1); 12364 } 12365 12366 /* 12367 * We're going to be storing at the top of the buffer, 12368 * so now we need to deal with the wrapped offset. We 12369 * only reset our wrapped offset to 0 if it is 12370 * currently greater than the current offset. If it 12371 * is less than the current offset, it is because a 12372 * previous allocation induced a wrap -- but the 12373 * allocation didn't subsequently take the space due 12374 * to an error or false predicate evaluation. 
In this 12375 * case, we'll just leave the wrapped offset alone: if 12376 * the wrapped offset hasn't been advanced far enough 12377 * for this allocation, it will be adjusted in the 12378 * lower loop. 12379 */ 12380 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 12381 if (woffs >= offs) 12382 woffs = 0; 12383 } else { 12384 woffs = 0; 12385 } 12386 12387 /* 12388 * Now we know that we're going to be storing to the 12389 * top of the buffer and that there is room for us 12390 * there. We need to clear the buffer from the current 12391 * offset to the end (there may be old gunk there). 12392 */ 12393 while (offs < buf->dtb_size) 12394 tomax[offs++] = 0; 12395 12396 /* 12397 * We need to set our offset to zero. And because we 12398 * are wrapping, we need to set the bit indicating as 12399 * much. We can also adjust our needed space back 12400 * down to the space required by the ECB -- we know 12401 * that the top of the buffer is aligned. 12402 */ 12403 offs = 0; 12404 total = needed; 12405 buf->dtb_flags |= DTRACEBUF_WRAPPED; 12406 } else { 12407 /* 12408 * There is room for us in the buffer, so we simply 12409 * need to check the wrapped offset. 12410 */ 12411 if (woffs < offs) { 12412 /* 12413 * The wrapped offset is less than the offset. 12414 * This can happen if we allocated buffer space 12415 * that induced a wrap, but then we didn't 12416 * subsequently take the space due to an error 12417 * or false predicate evaluation. This is 12418 * okay; we know that _this_ allocation isn't 12419 * going to induce a wrap. We still can't 12420 * reset the wrapped offset to be zero, 12421 * however: the space may have been trashed in 12422 * the previous failed probe attempt. But at 12423 * least the wrapped offset doesn't need to 12424 * be adjusted at all... 12425 */ 12426 goto out; 12427 } 12428 } 12429 12430 while (offs + total > woffs) { 12431 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 12432 size_t size; 12433 12434 if (epid == DTRACE_EPIDNONE) { 12435 size = sizeof (uint32_t); 12436 } else { 12437 ASSERT3U(epid, <=, state->dts_necbs); 12438 ASSERT(state->dts_ecbs[epid - 1] != NULL); 12439 12440 size = state->dts_ecbs[epid - 1]->dte_size; 12441 } 12442 12443 ASSERT(woffs + size <= buf->dtb_size); 12444 ASSERT(size != 0); 12445 12446 if (woffs + size == buf->dtb_size) { 12447 /* 12448 * We've reached the end of the buffer; we want 12449 * to set the wrapped offset to 0 and break 12450 * out. However, if the offs is 0, then we're 12451 * in a strange edge-condition: the amount of 12452 * space that we want to reserve plus the size 12453 * of the record that we're overwriting is 12454 * greater than the size of the buffer. This 12455 * is problematic because if we reserve the 12456 * space but subsequently don't consume it (due 12457 * to a failed predicate or error) the wrapped 12458 * offset will be 0 -- yet the EPID at offset 0 12459 * will not be committed. This situation is 12460 * relatively easy to deal with: if we're in 12461 * this case, the buffer is indistinguishable 12462 * from one that hasn't wrapped; we need only 12463 * finish the job by clearing the wrapped bit, 12464 * explicitly setting the offset to be 0, and 12465 * zero'ing out the old data in the buffer. 
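 *
 * For example: in a 64-byte wrapped buffer with offs == 0, total ==
 * 60, and old records of 16, 16 and 32 bytes starting at offset 0,
 * the scan stops on the 32-byte record that ends exactly at the
 * buffer's end; we clear DTRACEBUF_WRAPPED, zero the bytes in
 * [60, 64), and proceed as though the buffer had never wrapped.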
12466 */ 12467 if (offs == 0) { 12468 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 12469 buf->dtb_offset = 0; 12470 woffs = total; 12471 12472 while (woffs < buf->dtb_size) 12473 tomax[woffs++] = 0; 12474 } 12475 12476 woffs = 0; 12477 break; 12478 } 12479 12480 woffs += size; 12481 } 12482 12483 /* 12484 * We have a wrapped offset. It may be that the wrapped offset 12485 * has become zero -- that's okay. 12486 */ 12487 buf->dtb_xamot_offset = woffs; 12488 } 12489 12490 out: 12491 /* 12492 * Now we can plow the buffer with any necessary padding. 12493 */ 12494 while (offs & (align - 1)) { 12495 /* 12496 * Assert that our alignment is off by a number which 12497 * is itself sizeof (uint32_t) aligned. 12498 */ 12499 ASSERT(!((align - (offs & (align - 1))) & 12500 (sizeof (uint32_t) - 1))); 12501 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12502 offs += sizeof (uint32_t); 12503 } 12504 12505 if (buf->dtb_flags & DTRACEBUF_FILL) { 12506 if (offs + needed > buf->dtb_size - state->dts_reserve) { 12507 buf->dtb_flags |= DTRACEBUF_FULL; 12508 return (-1); 12509 } 12510 } 12511 12512 if (mstate == NULL) 12513 return (offs); 12514 12515 /* 12516 * For ring buffers and fill buffers, the scratch space is always 12517 * the inactive buffer. 12518 */ 12519 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 12520 mstate->dtms_scratch_size = buf->dtb_size; 12521 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12522 12523 return (offs); 12524 } 12525 12526 static void 12527 dtrace_buffer_polish(dtrace_buffer_t *buf) 12528 { 12529 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 12530 ASSERT(MUTEX_HELD(&dtrace_lock)); 12531 12532 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 12533 return; 12534 12535 /* 12536 * We need to polish the ring buffer. There are three cases: 12537 * 12538 * - The first (and presumably most common) is that there is no gap 12539 * between the buffer offset and the wrapped offset. In this case, 12540 * there is nothing in the buffer that isn't valid data; we can 12541 * mark the buffer as polished and return. 12542 * 12543 * - The second (less common than the first but still more common 12544 * than the third) is that there is a gap between the buffer offset 12545 * and the wrapped offset, and the wrapped offset is larger than the 12546 * buffer offset. This can happen because of an alignment issue, or 12547 * can happen because of a call to dtrace_buffer_reserve() that 12548 * didn't subsequently consume the buffer space. In this case, 12549 * we need to zero the data from the buffer offset to the wrapped 12550 * offset. 12551 * 12552 * - The third (and least common) is that there is a gap between the 12553 * buffer offset and the wrapped offset, but the wrapped offset is 12554 * _less_ than the buffer offset. This can only happen because a 12555 * call to dtrace_buffer_reserve() induced a wrap, but the space 12556 * was not subsequently consumed. In this case, we need to zero the 12557 * space from the offset to the end of the buffer _and_ from the 12558 * top of the buffer to the wrapped offset. 
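 *
 * To make the latter two cases concrete, consider a 512-byte buffer:
 * if the buffer offset is 64 and the wrapped offset is 320 (the
 * second case), the bytes in [64, 320) are zeroed; if the buffer
 * offset is 320 and the wrapped offset is 64 (the third case), the
 * bytes in [320, 512) and [0, 64) are zeroed.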
12559 */ 12560 if (buf->dtb_offset < buf->dtb_xamot_offset) { 12561 bzero(buf->dtb_tomax + buf->dtb_offset, 12562 buf->dtb_xamot_offset - buf->dtb_offset); 12563 } 12564 12565 if (buf->dtb_offset > buf->dtb_xamot_offset) { 12566 bzero(buf->dtb_tomax + buf->dtb_offset, 12567 buf->dtb_size - buf->dtb_offset); 12568 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 12569 } 12570 } 12571 12572 /* 12573 * This routine determines if data generated at the specified time has likely 12574 * been entirely consumed at user-level. This routine is called to determine 12575 * if an ECB on a defunct probe (but for an active enabling) can be safely 12576 * disabled and destroyed. 12577 */ 12578 static int 12579 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 12580 { 12581 int i; 12582 12583 for (i = 0; i < NCPU; i++) { 12584 dtrace_buffer_t *buf = &bufs[i]; 12585 12586 if (buf->dtb_size == 0) 12587 continue; 12588 12589 if (buf->dtb_flags & DTRACEBUF_RING) 12590 return (0); 12591 12592 if (!buf->dtb_switched && buf->dtb_offset != 0) 12593 return (0); 12594 12595 if (buf->dtb_switched - buf->dtb_interval < when) 12596 return (0); 12597 } 12598 12599 return (1); 12600 } 12601 12602 static void 12603 dtrace_buffer_free(dtrace_buffer_t *bufs) 12604 { 12605 int i; 12606 12607 for (i = 0; i < NCPU; i++) { 12608 dtrace_buffer_t *buf = &bufs[i]; 12609 12610 if (buf->dtb_tomax == NULL) { 12611 ASSERT(buf->dtb_xamot == NULL); 12612 ASSERT(buf->dtb_size == 0); 12613 continue; 12614 } 12615 12616 if (buf->dtb_xamot != NULL) { 12617 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12618 kmem_free(buf->dtb_xamot, buf->dtb_size); 12619 } 12620 12621 kmem_free(buf->dtb_tomax, buf->dtb_size); 12622 buf->dtb_size = 0; 12623 buf->dtb_tomax = NULL; 12624 buf->dtb_xamot = NULL; 12625 } 12626 } 12627 12628 /* 12629 * DTrace Enabling Functions 12630 */ 12631 static dtrace_enabling_t * 12632 dtrace_enabling_create(dtrace_vstate_t *vstate) 12633 { 12634 dtrace_enabling_t *enab; 12635 12636 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 12637 enab->dten_vstate = vstate; 12638 12639 return (enab); 12640 } 12641 12642 static void 12643 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 12644 { 12645 dtrace_ecbdesc_t **ndesc; 12646 size_t osize, nsize; 12647 12648 /* 12649 * We can't add to enablings after we've enabled them, or after we've 12650 * retained them. 
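 *
 * (A non-zero dten_probegen indicates that the enabling has been
 * matched; dten_prev and dten_next link an enabling onto the
 * dtrace_retained list.)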
12651 */ 12652 ASSERT(enab->dten_probegen == 0); 12653 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12654 12655 if (enab->dten_ndesc < enab->dten_maxdesc) { 12656 enab->dten_desc[enab->dten_ndesc++] = ecb; 12657 return; 12658 } 12659 12660 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12661 12662 if (enab->dten_maxdesc == 0) { 12663 enab->dten_maxdesc = 1; 12664 } else { 12665 enab->dten_maxdesc <<= 1; 12666 } 12667 12668 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 12669 12670 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12671 ndesc = kmem_zalloc(nsize, KM_SLEEP); 12672 bcopy(enab->dten_desc, ndesc, osize); 12673 if (enab->dten_desc != NULL) 12674 kmem_free(enab->dten_desc, osize); 12675 12676 enab->dten_desc = ndesc; 12677 enab->dten_desc[enab->dten_ndesc++] = ecb; 12678 } 12679 12680 static void 12681 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 12682 dtrace_probedesc_t *pd) 12683 { 12684 dtrace_ecbdesc_t *new; 12685 dtrace_predicate_t *pred; 12686 dtrace_actdesc_t *act; 12687 12688 /* 12689 * We're going to create a new ECB description that matches the 12690 * specified ECB in every way, but has the specified probe description. 12691 */ 12692 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12693 12694 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 12695 dtrace_predicate_hold(pred); 12696 12697 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 12698 dtrace_actdesc_hold(act); 12699 12700 new->dted_action = ecb->dted_action; 12701 new->dted_pred = ecb->dted_pred; 12702 new->dted_probe = *pd; 12703 new->dted_uarg = ecb->dted_uarg; 12704 12705 dtrace_enabling_add(enab, new); 12706 } 12707 12708 static void 12709 dtrace_enabling_dump(dtrace_enabling_t *enab) 12710 { 12711 int i; 12712 12713 for (i = 0; i < enab->dten_ndesc; i++) { 12714 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 12715 12716 #ifdef __FreeBSD__ 12717 printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i, 12718 desc->dtpd_provider, desc->dtpd_mod, 12719 desc->dtpd_func, desc->dtpd_name); 12720 #else 12721 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 12722 desc->dtpd_provider, desc->dtpd_mod, 12723 desc->dtpd_func, desc->dtpd_name); 12724 #endif 12725 } 12726 } 12727 12728 static void 12729 dtrace_enabling_destroy(dtrace_enabling_t *enab) 12730 { 12731 int i; 12732 dtrace_ecbdesc_t *ep; 12733 dtrace_vstate_t *vstate = enab->dten_vstate; 12734 12735 ASSERT(MUTEX_HELD(&dtrace_lock)); 12736 12737 for (i = 0; i < enab->dten_ndesc; i++) { 12738 dtrace_actdesc_t *act, *next; 12739 dtrace_predicate_t *pred; 12740 12741 ep = enab->dten_desc[i]; 12742 12743 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 12744 dtrace_predicate_release(pred, vstate); 12745 12746 for (act = ep->dted_action; act != NULL; act = next) { 12747 next = act->dtad_next; 12748 dtrace_actdesc_release(act, vstate); 12749 } 12750 12751 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12752 } 12753 12754 if (enab->dten_desc != NULL) 12755 kmem_free(enab->dten_desc, 12756 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 12757 12758 /* 12759 * If this was a retained enabling, decrement the dts_nretained count 12760 * and take it off of the dtrace_retained list. 
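 *
 * (Membership on that list is indicated by a non-NULL dten_prev or
 * dten_next, or by being at the head of dtrace_retained itself.)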
12761 */ 12762 if (enab->dten_prev != NULL || enab->dten_next != NULL || 12763 dtrace_retained == enab) { 12764 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12765 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 12766 enab->dten_vstate->dtvs_state->dts_nretained--; 12767 dtrace_retained_gen++; 12768 } 12769 12770 if (enab->dten_prev == NULL) { 12771 if (dtrace_retained == enab) { 12772 dtrace_retained = enab->dten_next; 12773 12774 if (dtrace_retained != NULL) 12775 dtrace_retained->dten_prev = NULL; 12776 } 12777 } else { 12778 ASSERT(enab != dtrace_retained); 12779 ASSERT(dtrace_retained != NULL); 12780 enab->dten_prev->dten_next = enab->dten_next; 12781 } 12782 12783 if (enab->dten_next != NULL) { 12784 ASSERT(dtrace_retained != NULL); 12785 enab->dten_next->dten_prev = enab->dten_prev; 12786 } 12787 12788 kmem_free(enab, sizeof (dtrace_enabling_t)); 12789 } 12790 12791 static int 12792 dtrace_enabling_retain(dtrace_enabling_t *enab) 12793 { 12794 dtrace_state_t *state; 12795 12796 ASSERT(MUTEX_HELD(&dtrace_lock)); 12797 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12798 ASSERT(enab->dten_vstate != NULL); 12799 12800 state = enab->dten_vstate->dtvs_state; 12801 ASSERT(state != NULL); 12802 12803 /* 12804 * We only allow each state to retain dtrace_retain_max enablings. 12805 */ 12806 if (state->dts_nretained >= dtrace_retain_max) 12807 return (ENOSPC); 12808 12809 state->dts_nretained++; 12810 dtrace_retained_gen++; 12811 12812 if (dtrace_retained == NULL) { 12813 dtrace_retained = enab; 12814 return (0); 12815 } 12816 12817 enab->dten_next = dtrace_retained; 12818 dtrace_retained->dten_prev = enab; 12819 dtrace_retained = enab; 12820 12821 return (0); 12822 } 12823 12824 static int 12825 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 12826 dtrace_probedesc_t *create) 12827 { 12828 dtrace_enabling_t *new, *enab; 12829 int found = 0, err = ENOENT; 12830 12831 ASSERT(MUTEX_HELD(&dtrace_lock)); 12832 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 12833 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 12834 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 12835 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 12836 12837 new = dtrace_enabling_create(&state->dts_vstate); 12838 12839 /* 12840 * Iterate over all retained enablings, looking for enablings that 12841 * match the specified state. 12842 */ 12843 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12844 int i; 12845 12846 /* 12847 * dtvs_state can only be NULL for helper enablings -- and 12848 * helper enablings can't be retained. 12849 */ 12850 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12851 12852 if (enab->dten_vstate->dtvs_state != state) 12853 continue; 12854 12855 /* 12856 * Now iterate over each probe description; we're looking for 12857 * an exact match to the specified probe description. 12858 */ 12859 for (i = 0; i < enab->dten_ndesc; i++) { 12860 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12861 dtrace_probedesc_t *pd = &ep->dted_probe; 12862 12863 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 12864 continue; 12865 12866 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 12867 continue; 12868 12869 if (strcmp(pd->dtpd_func, match->dtpd_func)) 12870 continue; 12871 12872 if (strcmp(pd->dtpd_name, match->dtpd_name)) 12873 continue; 12874 12875 /* 12876 * We have a winning probe! Add it to our growing 12877 * enabling. 
12878 */ 12879 found = 1; 12880 dtrace_enabling_addlike(new, ep, create); 12881 } 12882 } 12883 12884 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 12885 dtrace_enabling_destroy(new); 12886 return (err); 12887 } 12888 12889 return (0); 12890 } 12891 12892 static void 12893 dtrace_enabling_retract(dtrace_state_t *state) 12894 { 12895 dtrace_enabling_t *enab, *next; 12896 12897 ASSERT(MUTEX_HELD(&dtrace_lock)); 12898 12899 /* 12900 * Iterate over all retained enablings, destroying those retained 12901 * for the specified state. 12902 */ 12903 for (enab = dtrace_retained; enab != NULL; enab = next) { 12904 next = enab->dten_next; 12905 12906 /* 12907 * dtvs_state can only be NULL for helper enablings -- and 12908 * helper enablings can't be retained. 12909 */ 12910 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12911 12912 if (enab->dten_vstate->dtvs_state == state) { 12913 ASSERT(state->dts_nretained > 0); 12914 dtrace_enabling_destroy(enab); 12915 } 12916 } 12917 12918 ASSERT(state->dts_nretained == 0); 12919 } 12920 12921 static int 12922 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 12923 { 12924 int i = 0; 12925 int matched = 0; 12926 12927 ASSERT(MUTEX_HELD(&cpu_lock)); 12928 ASSERT(MUTEX_HELD(&dtrace_lock)); 12929 12930 for (i = 0; i < enab->dten_ndesc; i++) { 12931 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12932 12933 enab->dten_current = ep; 12934 enab->dten_error = 0; 12935 12936 matched += dtrace_probe_enable(&ep->dted_probe, enab); 12937 12938 if (enab->dten_error != 0) { 12939 /* 12940 * If we get an error half-way through enabling the 12941 * probes, we kick out -- perhaps with some number of 12942 * them enabled. Leaving enabled probes enabled may 12943 * be slightly confusing for user-level, but we expect 12944 * that no one will attempt to actually drive on in 12945 * the face of such errors. If this is an anonymous 12946 * enabling (indicated with a NULL nmatched pointer), 12947 * we cmn_err() a message. We aren't expecting to 12948 * get such an error -- to the extent that it can exist at all, 12949 * it would be a result of corrupted DOF in the driver 12950 * properties. 12951 */ 12952 if (nmatched == NULL) { 12953 cmn_err(CE_WARN, "dtrace_enabling_match() " 12954 "error on %p: %d", (void *)ep, 12955 enab->dten_error); 12956 } 12957 12958 return (enab->dten_error); 12959 } 12960 } 12961 12962 enab->dten_probegen = dtrace_probegen; 12963 if (nmatched != NULL) 12964 *nmatched = matched; 12965 12966 return (0); 12967 } 12968 12969 static void 12970 dtrace_enabling_matchall(void) 12971 { 12972 dtrace_enabling_t *enab; 12973 12974 mutex_enter(&cpu_lock); 12975 mutex_enter(&dtrace_lock); 12976 12977 /* 12978 * Iterate over all retained enablings to see if any probes match 12979 * against them. We only perform this operation on enablings for which 12980 * we have sufficient permissions by virtue of being in the global zone 12981 * or in the same zone as the DTrace client. Because we can be called 12982 * after dtrace_detach() has been called, we cannot assert that there 12983 * are retained enablings. We can safely load from dtrace_retained, 12984 * however: the taskq_destroy() at the end of dtrace_detach() will 12985 * block pending our completion.
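 *
 * (The zone check below is illumos-only; on FreeBSD it is compiled
 * out, and every retained enabling is matched.)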
12986 */ 12987 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12988 #ifdef illumos 12989 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 12990 12991 if (INGLOBALZONE(curproc) || 12992 (cr != NULL && getzoneid() == crgetzoneid(cr))) 12993 #endif 12994 (void) dtrace_enabling_match(enab, NULL); 12995 } 12996 12997 mutex_exit(&dtrace_lock); 12998 mutex_exit(&cpu_lock); 12999 } 13000 13001 /* 13002 * If an enabling is to be enabled without having matched probes (that is, if 13003 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 13004 * enabling must be _primed_ by creating an ECB for every ECB description. 13005 * This must be done to assure that we know the number of speculations, the 13006 * number of aggregations, the minimum buffer size needed, etc. before we 13007 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 13008 * enabling any probes, we create ECBs for every ECB description, but with a 13009 * NULL probe -- which is exactly what this function does. 13010 */ 13011 static void 13012 dtrace_enabling_prime(dtrace_state_t *state) 13013 { 13014 dtrace_enabling_t *enab; 13015 int i; 13016 13017 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 13018 ASSERT(enab->dten_vstate->dtvs_state != NULL); 13019 13020 if (enab->dten_vstate->dtvs_state != state) 13021 continue; 13022 13023 /* 13024 * We don't want to prime an enabling more than once, lest 13025 * we allow a malicious user to induce resource exhaustion. 13026 * (The ECBs that result from priming an enabling aren't 13027 * leaked -- but they also aren't deallocated until the 13028 * consumer state is destroyed.) 13029 */ 13030 if (enab->dten_primed) 13031 continue; 13032 13033 for (i = 0; i < enab->dten_ndesc; i++) { 13034 enab->dten_current = enab->dten_desc[i]; 13035 (void) dtrace_probe_enable(NULL, enab); 13036 } 13037 13038 enab->dten_primed = 1; 13039 } 13040 } 13041 13042 /* 13043 * Called to indicate that probes should be provided due to retained 13044 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 13045 * must take an initial lap through the enablings, calling the dtps_provide() 13046 * entry point explicitly to allow for autocreated probes. 13047 */ 13048 static void 13049 dtrace_enabling_provide(dtrace_provider_t *prv) 13050 { 13051 int i, all = 0; 13052 dtrace_probedesc_t desc; 13053 dtrace_genid_t gen; 13054 13055 ASSERT(MUTEX_HELD(&dtrace_lock)); 13056 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 13057 13058 if (prv == NULL) { 13059 all = 1; 13060 prv = dtrace_provider; 13061 } 13062 13063 do { 13064 dtrace_enabling_t *enab; 13065 void *parg = prv->dtpv_arg; 13066 13067 retry: 13068 gen = dtrace_retained_gen; 13069 for (enab = dtrace_retained; enab != NULL; 13070 enab = enab->dten_next) { 13071 for (i = 0; i < enab->dten_ndesc; i++) { 13072 desc = enab->dten_desc[i]->dted_probe; 13073 mutex_exit(&dtrace_lock); 13074 prv->dtpv_pops.dtps_provide(parg, &desc); 13075 mutex_enter(&dtrace_lock); 13076 /* 13077 * Process the retained enablings again if 13078 * they have changed while we weren't holding 13079 * dtrace_lock. 13080 */ 13081 if (gen != dtrace_retained_gen) 13082 goto retry; 13083 } 13084 } 13085 } while (all && (prv = prv->dtpv_next) != NULL); 13086 13087 mutex_exit(&dtrace_lock); 13088 dtrace_probe_provide(NULL, all ? NULL : prv); 13089 mutex_enter(&dtrace_lock); 13090 } 13091 13092 /* 13093 * Called to reap ECBs that are attached to probes from defunct providers.
13094 */ 13095 static void 13096 dtrace_enabling_reap(void) 13097 { 13098 dtrace_provider_t *prov; 13099 dtrace_probe_t *probe; 13100 dtrace_ecb_t *ecb; 13101 hrtime_t when; 13102 int i; 13103 13104 mutex_enter(&cpu_lock); 13105 mutex_enter(&dtrace_lock); 13106 13107 for (i = 0; i < dtrace_nprobes; i++) { 13108 if ((probe = dtrace_probes[i]) == NULL) 13109 continue; 13110 13111 if (probe->dtpr_ecb == NULL) 13112 continue; 13113 13114 prov = probe->dtpr_provider; 13115 13116 if ((when = prov->dtpv_defunct) == 0) 13117 continue; 13118 13119 /* 13120 * We have ECBs on a defunct provider: we want to reap these 13121 * ECBs to allow the provider to unregister. The destruction 13122 * of these ECBs must be done carefully: if we destroy the ECB 13123 * and the consumer later wishes to consume an EPID that 13124 * corresponds to the destroyed ECB (and if the EPID metadata 13125 * has not been previously consumed), the consumer will abort 13126 * processing on the unknown EPID. To reduce (but not, sadly, 13127 * eliminate) the possibility of this, we will only destroy an 13128 * ECB for a defunct provider if, for the state that 13129 * corresponds to the ECB: 13130 * 13131 * (a) There is no speculative tracing (which can effectively 13132 * cache an EPID for an arbitrary amount of time). 13133 * 13134 * (b) The principal buffers have been switched twice since the 13135 * provider became defunct. 13136 * 13137 * (c) The aggregation buffers are of zero size or have been 13138 * switched twice since the provider became defunct. 13139 * 13140 * We use dts_speculates to determine (a) and call a function 13141 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 13142 * that as soon as we've been unable to destroy one of the ECBs 13143 * associated with the probe, we quit trying -- reaping is only 13144 * fruitful in as much as we can destroy all ECBs associated 13145 * with the defunct provider's probes. 13146 */ 13147 while ((ecb = probe->dtpr_ecb) != NULL) { 13148 dtrace_state_t *state = ecb->dte_state; 13149 dtrace_buffer_t *buf = state->dts_buffer; 13150 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 13151 13152 if (state->dts_speculates) 13153 break; 13154 13155 if (!dtrace_buffer_consumed(buf, when)) 13156 break; 13157 13158 if (!dtrace_buffer_consumed(aggbuf, when)) 13159 break; 13160 13161 dtrace_ecb_disable(ecb); 13162 ASSERT(probe->dtpr_ecb != ecb); 13163 dtrace_ecb_destroy(ecb); 13164 } 13165 } 13166 13167 mutex_exit(&dtrace_lock); 13168 mutex_exit(&cpu_lock); 13169 } 13170 13171 /* 13172 * DTrace DOF Functions 13173 */ 13174 /*ARGSUSED*/ 13175 static void 13176 dtrace_dof_error(dof_hdr_t *dof, const char *str) 13177 { 13178 if (dtrace_err_verbose) 13179 cmn_err(CE_WARN, "failed to process DOF: %s", str); 13180 13181 #ifdef DTRACE_ERRDEBUG 13182 dtrace_errdebug(str); 13183 #endif 13184 } 13185 13186 /* 13187 * Create DOF out of a currently enabled state. Right now, we only create 13188 * DOF containing the run-time options -- but this could be expanded to create 13189 * complete DOF representing the enabled state. 
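 *
 * A sketch of the layout generated below (the offsets follow from the
 * sizes used in the code):
 *
 *	+-------------------------------+ 0
 *	| dof_hdr_t                     |
 *	+-------------------------------+ sizeof (dof_hdr_t)
 *	| dof_sec_t (DOF_SECT_OPTDESC)  |
 *	+-------------------------------+ the above plus dof_sec_t,
 *	| dof_optdesc_t[DTRACEOPT_MAX]  |  rounded up to uint64_t
 *	+-------------------------------+ len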
13190 */ 13191 static dof_hdr_t * 13192 dtrace_dof_create(dtrace_state_t *state) 13193 { 13194 dof_hdr_t *dof; 13195 dof_sec_t *sec; 13196 dof_optdesc_t *opt; 13197 int i, len = sizeof (dof_hdr_t) + 13198 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 13199 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13200 13201 ASSERT(MUTEX_HELD(&dtrace_lock)); 13202 13203 dof = kmem_zalloc(len, KM_SLEEP); 13204 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 13205 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 13206 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 13207 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 13208 13209 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 13210 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 13211 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 13212 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 13213 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 13214 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 13215 13216 dof->dofh_flags = 0; 13217 dof->dofh_hdrsize = sizeof (dof_hdr_t); 13218 dof->dofh_secsize = sizeof (dof_sec_t); 13219 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 13220 dof->dofh_secoff = sizeof (dof_hdr_t); 13221 dof->dofh_loadsz = len; 13222 dof->dofh_filesz = len; 13223 dof->dofh_pad = 0; 13224 13225 /* 13226 * Fill in the option section header... 13227 */ 13228 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 13229 sec->dofs_type = DOF_SECT_OPTDESC; 13230 sec->dofs_align = sizeof (uint64_t); 13231 sec->dofs_flags = DOF_SECF_LOAD; 13232 sec->dofs_entsize = sizeof (dof_optdesc_t); 13233 13234 opt = (dof_optdesc_t *)((uintptr_t)sec + 13235 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 13236 13237 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 13238 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13239 13240 for (i = 0; i < DTRACEOPT_MAX; i++) { 13241 opt[i].dofo_option = i; 13242 opt[i].dofo_strtab = DOF_SECIDX_NONE; 13243 opt[i].dofo_value = state->dts_options[i]; 13244 } 13245 13246 return (dof); 13247 } 13248 13249 static dof_hdr_t * 13250 dtrace_dof_copyin(uintptr_t uarg, int *errp) 13251 { 13252 dof_hdr_t hdr, *dof; 13253 13254 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13255 13256 /* 13257 * First, we're going to copyin() the sizeof (dof_hdr_t). 13258 */ 13259 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 13260 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13261 *errp = EFAULT; 13262 return (NULL); 13263 } 13264 13265 /* 13266 * Now we'll allocate the entire DOF and copy it in -- provided 13267 * that the length isn't outrageous. 13268 */ 13269 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13270 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13271 *errp = E2BIG; 13272 return (NULL); 13273 } 13274 13275 if (hdr.dofh_loadsz < sizeof (hdr)) { 13276 dtrace_dof_error(&hdr, "invalid load size"); 13277 *errp = EINVAL; 13278 return (NULL); 13279 } 13280 13281 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 13282 13283 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 13284 dof->dofh_loadsz != hdr.dofh_loadsz) { 13285 kmem_free(dof, hdr.dofh_loadsz); 13286 *errp = EFAULT; 13287 return (NULL); 13288 } 13289 13290 return (dof); 13291 } 13292 13293 #ifdef __FreeBSD__ 13294 static dof_hdr_t * 13295 dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp) 13296 { 13297 dof_hdr_t hdr, *dof; 13298 struct thread *td; 13299 size_t loadsz; 13300 13301 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13302 13303 td = curthread; 13304 13305 /* 13306 * First, we're going to copyin() the sizeof (dof_hdr_t). 
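 *
 * (The full DOF is then read and its dofh_loadsz re-checked against
 * the header read here: the process can modify its address space
 * between the two reads, and a mismatch is treated as EFAULT.)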
13307 */ 13308 if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) { 13309 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13310 *errp = EFAULT; 13311 return (NULL); 13312 } 13313 13314 /* 13315 * Now we'll allocate the entire DOF and copy it in -- provided 13316 * that the length isn't outrageous. 13317 */ 13318 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13319 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13320 *errp = E2BIG; 13321 return (NULL); 13322 } 13323 loadsz = (size_t)hdr.dofh_loadsz; 13324 13325 if (loadsz < sizeof (hdr)) { 13326 dtrace_dof_error(&hdr, "invalid load size"); 13327 *errp = EINVAL; 13328 return (NULL); 13329 } 13330 13331 dof = kmem_alloc(loadsz, KM_SLEEP); 13332 13333 if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz || 13334 dof->dofh_loadsz != loadsz) { 13335 kmem_free(dof, hdr.dofh_loadsz); 13336 *errp = EFAULT; 13337 return (NULL); 13338 } 13339 13340 return (dof); 13341 } 13342 13343 static __inline uchar_t 13344 dtrace_dof_char(char c) 13345 { 13346 13347 switch (c) { 13348 case '0': 13349 case '1': 13350 case '2': 13351 case '3': 13352 case '4': 13353 case '5': 13354 case '6': 13355 case '7': 13356 case '8': 13357 case '9': 13358 return (c - '0'); 13359 case 'A': 13360 case 'B': 13361 case 'C': 13362 case 'D': 13363 case 'E': 13364 case 'F': 13365 return (c - 'A' + 10); 13366 case 'a': 13367 case 'b': 13368 case 'c': 13369 case 'd': 13370 case 'e': 13371 case 'f': 13372 return (c - 'a' + 10); 13373 } 13374 /* Should not reach here. */ 13375 return (UCHAR_MAX); 13376 } 13377 #endif /* __FreeBSD__ */ 13378 13379 static dof_hdr_t * 13380 dtrace_dof_property(const char *name) 13381 { 13382 #ifdef __FreeBSD__ 13383 uint8_t *dofbuf; 13384 u_char *data, *eol; 13385 caddr_t doffile; 13386 size_t bytes, len, i; 13387 dof_hdr_t *dof; 13388 u_char c1, c2; 13389 13390 dof = NULL; 13391 13392 doffile = preload_search_by_type("dtrace_dof"); 13393 if (doffile == NULL) 13394 return (NULL); 13395 13396 data = preload_fetch_addr(doffile); 13397 len = preload_fetch_size(doffile); 13398 for (;;) { 13399 /* Look for the end of the line. All lines end in a newline. */ 13400 eol = memchr(data, '\n', len); 13401 if (eol == NULL) 13402 return (NULL); 13403 13404 if (strncmp(name, data, strlen(name)) == 0) 13405 break; 13406 13407 eol++; /* skip past the newline */ 13408 len -= eol - data; 13409 data = eol; 13410 } 13411 13412 /* We've found the data corresponding to the specified key. */ 13413 13414 data += strlen(name) + 1; /* skip past the '=' */ 13415 len = eol - data; 13416 if (len % 2 != 0) { 13417 dtrace_dof_error(NULL, "invalid DOF encoding length"); 13418 goto doferr; 13419 } 13420 bytes = len / 2; 13421 if (bytes < sizeof(dof_hdr_t)) { 13422 dtrace_dof_error(NULL, "truncated header"); 13423 goto doferr; 13424 } 13425 13426 /* 13427 * Each byte is represented by the two ASCII characters in its hex 13428 * representation. 
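 *
 * For example, the two characters "4a" decode to
 * dtrace_dof_char('4') * 16 + dtrace_dof_char('a'), i.e.
 * 4 * 16 + 10 == 0x4a.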
13429 */ 13430 dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK); dof = (dof_hdr_t *)dofbuf; 13431 for (i = 0; i < bytes; i++) { 13432 c1 = dtrace_dof_char(data[i * 2]); 13433 c2 = dtrace_dof_char(data[i * 2 + 1]); 13434 if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) { 13435 dtrace_dof_error(NULL, "invalid hex char in DOF"); 13436 goto doferr; 13437 } 13438 dofbuf[i] = c1 * 16 + c2; 13439 } 13440 13441 13442 if (bytes < dof->dofh_loadsz) { 13443 dtrace_dof_error(NULL, "truncated DOF"); 13444 goto doferr; 13445 } 13446 13447 if (dof->dofh_loadsz >= dtrace_dof_maxsize) { 13448 dtrace_dof_error(NULL, "oversized DOF"); 13449 goto doferr; 13450 } 13451 13452 return (dof); 13453 13454 doferr: 13455 free(dof, M_SOLARIS); 13456 return (NULL); 13457 #else /* __FreeBSD__ */ 13458 uchar_t *buf; 13459 uint64_t loadsz; 13460 unsigned int len, i; 13461 dof_hdr_t *dof; 13462 13463 /* 13464 * Unfortunately, arrays of values in .conf files are always (and 13465 * only) interpreted to be integer arrays. We must read our DOF 13466 * as an integer array, and then squeeze it into a byte array. 13467 */ 13468 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 13469 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 13470 return (NULL); 13471 13472 for (i = 0; i < len; i++) 13473 buf[i] = (uchar_t)(((int *)buf)[i]); 13474 13475 if (len < sizeof (dof_hdr_t)) { 13476 ddi_prop_free(buf); 13477 dtrace_dof_error(NULL, "truncated header"); 13478 return (NULL); 13479 } 13480 13481 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 13482 ddi_prop_free(buf); 13483 dtrace_dof_error(NULL, "truncated DOF"); 13484 return (NULL); 13485 } 13486 13487 if (loadsz >= dtrace_dof_maxsize) { 13488 ddi_prop_free(buf); 13489 dtrace_dof_error(NULL, "oversized DOF"); 13490 return (NULL); 13491 } 13492 13493 dof = kmem_alloc(loadsz, KM_SLEEP); 13494 bcopy(buf, dof, loadsz); 13495 ddi_prop_free(buf); 13496 13497 return (dof); 13498 #endif /* !__FreeBSD__ */ 13499 } 13500 13501 static void 13502 dtrace_dof_destroy(dof_hdr_t *dof) 13503 { 13504 kmem_free(dof, dof->dofh_loadsz); 13505 } 13506 13507 /* 13508 * Return the dof_sec_t pointer corresponding to a given section index. If the 13509 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 13510 * a type other than DOF_SECT_NONE is specified, the header is checked against 13511 * this type and NULL is returned if the types do not match.
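 *
 * For example, a probe description section locates its string table
 * with:
 *
 *	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB,
 *	    probe->dofp_strtab);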
13512 */ 13513 static dof_sec_t * 13514 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 13515 { 13516 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 13517 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 13518 13519 if (i >= dof->dofh_secnum) { 13520 dtrace_dof_error(dof, "referenced section index is invalid"); 13521 return (NULL); 13522 } 13523 13524 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 13525 dtrace_dof_error(dof, "referenced section is not loadable"); 13526 return (NULL); 13527 } 13528 13529 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 13530 dtrace_dof_error(dof, "referenced section is the wrong type"); 13531 return (NULL); 13532 } 13533 13534 return (sec); 13535 } 13536 13537 static dtrace_probedesc_t * 13538 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 13539 { 13540 dof_probedesc_t *probe; 13541 dof_sec_t *strtab; 13542 uintptr_t daddr = (uintptr_t)dof; 13543 uintptr_t str; 13544 size_t size; 13545 13546 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 13547 dtrace_dof_error(dof, "invalid probe section"); 13548 return (NULL); 13549 } 13550 13551 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13552 dtrace_dof_error(dof, "bad alignment in probe description"); 13553 return (NULL); 13554 } 13555 13556 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 13557 dtrace_dof_error(dof, "truncated probe description"); 13558 return (NULL); 13559 } 13560 13561 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 13562 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 13563 13564 if (strtab == NULL) 13565 return (NULL); 13566 13567 str = daddr + strtab->dofs_offset; 13568 size = strtab->dofs_size; 13569 13570 if (probe->dofp_provider >= strtab->dofs_size) { 13571 dtrace_dof_error(dof, "corrupt probe provider"); 13572 return (NULL); 13573 } 13574 13575 (void) strncpy(desc->dtpd_provider, 13576 (char *)(str + probe->dofp_provider), 13577 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 13578 13579 if (probe->dofp_mod >= strtab->dofs_size) { 13580 dtrace_dof_error(dof, "corrupt probe module"); 13581 return (NULL); 13582 } 13583 13584 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 13585 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 13586 13587 if (probe->dofp_func >= strtab->dofs_size) { 13588 dtrace_dof_error(dof, "corrupt probe function"); 13589 return (NULL); 13590 } 13591 13592 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 13593 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 13594 13595 if (probe->dofp_name >= strtab->dofs_size) { 13596 dtrace_dof_error(dof, "corrupt probe name"); 13597 return (NULL); 13598 } 13599 13600 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 13601 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 13602 13603 return (desc); 13604 } 13605 13606 static dtrace_difo_t * 13607 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13608 cred_t *cr) 13609 { 13610 dtrace_difo_t *dp; 13611 size_t ttl = 0; 13612 dof_difohdr_t *dofd; 13613 uintptr_t daddr = (uintptr_t)dof; 13614 size_t max = dtrace_difo_maxsize; 13615 int i, l, n; 13616 13617 static const struct { 13618 int section; 13619 int bufoffs; 13620 int lenoffs; 13621 int entsize; 13622 int align; 13623 const char *msg; 13624 } difo[] = { 13625 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 13626 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 13627 sizeof (dif_instr_t), "multiple DIF sections" }, 
13628 13629 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 13630 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 13631 sizeof (uint64_t), "multiple integer tables" }, 13632 13633 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 13634 offsetof(dtrace_difo_t, dtdo_strlen), 0, 13635 sizeof (char), "multiple string tables" }, 13636 13637 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 13638 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 13639 sizeof (uint_t), "multiple variable tables" }, 13640 13641 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 13642 }; 13643 13644 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 13645 dtrace_dof_error(dof, "invalid DIFO header section"); 13646 return (NULL); 13647 } 13648 13649 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13650 dtrace_dof_error(dof, "bad alignment in DIFO header"); 13651 return (NULL); 13652 } 13653 13654 if (sec->dofs_size < sizeof (dof_difohdr_t) || 13655 sec->dofs_size % sizeof (dof_secidx_t)) { 13656 dtrace_dof_error(dof, "bad size in DIFO header"); 13657 return (NULL); 13658 } 13659 13660 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13661 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 13662 13663 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 13664 dp->dtdo_rtype = dofd->dofd_rtype; 13665 13666 for (l = 0; l < n; l++) { 13667 dof_sec_t *subsec; 13668 void **bufp; 13669 uint32_t *lenp; 13670 13671 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 13672 dofd->dofd_links[l])) == NULL) 13673 goto err; /* invalid section link */ 13674 13675 if (ttl + subsec->dofs_size > max) { 13676 dtrace_dof_error(dof, "exceeds maximum size"); 13677 goto err; 13678 } 13679 13680 ttl += subsec->dofs_size; 13681 13682 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 13683 if (subsec->dofs_type != difo[i].section) 13684 continue; 13685 13686 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 13687 dtrace_dof_error(dof, "section not loaded"); 13688 goto err; 13689 } 13690 13691 if (subsec->dofs_align != difo[i].align) { 13692 dtrace_dof_error(dof, "bad alignment"); 13693 goto err; 13694 } 13695 13696 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 13697 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 13698 13699 if (*bufp != NULL) { 13700 dtrace_dof_error(dof, difo[i].msg); 13701 goto err; 13702 } 13703 13704 if (difo[i].entsize != subsec->dofs_entsize) { 13705 dtrace_dof_error(dof, "entry size mismatch"); 13706 goto err; 13707 } 13708 13709 if (subsec->dofs_entsize != 0 && 13710 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 13711 dtrace_dof_error(dof, "corrupt entry size"); 13712 goto err; 13713 } 13714 13715 *lenp = subsec->dofs_size; 13716 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 13717 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 13718 *bufp, subsec->dofs_size); 13719 13720 if (subsec->dofs_entsize != 0) 13721 *lenp /= subsec->dofs_entsize; 13722 13723 break; 13724 } 13725 13726 /* 13727 * If we encounter a loadable DIFO sub-section that is not 13728 * known to us, assume this is a broken program and fail. 13729 */ 13730 if (difo[i].section == DOF_SECT_NONE && 13731 (subsec->dofs_flags & DOF_SECF_LOAD)) { 13732 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 13733 goto err; 13734 } 13735 } 13736 13737 if (dp->dtdo_buf == NULL) { 13738 /* 13739 * We can't have a DIF object without DIF text. 
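 *
 * (Of the subsections in the table above, only a DOF_SECT_DIF
 * populates dtdo_buf, which is what is being checked for here.)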
13740 */ 13741 dtrace_dof_error(dof, "missing DIF text"); 13742 goto err; 13743 } 13744 13745 /* 13746 * Before we validate the DIF object, run through the variable table 13747 * looking for the strings -- if any of their sizes are zero, we'll set 13748 * their size to be the system-wide default string size. Note that 13749 * this should _not_ happen if the "strsize" option has been set -- 13750 * in this case, the compiler should have set the size to reflect the 13751 * setting of the option. 13752 */ 13753 for (i = 0; i < dp->dtdo_varlen; i++) { 13754 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 13755 dtrace_diftype_t *t = &v->dtdv_type; 13756 13757 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 13758 continue; 13759 13760 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 13761 t->dtdt_size = dtrace_strsize_default; 13762 } 13763 13764 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 13765 goto err; 13766 13767 dtrace_difo_init(dp, vstate); 13768 return (dp); 13769 13770 err: 13771 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 13772 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 13773 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 13774 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 13775 13776 kmem_free(dp, sizeof (dtrace_difo_t)); 13777 return (NULL); 13778 } 13779 13780 static dtrace_predicate_t * 13781 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13782 cred_t *cr) 13783 { 13784 dtrace_difo_t *dp; 13785 13786 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 13787 return (NULL); 13788 13789 return (dtrace_predicate_create(dp)); 13790 } 13791 13792 static dtrace_actdesc_t * 13793 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13794 cred_t *cr) 13795 { 13796 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 13797 dof_actdesc_t *desc; 13798 dof_sec_t *difosec; 13799 size_t offs; 13800 uintptr_t daddr = (uintptr_t)dof; 13801 uint64_t arg; 13802 dtrace_actkind_t kind; 13803 13804 if (sec->dofs_type != DOF_SECT_ACTDESC) { 13805 dtrace_dof_error(dof, "invalid action section"); 13806 return (NULL); 13807 } 13808 13809 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 13810 dtrace_dof_error(dof, "truncated action description"); 13811 return (NULL); 13812 } 13813 13814 if (sec->dofs_align != sizeof (uint64_t)) { 13815 dtrace_dof_error(dof, "bad alignment in action description"); 13816 return (NULL); 13817 } 13818 13819 if (sec->dofs_size < sec->dofs_entsize) { 13820 dtrace_dof_error(dof, "section entry size exceeds total size"); 13821 return (NULL); 13822 } 13823 13824 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 13825 dtrace_dof_error(dof, "bad entry size in action description"); 13826 return (NULL); 13827 } 13828 13829 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 13830 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 13831 return (NULL); 13832 } 13833 13834 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 13835 desc = (dof_actdesc_t *)(daddr + 13836 (uintptr_t)sec->dofs_offset + offs); 13837 kind = (dtrace_actkind_t)desc->dofa_kind; 13838 13839 if ((DTRACEACT_ISPRINTFLIKE(kind) && 13840 (kind != DTRACEACT_PRINTA || 13841 desc->dofa_strtab != DOF_SECIDX_NONE)) || 13842 (kind == DTRACEACT_DIFEXPR && 13843 desc->dofa_strtab != DOF_SECIDX_NONE)) { 13844 dof_sec_t *strtab; 13845 char *str, *fmt; 13846 uint64_t i; 13847 13848 /* 13849 * The argument to these actions is an index into the
13850 * DOF string table. For printf()-like actions, this 13851 * is the format string. For print(), this is the 13852 * CTF type of the expression result. 13853 */ 13854 if ((strtab = dtrace_dof_sect(dof, 13855 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 13856 goto err; 13857 13858 str = (char *)((uintptr_t)dof + 13859 (uintptr_t)strtab->dofs_offset); 13860 13861 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 13862 if (str[i] == '\0') 13863 break; 13864 } 13865 13866 if (i >= strtab->dofs_size) { 13867 dtrace_dof_error(dof, "bogus format string"); 13868 goto err; 13869 } 13870 13871 if (i == desc->dofa_arg) { 13872 dtrace_dof_error(dof, "empty format string"); 13873 goto err; 13874 } 13875 13876 i -= desc->dofa_arg; 13877 fmt = kmem_alloc(i + 1, KM_SLEEP); 13878 bcopy(&str[desc->dofa_arg], fmt, i + 1); 13879 arg = (uint64_t)(uintptr_t)fmt; 13880 } else { 13881 if (kind == DTRACEACT_PRINTA) { 13882 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 13883 arg = 0; 13884 } else { 13885 arg = desc->dofa_arg; 13886 } 13887 } 13888 13889 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 13890 desc->dofa_uarg, arg); 13891 13892 if (last != NULL) { 13893 last->dtad_next = act; 13894 } else { 13895 first = act; 13896 } 13897 13898 last = act; 13899 13900 if (desc->dofa_difo == DOF_SECIDX_NONE) 13901 continue; 13902 13903 if ((difosec = dtrace_dof_sect(dof, 13904 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 13905 goto err; 13906 13907 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 13908 13909 if (act->dtad_difo == NULL) 13910 goto err; 13911 } 13912 13913 ASSERT(first != NULL); 13914 return (first); 13915 13916 err: 13917 for (act = first; act != NULL; act = next) { 13918 next = act->dtad_next; 13919 dtrace_actdesc_release(act, vstate); 13920 } 13921 13922 return (NULL); 13923 } 13924 13925 static dtrace_ecbdesc_t * 13926 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13927 cred_t *cr) 13928 { 13929 dtrace_ecbdesc_t *ep; 13930 dof_ecbdesc_t *ecb; 13931 dtrace_probedesc_t *desc; 13932 dtrace_predicate_t *pred = NULL; 13933 13934 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 13935 dtrace_dof_error(dof, "truncated ECB description"); 13936 return (NULL); 13937 } 13938 13939 if (sec->dofs_align != sizeof (uint64_t)) { 13940 dtrace_dof_error(dof, "bad alignment in ECB description"); 13941 return (NULL); 13942 } 13943 13944 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 13945 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 13946 13947 if (sec == NULL) 13948 return (NULL); 13949 13950 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 13951 ep->dted_uarg = ecb->dofe_uarg; 13952 desc = &ep->dted_probe; 13953 13954 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 13955 goto err; 13956 13957 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 13958 if ((sec = dtrace_dof_sect(dof, 13959 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 13960 goto err; 13961 13962 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 13963 goto err; 13964 13965 ep->dted_pred.dtpdd_predicate = pred; 13966 } 13967 13968 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 13969 if ((sec = dtrace_dof_sect(dof, 13970 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 13971 goto err; 13972 13973 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 13974 13975 if (ep->dted_action == NULL) 13976 goto err; 13977 } 13978 13979 return (ep); 13980 13981 err: 13982 if (pred != NULL) 13983 dtrace_predicate_release(pred, vstate); 13984 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 13985 return (NULL); 13986 } 13987 13988 /* 13989 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 13990 * specified DOF. SETX relocations are computed using 'ubase', the base load 13991 * address of the object containing the DOF, and DOFREL relocations are relative 13992 * to the relocation offset within the DOF. 13993 */ 13994 static int 13995 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase, 13996 uint64_t udaddr) 13997 { 13998 uintptr_t daddr = (uintptr_t)dof; 13999 uintptr_t ts_end; 14000 dof_relohdr_t *dofr = 14001 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 14002 dof_sec_t *ss, *rs, *ts; 14003 dof_relodesc_t *r; 14004 uint_t i, n; 14005 14006 if (sec->dofs_size < sizeof (dof_relohdr_t) || 14007 sec->dofs_align != sizeof (dof_secidx_t)) { 14008 dtrace_dof_error(dof, "invalid relocation header"); 14009 return (-1); 14010 } 14011 14012 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 14013 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 14014 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 14015 ts_end = (uintptr_t)ts + sizeof (dof_sec_t); 14016 14017 if (ss == NULL || rs == NULL || ts == NULL) 14018 return (-1); /* dtrace_dof_error() has been called already */ 14019 14020 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 14021 rs->dofs_align != sizeof (uint64_t)) { 14022 dtrace_dof_error(dof, "invalid relocation section"); 14023 return (-1); 14024 } 14025 14026 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 14027 n = rs->dofs_size / rs->dofs_entsize; 14028 14029 for (i = 0; i < n; i++) { 14030 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 14031 14032 switch (r->dofr_type) { 14033 case DOF_RELO_NONE: 14034 break; 14035 case DOF_RELO_SETX: 14036 case DOF_RELO_DOFREL: 14037 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 14038 sizeof (uint64_t) > ts->dofs_size) { 14039 dtrace_dof_error(dof, "bad relocation offset"); 14040 return (-1); 14041 } 14042 14043 if (taddr >= (uintptr_t)ts && taddr < ts_end) { 14044 dtrace_dof_error(dof, "bad relocation offset"); 14045 return (-1); 14046 } 14047 14048 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 14049 dtrace_dof_error(dof, "misaligned setx relo"); 14050 return (-1); 14051 } 14052 14053 if (r->dofr_type == DOF_RELO_SETX) 14054 *(uint64_t *)taddr += ubase; 14055 else 14056 *(uint64_t *)taddr += 14057 udaddr + ts->dofs_offset + r->dofr_offset; 14058 break; 14059 default: 14060 dtrace_dof_error(dof, "invalid relocation type"); 14061 return (-1); 14062 } 14063 14064 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 14065 } 14066 14067 return (0); 14068 } 14069 14070 /* 14071 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 14072 * header: it should be at the front of a memory region that is at least 14073 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 14074 * size. It need not be validated in any other way. 14075 */ 14076 static int 14077 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 14078 dtrace_enabling_t **enabp, uint64_t ubase, uint64_t udaddr, int noprobes) 14079 { 14080 uint64_t len = dof->dofh_loadsz, seclen; 14081 uintptr_t daddr = (uintptr_t)dof; 14082 dtrace_ecbdesc_t *ep; 14083 dtrace_enabling_t *enab; 14084 uint_t i; 14085 14086 ASSERT(MUTEX_HELD(&dtrace_lock)); 14087 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 14088 14089 /* 14090 * Check the DOF header identification bytes. 
In addition to checking 14091 * valid settings, we also verify that unused bits/bytes are zeroed so 14092 * we can use them later without fear of regressing existing binaries. 14093 */ 14094 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 14095 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 14096 dtrace_dof_error(dof, "DOF magic string mismatch"); 14097 return (-1); 14098 } 14099 14100 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 14101 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 14102 dtrace_dof_error(dof, "DOF has invalid data model"); 14103 return (-1); 14104 } 14105 14106 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 14107 dtrace_dof_error(dof, "DOF encoding mismatch"); 14108 return (-1); 14109 } 14110 14111 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14112 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 14113 dtrace_dof_error(dof, "DOF version mismatch"); 14114 return (-1); 14115 } 14116 14117 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 14118 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 14119 return (-1); 14120 } 14121 14122 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 14123 dtrace_dof_error(dof, "DOF uses too many integer registers"); 14124 return (-1); 14125 } 14126 14127 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 14128 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 14129 return (-1); 14130 } 14131 14132 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 14133 if (dof->dofh_ident[i] != 0) { 14134 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 14135 return (-1); 14136 } 14137 } 14138 14139 if (dof->dofh_flags & ~DOF_FL_VALID) { 14140 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 14141 return (-1); 14142 } 14143 14144 if (dof->dofh_secsize == 0) { 14145 dtrace_dof_error(dof, "zero section header size"); 14146 return (-1); 14147 } 14148 14149 /* 14150 * Check that the section headers don't exceed the amount of DOF 14151 * data. Note that we cast the section size and number of sections 14152 * to uint64_t's to prevent possible overflow in the multiplication. 14153 */ 14154 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 14155 14156 if (dof->dofh_secoff > len || seclen > len || 14157 dof->dofh_secoff + seclen > len) { 14158 dtrace_dof_error(dof, "truncated section headers"); 14159 return (-1); 14160 } 14161 14162 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 14163 dtrace_dof_error(dof, "misaligned section headers"); 14164 return (-1); 14165 } 14166 14167 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 14168 dtrace_dof_error(dof, "misaligned section size"); 14169 return (-1); 14170 } 14171 14172 /* 14173 * Take an initial pass through the section headers to be sure that 14174 * the headers don't have stray offsets. If the 'noprobes' flag is 14175 * set, do not permit sections relating to providers, probes, or args. 
14176 */ 14177 for (i = 0; i < dof->dofh_secnum; i++) { 14178 dof_sec_t *sec = (dof_sec_t *)(daddr + 14179 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14180 14181 if (noprobes) { 14182 switch (sec->dofs_type) { 14183 case DOF_SECT_PROVIDER: 14184 case DOF_SECT_PROBES: 14185 case DOF_SECT_PRARGS: 14186 case DOF_SECT_PROFFS: 14187 dtrace_dof_error(dof, "illegal sections " 14188 "for enabling"); 14189 return (-1); 14190 } 14191 } 14192 14193 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 14194 !(sec->dofs_flags & DOF_SECF_LOAD)) { 14195 dtrace_dof_error(dof, "loadable section with load " 14196 "flag unset"); 14197 return (-1); 14198 } 14199 14200 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14201 continue; /* just ignore non-loadable sections */ 14202 14203 if (!ISP2(sec->dofs_align)) { 14204 dtrace_dof_error(dof, "bad section alignment"); 14205 return (-1); 14206 } 14207 14208 if (sec->dofs_offset & (sec->dofs_align - 1)) { 14209 dtrace_dof_error(dof, "misaligned section"); 14210 return (-1); 14211 } 14212 14213 if (sec->dofs_offset > len || sec->dofs_size > len || 14214 sec->dofs_offset + sec->dofs_size > len) { 14215 dtrace_dof_error(dof, "corrupt section header"); 14216 return (-1); 14217 } 14218 14219 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 14220 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 14221 dtrace_dof_error(dof, "non-terminating string table"); 14222 return (-1); 14223 } 14224 } 14225 14226 /* 14227 * Take a second pass through the sections and locate and perform any 14228 * relocations that are present. We do this after the first pass to 14229 * be sure that all sections have had their headers validated. 14230 */ 14231 for (i = 0; i < dof->dofh_secnum; i++) { 14232 dof_sec_t *sec = (dof_sec_t *)(daddr + 14233 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14234 14235 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14236 continue; /* skip sections that are not loadable */ 14237 14238 switch (sec->dofs_type) { 14239 case DOF_SECT_URELHDR: 14240 if (dtrace_dof_relocate(dof, sec, ubase, udaddr) != 0) 14241 return (-1); 14242 break; 14243 } 14244 } 14245 14246 if ((enab = *enabp) == NULL) 14247 enab = *enabp = dtrace_enabling_create(vstate); 14248 14249 for (i = 0; i < dof->dofh_secnum; i++) { 14250 dof_sec_t *sec = (dof_sec_t *)(daddr + 14251 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14252 14253 if (sec->dofs_type != DOF_SECT_ECBDESC) 14254 continue; 14255 14256 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 14257 dtrace_enabling_destroy(enab); 14258 *enabp = NULL; 14259 return (-1); 14260 } 14261 14262 dtrace_enabling_add(enab, ep); 14263 } 14264 14265 return (0); 14266 } 14267 14268 /* 14269 * Process DOF for any options. This routine assumes that the DOF has been 14270 * at least processed by dtrace_dof_slurp(). 
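 *
 * As an editorial illustration only (the record layout is dof_optdesc_t
 * from <sys/dtrace.h>; the option and value here are invented), a
 * consumer presetting its principal buffer size would emit an OPTDESC
 * entry along these lines:
 *
 *	dof_optdesc_t opt;
 *
 *	opt.dofo_option = DTRACEOPT_BUFSIZE;
 *	opt.dofo_strtab = DOF_SECIDX_NONE;
 *	opt.dofo_value = 4 * 1024 * 1024;
 *
 * Entries that set dofo_strtab, or that leave the value unset, are
 * rejected below.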
14271 */ 14272 static int 14273 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 14274 { 14275 int i, rval; 14276 uint32_t entsize; 14277 size_t offs; 14278 dof_optdesc_t *desc; 14279 14280 for (i = 0; i < dof->dofh_secnum; i++) { 14281 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 14282 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14283 14284 if (sec->dofs_type != DOF_SECT_OPTDESC) 14285 continue; 14286 14287 if (sec->dofs_align != sizeof (uint64_t)) { 14288 dtrace_dof_error(dof, "bad alignment in " 14289 "option description"); 14290 return (EINVAL); 14291 } 14292 14293 if ((entsize = sec->dofs_entsize) == 0) { 14294 dtrace_dof_error(dof, "zeroed option entry size"); 14295 return (EINVAL); 14296 } 14297 14298 if (entsize < sizeof (dof_optdesc_t)) { 14299 dtrace_dof_error(dof, "bad option entry size"); 14300 return (EINVAL); 14301 } 14302 14303 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 14304 desc = (dof_optdesc_t *)((uintptr_t)dof + 14305 (uintptr_t)sec->dofs_offset + offs); 14306 14307 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 14308 dtrace_dof_error(dof, "non-zero option string"); 14309 return (EINVAL); 14310 } 14311 14312 if (desc->dofo_value == DTRACEOPT_UNSET) { 14313 dtrace_dof_error(dof, "unset option"); 14314 return (EINVAL); 14315 } 14316 14317 if ((rval = dtrace_state_option(state, 14318 desc->dofo_option, desc->dofo_value)) != 0) { 14319 dtrace_dof_error(dof, "rejected option"); 14320 return (rval); 14321 } 14322 } 14323 } 14324 14325 return (0); 14326 } 14327 14328 /* 14329 * DTrace Consumer State Functions 14330 */ 14331 static int 14332 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 14333 { 14334 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 14335 void *base; 14336 uintptr_t limit; 14337 dtrace_dynvar_t *dvar, *next, *start; 14338 int i; 14339 14340 ASSERT(MUTEX_HELD(&dtrace_lock)); 14341 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 14342 14343 bzero(dstate, sizeof (dtrace_dstate_t)); 14344 14345 if ((dstate->dtds_chunksize = chunksize) == 0) 14346 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 14347 14348 VERIFY(dstate->dtds_chunksize < LONG_MAX); 14349 14350 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 14351 size = min; 14352 14353 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 14354 return (ENOMEM); 14355 14356 dstate->dtds_size = size; 14357 dstate->dtds_base = base; 14358 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 14359 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 14360 14361 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 14362 14363 if (hashsize != 1 && (hashsize & 1)) 14364 hashsize--; 14365 14366 dstate->dtds_hashsize = hashsize; 14367 dstate->dtds_hash = dstate->dtds_base; 14368 14369 /* 14370 * Set all of our hash buckets to point to the single sink, and (if 14371 * it hasn't already been set), set the sink's hash value to be the 14372 * sink sentinel value. The sink is needed for dynamic variable 14373 * lookups to know that they have iterated over an entire, valid hash 14374 * chain. 14375 */ 14376 for (i = 0; i < hashsize; i++) 14377 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 14378 14379 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 14380 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 14381 14382 /* 14383 * Determine number of active CPUs. Divide free list evenly among 14384 * active CPUs. 
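 *
 * A worked example (editorial; the numbers are invented): if the space
 * left after the hash table holds 1010 chunks and NCPU is 4, maxper is
 * rounded down to 252 chunks' worth of bytes; CPUs 0 through 2 are each
 * given a 252-chunk region and the last CPU the 254-chunk remainder.
 * (The free list built within each region stops one chunk short of its
 * limit, per the fencepost check in the inner loop below.)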
14385 */ 14386 start = (dtrace_dynvar_t *) 14387 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 14388 limit = (uintptr_t)base + size; 14389 14390 VERIFY((uintptr_t)start < limit); 14391 VERIFY((uintptr_t)start >= (uintptr_t)base); 14392 14393 maxper = (limit - (uintptr_t)start) / NCPU; 14394 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 14395 14396 #ifndef illumos 14397 CPU_FOREACH(i) { 14398 #else 14399 for (i = 0; i < NCPU; i++) { 14400 #endif 14401 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 14402 14403 /* 14404 * If we don't even have enough chunks to make it once through 14405 * NCPUs, we're just going to allocate everything to the first 14406 * CPU. And if we're on the last CPU, we're going to allocate 14407 * whatever is left over. In either case, we set the limit to 14408 * be the limit of the dynamic variable space. 14409 */ 14410 if (maxper == 0 || i == NCPU - 1) { 14411 limit = (uintptr_t)base + size; 14412 start = NULL; 14413 } else { 14414 limit = (uintptr_t)start + maxper; 14415 start = (dtrace_dynvar_t *)limit; 14416 } 14417 14418 VERIFY(limit <= (uintptr_t)base + size); 14419 14420 for (;;) { 14421 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 14422 dstate->dtds_chunksize); 14423 14424 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 14425 break; 14426 14427 VERIFY((uintptr_t)dvar >= (uintptr_t)base && 14428 (uintptr_t)dvar <= (uintptr_t)base + size); 14429 dvar->dtdv_next = next; 14430 dvar = next; 14431 } 14432 14433 if (maxper == 0) 14434 break; 14435 } 14436 14437 return (0); 14438 } 14439 14440 static void 14441 dtrace_dstate_fini(dtrace_dstate_t *dstate) 14442 { 14443 ASSERT(MUTEX_HELD(&cpu_lock)); 14444 14445 if (dstate->dtds_base == NULL) 14446 return; 14447 14448 kmem_free(dstate->dtds_base, dstate->dtds_size); 14449 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 14450 } 14451 14452 static void 14453 dtrace_vstate_fini(dtrace_vstate_t *vstate) 14454 { 14455 /* 14456 * Logical XOR, where are you? 14457 */ 14458 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 14459 14460 if (vstate->dtvs_nglobals > 0) { 14461 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 14462 sizeof (dtrace_statvar_t *)); 14463 } 14464 14465 if (vstate->dtvs_ntlocals > 0) { 14466 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 14467 sizeof (dtrace_difv_t)); 14468 } 14469 14470 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 14471 14472 if (vstate->dtvs_nlocals > 0) { 14473 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 14474 sizeof (dtrace_statvar_t *)); 14475 } 14476 } 14477 14478 #ifdef illumos 14479 static void 14480 dtrace_state_clean(dtrace_state_t *state) 14481 { 14482 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14483 return; 14484 14485 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14486 dtrace_speculation_clean(state); 14487 } 14488 14489 static void 14490 dtrace_state_deadman(dtrace_state_t *state) 14491 { 14492 hrtime_t now; 14493 14494 dtrace_sync(); 14495 14496 now = dtrace_gethrtime(); 14497 14498 if (state != dtrace_anon.dta_state && 14499 now - state->dts_laststatus >= dtrace_deadman_user) 14500 return; 14501 14502 /* 14503 * We must be sure that dts_alive never appears to be less than the 14504 * value upon entry to dtrace_state_deadman(), and because we lack a 14505 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14506 * store INT64_MAX to it, followed by a memory barrier, followed by 14507 * the new value. 
This assures that dts_alive never appears to be 14508 * less than its true value, regardless of the order in which the 14509 * stores to the underlying storage are issued. 14510 */ 14511 state->dts_alive = INT64_MAX; 14512 dtrace_membar_producer(); 14513 state->dts_alive = now; 14514 } 14515 #else /* !illumos */ 14516 static void 14517 dtrace_state_clean(void *arg) 14518 { 14519 dtrace_state_t *state = arg; 14520 dtrace_optval_t *opt = state->dts_options; 14521 14522 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14523 return; 14524 14525 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14526 dtrace_speculation_clean(state); 14527 14528 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14529 dtrace_state_clean, state); 14530 } 14531 14532 static void 14533 dtrace_state_deadman(void *arg) 14534 { 14535 dtrace_state_t *state = arg; 14536 hrtime_t now; 14537 14538 dtrace_sync(); 14539 14540 dtrace_debug_output(); 14541 14542 now = dtrace_gethrtime(); 14543 14544 if (state != dtrace_anon.dta_state && 14545 now - state->dts_laststatus >= dtrace_deadman_user) 14546 return; 14547 14548 /* 14549 * We must be sure that dts_alive never appears to be less than the 14550 * value upon entry to dtrace_state_deadman(), and because we lack a 14551 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14552 * store INT64_MAX to it, followed by a memory barrier, followed by 14553 * the new value. This assures that dts_alive never appears to be 14554 * less than its true value, regardless of the order in which the 14555 * stores to the underlying storage are issued. 14556 */ 14557 state->dts_alive = INT64_MAX; 14558 dtrace_membar_producer(); 14559 state->dts_alive = now; 14560 14561 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14562 dtrace_state_deadman, state); 14563 } 14564 #endif /* illumos */ 14565 14566 static dtrace_state_t * 14567 #ifdef illumos 14568 dtrace_state_create(dev_t *devp, cred_t *cr) 14569 #else 14570 dtrace_state_create(struct cdev *dev, struct ucred *cred __unused) 14571 #endif 14572 { 14573 #ifdef illumos 14574 minor_t minor; 14575 major_t major; 14576 #else 14577 cred_t *cr = NULL; 14578 int m = 0; 14579 #endif 14580 char c[30]; 14581 dtrace_state_t *state; 14582 dtrace_optval_t *opt; 14583 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 14584 int cpu_it; 14585 14586 ASSERT(MUTEX_HELD(&dtrace_lock)); 14587 ASSERT(MUTEX_HELD(&cpu_lock)); 14588 14589 #ifdef illumos 14590 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 14591 VM_BESTFIT | VM_SLEEP); 14592 14593 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 14594 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14595 return (NULL); 14596 } 14597 14598 state = ddi_get_soft_state(dtrace_softstate, minor); 14599 #else 14600 if (dev != NULL) { 14601 cr = dev->si_cred; 14602 m = dev2unit(dev); 14603 } 14604 14605 /* Allocate memory for the state. 
*/ 14606 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 14607 #endif 14608 14609 state->dts_epid = DTRACE_EPIDNONE + 1; 14610 14611 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 14612 #ifdef illumos 14613 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 14614 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14615 14616 if (devp != NULL) { 14617 major = getemajor(*devp); 14618 } else { 14619 major = ddi_driver_major(dtrace_devi); 14620 } 14621 14622 state->dts_dev = makedevice(major, minor); 14623 14624 if (devp != NULL) 14625 *devp = state->dts_dev; 14626 #else 14627 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 14628 state->dts_dev = dev; 14629 #endif 14630 14631 /* 14632 * We allocate NCPU buffers. On the one hand, this can be quite 14633 * a bit of memory per instance (nearly 36K on a Starcat). On the 14634 * other hand, it saves an additional memory reference in the probe 14635 * path. 14636 */ 14637 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 14638 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 14639 14640 /* 14641 * Allocate and initialise the per-process per-CPU random state. 14642 * SI_SUB_RANDOM < SI_SUB_DTRACE_ANON therefore entropy device is 14643 * assumed to be seeded at this point (if from Fortuna seed file). 14644 */ 14645 arc4random_buf(&state->dts_rstate[0], 2 * sizeof(uint64_t)); 14646 for (cpu_it = 1; cpu_it < NCPU; cpu_it++) { 14647 /* 14648 * Each CPU is assigned a 2^64 period, non-overlapping 14649 * subsequence. 14650 */ 14651 dtrace_xoroshiro128_plus_jump(state->dts_rstate[cpu_it-1], 14652 state->dts_rstate[cpu_it]); 14653 } 14654 14655 #ifdef illumos 14656 state->dts_cleaner = CYCLIC_NONE; 14657 state->dts_deadman = CYCLIC_NONE; 14658 #else 14659 callout_init(&state->dts_cleaner, 1); 14660 callout_init(&state->dts_deadman, 1); 14661 #endif 14662 state->dts_vstate.dtvs_state = state; 14663 14664 for (i = 0; i < DTRACEOPT_MAX; i++) 14665 state->dts_options[i] = DTRACEOPT_UNSET; 14666 14667 /* 14668 * Set the default options. 14669 */ 14670 opt = state->dts_options; 14671 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 14672 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 14673 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 14674 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 14675 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 14676 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 14677 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 14678 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 14679 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 14680 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 14681 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 14682 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 14683 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 14684 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 14685 14686 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 14687 14688 /* 14689 * Depending on the user credentials, we set flag bits which alter probe 14690 * visibility or the amount of destructiveness allowed. In the case of 14691 * actual anonymous tracing, or the possession of all privileges, all of 14692 * the normal checks are bypassed. 14693 */ 14694 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 14695 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 14696 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 14697 } else { 14698 /* 14699 * Set up the credentials for this instantiation. 
We take a 14700 * hold on the credential to prevent it from disappearing on 14701 * us; this in turn prevents the zone_t referenced by this 14702 * credential from disappearing. This means that we can 14703 * examine the credential and the zone from probe context. 14704 */ 14705 crhold(cr); 14706 state->dts_cred.dcr_cred = cr; 14707 14708 /* 14709 * CRA_PROC means "we have *some* privilege for dtrace" and 14710 * unlocks the use of variables like pid, zonename, etc. 14711 */ 14712 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 14713 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14714 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 14715 } 14716 14717 /* 14718 * dtrace_user allows use of syscall and profile providers. 14719 * If the user also has proc_owner and/or proc_zone, we 14720 * extend the scope to include additional visibility and 14721 * destructive power. 14722 */ 14723 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 14724 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 14725 state->dts_cred.dcr_visible |= 14726 DTRACE_CRV_ALLPROC; 14727 14728 state->dts_cred.dcr_action |= 14729 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14730 } 14731 14732 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 14733 state->dts_cred.dcr_visible |= 14734 DTRACE_CRV_ALLZONE; 14735 14736 state->dts_cred.dcr_action |= 14737 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14738 } 14739 14740 /* 14741 * If we have all privs in whatever zone this is, 14742 * we can do destructive things to processes which 14743 * have altered credentials. 14744 */ 14745 #ifdef illumos 14746 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14747 cr->cr_zone->zone_privset)) { 14748 state->dts_cred.dcr_action |= 14749 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14750 } 14751 #endif 14752 } 14753 14754 /* 14755 * Holding the dtrace_kernel privilege also implies that 14756 * the user has the dtrace_user privilege from a visibility 14757 * perspective. But without further privileges, some 14758 * destructive actions are not available. 14759 */ 14760 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 14761 /* 14762 * Make all probes in all zones visible. However, 14763 * this doesn't mean that all actions become available 14764 * to all zones. 14765 */ 14766 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 14767 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 14768 14769 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 14770 DTRACE_CRA_PROC; 14771 /* 14772 * Holding proc_owner means that destructive actions 14773 * for *this* zone are allowed. 14774 */ 14775 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14776 state->dts_cred.dcr_action |= 14777 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14778 14779 /* 14780 * Holding proc_zone means that destructive actions 14781 * for this user/group ID in all zones is allowed. 14782 */ 14783 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14784 state->dts_cred.dcr_action |= 14785 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14786 14787 #ifdef illumos 14788 /* 14789 * If we have all privs in whatever zone this is, 14790 * we can do destructive things to processes which 14791 * have altered credentials. 14792 */ 14793 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14794 cr->cr_zone->zone_privset)) { 14795 state->dts_cred.dcr_action |= 14796 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14797 } 14798 #endif 14799 } 14800 14801 /* 14802 * Holding the dtrace_proc privilege gives control over fasttrap 14803 * and pid providers. 
We need to grant wider destructive 14804 * privileges in the event that the user has proc_owner and/or 14805 * proc_zone. 14806 */ 14807 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14808 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14809 state->dts_cred.dcr_action |= 14810 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14811 14812 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14813 state->dts_cred.dcr_action |= 14814 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14815 } 14816 } 14817 14818 return (state); 14819 } 14820 14821 static int 14822 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 14823 { 14824 dtrace_optval_t *opt = state->dts_options, size; 14825 processorid_t cpu = 0; 14826 int flags = 0, rval, factor, divisor = 1; 14827 14828 ASSERT(MUTEX_HELD(&dtrace_lock)); 14829 ASSERT(MUTEX_HELD(&cpu_lock)); 14830 ASSERT(which < DTRACEOPT_MAX); 14831 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 14832 (state == dtrace_anon.dta_state && 14833 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 14834 14835 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 14836 return (0); 14837 14838 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 14839 cpu = opt[DTRACEOPT_CPU]; 14840 14841 if (which == DTRACEOPT_SPECSIZE) 14842 flags |= DTRACEBUF_NOSWITCH; 14843 14844 if (which == DTRACEOPT_BUFSIZE) { 14845 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 14846 flags |= DTRACEBUF_RING; 14847 14848 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 14849 flags |= DTRACEBUF_FILL; 14850 14851 if (state != dtrace_anon.dta_state || 14852 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14853 flags |= DTRACEBUF_INACTIVE; 14854 } 14855 14856 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) { 14857 /* 14858 * The size must be 8-byte aligned. If the size is not 8-byte 14859 * aligned, drop it down by the difference. 14860 */ 14861 if (size & (sizeof (uint64_t) - 1)) 14862 size -= size & (sizeof (uint64_t) - 1); 14863 14864 if (size < state->dts_reserve) { 14865 /* 14866 * Buffers must always be large enough to accommodate 14867 * their prereserved space. We return E2BIG instead 14868 * of ENOMEM in this case to allow for user-level 14869 * software to differentiate the cases.
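 *
 * (A hypothetical user-level sketch, for illustration only -- this is
 * not a quote of libdtrace: a consumer driving this path through the
 * DTRACEIOC_GO ioctl could differentiate the two errors along the
 * lines of
 *
 *	if (ioctl(fd, DTRACEIOC_GO, &cpu) == -1) {
 *		if (errno == E2BIG)
 *			... grow the buffer: it is below the reserve ...
 *		else if (errno == ENOMEM)
 *			... retry with a smaller buffer ...
 *	}
 *
 * raising "bufsize" in the first case and halving it in the second.)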
14870 */ 14871 return (E2BIG); 14872 } 14873 14874 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor); 14875 14876 if (rval != ENOMEM) { 14877 opt[which] = size; 14878 return (rval); 14879 } 14880 14881 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14882 return (rval); 14883 14884 for (divisor = 2; divisor < factor; divisor <<= 1) 14885 continue; 14886 } 14887 14888 return (ENOMEM); 14889 } 14890 14891 static int 14892 dtrace_state_buffers(dtrace_state_t *state) 14893 { 14894 dtrace_speculation_t *spec = state->dts_speculations; 14895 int rval, i; 14896 14897 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 14898 DTRACEOPT_BUFSIZE)) != 0) 14899 return (rval); 14900 14901 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 14902 DTRACEOPT_AGGSIZE)) != 0) 14903 return (rval); 14904 14905 for (i = 0; i < state->dts_nspeculations; i++) { 14906 if ((rval = dtrace_state_buffer(state, 14907 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 14908 return (rval); 14909 } 14910 14911 return (0); 14912 } 14913 14914 static void 14915 dtrace_state_prereserve(dtrace_state_t *state) 14916 { 14917 dtrace_ecb_t *ecb; 14918 dtrace_probe_t *probe; 14919 14920 state->dts_reserve = 0; 14921 14922 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 14923 return; 14924 14925 /* 14926 * If our buffer policy is a "fill" buffer policy, we need to set the 14927 * prereserved space to be the space required by the END probes. 14928 */ 14929 probe = dtrace_probes[dtrace_probeid_end - 1]; 14930 ASSERT(probe != NULL); 14931 14932 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 14933 if (ecb->dte_state != state) 14934 continue; 14935 14936 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 14937 } 14938 } 14939 14940 static int 14941 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 14942 { 14943 dtrace_optval_t *opt = state->dts_options, sz, nspec; 14944 dtrace_speculation_t *spec; 14945 dtrace_buffer_t *buf; 14946 #ifdef illumos 14947 cyc_handler_t hdlr; 14948 cyc_time_t when; 14949 #endif 14950 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 14951 dtrace_icookie_t cookie; 14952 14953 mutex_enter(&cpu_lock); 14954 mutex_enter(&dtrace_lock); 14955 14956 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14957 rval = EBUSY; 14958 goto out; 14959 } 14960 14961 /* 14962 * Before we can perform any checks, we must prime all of the 14963 * retained enablings that correspond to this state. 14964 */ 14965 dtrace_enabling_prime(state); 14966 14967 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 14968 rval = EACCES; 14969 goto out; 14970 } 14971 14972 dtrace_state_prereserve(state); 14973 14974 /* 14975 * What we want to do now is try to allocate our speculations. 14976 * We do not automatically resize the number of speculations; if 14977 * this fails, we will fail the operation.
14978 */ 14979 nspec = opt[DTRACEOPT_NSPEC]; 14980 ASSERT(nspec != DTRACEOPT_UNSET); 14981 14982 if (nspec > INT_MAX) { 14983 rval = ENOMEM; 14984 goto out; 14985 } 14986 14987 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 14988 KM_NOSLEEP | KM_NORMALPRI); 14989 14990 if (spec == NULL) { 14991 rval = ENOMEM; 14992 goto out; 14993 } 14994 14995 state->dts_speculations = spec; 14996 state->dts_nspeculations = (int)nspec; 14997 14998 for (i = 0; i < nspec; i++) { 14999 if ((buf = kmem_zalloc(bufsize, 15000 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 15001 rval = ENOMEM; 15002 goto err; 15003 } 15004 15005 spec[i].dtsp_buffer = buf; 15006 } 15007 15008 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 15009 if (dtrace_anon.dta_state == NULL) { 15010 rval = ENOENT; 15011 goto out; 15012 } 15013 15014 if (state->dts_necbs != 0) { 15015 rval = EALREADY; 15016 goto out; 15017 } 15018 15019 state->dts_anon = dtrace_anon_grab(); 15020 ASSERT(state->dts_anon != NULL); 15021 state = state->dts_anon; 15022 15023 /* 15024 * We want "grabanon" to be set in the grabbed state, so we'll 15025 * copy that option value from the grabbing state into the 15026 * grabbed state. 15027 */ 15028 state->dts_options[DTRACEOPT_GRABANON] = 15029 opt[DTRACEOPT_GRABANON]; 15030 15031 *cpu = dtrace_anon.dta_beganon; 15032 15033 /* 15034 * If the anonymous state is active (as it almost certainly 15035 * is if the anonymous enabling ultimately matched anything), 15036 * we don't allow any further option processing -- but we 15037 * don't return failure. 15038 */ 15039 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 15040 goto out; 15041 } 15042 15043 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 15044 opt[DTRACEOPT_AGGSIZE] != 0) { 15045 if (state->dts_aggregations == NULL) { 15046 /* 15047 * We're not going to create an aggregation buffer 15048 * because we don't have any ECBs that contain 15049 * aggregations -- set this option to 0. 15050 */ 15051 opt[DTRACEOPT_AGGSIZE] = 0; 15052 } else { 15053 /* 15054 * If we have an aggregation buffer, we must also have 15055 * a buffer to use as scratch. 15056 */ 15057 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 15058 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 15059 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 15060 } 15061 } 15062 } 15063 15064 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 15065 opt[DTRACEOPT_SPECSIZE] != 0) { 15066 if (!state->dts_speculates) { 15067 /* 15068 * We're not going to create speculation buffers 15069 * because we don't have any ECBs that actually 15070 * speculate -- set the speculation size to 0. 15071 */ 15072 opt[DTRACEOPT_SPECSIZE] = 0; 15073 } 15074 } 15075 15076 /* 15077 * The bare minimum size for any buffer that we're actually going to 15078 * do anything to is sizeof (uint64_t). 15079 */ 15080 sz = sizeof (uint64_t); 15081 15082 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 15083 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 15084 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 15085 /* 15086 * A buffer size has been explicitly set to 0 (or to a size 15087 * that will be adjusted to 0) and we need the space -- we 15088 * need to return failure. We return ENOSPC to differentiate 15089 * it from failing to allocate a buffer due to failure to meet 15090 * the reserve (for which we return E2BIG). 
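 *
 * (For instance -- an editorial example -- a consumer that explicitly
 * sets "bufsize" to 0 while its enablings record data will land here,
 * in the ENOSPC path, rather than in the E2BIG path above.)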
15091 */ 15092 rval = ENOSPC; 15093 goto out; 15094 } 15095 15096 if ((rval = dtrace_state_buffers(state)) != 0) 15097 goto err; 15098 15099 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 15100 sz = dtrace_dstate_defsize; 15101 15102 do { 15103 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 15104 15105 if (rval == 0) 15106 break; 15107 15108 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 15109 goto err; 15110 } while (sz >>= 1); 15111 15112 opt[DTRACEOPT_DYNVARSIZE] = sz; 15113 15114 if (rval != 0) 15115 goto err; 15116 15117 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 15118 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 15119 15120 if (opt[DTRACEOPT_CLEANRATE] == 0) 15121 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 15122 15123 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 15124 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 15125 15126 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 15127 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 15128 15129 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 15130 #ifdef illumos 15131 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 15132 hdlr.cyh_arg = state; 15133 hdlr.cyh_level = CY_LOW_LEVEL; 15134 15135 when.cyt_when = 0; 15136 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 15137 15138 state->dts_cleaner = cyclic_add(&hdlr, &when); 15139 15140 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 15141 hdlr.cyh_arg = state; 15142 hdlr.cyh_level = CY_LOW_LEVEL; 15143 15144 when.cyt_when = 0; 15145 when.cyt_interval = dtrace_deadman_interval; 15146 15147 state->dts_deadman = cyclic_add(&hdlr, &when); 15148 #else 15149 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 15150 dtrace_state_clean, state); 15151 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 15152 dtrace_state_deadman, state); 15153 #endif 15154 15155 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 15156 15157 #ifdef illumos 15158 if (state->dts_getf != 0 && 15159 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 15160 /* 15161 * We don't have kernel privs but we have at least one call 15162 * to getf(); we need to bump our zone's count, and (if 15163 * this is the first enabling to have an unprivileged call 15164 * to getf()) we need to hook into closef(). 15165 */ 15166 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 15167 15168 if (dtrace_getf++ == 0) { 15169 ASSERT(dtrace_closef == NULL); 15170 dtrace_closef = dtrace_getf_barrier; 15171 } 15172 } 15173 #endif 15174 15175 /* 15176 * Now it's time to actually fire the BEGIN probe. We need to disable 15177 * interrupts here both to record the CPU on which we fired the BEGIN 15178 * probe (the data from this CPU will be processed first at user 15179 * level) and to manually activate the buffer for this CPU. 15180 */ 15181 cookie = dtrace_interrupt_disable(); 15182 *cpu = curcpu; 15183 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 15184 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 15185 15186 dtrace_probe(dtrace_probeid_begin, 15187 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 15188 dtrace_interrupt_enable(cookie); 15189 /* 15190 * We may have had an exit action from a BEGIN probe; only change our 15191 * state to ACTIVE if we're still in WARMUP. 
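 *
 * (For example -- an editorial note -- a consumer whose only enabling
 * is the D clause
 *
 *	BEGIN { exit(0); }
 *
 * processes its exit action inside the BEGIN probe we just fired, and
 * so arrives here already DRAINING.)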
15192 */ 15193 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 15194 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 15195 15196 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 15197 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 15198 15199 #ifdef __FreeBSD__ 15200 /* 15201 * We enable anonymous tracing before APs are started, so we must 15202 * activate buffers using the current CPU. 15203 */ 15204 if (state == dtrace_anon.dta_state) 15205 for (int i = 0; i < NCPU; i++) 15206 dtrace_buffer_activate_cpu(state, i); 15207 else 15208 dtrace_xcall(DTRACE_CPUALL, 15209 (dtrace_xcall_t)dtrace_buffer_activate, state); 15210 #else 15211 /* 15212 * Regardless of whether we're now in ACTIVE or DRAINING, we 15213 * want each CPU to transition its principal buffer out of the 15214 * INACTIVE state. Doing this assures that no CPU will suddenly begin 15215 * processing an ECB halfway down a probe's ECB chain; all CPUs will 15216 * atomically transition from processing none of a state's ECBs to 15217 * processing all of them. 15218 */ 15219 dtrace_xcall(DTRACE_CPUALL, 15220 (dtrace_xcall_t)dtrace_buffer_activate, state); 15221 #endif 15222 goto out; 15223 15224 err: 15225 dtrace_buffer_free(state->dts_buffer); 15226 dtrace_buffer_free(state->dts_aggbuffer); 15227 15228 if ((nspec = state->dts_nspeculations) == 0) { 15229 ASSERT(state->dts_speculations == NULL); 15230 goto out; 15231 } 15232 15233 spec = state->dts_speculations; 15234 ASSERT(spec != NULL); 15235 15236 for (i = 0; i < state->dts_nspeculations; i++) { 15237 if ((buf = spec[i].dtsp_buffer) == NULL) 15238 break; 15239 15240 dtrace_buffer_free(buf); 15241 kmem_free(buf, bufsize); 15242 } 15243 15244 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15245 state->dts_nspeculations = 0; 15246 state->dts_speculations = NULL; 15247 15248 out: 15249 mutex_exit(&dtrace_lock); 15250 mutex_exit(&cpu_lock); 15251 15252 return (rval); 15253 } 15254 15255 static int 15256 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 15257 { 15258 dtrace_icookie_t cookie; 15259 15260 ASSERT(MUTEX_HELD(&dtrace_lock)); 15261 15262 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 15263 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 15264 return (EINVAL); 15265 15266 /* 15267 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 15268 * to be sure that every CPU has seen it. See below for the details 15269 * on why this is done. 15270 */ 15271 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 15272 dtrace_sync(); 15273 15274 /* 15275 * By this point, it is impossible for any CPU to be still processing 15276 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 15277 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 15278 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 15279 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 15280 * iff we're in the END probe. 15281 */ 15282 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 15283 dtrace_sync(); 15284 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 15285 15286 /* 15287 * Finally, we can release the reserve and call the END probe. We 15288 * disable interrupts across calling the END probe to allow us to 15289 * return the CPU on which we actually called the END probe. This 15290 * allows user-land to be sure that this CPU's principal buffer is 15291 * processed last.
15292 */ 15293 state->dts_reserve = 0; 15294 15295 cookie = dtrace_interrupt_disable(); 15296 *cpu = curcpu; 15297 dtrace_probe(dtrace_probeid_end, 15298 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 15299 dtrace_interrupt_enable(cookie); 15300 15301 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 15302 dtrace_sync(); 15303 15304 #ifdef illumos 15305 if (state->dts_getf != 0 && 15306 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 15307 /* 15308 * We don't have kernel privs but we have at least one call 15309 * to getf(); we need to lower our zone's count, and (if 15310 * this is the last enabling to have an unprivileged call 15311 * to getf()) we need to clear the closef() hook. 15312 */ 15313 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0); 15314 ASSERT(dtrace_closef == dtrace_getf_barrier); 15315 ASSERT(dtrace_getf > 0); 15316 15317 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--; 15318 15319 if (--dtrace_getf == 0) 15320 dtrace_closef = NULL; 15321 } 15322 #endif 15323 15324 return (0); 15325 } 15326 15327 static int 15328 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 15329 dtrace_optval_t val) 15330 { 15331 ASSERT(MUTEX_HELD(&dtrace_lock)); 15332 15333 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 15334 return (EBUSY); 15335 15336 if (option >= DTRACEOPT_MAX) 15337 return (EINVAL); 15338 15339 if (option != DTRACEOPT_CPU && val < 0) 15340 return (EINVAL); 15341 15342 switch (option) { 15343 case DTRACEOPT_DESTRUCTIVE: 15344 if (dtrace_destructive_disallow) 15345 return (EACCES); 15346 15347 state->dts_cred.dcr_destructive = 1; 15348 break; 15349 15350 case DTRACEOPT_BUFSIZE: 15351 case DTRACEOPT_DYNVARSIZE: 15352 case DTRACEOPT_AGGSIZE: 15353 case DTRACEOPT_SPECSIZE: 15354 case DTRACEOPT_STRSIZE: 15355 if (val < 0) 15356 return (EINVAL); 15357 15358 if (val >= LONG_MAX) { 15359 /* 15360 * If this is an otherwise negative value, set it to 15361 * the highest multiple of 128m less than LONG_MAX. 15362 * Technically, we're adjusting the size without 15363 * regard to the buffer resizing policy, but in fact, 15364 * this has no effect -- if we set the buffer size to 15365 * ~LONG_MAX and the buffer policy is ultimately set to 15366 * be "manual", the buffer allocation is guaranteed to 15367 * fail, if only because the allocation requires two 15368 * buffers. (We set the size to the highest 15369 * multiple of 128m because it ensures that the size 15370 * will remain a multiple of a megabyte when 15371 * repeatedly halved -- all the way down to 15m.) 15372 */ 15373 val = LONG_MAX - (1 << 27) + 1; 15374 } 15375 } 15376 15377 state->dts_options[option] = val; 15378 15379 return (0); 15380 } 15381 15382 static void 15383 dtrace_state_destroy(dtrace_state_t *state) 15384 { 15385 dtrace_ecb_t *ecb; 15386 dtrace_vstate_t *vstate = &state->dts_vstate; 15387 #ifdef illumos 15388 minor_t minor = getminor(state->dts_dev); 15389 #endif 15390 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 15391 dtrace_speculation_t *spec = state->dts_speculations; 15392 int nspec = state->dts_nspeculations; 15393 uint32_t match; 15394 15395 ASSERT(MUTEX_HELD(&dtrace_lock)); 15396 ASSERT(MUTEX_HELD(&cpu_lock)); 15397 15398 /* 15399 * First, retract any retained enablings for this state.
15400 */ 15401 dtrace_enabling_retract(state); 15402 ASSERT(state->dts_nretained == 0); 15403 15404 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 15405 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 15406 /* 15407 * We have managed to come into dtrace_state_destroy() on a 15408 * hot enabling -- almost certainly because of a disorderly 15409 * shutdown of a consumer. (That is, a consumer that is 15410 * exiting without having called dtrace_stop().) In this case, 15411 * we're going to set our activity to be KILLED, and then 15412 * issue a sync to be sure that everyone is out of probe 15413 * context before we start blowing away ECBs. 15414 */ 15415 state->dts_activity = DTRACE_ACTIVITY_KILLED; 15416 dtrace_sync(); 15417 } 15418 15419 /* 15420 * Release the credential hold we took in dtrace_state_create(). 15421 */ 15422 if (state->dts_cred.dcr_cred != NULL) 15423 crfree(state->dts_cred.dcr_cred); 15424 15425 /* 15426 * Now we can safely disable and destroy any enabled probes. Because 15427 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 15428 * (especially if they're all enabled), we take two passes through the 15429 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 15430 * in the second we disable whatever is left over. 15431 */ 15432 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 15433 for (i = 0; i < state->dts_necbs; i++) { 15434 if ((ecb = state->dts_ecbs[i]) == NULL) 15435 continue; 15436 15437 if (match && ecb->dte_probe != NULL) { 15438 dtrace_probe_t *probe = ecb->dte_probe; 15439 dtrace_provider_t *prov = probe->dtpr_provider; 15440 15441 if (!(prov->dtpv_priv.dtpp_flags & match)) 15442 continue; 15443 } 15444 15445 dtrace_ecb_disable(ecb); 15446 dtrace_ecb_destroy(ecb); 15447 } 15448 15449 if (!match) 15450 break; 15451 } 15452 15453 /* 15454 * Before we free the buffers, perform one more sync to assure that 15455 * every CPU is out of probe context. 
15456 */ 15457 dtrace_sync(); 15458 15459 dtrace_buffer_free(state->dts_buffer); 15460 dtrace_buffer_free(state->dts_aggbuffer); 15461 15462 for (i = 0; i < nspec; i++) 15463 dtrace_buffer_free(spec[i].dtsp_buffer); 15464 15465 #ifdef illumos 15466 if (state->dts_cleaner != CYCLIC_NONE) 15467 cyclic_remove(state->dts_cleaner); 15468 15469 if (state->dts_deadman != CYCLIC_NONE) 15470 cyclic_remove(state->dts_deadman); 15471 #else 15472 callout_stop(&state->dts_cleaner); 15473 callout_drain(&state->dts_cleaner); 15474 callout_stop(&state->dts_deadman); 15475 callout_drain(&state->dts_deadman); 15476 #endif 15477 15478 dtrace_dstate_fini(&vstate->dtvs_dynvars); 15479 dtrace_vstate_fini(vstate); 15480 if (state->dts_ecbs != NULL) 15481 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 15482 15483 if (state->dts_aggregations != NULL) { 15484 #ifdef DEBUG 15485 for (i = 0; i < state->dts_naggregations; i++) 15486 ASSERT(state->dts_aggregations[i] == NULL); 15487 #endif 15488 ASSERT(state->dts_naggregations > 0); 15489 kmem_free(state->dts_aggregations, 15490 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 15491 } 15492 15493 kmem_free(state->dts_buffer, bufsize); 15494 kmem_free(state->dts_aggbuffer, bufsize); 15495 15496 for (i = 0; i < nspec; i++) 15497 kmem_free(spec[i].dtsp_buffer, bufsize); 15498 15499 if (spec != NULL) 15500 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15501 15502 dtrace_format_destroy(state); 15503 15504 if (state->dts_aggid_arena != NULL) { 15505 #ifdef illumos 15506 vmem_destroy(state->dts_aggid_arena); 15507 #else 15508 delete_unrhdr(state->dts_aggid_arena); 15509 #endif 15510 state->dts_aggid_arena = NULL; 15511 } 15512 #ifdef illumos 15513 ddi_soft_state_free(dtrace_softstate, minor); 15514 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 15515 #endif 15516 } 15517 15518 /* 15519 * DTrace Anonymous Enabling Functions 15520 */ 15521 static dtrace_state_t * 15522 dtrace_anon_grab(void) 15523 { 15524 dtrace_state_t *state; 15525 15526 ASSERT(MUTEX_HELD(&dtrace_lock)); 15527 15528 if ((state = dtrace_anon.dta_state) == NULL) { 15529 ASSERT(dtrace_anon.dta_enabling == NULL); 15530 return (NULL); 15531 } 15532 15533 ASSERT(dtrace_anon.dta_enabling != NULL); 15534 ASSERT(dtrace_retained != NULL); 15535 15536 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 15537 dtrace_anon.dta_enabling = NULL; 15538 dtrace_anon.dta_state = NULL; 15539 15540 return (state); 15541 } 15542 15543 static void 15544 dtrace_anon_property(void) 15545 { 15546 int i, rv; 15547 dtrace_state_t *state; 15548 dof_hdr_t *dof; 15549 char c[32]; /* enough for "dof-data-" + digits */ 15550 15551 ASSERT(MUTEX_HELD(&dtrace_lock)); 15552 ASSERT(MUTEX_HELD(&cpu_lock)); 15553 15554 for (i = 0; ; i++) { 15555 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 15556 15557 dtrace_err_verbose = 1; 15558 15559 if ((dof = dtrace_dof_property(c)) == NULL) { 15560 dtrace_err_verbose = 0; 15561 break; 15562 } 15563 15564 #ifdef illumos 15565 /* 15566 * We want to create anonymous state, so we need to transition 15567 * the kernel debugger to indicate that DTrace is active. If 15568 * this fails (e.g. because the debugger has modified text in 15569 * some way), we won't continue with the processing. 
15570 */ 15571 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15572 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 15573 "enabling ignored."); 15574 dtrace_dof_destroy(dof); 15575 break; 15576 } 15577 #endif 15578 15579 /* 15580 * If we haven't allocated an anonymous state, we'll do so now. 15581 */ 15582 if ((state = dtrace_anon.dta_state) == NULL) { 15583 state = dtrace_state_create(NULL, NULL); 15584 dtrace_anon.dta_state = state; 15585 15586 if (state == NULL) { 15587 /* 15588 * This basically shouldn't happen: the only 15589 * failure mode from dtrace_state_create() is a 15590 * failure of ddi_soft_state_zalloc() that 15591 * itself should never happen. Still, the 15592 * interface allows for a failure mode, and 15593 * we want to fail as gracefully as possible: 15594 * we'll emit an error message and cease 15595 * processing anonymous state in this case. 15596 */ 15597 cmn_err(CE_WARN, "failed to create " 15598 "anonymous state"); 15599 dtrace_dof_destroy(dof); 15600 break; 15601 } 15602 } 15603 15604 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 15605 &dtrace_anon.dta_enabling, 0, 0, B_TRUE); 15606 15607 if (rv == 0) 15608 rv = dtrace_dof_options(dof, state); 15609 15610 dtrace_err_verbose = 0; 15611 dtrace_dof_destroy(dof); 15612 15613 if (rv != 0) { 15614 /* 15615 * This is malformed DOF; chuck any anonymous state 15616 * that we created. 15617 */ 15618 ASSERT(dtrace_anon.dta_enabling == NULL); 15619 dtrace_state_destroy(state); 15620 dtrace_anon.dta_state = NULL; 15621 break; 15622 } 15623 15624 ASSERT(dtrace_anon.dta_enabling != NULL); 15625 } 15626 15627 if (dtrace_anon.dta_enabling != NULL) { 15628 int rval; 15629 15630 /* 15631 * dtrace_enabling_retain() can only fail because we are 15632 * trying to retain more enablings than are allowed -- but 15633 * we only have one anonymous enabling, and we are guaranteed 15634 * to be allowed at least one retained enabling; we assert 15635 * that dtrace_enabling_retain() returns success. 15636 */ 15637 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 15638 ASSERT(rval == 0); 15639 15640 dtrace_enabling_dump(dtrace_anon.dta_enabling); 15641 } 15642 } 15643 15644 /* 15645 * DTrace Helper Functions 15646 */ 15647 static void 15648 dtrace_helper_trace(dtrace_helper_action_t *helper, 15649 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 15650 { 15651 uint32_t size, next, nnext, i; 15652 dtrace_helptrace_t *ent, *buffer; 15653 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 15654 15655 if ((buffer = dtrace_helptrace_buffer) == NULL) 15656 return; 15657 15658 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 15659 15660 /* 15661 * What would a tracing framework be without its own tracing 15662 * framework? (Well, a hell of a lot simpler, for starters...) 15663 */ 15664 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 15665 sizeof (uint64_t) - sizeof (uint64_t); 15666 15667 /* 15668 * Iterate until we can allocate a slot in the trace buffer. 15669 */ 15670 do { 15671 next = dtrace_helptrace_next; 15672 15673 if (next + size < dtrace_helptrace_bufsize) { 15674 nnext = next + size; 15675 } else { 15676 nnext = size; 15677 } 15678 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 15679 15680 /* 15681 * We have our slot; fill it in. 
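 *
 * (To make the wrap arithmetic above concrete -- an editorial example
 * with invented numbers: given a 4096-byte buffer and a 100-byte entry,
 * a winning CAS from next = 4030 stores nnext = 100. nnext == size is
 * therefore the signal, tested below, that this entry wrapped around to
 * offset 0.)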
15682 */ 15683 if (nnext == size) { 15684 dtrace_helptrace_wrapped++; 15685 next = 0; 15686 } 15687 15688 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next); 15689 ent->dtht_helper = helper; 15690 ent->dtht_where = where; 15691 ent->dtht_nlocals = vstate->dtvs_nlocals; 15692 15693 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 15694 mstate->dtms_fltoffs : -1; 15695 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 15696 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 15697 15698 for (i = 0; i < vstate->dtvs_nlocals; i++) { 15699 dtrace_statvar_t *svar; 15700 15701 if ((svar = vstate->dtvs_locals[i]) == NULL) 15702 continue; 15703 15704 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 15705 ent->dtht_locals[i] = 15706 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 15707 } 15708 } 15709 15710 static uint64_t 15711 dtrace_helper(int which, dtrace_mstate_t *mstate, 15712 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 15713 { 15714 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 15715 uint64_t sarg0 = mstate->dtms_arg[0]; 15716 uint64_t sarg1 = mstate->dtms_arg[1]; 15717 uint64_t rval = 0; 15718 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 15719 dtrace_helper_action_t *helper; 15720 dtrace_vstate_t *vstate; 15721 dtrace_difo_t *pred; 15722 int i, trace = dtrace_helptrace_buffer != NULL; 15723 15724 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 15725 15726 if (helpers == NULL) 15727 return (0); 15728 15729 if ((helper = helpers->dthps_actions[which]) == NULL) 15730 return (0); 15731 15732 vstate = &helpers->dthps_vstate; 15733 mstate->dtms_arg[0] = arg0; 15734 mstate->dtms_arg[1] = arg1; 15735 15736 /* 15737 * Now iterate over each helper. If its predicate evaluates to 'true', 15738 * we'll call the corresponding actions. Note that the below calls 15739 * to dtrace_dif_emulate() may set faults in machine state. This is 15740 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 15741 * the stored DIF offset with its own (which is the desired behavior). 15742 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 15743 * from machine state; this is okay, too. 15744 */ 15745 for (; helper != NULL; helper = helper->dtha_next) { 15746 if ((pred = helper->dtha_predicate) != NULL) { 15747 if (trace) 15748 dtrace_helper_trace(helper, mstate, vstate, 0); 15749 15750 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 15751 goto next; 15752 15753 if (*flags & CPU_DTRACE_FAULT) 15754 goto err; 15755 } 15756 15757 for (i = 0; i < helper->dtha_nactions; i++) { 15758 if (trace) 15759 dtrace_helper_trace(helper, 15760 mstate, vstate, i + 1); 15761 15762 rval = dtrace_dif_emulate(helper->dtha_actions[i], 15763 mstate, vstate, state); 15764 15765 if (*flags & CPU_DTRACE_FAULT) 15766 goto err; 15767 } 15768 15769 next: 15770 if (trace) 15771 dtrace_helper_trace(helper, mstate, vstate, 15772 DTRACE_HELPTRACE_NEXT); 15773 } 15774 15775 if (trace) 15776 dtrace_helper_trace(helper, mstate, vstate, 15777 DTRACE_HELPTRACE_DONE); 15778 15779 /* 15780 * Restore the arg0 and arg1 values that we saved upon entry. 15781 */ 15782 mstate->dtms_arg[0] = sarg0; 15783 mstate->dtms_arg[1] = sarg1; 15784 15785 return (rval); 15786 15787 err: 15788 if (trace) 15789 dtrace_helper_trace(helper, mstate, vstate, 15790 DTRACE_HELPTRACE_ERR); 15791 15792 /* 15793 * Restore the arg0 and arg1 values that we saved upon entry.
15794 */ 15795 mstate->dtms_arg[0] = sarg0; 15796 mstate->dtms_arg[1] = sarg1; 15797 15798 return (0); 15799 } 15800 15801 static void 15802 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 15803 dtrace_vstate_t *vstate) 15804 { 15805 int i; 15806 15807 if (helper->dtha_predicate != NULL) 15808 dtrace_difo_release(helper->dtha_predicate, vstate); 15809 15810 for (i = 0; i < helper->dtha_nactions; i++) { 15811 ASSERT(helper->dtha_actions[i] != NULL); 15812 dtrace_difo_release(helper->dtha_actions[i], vstate); 15813 } 15814 15815 kmem_free(helper->dtha_actions, 15816 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 15817 kmem_free(helper, sizeof (dtrace_helper_action_t)); 15818 } 15819 15820 static int 15821 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen) 15822 { 15823 proc_t *p = curproc; 15824 dtrace_vstate_t *vstate; 15825 int i; 15826 15827 if (help == NULL) 15828 help = p->p_dtrace_helpers; 15829 15830 ASSERT(MUTEX_HELD(&dtrace_lock)); 15831 15832 if (help == NULL || gen > help->dthps_generation) 15833 return (EINVAL); 15834 15835 vstate = &help->dthps_vstate; 15836 15837 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15838 dtrace_helper_action_t *last = NULL, *h, *next; 15839 15840 for (h = help->dthps_actions[i]; h != NULL; h = next) { 15841 next = h->dtha_next; 15842 15843 if (h->dtha_generation == gen) { 15844 if (last != NULL) { 15845 last->dtha_next = next; 15846 } else { 15847 help->dthps_actions[i] = next; 15848 } 15849 15850 dtrace_helper_action_destroy(h, vstate); 15851 } else { 15852 last = h; 15853 } 15854 } 15855 } 15856 15857 /* 15858 * Interate until we've cleared out all helper providers with the 15859 * given generation number. 15860 */ 15861 for (;;) { 15862 dtrace_helper_provider_t *prov; 15863 15864 /* 15865 * Look for a helper provider with the right generation. We 15866 * have to start back at the beginning of the list each time 15867 * because we drop dtrace_lock. It's unlikely that we'll make 15868 * more than two passes. 15869 */ 15870 for (i = 0; i < help->dthps_nprovs; i++) { 15871 prov = help->dthps_provs[i]; 15872 15873 if (prov->dthp_generation == gen) 15874 break; 15875 } 15876 15877 /* 15878 * If there were no matches, we're done. 15879 */ 15880 if (i == help->dthps_nprovs) 15881 break; 15882 15883 /* 15884 * Move the last helper provider into this slot. 15885 */ 15886 help->dthps_nprovs--; 15887 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 15888 help->dthps_provs[help->dthps_nprovs] = NULL; 15889 15890 mutex_exit(&dtrace_lock); 15891 15892 /* 15893 * If we have a meta provider, remove this helper provider. 
15894 */ 15895 mutex_enter(&dtrace_meta_lock); 15896 if (dtrace_meta_pid != NULL) { 15897 ASSERT(dtrace_deferred_pid == NULL); 15898 dtrace_helper_provider_remove(&prov->dthp_prov, 15899 p->p_pid); 15900 } 15901 mutex_exit(&dtrace_meta_lock); 15902 15903 dtrace_helper_provider_destroy(prov); 15904 15905 mutex_enter(&dtrace_lock); 15906 } 15907 15908 return (0); 15909 } 15910 15911 static int 15912 dtrace_helper_validate(dtrace_helper_action_t *helper) 15913 { 15914 int err = 0, i; 15915 dtrace_difo_t *dp; 15916 15917 if ((dp = helper->dtha_predicate) != NULL) 15918 err += dtrace_difo_validate_helper(dp); 15919 15920 for (i = 0; i < helper->dtha_nactions; i++) 15921 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 15922 15923 return (err == 0); 15924 } 15925 15926 static int 15927 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep, 15928 dtrace_helpers_t *help) 15929 { 15930 dtrace_helper_action_t *helper, *last; 15931 dtrace_actdesc_t *act; 15932 dtrace_vstate_t *vstate; 15933 dtrace_predicate_t *pred; 15934 int count = 0, nactions = 0, i; 15935 15936 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 15937 return (EINVAL); 15938 15939 last = help->dthps_actions[which]; 15940 vstate = &help->dthps_vstate; 15941 15942 for (count = 0; last != NULL; last = last->dtha_next) { 15943 count++; 15944 if (last->dtha_next == NULL) 15945 break; 15946 } 15947 15948 /* 15949 * If we already have dtrace_helper_actions_max helper actions for this 15950 * helper action type, we'll refuse to add a new one. 15951 */ 15952 if (count >= dtrace_helper_actions_max) 15953 return (ENOSPC); 15954 15955 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 15956 helper->dtha_generation = help->dthps_generation; 15957 15958 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 15959 ASSERT(pred->dtp_difo != NULL); 15960 dtrace_difo_hold(pred->dtp_difo); 15961 helper->dtha_predicate = pred->dtp_difo; 15962 } 15963 15964 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 15965 if (act->dtad_kind != DTRACEACT_DIFEXPR) 15966 goto err; 15967 15968 if (act->dtad_difo == NULL) 15969 goto err; 15970 15971 nactions++; 15972 } 15973 15974 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 15975 (helper->dtha_nactions = nactions), KM_SLEEP); 15976 15977 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 15978 dtrace_difo_hold(act->dtad_difo); 15979 helper->dtha_actions[i++] = act->dtad_difo; 15980 } 15981 15982 if (!dtrace_helper_validate(helper)) 15983 goto err; 15984 15985 if (last == NULL) { 15986 help->dthps_actions[which] = helper; 15987 } else { 15988 last->dtha_next = helper; 15989 } 15990 15991 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 15992 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 15993 dtrace_helptrace_next = 0; 15994 } 15995 15996 return (0); 15997 err: 15998 dtrace_helper_action_destroy(helper, vstate); 15999 return (EINVAL); 16000 } 16001 16002 static void 16003 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 16004 dof_helper_t *dofhp) 16005 { 16006 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 16007 16008 mutex_enter(&dtrace_meta_lock); 16009 mutex_enter(&dtrace_lock); 16010 16011 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 16012 /* 16013 * If the dtrace module is loaded but not attached, or if 16014 * there aren't isn't a meta provider registered to deal with 16015 * these provider descriptions, we need to postpone creating 16016 * the actual providers until later. 
16017 */ 16018 16019 if (help->dthps_next == NULL && help->dthps_prev == NULL && 16020 dtrace_deferred_pid != help) { 16021 help->dthps_deferred = 1; 16022 help->dthps_pid = p->p_pid; 16023 help->dthps_next = dtrace_deferred_pid; 16024 help->dthps_prev = NULL; 16025 if (dtrace_deferred_pid != NULL) 16026 dtrace_deferred_pid->dthps_prev = help; 16027 dtrace_deferred_pid = help; 16028 } 16029 16030 mutex_exit(&dtrace_lock); 16031 16032 } else if (dofhp != NULL) { 16033 /* 16034 * If the dtrace module is loaded and we have a particular 16035 * helper provider description, pass that off to the 16036 * meta provider. 16037 */ 16038 16039 mutex_exit(&dtrace_lock); 16040 16041 dtrace_helper_provide(dofhp, p->p_pid); 16042 16043 } else { 16044 /* 16045 * Otherwise, just pass all the helper provider descriptions 16046 * off to the meta provider. 16047 */ 16048 16049 int i; 16050 mutex_exit(&dtrace_lock); 16051 16052 for (i = 0; i < help->dthps_nprovs; i++) { 16053 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 16054 p->p_pid); 16055 } 16056 } 16057 16058 mutex_exit(&dtrace_meta_lock); 16059 } 16060 16061 static int 16062 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen) 16063 { 16064 dtrace_helper_provider_t *hprov, **tmp_provs; 16065 uint_t tmp_maxprovs, i; 16066 16067 ASSERT(MUTEX_HELD(&dtrace_lock)); 16068 ASSERT(help != NULL); 16069 16070 /* 16071 * If we already have dtrace_helper_providers_max helper providers, 16072 * we're refuse to add a new one. 16073 */ 16074 if (help->dthps_nprovs >= dtrace_helper_providers_max) 16075 return (ENOSPC); 16076 16077 /* 16078 * Check to make sure this isn't a duplicate. 16079 */ 16080 for (i = 0; i < help->dthps_nprovs; i++) { 16081 if (dofhp->dofhp_addr == 16082 help->dthps_provs[i]->dthp_prov.dofhp_addr) 16083 return (EALREADY); 16084 } 16085 16086 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 16087 hprov->dthp_prov = *dofhp; 16088 hprov->dthp_ref = 1; 16089 hprov->dthp_generation = gen; 16090 16091 /* 16092 * Allocate a bigger table for helper providers if it's already full. 
16093 */ 16094 if (help->dthps_maxprovs == help->dthps_nprovs) { 16095 tmp_maxprovs = help->dthps_maxprovs; 16096 tmp_provs = help->dthps_provs; 16097 16098 if (help->dthps_maxprovs == 0) 16099 help->dthps_maxprovs = 2; 16100 else 16101 help->dthps_maxprovs *= 2; 16102 if (help->dthps_maxprovs > dtrace_helper_providers_max) 16103 help->dthps_maxprovs = dtrace_helper_providers_max; 16104 16105 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 16106 16107 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 16108 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 16109 16110 if (tmp_provs != NULL) { 16111 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 16112 sizeof (dtrace_helper_provider_t *)); 16113 kmem_free(tmp_provs, tmp_maxprovs * 16114 sizeof (dtrace_helper_provider_t *)); 16115 } 16116 } 16117 16118 help->dthps_provs[help->dthps_nprovs] = hprov; 16119 help->dthps_nprovs++; 16120 16121 return (0); 16122 } 16123 16124 static void 16125 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 16126 { 16127 mutex_enter(&dtrace_lock); 16128 16129 if (--hprov->dthp_ref == 0) { 16130 dof_hdr_t *dof; 16131 mutex_exit(&dtrace_lock); 16132 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 16133 dtrace_dof_destroy(dof); 16134 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 16135 } else { 16136 mutex_exit(&dtrace_lock); 16137 } 16138 } 16139 16140 static int 16141 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 16142 { 16143 uintptr_t daddr = (uintptr_t)dof; 16144 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 16145 dof_provider_t *provider; 16146 dof_probe_t *probe; 16147 uint8_t *arg; 16148 char *strtab, *typestr; 16149 dof_stridx_t typeidx; 16150 size_t typesz; 16151 uint_t nprobes, j, k; 16152 16153 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 16154 16155 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 16156 dtrace_dof_error(dof, "misaligned section offset"); 16157 return (-1); 16158 } 16159 16160 /* 16161 * The section needs to be large enough to contain the DOF provider 16162 * structure appropriate for the given version. 16163 */ 16164 if (sec->dofs_size < 16165 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
16166 offsetof(dof_provider_t, dofpv_prenoffs) : 16167 sizeof (dof_provider_t))) { 16168 dtrace_dof_error(dof, "provider section too small"); 16169 return (-1); 16170 } 16171 16172 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 16173 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 16174 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 16175 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 16176 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 16177 16178 if (str_sec == NULL || prb_sec == NULL || 16179 arg_sec == NULL || off_sec == NULL) 16180 return (-1); 16181 16182 enoff_sec = NULL; 16183 16184 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 16185 provider->dofpv_prenoffs != DOF_SECT_NONE && 16186 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 16187 provider->dofpv_prenoffs)) == NULL) 16188 return (-1); 16189 16190 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 16191 16192 if (provider->dofpv_name >= str_sec->dofs_size || 16193 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 16194 dtrace_dof_error(dof, "invalid provider name"); 16195 return (-1); 16196 } 16197 16198 if (prb_sec->dofs_entsize == 0 || 16199 prb_sec->dofs_entsize > prb_sec->dofs_size) { 16200 dtrace_dof_error(dof, "invalid entry size"); 16201 return (-1); 16202 } 16203 16204 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 16205 dtrace_dof_error(dof, "misaligned entry size"); 16206 return (-1); 16207 } 16208 16209 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 16210 dtrace_dof_error(dof, "invalid entry size"); 16211 return (-1); 16212 } 16213 16214 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 16215 dtrace_dof_error(dof, "misaligned section offset"); 16216 return (-1); 16217 } 16218 16219 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 16220 dtrace_dof_error(dof, "invalid entry size"); 16221 return (-1); 16222 } 16223 16224 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 16225 16226 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 16227 16228 /* 16229 * Take a pass through the probes to check for errors. 16230 */ 16231 for (j = 0; j < nprobes; j++) { 16232 probe = (dof_probe_t *)(uintptr_t)(daddr + 16233 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 16234 16235 if (probe->dofpr_func >= str_sec->dofs_size) { 16236 dtrace_dof_error(dof, "invalid function name"); 16237 return (-1); 16238 } 16239 16240 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 16241 dtrace_dof_error(dof, "function name too long"); 16242 /* 16243 * Keep going if the function name is too long. 16244 * Unlike provider and probe names, we cannot reasonably 16245 * impose restrictions on function names, since they're 16246 * a property of the code being instrumented. We will 16247 * skip this probe in dtrace_helper_provide_one(). 16248 */ 16249 } 16250 16251 if (probe->dofpr_name >= str_sec->dofs_size || 16252 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 16253 dtrace_dof_error(dof, "invalid probe name"); 16254 return (-1); 16255 } 16256 16257 /* 16258 * The offset count must not wrap the index, and the offsets 16259 * must also not overflow the section's data. 
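 *
 * (Editor's note: "a + b < a" below is the standard unsigned-overflow
 * test. Worked example with 32-bit fields: if dofpr_offidx is
 * 0xfffffff0 and dofpr_noffs is 0x20, their sum wraps to 0x10, which
 * is less than dofpr_offidx -- so the probe is rejected before the
 * scaled bounds comparison can be fooled by the wrapped value.)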
16260 */ 16261 if (probe->dofpr_offidx + probe->dofpr_noffs < 16262 probe->dofpr_offidx || 16263 (probe->dofpr_offidx + probe->dofpr_noffs) * 16264 off_sec->dofs_entsize > off_sec->dofs_size) { 16265 dtrace_dof_error(dof, "invalid probe offset"); 16266 return (-1); 16267 } 16268 16269 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 16270 /* 16271 * If there's no is-enabled offset section, make sure 16272 * there aren't any is-enabled offsets. Otherwise 16273 * perform the same checks as for probe offsets 16274 * (immediately above). 16275 */ 16276 if (enoff_sec == NULL) { 16277 if (probe->dofpr_enoffidx != 0 || 16278 probe->dofpr_nenoffs != 0) { 16279 dtrace_dof_error(dof, "is-enabled " 16280 "offsets with null section"); 16281 return (-1); 16282 } 16283 } else if (probe->dofpr_enoffidx + 16284 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 16285 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 16286 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 16287 dtrace_dof_error(dof, "invalid is-enabled " 16288 "offset"); 16289 return (-1); 16290 } 16291 16292 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 16293 dtrace_dof_error(dof, "zero probe and " 16294 "is-enabled offsets"); 16295 return (-1); 16296 } 16297 } else if (probe->dofpr_noffs == 0) { 16298 dtrace_dof_error(dof, "zero probe offsets"); 16299 return (-1); 16300 } 16301 16302 if (probe->dofpr_argidx + probe->dofpr_xargc < 16303 probe->dofpr_argidx || 16304 (probe->dofpr_argidx + probe->dofpr_xargc) * 16305 arg_sec->dofs_entsize > arg_sec->dofs_size) { 16306 dtrace_dof_error(dof, "invalid args"); 16307 return (-1); 16308 } 16309 16310 typeidx = probe->dofpr_nargv; 16311 typestr = strtab + probe->dofpr_nargv; 16312 for (k = 0; k < probe->dofpr_nargc; k++) { 16313 if (typeidx >= str_sec->dofs_size) { 16314 dtrace_dof_error(dof, "bad " 16315 "native argument type"); 16316 return (-1); 16317 } 16318 16319 typesz = strlen(typestr) + 1; 16320 if (typesz > DTRACE_ARGTYPELEN) { 16321 dtrace_dof_error(dof, "native " 16322 "argument type too long"); 16323 return (-1); 16324 } 16325 typeidx += typesz; 16326 typestr += typesz; 16327 } 16328 16329 typeidx = probe->dofpr_xargv; 16330 typestr = strtab + probe->dofpr_xargv; 16331 for (k = 0; k < probe->dofpr_xargc; k++) { 16332 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 16333 dtrace_dof_error(dof, "bad " 16334 "native argument index"); 16335 return (-1); 16336 } 16337 16338 if (typeidx >= str_sec->dofs_size) { 16339 dtrace_dof_error(dof, "bad " 16340 "translated argument type"); 16341 return (-1); 16342 } 16343 16344 typesz = strlen(typestr) + 1; 16345 if (typesz > DTRACE_ARGTYPELEN) { 16346 dtrace_dof_error(dof, "translated argument " 16347 "type too long"); 16348 return (-1); 16349 } 16350 16351 typeidx += typesz; 16352 typestr += typesz; 16353 } 16354 } 16355 16356 return (0); 16357 } 16358 16359 static int 16360 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p) 16361 { 16362 dtrace_helpers_t *help; 16363 dtrace_vstate_t *vstate; 16364 dtrace_enabling_t *enab = NULL; 16365 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 16366 uintptr_t daddr = (uintptr_t)dof; 16367 16368 ASSERT(MUTEX_HELD(&dtrace_lock)); 16369 16370 if ((help = p->p_dtrace_helpers) == NULL) 16371 help = dtrace_helpers_create(p); 16372 16373 vstate = &help->dthps_vstate; 16374 16375 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, dhp->dofhp_addr, 16376 dhp->dofhp_dof, B_FALSE)) != 0) { 16377 dtrace_dof_destroy(dof); 16378 return (rv); 16379 } 16380 16381 /* 16382 * Look 
for helper providers and validate their descriptions. 16383 */ 16384 for (i = 0; i < dof->dofh_secnum; i++) { 16385 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 16386 dof->dofh_secoff + i * dof->dofh_secsize); 16387 16388 if (sec->dofs_type != DOF_SECT_PROVIDER) 16389 continue; 16390 16391 if (dtrace_helper_provider_validate(dof, sec) != 0) { 16392 dtrace_enabling_destroy(enab); 16393 dtrace_dof_destroy(dof); 16394 return (-1); 16395 } 16396 16397 nprovs++; 16398 } 16399 16400 /* 16401 * Now we need to walk through the ECB descriptions in the enabling. 16402 */ 16403 for (i = 0; i < enab->dten_ndesc; i++) { 16404 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 16405 dtrace_probedesc_t *desc = &ep->dted_probe; 16406 16407 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 16408 continue; 16409 16410 if (strcmp(desc->dtpd_mod, "helper") != 0) 16411 continue; 16412 16413 if (strcmp(desc->dtpd_func, "ustack") != 0) 16414 continue; 16415 16416 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 16417 ep, help)) != 0) { 16418 /* 16419 * Adding this helper action failed -- we are now going 16420 * to rip out the entire generation and return failure. 16421 */ 16422 (void) dtrace_helper_destroygen(help, 16423 help->dthps_generation); 16424 dtrace_enabling_destroy(enab); 16425 dtrace_dof_destroy(dof); 16426 return (-1); 16427 } 16428 16429 nhelpers++; 16430 } 16431 16432 if (nhelpers < enab->dten_ndesc) 16433 dtrace_dof_error(dof, "unmatched helpers"); 16434 16435 gen = help->dthps_generation++; 16436 dtrace_enabling_destroy(enab); 16437 16438 if (nprovs > 0) { 16439 /* 16440 * Now that this is in-kernel, we change the sense of the 16441 * members: dofhp_dof denotes the in-kernel copy of the DOF 16442 * and dofhp_addr denotes the address at user-level. 16443 */ 16444 dhp->dofhp_addr = dhp->dofhp_dof; 16445 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 16446 16447 if (dtrace_helper_provider_add(dhp, help, gen) == 0) { 16448 mutex_exit(&dtrace_lock); 16449 dtrace_helper_provider_register(p, help, dhp); 16450 mutex_enter(&dtrace_lock); 16451 16452 destroy = 0; 16453 } 16454 } 16455 16456 if (destroy) 16457 dtrace_dof_destroy(dof); 16458 16459 return (gen); 16460 } 16461 16462 static dtrace_helpers_t * 16463 dtrace_helpers_create(proc_t *p) 16464 { 16465 dtrace_helpers_t *help; 16466 16467 ASSERT(MUTEX_HELD(&dtrace_lock)); 16468 ASSERT(p->p_dtrace_helpers == NULL); 16469 16470 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 16471 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 16472 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 16473 16474 p->p_dtrace_helpers = help; 16475 dtrace_helpers++; 16476 16477 return (help); 16478 } 16479 16480 #ifdef illumos 16481 static 16482 #endif 16483 void 16484 dtrace_helpers_destroy(proc_t *p) 16485 { 16486 dtrace_helpers_t *help; 16487 dtrace_vstate_t *vstate; 16488 #ifdef illumos 16489 proc_t *p = curproc; 16490 #endif 16491 int i; 16492 16493 mutex_enter(&dtrace_lock); 16494 16495 ASSERT(p->p_dtrace_helpers != NULL); 16496 ASSERT(dtrace_helpers > 0); 16497 16498 help = p->p_dtrace_helpers; 16499 vstate = &help->dthps_vstate; 16500 16501 /* 16502 * We're now going to lose the help from this process. 16503 */ 16504 p->p_dtrace_helpers = NULL; 16505 dtrace_sync(); 16506 16507 /* 16508 * Destroy the helper actions.
16509 */ 16510 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16511 dtrace_helper_action_t *h, *next; 16512 16513 for (h = help->dthps_actions[i]; h != NULL; h = next) { 16514 next = h->dtha_next; 16515 dtrace_helper_action_destroy(h, vstate); 16516 h = next; 16517 } 16518 } 16519 16520 mutex_exit(&dtrace_lock); 16521 16522 /* 16523 * Destroy the helper providers. 16524 */ 16525 if (help->dthps_maxprovs > 0) { 16526 mutex_enter(&dtrace_meta_lock); 16527 if (dtrace_meta_pid != NULL) { 16528 ASSERT(dtrace_deferred_pid == NULL); 16529 16530 for (i = 0; i < help->dthps_nprovs; i++) { 16531 dtrace_helper_provider_remove( 16532 &help->dthps_provs[i]->dthp_prov, p->p_pid); 16533 } 16534 } else { 16535 mutex_enter(&dtrace_lock); 16536 ASSERT(help->dthps_deferred == 0 || 16537 help->dthps_next != NULL || 16538 help->dthps_prev != NULL || 16539 help == dtrace_deferred_pid); 16540 16541 /* 16542 * Remove the helper from the deferred list. 16543 */ 16544 if (help->dthps_next != NULL) 16545 help->dthps_next->dthps_prev = help->dthps_prev; 16546 if (help->dthps_prev != NULL) 16547 help->dthps_prev->dthps_next = help->dthps_next; 16548 if (dtrace_deferred_pid == help) { 16549 dtrace_deferred_pid = help->dthps_next; 16550 ASSERT(help->dthps_prev == NULL); 16551 } 16552 16553 mutex_exit(&dtrace_lock); 16554 } 16555 16556 mutex_exit(&dtrace_meta_lock); 16557 16558 for (i = 0; i < help->dthps_nprovs; i++) { 16559 dtrace_helper_provider_destroy(help->dthps_provs[i]); 16560 } 16561 16562 kmem_free(help->dthps_provs, help->dthps_maxprovs * 16563 sizeof (dtrace_helper_provider_t *)); 16564 } 16565 16566 mutex_enter(&dtrace_lock); 16567 16568 dtrace_vstate_fini(&help->dthps_vstate); 16569 kmem_free(help->dthps_actions, 16570 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 16571 kmem_free(help, sizeof (dtrace_helpers_t)); 16572 16573 --dtrace_helpers; 16574 mutex_exit(&dtrace_lock); 16575 } 16576 16577 #ifdef illumos 16578 static 16579 #endif 16580 void 16581 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 16582 { 16583 dtrace_helpers_t *help, *newhelp; 16584 dtrace_helper_action_t *helper, *new, *last; 16585 dtrace_difo_t *dp; 16586 dtrace_vstate_t *vstate; 16587 int i, j, sz, hasprovs = 0; 16588 16589 mutex_enter(&dtrace_lock); 16590 ASSERT(from->p_dtrace_helpers != NULL); 16591 ASSERT(dtrace_helpers > 0); 16592 16593 help = from->p_dtrace_helpers; 16594 newhelp = dtrace_helpers_create(to); 16595 ASSERT(to->p_dtrace_helpers != NULL); 16596 16597 newhelp->dthps_generation = help->dthps_generation; 16598 vstate = &newhelp->dthps_vstate; 16599 16600 /* 16601 * Duplicate the helper actions. 
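 *
 * (Editor's note: actions and predicates are deep-copied via
 * dtrace_difo_duplicate() so that the child owns private DIF objects,
 * whereas the helper providers duplicated further below are shared by
 * reference -- note the dthp_ref increment there rather than a copy.)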
16602 */ 16603 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16604 if ((helper = help->dthps_actions[i]) == NULL) 16605 continue; 16606 16607 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 16608 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 16609 KM_SLEEP); 16610 new->dtha_generation = helper->dtha_generation; 16611 16612 if ((dp = helper->dtha_predicate) != NULL) { 16613 dp = dtrace_difo_duplicate(dp, vstate); 16614 new->dtha_predicate = dp; 16615 } 16616 16617 new->dtha_nactions = helper->dtha_nactions; 16618 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 16619 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 16620 16621 for (j = 0; j < new->dtha_nactions; j++) { 16622 dtrace_difo_t *dp = helper->dtha_actions[j]; 16623 16624 ASSERT(dp != NULL); 16625 dp = dtrace_difo_duplicate(dp, vstate); 16626 new->dtha_actions[j] = dp; 16627 } 16628 16629 if (last != NULL) { 16630 last->dtha_next = new; 16631 } else { 16632 newhelp->dthps_actions[i] = new; 16633 } 16634 16635 last = new; 16636 } 16637 } 16638 16639 /* 16640 * Duplicate the helper providers and register them with the 16641 * DTrace framework. 16642 */ 16643 if (help->dthps_nprovs > 0) { 16644 newhelp->dthps_nprovs = help->dthps_nprovs; 16645 newhelp->dthps_maxprovs = help->dthps_nprovs; 16646 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 16647 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 16648 for (i = 0; i < newhelp->dthps_nprovs; i++) { 16649 newhelp->dthps_provs[i] = help->dthps_provs[i]; 16650 newhelp->dthps_provs[i]->dthp_ref++; 16651 } 16652 16653 hasprovs = 1; 16654 } 16655 16656 mutex_exit(&dtrace_lock); 16657 16658 if (hasprovs) 16659 dtrace_helper_provider_register(to, newhelp, NULL); 16660 } 16661 16662 /* 16663 * DTrace Hook Functions 16664 */ 16665 static void 16666 dtrace_module_loaded(modctl_t *ctl) 16667 { 16668 dtrace_provider_t *prv; 16669 16670 mutex_enter(&dtrace_provider_lock); 16671 #ifdef illumos 16672 mutex_enter(&mod_lock); 16673 #endif 16674 16675 #ifdef illumos 16676 ASSERT(ctl->mod_busy); 16677 #endif 16678 16679 /* 16680 * We're going to call each provider's per-module provide operation 16681 * specifying only this module. 16682 */ 16683 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 16684 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 16685 16686 #ifdef illumos 16687 mutex_exit(&mod_lock); 16688 #endif 16689 mutex_exit(&dtrace_provider_lock); 16690 16691 /* 16692 * If we have any retained enablings, we need to match against them. 16693 * Enabling probes requires that cpu_lock be held, and we cannot hold 16694 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 16695 * module. (In particular, this happens when loading scheduling 16696 * classes.) So if we have any retained enablings, we need to dispatch 16697 * our task queue to do the match for us. 16698 */ 16699 mutex_enter(&dtrace_lock); 16700 16701 if (dtrace_retained == NULL) { 16702 mutex_exit(&dtrace_lock); 16703 return; 16704 } 16705 16706 (void) taskq_dispatch(dtrace_taskq, 16707 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 16708 16709 mutex_exit(&dtrace_lock); 16710 16711 /* 16712 * And now, for a little heuristic sleaze: in general, we want to 16713 * match modules as soon as they load. However, we cannot guarantee 16714 * this, because it would lead us to the lock ordering violation 16715 * outlined above.
The common case, of course, is that cpu_lock is 16716 * _not_ held -- so we delay here for a clock tick, hoping that that's 16717 * long enough for the task queue to do its work. If it's not, it's 16718 * not a serious problem -- it just means that the module that we 16719 * just loaded may not be immediately instrumentable. 16720 */ 16721 delay(1); 16722 } 16723 16724 static void 16725 #ifdef illumos 16726 dtrace_module_unloaded(modctl_t *ctl) 16727 #else 16728 dtrace_module_unloaded(modctl_t *ctl, int *error) 16729 #endif 16730 { 16731 dtrace_probe_t template, *probe, *first, *next; 16732 dtrace_provider_t *prov; 16733 #ifndef illumos 16734 char modname[DTRACE_MODNAMELEN]; 16735 size_t len; 16736 #endif 16737 16738 #ifdef illumos 16739 template.dtpr_mod = ctl->mod_modname; 16740 #else 16741 /* Handle the fact that ctl->filename may end in ".ko". */ 16742 strlcpy(modname, ctl->filename, sizeof(modname)); 16743 len = strlen(ctl->filename); 16744 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 16745 modname[len - 3] = '\0'; 16746 template.dtpr_mod = modname; 16747 #endif 16748 16749 mutex_enter(&dtrace_provider_lock); 16750 #ifdef illumos 16751 mutex_enter(&mod_lock); 16752 #endif 16753 mutex_enter(&dtrace_lock); 16754 16755 #ifndef illumos 16756 if (ctl->nenabled > 0) { 16757 /* Don't allow unloads if a probe is enabled. */ 16758 mutex_exit(&dtrace_provider_lock); 16759 mutex_exit(&dtrace_lock); 16760 *error = -1; 16761 printf( 16762 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 16763 return; 16764 } 16765 #endif 16766 16767 if (dtrace_bymod == NULL) { 16768 /* 16769 * The DTrace module is loaded (obviously) but not attached; 16770 * we don't have any work to do. 16771 */ 16772 mutex_exit(&dtrace_provider_lock); 16773 #ifdef illumos 16774 mutex_exit(&mod_lock); 16775 #endif 16776 mutex_exit(&dtrace_lock); 16777 return; 16778 } 16779 16780 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 16781 probe != NULL; probe = probe->dtpr_nextmod) { 16782 if (probe->dtpr_ecb != NULL) { 16783 mutex_exit(&dtrace_provider_lock); 16784 #ifdef illumos 16785 mutex_exit(&mod_lock); 16786 #endif 16787 mutex_exit(&dtrace_lock); 16788 16789 /* 16790 * This shouldn't _actually_ be possible -- we're 16791 * unloading a module that has an enabled probe in it. 16792 * (It's normally up to the provider to make sure that 16793 * this can't happen.) However, because dtps_enable() 16794 * doesn't have a failure mode, there can be an 16795 * enable/unload race. Upshot: we don't want to 16796 * assert, but we're not going to disable the 16797 * probe, either. 
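 *
 * (Editor's note: the FreeBSD-specific ctl->nenabled check near the
 * top of this function side-steps this race on FreeBSD by failing the
 * kldunload outright while any probe in the module is enabled, so
 * this path is chiefly a concern for the illumos build.)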
16798 */ 16799 if (dtrace_err_verbose) { 16800 #ifdef illumos 16801 cmn_err(CE_WARN, "unloaded module '%s' had " 16802 "enabled probes", ctl->mod_modname); 16803 #else 16804 cmn_err(CE_WARN, "unloaded module '%s' had " 16805 "enabled probes", modname); 16806 #endif 16807 } 16808 16809 return; 16810 } 16811 } 16812 16813 probe = first; 16814 16815 for (first = NULL; probe != NULL; probe = next) { 16816 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 16817 16818 dtrace_probes[probe->dtpr_id - 1] = NULL; 16819 16820 next = probe->dtpr_nextmod; 16821 dtrace_hash_remove(dtrace_bymod, probe); 16822 dtrace_hash_remove(dtrace_byfunc, probe); 16823 dtrace_hash_remove(dtrace_byname, probe); 16824 16825 if (first == NULL) { 16826 first = probe; 16827 probe->dtpr_nextmod = NULL; 16828 } else { 16829 probe->dtpr_nextmod = first; 16830 first = probe; 16831 } 16832 } 16833 16834 /* 16835 * We've removed all of the module's probes from the hash chains and 16836 * from the probe array. Now issue a dtrace_sync() to be sure that 16837 * everyone has cleared out from any probe array processing. 16838 */ 16839 dtrace_sync(); 16840 16841 for (probe = first; probe != NULL; probe = first) { 16842 first = probe->dtpr_nextmod; 16843 prov = probe->dtpr_provider; 16844 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 16845 probe->dtpr_arg); 16846 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 16847 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 16848 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 16849 #ifdef illumos 16850 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 16851 #else 16852 free_unr(dtrace_arena, probe->dtpr_id); 16853 #endif 16854 kmem_free(probe, sizeof (dtrace_probe_t)); 16855 } 16856 16857 mutex_exit(&dtrace_lock); 16858 #ifdef illumos 16859 mutex_exit(&mod_lock); 16860 #endif 16861 mutex_exit(&dtrace_provider_lock); 16862 } 16863 16864 #ifndef illumos 16865 static void 16866 dtrace_kld_load(void *arg __unused, linker_file_t lf) 16867 { 16868 16869 dtrace_module_loaded(lf); 16870 } 16871 16872 static void 16873 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 16874 { 16875 16876 if (*error != 0) 16877 /* We already have an error, so don't do anything. */ 16878 return; 16879 dtrace_module_unloaded(lf, error); 16880 } 16881 #endif 16882 16883 #ifdef illumos 16884 static void 16885 dtrace_suspend(void) 16886 { 16887 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 16888 } 16889 16890 static void 16891 dtrace_resume(void) 16892 { 16893 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 16894 } 16895 #endif 16896 16897 static int 16898 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 16899 { 16900 ASSERT(MUTEX_HELD(&cpu_lock)); 16901 mutex_enter(&dtrace_lock); 16902 16903 switch (what) { 16904 case CPU_CONFIG: { 16905 dtrace_state_t *state; 16906 dtrace_optval_t *opt, rs, c; 16907 16908 /* 16909 * For now, we only allocate a new buffer for anonymous state. 16910 */ 16911 if ((state = dtrace_anon.dta_state) == NULL) 16912 break; 16913 16914 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 16915 break; 16916 16917 opt = state->dts_options; 16918 c = opt[DTRACEOPT_CPU]; 16919 16920 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 16921 break; 16922 16923 /* 16924 * Regardless of what the actual policy is, we're going to 16925 * temporarily set our resize policy to be manual. We're 16926 * also going to temporarily set our CPU option to denote 16927 * the newly configured CPU. 
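 *
 * (Editor's note: dtrace_state_buffers() allocates per-CPU buffers
 * subject to DTRACEOPT_CPU, so pinning that option to the incoming
 * CPU below allocates for that CPU alone; the manual resize policy
 * turns an allocation failure into an explicit error rather than
 * triggering the auto-shrink path. Both options are restored after
 * the call.)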
16928 */ 16929 rs = opt[DTRACEOPT_BUFRESIZE]; 16930 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 16931 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 16932 16933 (void) dtrace_state_buffers(state); 16934 16935 opt[DTRACEOPT_BUFRESIZE] = rs; 16936 opt[DTRACEOPT_CPU] = c; 16937 16938 break; 16939 } 16940 16941 case CPU_UNCONFIG: 16942 /* 16943 * We don't free the buffer in the CPU_UNCONFIG case. (The 16944 * buffer will be freed when the consumer exits.) 16945 */ 16946 break; 16947 16948 default: 16949 break; 16950 } 16951 16952 mutex_exit(&dtrace_lock); 16953 return (0); 16954 } 16955 16956 #ifdef illumos 16957 static void 16958 dtrace_cpu_setup_initial(processorid_t cpu) 16959 { 16960 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 16961 } 16962 #endif 16963 16964 static void 16965 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 16966 { 16967 if (dtrace_toxranges >= dtrace_toxranges_max) { 16968 int osize, nsize; 16969 dtrace_toxrange_t *range; 16970 16971 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16972 16973 if (osize == 0) { 16974 ASSERT(dtrace_toxrange == NULL); 16975 ASSERT(dtrace_toxranges_max == 0); 16976 dtrace_toxranges_max = 1; 16977 } else { 16978 dtrace_toxranges_max <<= 1; 16979 } 16980 16981 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16982 range = kmem_zalloc(nsize, KM_SLEEP); 16983 16984 if (dtrace_toxrange != NULL) { 16985 ASSERT(osize != 0); 16986 bcopy(dtrace_toxrange, range, osize); 16987 kmem_free(dtrace_toxrange, osize); 16988 } 16989 16990 dtrace_toxrange = range; 16991 } 16992 16993 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 16994 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 16995 16996 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 16997 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 16998 dtrace_toxranges++; 16999 } 17000 17001 static void 17002 dtrace_getf_barrier() 17003 { 17004 #ifdef illumos 17005 /* 17006 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 17007 * that contain calls to getf(), this routine will be called on every 17008 * closef() before either the underlying vnode is released or the 17009 * file_t itself is freed. By the time we are here, it is essential 17010 * that the file_t can no longer be accessed from a call to getf() 17011 * in probe context -- that assures that a dtrace_sync() can be used 17012 * to clear out any enablings referring to the old structures. 
17013 */ 17014 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 17015 kcred->cr_zone->zone_dtrace_getf != 0) 17016 dtrace_sync(); 17017 #endif 17018 } 17019 17020 /* 17021 * DTrace Driver Cookbook Functions 17022 */ 17023 #ifdef illumos 17024 /*ARGSUSED*/ 17025 static int 17026 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 17027 { 17028 dtrace_provider_id_t id; 17029 dtrace_state_t *state = NULL; 17030 dtrace_enabling_t *enab; 17031 17032 mutex_enter(&cpu_lock); 17033 mutex_enter(&dtrace_provider_lock); 17034 mutex_enter(&dtrace_lock); 17035 17036 if (ddi_soft_state_init(&dtrace_softstate, 17037 sizeof (dtrace_state_t), 0) != 0) { 17038 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 17039 mutex_exit(&cpu_lock); 17040 mutex_exit(&dtrace_provider_lock); 17041 mutex_exit(&dtrace_lock); 17042 return (DDI_FAILURE); 17043 } 17044 17045 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 17046 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 17047 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 17048 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 17049 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 17050 ddi_remove_minor_node(devi, NULL); 17051 ddi_soft_state_fini(&dtrace_softstate); 17052 mutex_exit(&cpu_lock); 17053 mutex_exit(&dtrace_provider_lock); 17054 mutex_exit(&dtrace_lock); 17055 return (DDI_FAILURE); 17056 } 17057 17058 ddi_report_dev(devi); 17059 dtrace_devi = devi; 17060 17061 dtrace_modload = dtrace_module_loaded; 17062 dtrace_modunload = dtrace_module_unloaded; 17063 dtrace_cpu_init = dtrace_cpu_setup_initial; 17064 dtrace_helpers_cleanup = dtrace_helpers_destroy; 17065 dtrace_helpers_fork = dtrace_helpers_duplicate; 17066 dtrace_cpustart_init = dtrace_suspend; 17067 dtrace_cpustart_fini = dtrace_resume; 17068 dtrace_debugger_init = dtrace_suspend; 17069 dtrace_debugger_fini = dtrace_resume; 17070 17071 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 17072 17073 ASSERT(MUTEX_HELD(&cpu_lock)); 17074 17075 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 17076 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 17077 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 17078 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 17079 VM_SLEEP | VMC_IDENTIFIER); 17080 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 17081 1, INT_MAX, 0); 17082 17083 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 17084 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 17085 NULL, NULL, NULL, NULL, NULL, 0); 17086 17087 ASSERT(MUTEX_HELD(&cpu_lock)); 17088 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 17089 offsetof(dtrace_probe_t, dtpr_nextmod), 17090 offsetof(dtrace_probe_t, dtpr_prevmod)); 17091 17092 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 17093 offsetof(dtrace_probe_t, dtpr_nextfunc), 17094 offsetof(dtrace_probe_t, dtpr_prevfunc)); 17095 17096 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 17097 offsetof(dtrace_probe_t, dtpr_nextname), 17098 offsetof(dtrace_probe_t, dtpr_prevname)); 17099 17100 if (dtrace_retain_max < 1) { 17101 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 17102 "setting to 1", dtrace_retain_max); 17103 dtrace_retain_max = 1; 17104 } 17105 17106 /* 17107 * Now discover our toxic ranges. 
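 *
 * (Editor's note: a toxic range is a span of kernel address space
 * that probe context must never load from -- memory-mapped device
 * registers and the like, where even a read may have side effects.
 * dtrace_toxic_ranges(), invoked just below, reports each such span
 * via the dtrace_toxrange_add() callback defined above, and
 * probe-context loads are checked against the accumulated table.)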
17108 */ 17109 dtrace_toxic_ranges(dtrace_toxrange_add); 17110 17111 /* 17112 * Before we register ourselves as a provider to our own framework, 17113 * we would like to assert that dtrace_provider is NULL -- but that's 17114 * not true if we were loaded as a dependency of a DTrace provider. 17115 * Once we've registered, we can assert that dtrace_provider is our 17116 * pseudo provider. 17117 */ 17118 (void) dtrace_register("dtrace", &dtrace_provider_attr, 17119 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 17120 17121 ASSERT(dtrace_provider != NULL); 17122 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 17123 17124 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 17125 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 17126 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 17127 dtrace_provider, NULL, NULL, "END", 0, NULL); 17128 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 17129 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 17130 17131 dtrace_anon_property(); 17132 mutex_exit(&cpu_lock); 17133 17134 /* 17135 * If there are already providers, we must ask them to provide their 17136 * probes, and then match any anonymous enabling against them. Note 17137 * that there should be no other retained enablings at this time: 17138 * the only retained enablings at this time should be the anonymous 17139 * enabling. 17140 */ 17141 if (dtrace_anon.dta_enabling != NULL) { 17142 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 17143 17144 dtrace_enabling_provide(NULL); 17145 state = dtrace_anon.dta_state; 17146 17147 /* 17148 * We couldn't hold cpu_lock across the above call to 17149 * dtrace_enabling_provide(), but we must hold it to actually 17150 * enable the probes. We have to drop all of our locks, pick 17151 * up cpu_lock, and regain our locks before matching the 17152 * retained anonymous enabling. 17153 */ 17154 mutex_exit(&dtrace_lock); 17155 mutex_exit(&dtrace_provider_lock); 17156 17157 mutex_enter(&cpu_lock); 17158 mutex_enter(&dtrace_provider_lock); 17159 mutex_enter(&dtrace_lock); 17160 17161 if ((enab = dtrace_anon.dta_enabling) != NULL) 17162 (void) dtrace_enabling_match(enab, NULL); 17163 17164 mutex_exit(&cpu_lock); 17165 } 17166 17167 mutex_exit(&dtrace_lock); 17168 mutex_exit(&dtrace_provider_lock); 17169 17170 if (state != NULL) { 17171 /* 17172 * If we created any anonymous state, set it going now. 17173 */ 17174 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 17175 } 17176 17177 return (DDI_SUCCESS); 17178 } 17179 #endif /* illumos */ 17180 17181 #ifndef illumos 17182 static void dtrace_dtr(void *); 17183 #endif 17184 17185 /*ARGSUSED*/ 17186 static int 17187 #ifdef illumos 17188 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 17189 #else 17190 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 17191 #endif 17192 { 17193 dtrace_state_t *state; 17194 uint32_t priv; 17195 uid_t uid; 17196 zoneid_t zoneid; 17197 17198 #ifdef illumos 17199 if (getminor(*devp) == DTRACEMNRN_HELPER) 17200 return (0); 17201 17202 /* 17203 * If this wasn't an open with the "helper" minor, then it must be 17204 * the "dtrace" minor. 17205 */ 17206 if (getminor(*devp) == DTRACEMNRN_DTRACE) 17207 return (ENXIO); 17208 #else 17209 cred_t *cred_p = NULL; 17210 cred_p = dev->si_cred; 17211 17212 /* 17213 * If no DTRACE_PRIV_* bits are set in the credential, then the 17214 * caller lacks sufficient permission to do anything with DTrace. 
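 *
 * (Editor's sketch of the consumer side, with hypothetical error
 * handling; libdtrace performs the equivalent open on a consumer's
 * behalf:
 *
 *	int fd = open("/dev/dtrace/dtrace", O_RDWR);
 *	if (fd == -1 && errno == EACCES)
 *		errx(1, "credential carries no DTRACE_PRIV_* bits");
 * )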
17215 */ 17216 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 17217 if (priv == DTRACE_PRIV_NONE) { 17218 #endif 17219 17220 return (EACCES); 17221 } 17222 17223 /* 17224 * Ask all providers to provide all their probes. 17225 */ 17226 mutex_enter(&dtrace_provider_lock); 17227 dtrace_probe_provide(NULL, NULL); 17228 mutex_exit(&dtrace_provider_lock); 17229 17230 mutex_enter(&cpu_lock); 17231 mutex_enter(&dtrace_lock); 17232 dtrace_opens++; 17233 dtrace_membar_producer(); 17234 17235 #ifdef illumos 17236 /* 17237 * If the kernel debugger is active (that is, if the kernel debugger 17238 * modified text in some way), we won't allow the open. 17239 */ 17240 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 17241 dtrace_opens--; 17242 mutex_exit(&cpu_lock); 17243 mutex_exit(&dtrace_lock); 17244 return (EBUSY); 17245 } 17246 17247 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) { 17248 /* 17249 * If DTrace helper tracing is enabled, we need to allocate the 17250 * trace buffer and initialize the values. 17251 */ 17252 dtrace_helptrace_buffer = 17253 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 17254 dtrace_helptrace_next = 0; 17255 dtrace_helptrace_wrapped = 0; 17256 dtrace_helptrace_enable = 0; 17257 } 17258 17259 state = dtrace_state_create(devp, cred_p); 17260 #else 17261 state = dtrace_state_create(dev, NULL); 17262 devfs_set_cdevpriv(state, dtrace_dtr); 17263 #endif 17264 17265 mutex_exit(&cpu_lock); 17266 17267 if (state == NULL) { 17268 #ifdef illumos 17269 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17270 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17271 #else 17272 --dtrace_opens; 17273 #endif 17274 mutex_exit(&dtrace_lock); 17275 return (EAGAIN); 17276 } 17277 17278 mutex_exit(&dtrace_lock); 17279 17280 return (0); 17281 } 17282 17283 /*ARGSUSED*/ 17284 #ifdef illumos 17285 static int 17286 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 17287 #else 17288 static void 17289 dtrace_dtr(void *data) 17290 #endif 17291 { 17292 #ifdef illumos 17293 minor_t minor = getminor(dev); 17294 dtrace_state_t *state; 17295 #endif 17296 dtrace_helptrace_t *buf = NULL; 17297 17298 #ifdef illumos 17299 if (minor == DTRACEMNRN_HELPER) 17300 return (0); 17301 17302 state = ddi_get_soft_state(dtrace_softstate, minor); 17303 #else 17304 dtrace_state_t *state = data; 17305 #endif 17306 17307 mutex_enter(&cpu_lock); 17308 mutex_enter(&dtrace_lock); 17309 17310 #ifdef illumos 17311 if (state->dts_anon) 17312 #else 17313 if (state != NULL && state->dts_anon) 17314 #endif 17315 { 17316 /* 17317 * There is anonymous state. Destroy that first. 17318 */ 17319 ASSERT(dtrace_anon.dta_state == NULL); 17320 dtrace_state_destroy(state->dts_anon); 17321 } 17322 17323 if (dtrace_helptrace_disable) { 17324 /* 17325 * If we have been told to disable helper tracing, set the 17326 * buffer to NULL before calling into dtrace_state_destroy(); 17327 * we take advantage of its dtrace_sync() to know that no 17328 * CPU is in probe context with enabled helper tracing 17329 * after it returns. 17330 */ 17331 buf = dtrace_helptrace_buffer; 17332 dtrace_helptrace_buffer = NULL; 17333 } 17334 17335 #ifdef illumos 17336 dtrace_state_destroy(state); 17337 #else 17338 if (state != NULL) { 17339 dtrace_state_destroy(state); 17340 kmem_free(state, 0); 17341 } 17342 #endif 17343 ASSERT(dtrace_opens > 0); 17344 17345 #ifdef illumos 17346 /* 17347 * Only relinquish control of the kernel debugger interface when there 17348 * are no consumers and no anonymous enablings. 
17349 */ 17350 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17351 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17352 #else 17353 --dtrace_opens; 17354 #endif 17355 17356 if (buf != NULL) { 17357 kmem_free(buf, dtrace_helptrace_bufsize); 17358 dtrace_helptrace_disable = 0; 17359 } 17360 17361 mutex_exit(&dtrace_lock); 17362 mutex_exit(&cpu_lock); 17363 17364 #ifdef illumos 17365 return (0); 17366 #endif 17367 } 17368 17369 #ifdef illumos 17370 /*ARGSUSED*/ 17371 static int 17372 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 17373 { 17374 int rval; 17375 dof_helper_t help, *dhp = NULL; 17376 17377 switch (cmd) { 17378 case DTRACEHIOC_ADDDOF: 17379 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 17380 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 17381 return (EFAULT); 17382 } 17383 17384 dhp = &help; 17385 arg = (intptr_t)help.dofhp_dof; 17386 /*FALLTHROUGH*/ 17387 17388 case DTRACEHIOC_ADD: { 17389 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 17390 17391 if (dof == NULL) 17392 return (rval); 17393 17394 mutex_enter(&dtrace_lock); 17395 17396 /* 17397 * dtrace_helper_slurp() takes responsibility for the dof -- 17398 * it may free it now or it may save it and free it later. 17399 */ 17400 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 17401 *rv = rval; 17402 rval = 0; 17403 } else { 17404 rval = EINVAL; 17405 } 17406 17407 mutex_exit(&dtrace_lock); 17408 return (rval); 17409 } 17410 17411 case DTRACEHIOC_REMOVE: { 17412 mutex_enter(&dtrace_lock); 17413 rval = dtrace_helper_destroygen(NULL, arg); 17414 mutex_exit(&dtrace_lock); 17415 17416 return (rval); 17417 } 17418 17419 default: 17420 break; 17421 } 17422 17423 return (ENOTTY); 17424 } 17425 17426 /*ARGSUSED*/ 17427 static int 17428 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 17429 { 17430 minor_t minor = getminor(dev); 17431 dtrace_state_t *state; 17432 int rval; 17433 17434 if (minor == DTRACEMNRN_HELPER) 17435 return (dtrace_ioctl_helper(cmd, arg, rv)); 17436 17437 state = ddi_get_soft_state(dtrace_softstate, minor); 17438 17439 if (state->dts_anon) { 17440 ASSERT(dtrace_anon.dta_state == NULL); 17441 state = state->dts_anon; 17442 } 17443 17444 switch (cmd) { 17445 case DTRACEIOC_PROVIDER: { 17446 dtrace_providerdesc_t pvd; 17447 dtrace_provider_t *pvp; 17448 17449 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 17450 return (EFAULT); 17451 17452 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 17453 mutex_enter(&dtrace_provider_lock); 17454 17455 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 17456 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 17457 break; 17458 } 17459 17460 mutex_exit(&dtrace_provider_lock); 17461 17462 if (pvp == NULL) 17463 return (ESRCH); 17464 17465 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 17466 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 17467 17468 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 17469 return (EFAULT); 17470 17471 return (0); 17472 } 17473 17474 case DTRACEIOC_EPROBE: { 17475 dtrace_eprobedesc_t epdesc; 17476 dtrace_ecb_t *ecb; 17477 dtrace_action_t *act; 17478 void *buf; 17479 size_t size; 17480 uintptr_t dest; 17481 int nrecs; 17482 17483 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 17484 return (EFAULT); 17485 17486 mutex_enter(&dtrace_lock); 17487 17488 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 17489 mutex_exit(&dtrace_lock); 17490 return (EINVAL); 17491 } 17492 17493 if (ecb->dte_probe == NULL) { 17494 
mutex_exit(&dtrace_lock); 17495 return (EINVAL); 17496 } 17497 17498 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 17499 epdesc.dtepd_uarg = ecb->dte_uarg; 17500 epdesc.dtepd_size = ecb->dte_size; 17501 17502 nrecs = epdesc.dtepd_nrecs; 17503 epdesc.dtepd_nrecs = 0; 17504 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17505 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17506 continue; 17507 17508 epdesc.dtepd_nrecs++; 17509 } 17510 17511 /* 17512 * Now that we have the size, we need to allocate a temporary 17513 * buffer in which to store the complete description. We need 17514 * the temporary buffer to be able to drop dtrace_lock() 17515 * across the copyout(), below. 17516 */ 17517 size = sizeof (dtrace_eprobedesc_t) + 17518 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 17519 17520 buf = kmem_alloc(size, KM_SLEEP); 17521 dest = (uintptr_t)buf; 17522 17523 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 17524 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 17525 17526 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17527 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17528 continue; 17529 17530 if (nrecs-- == 0) 17531 break; 17532 17533 bcopy(&act->dta_rec, (void *)dest, 17534 sizeof (dtrace_recdesc_t)); 17535 dest += sizeof (dtrace_recdesc_t); 17536 } 17537 17538 mutex_exit(&dtrace_lock); 17539 17540 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17541 kmem_free(buf, size); 17542 return (EFAULT); 17543 } 17544 17545 kmem_free(buf, size); 17546 return (0); 17547 } 17548 17549 case DTRACEIOC_AGGDESC: { 17550 dtrace_aggdesc_t aggdesc; 17551 dtrace_action_t *act; 17552 dtrace_aggregation_t *agg; 17553 int nrecs; 17554 uint32_t offs; 17555 dtrace_recdesc_t *lrec; 17556 void *buf; 17557 size_t size; 17558 uintptr_t dest; 17559 17560 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 17561 return (EFAULT); 17562 17563 mutex_enter(&dtrace_lock); 17564 17565 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 17566 mutex_exit(&dtrace_lock); 17567 return (EINVAL); 17568 } 17569 17570 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 17571 17572 nrecs = aggdesc.dtagd_nrecs; 17573 aggdesc.dtagd_nrecs = 0; 17574 17575 offs = agg->dtag_base; 17576 lrec = &agg->dtag_action.dta_rec; 17577 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 17578 17579 for (act = agg->dtag_first; ; act = act->dta_next) { 17580 ASSERT(act->dta_intuple || 17581 DTRACEACT_ISAGG(act->dta_kind)); 17582 17583 /* 17584 * If this action has a record size of zero, it 17585 * denotes an argument to the aggregating action. 17586 * Because the presence of this record doesn't (or 17587 * shouldn't) affect the way the data is interpreted, 17588 * we don't copy it out to save user-level the 17589 * confusion of dealing with a zero-length record. 17590 */ 17591 if (act->dta_rec.dtrd_size == 0) { 17592 ASSERT(agg->dtag_hasarg); 17593 continue; 17594 } 17595 17596 aggdesc.dtagd_nrecs++; 17597 17598 if (act == &agg->dtag_action) 17599 break; 17600 } 17601 17602 /* 17603 * Now that we have the size, we need to allocate a temporary 17604 * buffer in which to store the complete description. We need 17605 * the temporary buffer to be able to drop dtrace_lock() 17606 * across the copyout(), below. 
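 *
 * (Editor's note: the same pattern appears in the EPROBE case above --
 * copyout() may sleep on a user page fault, so the description is
 * staged into a kmem buffer sized under dtrace_lock, the lock is
 * dropped, and only then is the copyout() attempted.)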
17607 */ 17608 size = sizeof (dtrace_aggdesc_t) + 17609 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 17610 17611 buf = kmem_alloc(size, KM_SLEEP); 17612 dest = (uintptr_t)buf; 17613 17614 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 17615 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 17616 17617 for (act = agg->dtag_first; ; act = act->dta_next) { 17618 dtrace_recdesc_t rec = act->dta_rec; 17619 17620 /* 17621 * See the comment in the above loop for why we pass 17622 * over zero-length records. 17623 */ 17624 if (rec.dtrd_size == 0) { 17625 ASSERT(agg->dtag_hasarg); 17626 continue; 17627 } 17628 17629 if (nrecs-- == 0) 17630 break; 17631 17632 rec.dtrd_offset -= offs; 17633 bcopy(&rec, (void *)dest, sizeof (rec)); 17634 dest += sizeof (dtrace_recdesc_t); 17635 17636 if (act == &agg->dtag_action) 17637 break; 17638 } 17639 17640 mutex_exit(&dtrace_lock); 17641 17642 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17643 kmem_free(buf, size); 17644 return (EFAULT); 17645 } 17646 17647 kmem_free(buf, size); 17648 return (0); 17649 } 17650 17651 case DTRACEIOC_ENABLE: { 17652 dof_hdr_t *dof; 17653 dtrace_enabling_t *enab = NULL; 17654 dtrace_vstate_t *vstate; 17655 int err = 0; 17656 17657 *rv = 0; 17658 17659 /* 17660 * If a NULL argument has been passed, we take this as our 17661 * cue to reevaluate our enablings. 17662 */ 17663 if (arg == NULL) { 17664 dtrace_enabling_matchall(); 17665 17666 return (0); 17667 } 17668 17669 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 17670 return (rval); 17671 17672 mutex_enter(&cpu_lock); 17673 mutex_enter(&dtrace_lock); 17674 vstate = &state->dts_vstate; 17675 17676 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 17677 mutex_exit(&dtrace_lock); 17678 mutex_exit(&cpu_lock); 17679 dtrace_dof_destroy(dof); 17680 return (EBUSY); 17681 } 17682 17683 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 17684 mutex_exit(&dtrace_lock); 17685 mutex_exit(&cpu_lock); 17686 dtrace_dof_destroy(dof); 17687 return (EINVAL); 17688 } 17689 17690 if ((rval = dtrace_dof_options(dof, state)) != 0) { 17691 dtrace_enabling_destroy(enab); 17692 mutex_exit(&dtrace_lock); 17693 mutex_exit(&cpu_lock); 17694 dtrace_dof_destroy(dof); 17695 return (rval); 17696 } 17697 17698 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 17699 err = dtrace_enabling_retain(enab); 17700 } else { 17701 dtrace_enabling_destroy(enab); 17702 } 17703 17704 mutex_exit(&cpu_lock); 17705 mutex_exit(&dtrace_lock); 17706 dtrace_dof_destroy(dof); 17707 17708 return (err); 17709 } 17710 17711 case DTRACEIOC_REPLICATE: { 17712 dtrace_repldesc_t desc; 17713 dtrace_probedesc_t *match = &desc.dtrpd_match; 17714 dtrace_probedesc_t *create = &desc.dtrpd_create; 17715 int err; 17716 17717 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17718 return (EFAULT); 17719 17720 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17721 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17722 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17723 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17724 17725 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17726 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17727 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17728 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17729 17730 mutex_enter(&dtrace_lock); 17731 err = dtrace_enabling_replicate(state, match, create); 17732 mutex_exit(&dtrace_lock); 17733 17734 return (err); 17735 } 17736 17737 case DTRACEIOC_PROBEMATCH: 17738 case DTRACEIOC_PROBES: { 17739 dtrace_probe_t *probe = 
NULL; 17740 dtrace_probedesc_t desc; 17741 dtrace_probekey_t pkey; 17742 dtrace_id_t i; 17743 int m = 0; 17744 uint32_t priv; 17745 uid_t uid; 17746 zoneid_t zoneid; 17747 17748 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17749 return (EFAULT); 17750 17751 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17752 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17753 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17754 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17755 17756 /* 17757 * Before we attempt to match this probe, we want to give 17758 * all providers the opportunity to provide it. 17759 */ 17760 if (desc.dtpd_id == DTRACE_IDNONE) { 17761 mutex_enter(&dtrace_provider_lock); 17762 dtrace_probe_provide(&desc, NULL); 17763 mutex_exit(&dtrace_provider_lock); 17764 desc.dtpd_id++; 17765 } 17766 17767 if (cmd == DTRACEIOC_PROBEMATCH) { 17768 dtrace_probekey(&desc, &pkey); 17769 pkey.dtpk_id = DTRACE_IDNONE; 17770 } 17771 17772 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 17773 17774 mutex_enter(&dtrace_lock); 17775 17776 if (cmd == DTRACEIOC_PROBEMATCH) { 17777 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17778 if ((probe = dtrace_probes[i - 1]) != NULL && 17779 (m = dtrace_match_probe(probe, &pkey, 17780 priv, uid, zoneid)) != 0) 17781 break; 17782 } 17783 17784 if (m < 0) { 17785 mutex_exit(&dtrace_lock); 17786 return (EINVAL); 17787 } 17788 17789 } else { 17790 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17791 if ((probe = dtrace_probes[i - 1]) != NULL && 17792 dtrace_match_priv(probe, priv, uid, zoneid)) 17793 break; 17794 } 17795 } 17796 17797 if (probe == NULL) { 17798 mutex_exit(&dtrace_lock); 17799 return (ESRCH); 17800 } 17801 17802 dtrace_probe_description(probe, &desc); 17803 mutex_exit(&dtrace_lock); 17804 17805 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17806 return (EFAULT); 17807 17808 return (0); 17809 } 17810 17811 case DTRACEIOC_PROBEARG: { 17812 dtrace_argdesc_t desc; 17813 dtrace_probe_t *probe; 17814 dtrace_provider_t *prov; 17815 17816 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17817 return (EFAULT); 17818 17819 if (desc.dtargd_id == DTRACE_IDNONE) 17820 return (EINVAL); 17821 17822 if (desc.dtargd_ndx == DTRACE_ARGNONE) 17823 return (EINVAL); 17824 17825 mutex_enter(&dtrace_provider_lock); 17826 mutex_enter(&mod_lock); 17827 mutex_enter(&dtrace_lock); 17828 17829 if (desc.dtargd_id > dtrace_nprobes) { 17830 mutex_exit(&dtrace_lock); 17831 mutex_exit(&mod_lock); 17832 mutex_exit(&dtrace_provider_lock); 17833 return (EINVAL); 17834 } 17835 17836 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 17837 mutex_exit(&dtrace_lock); 17838 mutex_exit(&mod_lock); 17839 mutex_exit(&dtrace_provider_lock); 17840 return (EINVAL); 17841 } 17842 17843 mutex_exit(&dtrace_lock); 17844 17845 prov = probe->dtpr_provider; 17846 17847 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 17848 /* 17849 * There isn't any typed information for this probe. 17850 * Set the argument number to DTRACE_ARGNONE. 
17851 */ 17852 desc.dtargd_ndx = DTRACE_ARGNONE; 17853 } else { 17854 desc.dtargd_native[0] = '\0'; 17855 desc.dtargd_xlate[0] = '\0'; 17856 desc.dtargd_mapping = desc.dtargd_ndx; 17857 17858 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 17859 probe->dtpr_id, probe->dtpr_arg, &desc); 17860 } 17861 17862 mutex_exit(&mod_lock); 17863 mutex_exit(&dtrace_provider_lock); 17864 17865 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17866 return (EFAULT); 17867 17868 return (0); 17869 } 17870 17871 case DTRACEIOC_GO: { 17872 processorid_t cpuid; 17873 rval = dtrace_state_go(state, &cpuid); 17874 17875 if (rval != 0) 17876 return (rval); 17877 17878 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17879 return (EFAULT); 17880 17881 return (0); 17882 } 17883 17884 case DTRACEIOC_STOP: { 17885 processorid_t cpuid; 17886 17887 mutex_enter(&dtrace_lock); 17888 rval = dtrace_state_stop(state, &cpuid); 17889 mutex_exit(&dtrace_lock); 17890 17891 if (rval != 0) 17892 return (rval); 17893 17894 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17895 return (EFAULT); 17896 17897 return (0); 17898 } 17899 17900 case DTRACEIOC_DOFGET: { 17901 dof_hdr_t hdr, *dof; 17902 uint64_t len; 17903 17904 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 17905 return (EFAULT); 17906 17907 mutex_enter(&dtrace_lock); 17908 dof = dtrace_dof_create(state); 17909 mutex_exit(&dtrace_lock); 17910 17911 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 17912 rval = copyout(dof, (void *)arg, len); 17913 dtrace_dof_destroy(dof); 17914 17915 return (rval == 0 ? 0 : EFAULT); 17916 } 17917 17918 case DTRACEIOC_AGGSNAP: 17919 case DTRACEIOC_BUFSNAP: { 17920 dtrace_bufdesc_t desc; 17921 caddr_t cached; 17922 dtrace_buffer_t *buf; 17923 17924 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17925 return (EFAULT); 17926 17927 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 17928 return (EINVAL); 17929 17930 mutex_enter(&dtrace_lock); 17931 17932 if (cmd == DTRACEIOC_BUFSNAP) { 17933 buf = &state->dts_buffer[desc.dtbd_cpu]; 17934 } else { 17935 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 17936 } 17937 17938 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 17939 size_t sz = buf->dtb_offset; 17940 17941 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 17942 mutex_exit(&dtrace_lock); 17943 return (EBUSY); 17944 } 17945 17946 /* 17947 * If this buffer has already been consumed, we're 17948 * going to indicate that there's nothing left here 17949 * to consume. 17950 */ 17951 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 17952 mutex_exit(&dtrace_lock); 17953 17954 desc.dtbd_size = 0; 17955 desc.dtbd_drops = 0; 17956 desc.dtbd_errors = 0; 17957 desc.dtbd_oldest = 0; 17958 sz = sizeof (desc); 17959 17960 if (copyout(&desc, (void *)arg, sz) != 0) 17961 return (EFAULT); 17962 17963 return (0); 17964 } 17965 17966 /* 17967 * If this is a ring buffer that has wrapped, we want 17968 * to copy the whole thing out. 
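 *
 * (Editor's note: dtrace_buffer_polish() linearizes the wrapped ring
 * by zero-filling the dead space between the current offset and the
 * oldest record; dtbd_oldest, set from dtb_xamot_offset below, then
 * tells the consumer where in the dtb_size-byte snapshot the oldest
 * data begins.)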
17969 */ 17970 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 17971 dtrace_buffer_polish(buf); 17972 sz = buf->dtb_size; 17973 } 17974 17975 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 17976 mutex_exit(&dtrace_lock); 17977 return (EFAULT); 17978 } 17979 17980 desc.dtbd_size = sz; 17981 desc.dtbd_drops = buf->dtb_drops; 17982 desc.dtbd_errors = buf->dtb_errors; 17983 desc.dtbd_oldest = buf->dtb_xamot_offset; 17984 desc.dtbd_timestamp = dtrace_gethrtime(); 17985 17986 mutex_exit(&dtrace_lock); 17987 17988 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17989 return (EFAULT); 17990 17991 buf->dtb_flags |= DTRACEBUF_CONSUMED; 17992 17993 return (0); 17994 } 17995 17996 if (buf->dtb_tomax == NULL) { 17997 ASSERT(buf->dtb_xamot == NULL); 17998 mutex_exit(&dtrace_lock); 17999 return (ENOENT); 18000 } 18001 18002 cached = buf->dtb_tomax; 18003 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 18004 18005 dtrace_xcall(desc.dtbd_cpu, 18006 (dtrace_xcall_t)dtrace_buffer_switch, buf); 18007 18008 state->dts_errors += buf->dtb_xamot_errors; 18009 18010 /* 18011 * If the buffers did not actually switch, then the cross call 18012 * did not take place -- presumably because the given CPU is 18013 * not in the ready set. If this is the case, we'll return 18014 * ENOENT. 18015 */ 18016 if (buf->dtb_tomax == cached) { 18017 ASSERT(buf->dtb_xamot != cached); 18018 mutex_exit(&dtrace_lock); 18019 return (ENOENT); 18020 } 18021 18022 ASSERT(cached == buf->dtb_xamot); 18023 18024 /* 18025 * We have our snapshot; now copy it out. 18026 */ 18027 if (copyout(buf->dtb_xamot, desc.dtbd_data, 18028 buf->dtb_xamot_offset) != 0) { 18029 mutex_exit(&dtrace_lock); 18030 return (EFAULT); 18031 } 18032 18033 desc.dtbd_size = buf->dtb_xamot_offset; 18034 desc.dtbd_drops = buf->dtb_xamot_drops; 18035 desc.dtbd_errors = buf->dtb_xamot_errors; 18036 desc.dtbd_oldest = 0; 18037 desc.dtbd_timestamp = buf->dtb_switched; 18038 18039 mutex_exit(&dtrace_lock); 18040 18041 /* 18042 * Finally, copy out the buffer description. 18043 */ 18044 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 18045 return (EFAULT); 18046 18047 return (0); 18048 } 18049 18050 case DTRACEIOC_CONF: { 18051 dtrace_conf_t conf; 18052 18053 bzero(&conf, sizeof (conf)); 18054 conf.dtc_difversion = DIF_VERSION; 18055 conf.dtc_difintregs = DIF_DIR_NREGS; 18056 conf.dtc_diftupregs = DIF_DTR_NREGS; 18057 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 18058 18059 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 18060 return (EFAULT); 18061 18062 return (0); 18063 } 18064 18065 case DTRACEIOC_STATUS: { 18066 dtrace_status_t stat; 18067 dtrace_dstate_t *dstate; 18068 int i, j; 18069 uint64_t nerrs; 18070 18071 /* 18072 * See the comment in dtrace_state_deadman() for the reason 18073 * for setting dts_laststatus to INT64_MAX before setting 18074 * it to the correct value. 
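 *
 * In short (editor's paraphrase): a 64-bit store need not be atomic,
 * so the deadman could otherwise observe a torn dts_laststatus that
 * looks stale. Storing INT64_MAX first, with the producer barrier
 * below ordering the two stores, ensures that any value the deadman
 * reads is never less than the true one.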
18075 */ 18076 state->dts_laststatus = INT64_MAX; 18077 dtrace_membar_producer(); 18078 state->dts_laststatus = dtrace_gethrtime(); 18079 18080 bzero(&stat, sizeof (stat)); 18081 18082 mutex_enter(&dtrace_lock); 18083 18084 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 18085 mutex_exit(&dtrace_lock); 18086 return (ENOENT); 18087 } 18088 18089 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 18090 stat.dtst_exiting = 1; 18091 18092 nerrs = state->dts_errors; 18093 dstate = &state->dts_vstate.dtvs_dynvars; 18094 18095 for (i = 0; i < NCPU; i++) { 18096 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 18097 18098 stat.dtst_dyndrops += dcpu->dtdsc_drops; 18099 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 18100 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 18101 18102 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 18103 stat.dtst_filled++; 18104 18105 nerrs += state->dts_buffer[i].dtb_errors; 18106 18107 for (j = 0; j < state->dts_nspeculations; j++) { 18108 dtrace_speculation_t *spec; 18109 dtrace_buffer_t *buf; 18110 18111 spec = &state->dts_speculations[j]; 18112 buf = &spec->dtsp_buffer[i]; 18113 stat.dtst_specdrops += buf->dtb_xamot_drops; 18114 } 18115 } 18116 18117 stat.dtst_specdrops_busy = state->dts_speculations_busy; 18118 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 18119 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 18120 stat.dtst_dblerrors = state->dts_dblerrors; 18121 stat.dtst_killed = 18122 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 18123 stat.dtst_errors = nerrs; 18124 18125 mutex_exit(&dtrace_lock); 18126 18127 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 18128 return (EFAULT); 18129 18130 return (0); 18131 } 18132 18133 case DTRACEIOC_FORMAT: { 18134 dtrace_fmtdesc_t fmt; 18135 char *str; 18136 int len; 18137 18138 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 18139 return (EFAULT); 18140 18141 mutex_enter(&dtrace_lock); 18142 18143 if (fmt.dtfd_format == 0 || 18144 fmt.dtfd_format > state->dts_nformats) { 18145 mutex_exit(&dtrace_lock); 18146 return (EINVAL); 18147 } 18148 18149 /* 18150 * Format strings are allocated contiguously and they are 18151 * never freed; if a format index is less than the number 18152 * of formats, we can assert that the format map is non-NULL 18153 * and that the format for the specified index is non-NULL. 
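 *
 * (Editor's note: the handler below also implements the usual
 * two-step sizing protocol -- when dtfd_length is too small, the
 * descriptor is copied back with dtfd_length set to the required
 * size and no string data, and the consumer is expected to retry
 * with a larger buffer.)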

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}
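
/*
 * The DTRACEIOC_FORMAT case above implies a two-step handshake on the
 * consumer side: a first call discovers the required length, and a second
 * call with an adequately sized buffer retrieves the string.  Sketched
 * hypothetically (error handling omitted; not libdtrace's actual code):
 *
 *	dtrace_fmtdesc_t fmt = { 0 };
 *
 *	fmt.dtfd_format = ndx;			   // one-based format index
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);  // sets fmt.dtfd_length
 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);  // copies out the string
 */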

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	ASSERT(dtrace_getf == 0);
	ASSERT(dtrace_closef == NULL);

	mutex_exit(&cpu_lock);

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif
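
/*
 * Under the protocol described above, a task dispatched via dtrace_taskq
 * bails out if it finds that the framework has detached.  Sketched
 * hypothetically (dtrace_some_task is illustrative, not a real function;
 * the attachment check via dtrace_devi is an assumption):
 *
 *	static void
 *	dtrace_some_task(void *arg)
 *	{
 *		mutex_enter(&dtrace_lock);
 *		if (dtrace_devi == NULL) {
 *			mutex_exit(&dtrace_lock);
 *			return;		// detached; nothing to do
 *		}
 *		// ... perform the deferred work ...
 *		mutex_exit(&dtrace_lock);
 *	}
 */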

#ifdef illumos
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#ifdef illumos
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
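
/*
 * For reference: on FreeBSD the framework is brought in through the module
 * glue above rather than an illumos-style attach, so a session is typically
 * a matter of (illustrative shell transcript, not part of this file):
 *
 *	# kldload dtraceall
 *	# dtrace -n 'BEGIN { trace("hello"); exit(0); }'
 *
 * dtrace_load() runs at SI_SUB_DTRACE time, before anonymous state is
 * grabbed at SI_SUB_DTRACE_ANON.
 */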