/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008, Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_PMC_H_
#define	_SYS_PMC_H_

#include <dev/hwpmc/pmc_events.h>
#include <sys/proc.h>
#include <sys/counter.h>
#include <machine/pmc_mdep.h>
#include <machine/profile.h>
#ifdef _KERNEL
#include <sys/epoch.h>
#include <ck_queue.h>
#endif

#define	PMC_MODULE_NAME		"hwpmc"
#define	PMC_NAME_MAX		64 /* HW counter name size */
#define	PMC_CLASS_MAX		8  /* max #classes of PMCs per-system */

/*
 * Kernel<->userland API version number [MMmmpppp]
 *
 * Major numbers are to be incremented when an incompatible change to
 * the ABI occurs that older clients will not be able to handle.
 *
 * Minor numbers are incremented when a backwards compatible change
 * occurs that allows older correct programs to run unchanged.  For
 * example, when support for a new PMC type is added.
 *
 * The patch version is incremented for every bug fix.
 */
#define	PMC_VERSION_MAJOR	0x0A
#define	PMC_VERSION_MINOR	0x00
#define	PMC_VERSION_PATCH	0x0000

#define	PMC_VERSION		(PMC_VERSION_MAJOR << 24 |		\
	PMC_VERSION_MINOR << 16 | PMC_VERSION_PATCH)

#define	PMC_CPUID_LEN	64
/* cpu model name for pmu lookup */
extern char pmc_cpuid[PMC_CPUID_LEN];

/*
 * Kinds of CPUs known.
 *
 * We keep track of CPU variants that need to be distinguished in
 * some way for PMC operations.  CPU names are grouped by manufacturer
 * and numbered sparsely in order to minimize changes to the ABI involved
 * when new CPUs are added.
 *
 * Please keep the pmc(3) manual page in sync with this list.
 */
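
/*
 * Illustrative sketch (not part of this header): __PMC_CPUS() below is an
 * X-macro table.  'enum pmc_cputype' further down redefines __PMC_CPU() so
 * that expanding the table yields enumerators such as
 *
 *	PMC_CPU_AMD_K8 = 0x01,
 *	PMC_CPU_INTEL_CORE = 0x87,
 *	...
 *
 * The class, state, mode, disposition, capability and operation lists later
 * in this file follow the same pattern.
 */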
#define	__PMC_CPUS()							\
	__PMC_CPU(AMD_K8, 0x01, "AMD K8")				\
	__PMC_CPU(INTEL_CORE, 0x87, "Intel Core Solo/Duo")		\
	__PMC_CPU(INTEL_CORE2, 0x88, "Intel Core2")			\
	__PMC_CPU(INTEL_CORE2EXTREME, 0x89, "Intel Core2 Extreme")	\
	__PMC_CPU(INTEL_ATOM, 0x8A, "Intel Atom")			\
	__PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7")			\
	__PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere")		\
	__PMC_CPU(INTEL_SANDYBRIDGE, 0x8D, "Intel Sandy Bridge")	\
	__PMC_CPU(INTEL_IVYBRIDGE, 0x8E, "Intel Ivy Bridge")		\
	__PMC_CPU(INTEL_SANDYBRIDGE_XEON, 0x8F, "Intel Sandy Bridge Xeon") \
	__PMC_CPU(INTEL_IVYBRIDGE_XEON, 0x90, "Intel Ivy Bridge Xeon")	\
	__PMC_CPU(INTEL_HASWELL, 0x91, "Intel Haswell")			\
	__PMC_CPU(INTEL_ATOM_SILVERMONT, 0x92, "Intel Atom Silvermont")	\
	__PMC_CPU(INTEL_NEHALEM_EX, 0x93, "Intel Nehalem Xeon 7500")	\
	__PMC_CPU(INTEL_WESTMERE_EX, 0x94, "Intel Westmere Xeon E7")	\
	__PMC_CPU(INTEL_HASWELL_XEON, 0x95, "Intel Haswell Xeon E5 v3")	\
	__PMC_CPU(INTEL_BROADWELL, 0x96, "Intel Broadwell")		\
	__PMC_CPU(INTEL_BROADWELL_XEON, 0x97, "Intel Broadwell Xeon")	\
	__PMC_CPU(INTEL_SKYLAKE, 0x98, "Intel Skylake")			\
	__PMC_CPU(INTEL_SKYLAKE_XEON, 0x99, "Intel Skylake Xeon")	\
	__PMC_CPU(INTEL_ATOM_GOLDMONT, 0x9A, "Intel Atom Goldmont")	\
	__PMC_CPU(INTEL_ICELAKE, 0x9B, "Intel Icelake")			\
	__PMC_CPU(INTEL_ICELAKE_XEON, 0x9C, "Intel Icelake Xeon")	\
	__PMC_CPU(INTEL_ALDERLAKE, 0x9D, "Intel Alderlake")		\
	__PMC_CPU(INTEL_ATOM_GOLDMONT_P, 0x9E, "Intel Atom Goldmont Plus") \
	__PMC_CPU(INTEL_ATOM_TREMONT, 0x9F, "Intel Atom Tremont")	\
	__PMC_CPU(INTEL_EMERALD_RAPIDS, 0xA0, "Intel Emerald Rapids")	\
	__PMC_CPU(INTEL_ALDERLAKEN, 0xA1, "Intel AlderlakeN")		\
	__PMC_CPU(INTEL_GRANITE_RAPIDS, 0xA2, "Intel Granite Rapids")	\
	__PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale")			\
	__PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450")			\
	__PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core")			\
	__PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970")			\
	__PMC_CPU(PPC_POWER8, 0x390, "IBM POWER8")			\
	__PMC_CPU(GENERIC, 0x400, "Generic")				\
	__PMC_CPU(ARMV7_CORTEX_A5, 0x500, "ARMv7 Cortex A5")		\
	__PMC_CPU(ARMV7_CORTEX_A7, 0x501, "ARMv7 Cortex A7")		\
	__PMC_CPU(ARMV7_CORTEX_A8, 0x502, "ARMv7 Cortex A8")		\
	__PMC_CPU(ARMV7_CORTEX_A9, 0x503, "ARMv7 Cortex A9")		\
	__PMC_CPU(ARMV7_CORTEX_A15, 0x504, "ARMv7 Cortex A15")		\
	__PMC_CPU(ARMV7_CORTEX_A17, 0x505, "ARMv7 Cortex A17")		\
	__PMC_CPU(ARMV8_CORTEX_A53, 0x600, "ARMv8 Cortex A53")		\
	__PMC_CPU(ARMV8_CORTEX_A57, 0x601, "ARMv8 Cortex A57")		\
	__PMC_CPU(ARMV8_CORTEX_A76, 0x602, "ARMv8 Cortex A76")

enum pmc_cputype {
#undef	__PMC_CPU
#define	__PMC_CPU(S,V,D)	PMC_CPU_##S = V,
	__PMC_CPUS()
};

#define	PMC_CPU_FIRST	PMC_CPU_AMD_K8
#define	PMC_CPU_LAST	PMC_CPU_ARMV8_CORTEX_A76

/*
 * Classes of PMCs
 */
#define	__PMC_CLASSES()							\
	__PMC_CLASS(TSC,	0x00,	"CPU Timestamp counter")	\
	__PMC_CLASS(K8,		0x02,	"AMD K8 performance counters")	\
	__PMC_CLASS(IAF,	0x06,	"Intel Core2/Atom, fixed function") \
	__PMC_CLASS(IAP,	0x07,	"Intel Core...Atom, programmable") \
	__PMC_CLASS(UCF,	0x08,	"Intel Uncore fixed function")	\
	__PMC_CLASS(UCP,	0x09,	"Intel Uncore programmable")	\
	__PMC_CLASS(XSCALE,	0x0A,	"Intel XScale counters")	\
	__PMC_CLASS(PPC7450,	0x0D,	"Motorola MPC7450 class")	\
	__PMC_CLASS(PPC970,	0x0E,	"IBM PowerPC 970 class")	\
	__PMC_CLASS(SOFT,	0x0F,	"Software events")		\
	__PMC_CLASS(ARMV7,	0x10,	"ARMv7")			\
	__PMC_CLASS(ARMV8,	0x11,	"ARMv8")			\
	__PMC_CLASS(E500,	0x13,	"Freescale e500 class")		\
	__PMC_CLASS(POWER8,	0x15,	"IBM POWER8 class")		\
	__PMC_CLASS(DMC620_PMU_CD2, 0x16, "ARM DMC620 Memory Controller PMU CLKDIV2") \
	__PMC_CLASS(DMC620_PMU_C, 0x17, "ARM DMC620 Memory Controller PMU CLK") \
	__PMC_CLASS(CMN600_PMU, 0x18, "Arm CoreLink CMN600 Coherent Mesh Network PMU")

enum pmc_class {
#undef	__PMC_CLASS
#define	__PMC_CLASS(S,V,D)	PMC_CLASS_##S = V,
	__PMC_CLASSES()
};

#define	PMC_CLASS_FIRST	PMC_CLASS_TSC
#define	PMC_CLASS_LAST	PMC_CLASS_CMN600_PMU

/*
 * A PMC can be in the following states:
 *
 * Hardware states:
 *   DISABLED	-- administratively prohibited from being used.
 *   FREE	-- HW available for use
 * Software states:
 *   ALLOCATED	-- allocated
 *   STOPPED	-- allocated, but not counting events
 *   RUNNING	-- allocated, and in operation; 'pm_runcount'
 *		   holds the number of CPUs using this PMC at
 *		   a given instant
 *   DELETED	-- being destroyed
 */

#define	__PMC_HWSTATES()						\
	__PMC_STATE(DISABLED)						\
	__PMC_STATE(FREE)

#define	__PMC_SWSTATES()						\
	__PMC_STATE(ALLOCATED)						\
	__PMC_STATE(STOPPED)						\
	__PMC_STATE(RUNNING)						\
	__PMC_STATE(DELETED)

#define	__PMC_STATES()							\
	__PMC_HWSTATES()						\
	__PMC_SWSTATES()

enum pmc_state {
#undef	__PMC_STATE
#define	__PMC_STATE(S)	PMC_STATE_##S,
	__PMC_STATES()
	__PMC_STATE(MAX)
};

#define	PMC_STATE_FIRST	PMC_STATE_DISABLED
#define	PMC_STATE_LAST	PMC_STATE_DELETED

/*
 * An allocated PMC may be used as a 'global' counter or as a
 * 'thread-private' one.  Each such mode of use can be in either
 * statistical sampling mode or in counting mode.  Thus a PMC in use
 * is in one of the following four modes:
 *
 * SS i.e., SYSTEM STATISTICAL  -- system-wide statistical profiling
 * SC i.e., SYSTEM COUNTER      -- system-wide counting mode
 * TS i.e., THREAD STATISTICAL  -- thread virtual, statistical profiling
 * TC i.e., THREAD COUNTER      -- thread virtual, counting mode
 *
 * Statistical profiling modes rely on the PMC periodically delivering
 * an interrupt to the CPU (when the configured number of events have
 * been measured), so the PMC must have the ability to generate
 * interrupts.
 *
 * In counting modes, the PMC counts its configured events, with the
 * value of the PMC being read whenever needed by its owner process.
 *
 * The thread specific modes "virtualize" the PMCs -- the PMCs appear
 * to be thread private and count events only when the profiled thread
 * actually executes on the CPU.
 *
 * The system-wide "global" modes keep the PMCs running all the time
 * and are used to measure the behaviour of the whole system.
 */
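
/*
 * Illustrative sketch (not part of this header): the four modes decompose
 * along two independent axes, which the PMC_IS_*_MODE() predicates defined
 * below test directly:
 *
 *	PMC_IS_SYSTEM_MODE(PMC_MODE_SS)   -> true  (system-wide scope)
 *	PMC_IS_SAMPLING_MODE(PMC_MODE_SS) -> true  (statistical sampling)
 *	PMC_IS_VIRTUAL_MODE(PMC_MODE_TC)  -> true  (thread-virtual scope)
 *	PMC_IS_COUNTING_MODE(PMC_MODE_TC) -> true  (counting)
 */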

#define	__PMC_MODES()							\
	__PMC_MODE(SS,	0)						\
	__PMC_MODE(SC,	1)						\
	__PMC_MODE(TS,	2)						\
	__PMC_MODE(TC,	3)

enum pmc_mode {
#undef	__PMC_MODE
#define	__PMC_MODE(M,N)	PMC_MODE_##M = N,
	__PMC_MODES()
};

#define	PMC_MODE_FIRST	PMC_MODE_SS
#define	PMC_MODE_LAST	PMC_MODE_TC

#define	PMC_IS_COUNTING_MODE(mode)				\
	((mode) == PMC_MODE_SC || (mode) == PMC_MODE_TC)
#define	PMC_IS_SYSTEM_MODE(mode)				\
	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_SC)
#define	PMC_IS_SAMPLING_MODE(mode)				\
	((mode) == PMC_MODE_SS || (mode) == PMC_MODE_TS)
#define	PMC_IS_VIRTUAL_MODE(mode)				\
	((mode) == PMC_MODE_TS || (mode) == PMC_MODE_TC)

/*
 * PMC row disposition
 */

#define	__PMC_DISPOSITIONS(N)					\
	__PMC_DISP(STANDALONE)	/* global/disabled counters */	\
	__PMC_DISP(FREE)	/* free/available */		\
	__PMC_DISP(THREAD)	/* thread-virtual PMCs */	\
	__PMC_DISP(UNKNOWN)	/* sentinel */

enum pmc_disp {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	PMC_DISP_##D ,
	__PMC_DISPOSITIONS()
};

#define	PMC_DISP_FIRST	PMC_DISP_STANDALONE
#define	PMC_DISP_LAST	PMC_DISP_THREAD

/*
 * Counter capabilities
 *
 * __PMC_CAPS(NAME, VALUE, DESCRIPTION)
 */

#define	__PMC_CAPS()							\
	__PMC_CAP(INTERRUPT,	0, "generate interrupts")		\
	__PMC_CAP(USER,		1, "count user-mode events")		\
	__PMC_CAP(SYSTEM,	2, "count system-mode events")		\
	__PMC_CAP(EDGE,		3, "do edge detection of events")	\
	__PMC_CAP(THRESHOLD,	4, "ignore events below a threshold")	\
	__PMC_CAP(READ,		5, "read PMC counter")			\
	__PMC_CAP(WRITE,	6, "reprogram PMC counter")		\
	__PMC_CAP(INVERT,	7, "invert comparison sense")		\
	__PMC_CAP(QUALIFIER,	8, "further qualify monitored events")	\
	__PMC_CAP(PRECISE,	9, "perform precise sampling")		\
	__PMC_CAP(TAGGING,	10, "tag upstream events")		\
	__PMC_CAP(CASCADE,	11, "cascade counters")			\
	__PMC_CAP(SYSWIDE,	12, "system wide counter")		\
	__PMC_CAP(DOMWIDE,	13, "NUMA domain wide counter")

enum pmc_caps
{
#undef	__PMC_CAP
#define	__PMC_CAP(NAME, VALUE, DESCR)	PMC_CAP_##NAME = (1 << VALUE) ,
	__PMC_CAPS()
};

#define	PMC_CAP_FIRST		PMC_CAP_INTERRUPT
#define	PMC_CAP_LAST		PMC_CAP_DOMWIDE

/*
 * PMC Event Numbers
 *
 * These are generated from the definitions in "dev/hwpmc/pmc_events.h".
 */

enum pmc_event {
#undef	__PMC_EV
#undef	__PMC_EV_BLOCK
#define	__PMC_EV_BLOCK(C,V)	PMC_EV_ ## C ## __BLOCK_START = (V) - 1 ,
#define	__PMC_EV(C,N)	PMC_EV_ ## C ## _ ## N ,
	__PMC_EVENTS()
};

/*
 * PMC SYSCALL INTERFACE
 */

/*
 * "PMC_OPS" -- these are the commands recognized by the kernel
 * module, and are used when performing a system call from userland.
 */
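
/*
 * Illustrative sketch (not part of this header): each operation is paired
 * with one of the 'struct pmc_op_*' argument structures defined further
 * below and is handed to the hwpmc(4) system call as an (opcode, argument)
 * pair; the pmc(3) library wraps these calls.  For example, reading a
 * previously allocated PMC:
 *
 *	struct pmc_op_pmcrw rw;
 *
 *	rw.pm_pmcid = pmcid;		-- id returned by OP PMCALLOCATE
 *	rw.pm_flags = PMC_F_OLDVALUE;	-- ask for the current value
 *	-- issue (PMC_OP_PMCRW, &rw) and read back rw.pm_value
 */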
#define	__PMC_OPS()							\
	__PMC_OP(CONFIGURELOG, "Set log file")				\
	__PMC_OP(FLUSHLOG, "Flush log file")				\
	__PMC_OP(GETCPUINFO, "Get system CPU information")		\
	__PMC_OP(GETDRIVERSTATS, "Get driver statistics")		\
	__PMC_OP(GETMODULEVERSION, "Get module version")		\
	__PMC_OP(GETPMCINFO, "Get per-cpu PMC information")		\
	__PMC_OP(PMCADMIN, "Set PMC state")				\
	__PMC_OP(PMCALLOCATE, "Allocate and configure a PMC")		\
	__PMC_OP(PMCATTACH, "Attach a PMC to a process")		\
	__PMC_OP(PMCDETACH, "Detach a PMC from a process")		\
	__PMC_OP(PMCGETMSR, "Get a PMC's hardware address")		\
	__PMC_OP(PMCRELEASE, "Release a PMC")				\
	__PMC_OP(PMCRW, "Read/Set a PMC")				\
	__PMC_OP(PMCSETCOUNT, "Set initial count/sampling rate")	\
	__PMC_OP(PMCSTART, "Start a PMC")				\
	__PMC_OP(PMCSTOP, "Stop a PMC")					\
	__PMC_OP(WRITELOG, "Write a cookie to the log file")		\
	__PMC_OP(CLOSELOG, "Close log file")				\
	__PMC_OP(GETDYNEVENTINFO, "Get dynamic events list")

enum pmc_ops {
#undef	__PMC_OP
#define	__PMC_OP(N, D)	PMC_OP_##N,
	__PMC_OPS()
};

/*
 * Flags used in operations on PMCs.
 */

#define	PMC_F_UNUSED1		0x00000001 /* unused */
#define	PMC_F_DESCENDANTS	0x00000002 /* OP ALLOCATE track descendants */
#define	PMC_F_LOG_PROCCSW	0x00000004 /* OP ALLOCATE track ctx switches */
#define	PMC_F_LOG_PROCEXIT	0x00000008 /* OP ALLOCATE log proc exits */
#define	PMC_F_NEWVALUE		0x00000010 /* OP RW write new value */
#define	PMC_F_OLDVALUE		0x00000020 /* OP RW get old value */

/* V2 API */
#define	PMC_F_CALLCHAIN		0x00000080 /* OP ALLOCATE capture callchains */
#define	PMC_F_USERCALLCHAIN	0x00000100 /* OP ALLOCATE use userspace stack */

/* V10 API */
#define	PMC_F_EV_PMU		0x00000200 /*
					    * OP ALLOCATE: pm_ev has special
					    * userspace meaning; counter
					    * configuration is communicated
					    * through class-dependent fields
					    */

/* internal flags */
#define	PMC_F_ATTACHED_TO_OWNER	0x00010000 /* attached to owner */
#define	PMC_F_NEEDS_LOGFILE	0x00020000 /* needs log file */
#define	PMC_F_ATTACH_DONE	0x00040000 /* attached at least once */

#define	PMC_CALLCHAIN_DEPTH_MAX	512

#define	PMC_CC_F_USERSPACE	0x01 /* userspace callchain */

/*
 * Cookies used to denote allocated PMCs, and the values of PMCs.
 */

typedef uint32_t	pmc_id_t;
typedef uint64_t	pmc_value_t;

#define	PMC_ID_INVALID		(~ (pmc_id_t) 0)

/*
 * PMC IDs have the following format:
 *
 *	+--------+----------+-------+-----------+
 *	|  CPU   | PMC MODE | CLASS | ROW INDEX |
 *	+--------+----------+-------+-----------+
 *
 * where CPU is 12 bits, MODE 4 bits, CLASS 8 bits, and ROW INDEX 8 bits.
 * Field 'CPU' is set to the requested CPU for system-wide PMCs or
 * PMC_CPU_ANY for process-mode PMCs.  Field 'PMC MODE' is the allocated
 * PMC mode.  Field 'CLASS' is the class of the PMC.  Field 'ROW INDEX'
 * is the row index for the PMC.
 *
 * The 'ROW INDEX' ranges over 0..NHWPMCS, where NHWPMCS is the total
 * number of hardware PMCs on this cpu.
 */
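
/*
 * Illustrative sketch (not part of this header), using the macros below:
 *
 *	id = PMC_ID_MAKE_ID(2, PMC_MODE_SS, PMC_CLASS_IAP, 3)
 *	   = (2 << 20) | (0 << 16) | (0x07 << 8) | 3 = 0x00200703
 *
 *	PMC_ID_TO_CPU(id) == 2, PMC_ID_TO_MODE(id) == PMC_MODE_SS,
 *	PMC_ID_TO_CLASS(id) == PMC_CLASS_IAP, PMC_ID_TO_ROWINDEX(id) == 3
 */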

#define	PMC_ID_TO_ROWINDEX(ID)	((ID) & 0xFF)
#define	PMC_ID_TO_CLASS(ID)	(((ID) & 0xFF00) >> 8)
#define	PMC_ID_TO_MODE(ID)	(((ID) & 0xF0000) >> 16)
#define	PMC_ID_TO_CPU(ID)	(((ID) & 0xFFF00000) >> 20)
#define	PMC_ID_MAKE_ID(CPU,MODE,CLASS,ROWINDEX)			\
	((((CPU) & 0xFFF) << 20) | (((MODE) & 0xF) << 16) |	\
	(((CLASS) & 0xFF) << 8) | ((ROWINDEX) & 0xFF))

/*
 * Data structures for system calls supported by the pmc driver.
 */

/*
 * OP PMCALLOCATE
 *
 * Allocate a PMC on the named CPU.
 */

#define	PMC_CPU_ANY	~0

struct pmc_op_pmcallocate {
	uint32_t	pm_caps;	/* PMC_CAP_* */
	uint32_t	pm_cpu;		/* CPU number or PMC_CPU_ANY */
	enum pmc_class	pm_class;	/* class of PMC desired */
	enum pmc_event	pm_ev;		/* [enum pmc_event] desired */
	uint32_t	pm_flags;	/* additional modifiers PMC_F_* */
	enum pmc_mode	pm_mode;	/* desired mode */
	pmc_id_t	pm_pmcid;	/* [return] process pmc id */
	pmc_value_t	pm_count;	/* initial/sample count */

	union pmc_md_op_pmcallocate pm_md; /* MD layer extensions */
};

/*
 * OP PMCADMIN
 *
 * Set the administrative state (i.e., whether enabled or disabled) of
 * a PMC 'pm_pmc' on CPU 'pm_cpu'.  Note that 'pm_pmc' specifies an
 * absolute PMC number and need not have been first allocated by the
 * calling process.
 */

struct pmc_op_pmcadmin {
	int		pm_cpu;		/* CPU# */
	uint32_t	pm_flags;	/* flags */
	int		pm_pmc;		/* PMC# */
	enum pmc_state	pm_state;	/* desired state */
};

/*
 * OP PMCATTACH / OP PMCDETACH
 *
 * Attach/detach a PMC and a process.
 */

struct pmc_op_pmcattach {
	pmc_id_t	pm_pmc;		/* PMC to attach to */
	pid_t		pm_pid;		/* target process */
};

/*
 * OP PMCSETCOUNT
 *
 * Set the sampling rate (i.e., the reload count) for statistical counters.
 * 'pm_pmcid' needs to have been previously allocated using PMCALLOCATE.
 */

struct pmc_op_pmcsetcount {
	pmc_value_t	pm_count;	/* initial/sample count */
	pmc_id_t	pm_pmcid;	/* PMC id to set */
};

/*
 * OP PMCRW
 *
 * Read the value of a PMC named by 'pm_pmcid'.  'pm_pmcid' needs
 * to have been previously allocated using PMCALLOCATE.
 */

struct pmc_op_pmcrw {
	uint32_t	pm_flags;	/* PMC_F_{OLD,NEW}VALUE */
	pmc_id_t	pm_pmcid;	/* pmc id */
	pmc_value_t	pm_value;	/* new & returned value */
};

/*
 * OP GETPMCINFO
 *
 * Retrieve PMC state for a named CPU.  The caller is expected to
 * allocate 'npmc' * sizeof(struct pmc_info) bytes of space for the
 * return values.
 */

struct pmc_info {
	char		pm_name[PMC_NAME_MAX]; /* pmc name */
	enum pmc_class	pm_class;	/* enum pmc_class */
	int		pm_enabled;	/* whether enabled */
	enum pmc_disp	pm_rowdisp;	/* FREE, THREAD or STANDALONE */
	pid_t		pm_ownerpid;	/* owner, or -1 */
	enum pmc_mode	pm_mode;	/* current mode [enum pmc_mode] */
	enum pmc_event	pm_event;	/* current event */
	uint32_t	pm_flags;	/* current flags */
	pmc_value_t	pm_reloadcount;	/* sampling counters only */
};

struct pmc_op_getpmcinfo {
	int32_t		pm_cpu;		/* 0 <= cpu < mp_maxid */
	struct pmc_info	pm_pmcs[];	/* space for 'npmc' structures */
};
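
/*
 * Illustrative sketch (not part of this header): 'pm_pmcs' is a flexible
 * array member, so a caller sizes the request using the per-CPU PMC count
 * ('pm_npmc') reported by OP GETCPUINFO:
 *
 *	size_t sz = sizeof(struct pmc_op_getpmcinfo) +
 *	    npmc * sizeof(struct pmc_info);
 *	struct pmc_op_getpmcinfo *gpi = malloc(sz);
 *
 *	gpi->pm_cpu = cpu;
 *	-- issue PMC_OP_GETPMCINFO, then examine gpi->pm_pmcs[0 .. npmc-1]
 */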

/*
 * OP GETCPUINFO
 *
 * Retrieve system CPU information.
 */

struct pmc_classinfo {
	enum pmc_class	pm_class;	/* class id */
	uint32_t	pm_caps;	/* counter capabilities */
	uint32_t	pm_width;	/* width of the PMC */
	uint32_t	pm_num;		/* number of PMCs in class */
};

struct pmc_op_getcpuinfo {
	enum pmc_cputype pm_cputype;	/* what kind of CPU */
	uint32_t	pm_ncpu;	/* max CPU number */
	uint32_t	pm_npmc;	/* #PMCs per CPU */
	uint32_t	pm_nclass;	/* #classes of PMCs */
	struct pmc_classinfo pm_classes[PMC_CLASS_MAX];
};

/*
 * OP CONFIGURELOG
 *
 * Configure a log file for writing system-wide statistics to.
 */

struct pmc_op_configurelog {
	int		pm_flags;
	int		pm_logfd;	/* logfile fd (or -1) */
};

/*
 * OP GETDRIVERSTATS
 *
 * Retrieve pmc(4) driver-wide statistics.
 */
#ifdef _KERNEL
struct pmc_driverstats {
	counter_u64_t	pm_intr_ignored;	/* #interrupts ignored */
	counter_u64_t	pm_intr_processed;	/* #interrupts processed */
	counter_u64_t	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
	counter_u64_t	pm_syscalls;		/* #syscalls */
	counter_u64_t	pm_syscall_errors;	/* #syscalls with errors */
	counter_u64_t	pm_buffer_requests;	/* #buffer requests */
	counter_u64_t	pm_buffer_requests_failed; /* #failed buffer requests */
	counter_u64_t	pm_log_sweeps;		/* #sample buffer processing
						   passes */
	counter_u64_t	pm_merges;		/* merged k+u */
	counter_u64_t	pm_overwrites;		/* UR overwrites */
};
#endif

struct pmc_op_getdriverstats {
	unsigned int	pm_intr_ignored;	/* #interrupts ignored */
	unsigned int	pm_intr_processed;	/* #interrupts processed */
	unsigned int	pm_intr_bufferfull;	/* #interrupts with ENOSPC */
	unsigned int	pm_syscalls;		/* #syscalls */
	unsigned int	pm_syscall_errors;	/* #syscalls with errors */
	unsigned int	pm_buffer_requests;	/* #buffer requests */
	unsigned int	pm_buffer_requests_failed; /* #failed buffer requests */
	unsigned int	pm_log_sweeps;		/* #sample buffer processing
						   passes */
};

/*
 * OP RELEASE / OP START / OP STOP
 *
 * Simple operations on a PMC id.
 */

struct pmc_op_simple {
	pmc_id_t	pm_pmcid;
};

/*
 * OP WRITELOG
 *
 * Flush the current log buffer and write 4 bytes of user data to it.
 */

struct pmc_op_writelog {
	uint32_t	pm_userdata;
};

/*
 * OP GETMSR
 *
 * Retrieve the machine specific address associated with the allocated
 * PMC.  This number can be used subsequently with a read-performance-counter
 * instruction.
 */

struct pmc_op_getmsr {
	uint32_t	pm_msr;		/* machine specific address */
	pmc_id_t	pm_pmcid;	/* allocated pmc id */
};
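
/*
 * Illustrative sketch (not part of this header): after OP GETMSR the
 * returned 'pm_msr' identifies the hardware counter so that a suitably
 * privileged process can read it directly with the CPU's
 * read-performance-counter instruction (e.g. RDPMC on x86):
 *
 *	struct pmc_op_getmsr gm;
 *
 *	gm.pm_pmcid = pmcid;	-- previously allocated PMC
 *	-- issue PMC_OP_PMCGETMSR, then use gm.pm_msr with the machine's
 *	-- counter-read instruction
 */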

/*
 * OP GETDYNEVENTINFO
 *
 * Retrieve a PMC dynamic class events list.
 */

struct pmc_dyn_event_descr {
	char		pm_ev_name[PMC_NAME_MAX];
	enum pmc_event	pm_ev_code;
};

struct pmc_op_getdyneventinfo {
	enum pmc_class	pm_class;
	unsigned int	pm_nevent;
	struct pmc_dyn_event_descr pm_events[PMC_EV_DYN_COUNT];
};

#ifdef _KERNEL

#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/_cpuset.h>

#include <machine/frame.h>

#define	PMC_HASH_SIZE		1024
#define	PMC_MTXPOOL_SIZE	2048
#define	PMC_LOG_BUFFER_SIZE	256
#define	PMC_NLOGBUFFERS_PCPU	32
#define	PMC_NSAMPLES		256
#define	PMC_CALLCHAIN_DEPTH	128
#define	PMC_THREADLIST_MAX	128

#define	PMC_SYSCTL_NAME_PREFIX "kern." PMC_MODULE_NAME "."

/*
 * Locking keys
 *
 * (b) - pmc_bufferlist_mtx (spin lock)
 * (k) - pmc_kthread_mtx (sleep lock)
 * (o) - po->po_mtx (spin lock)
 * (g) - global_epoch_preempt (epoch)
 * (p) - pmc_sx (sx)
 */

/*
 * PMC commands
 */

struct pmc_syscall_args {
	register_t	pmop_code;	/* one of PMC_OP_* */
	void		*pmop_data;	/* syscall parameter */
};

/*
 * Interface to processor specific stuff
 */

/*
 * struct pmc_descr
 *
 * Machine independent (i.e., the common parts) of a human readable
 * PMC description.
 */

struct pmc_descr {
	char		pd_name[PMC_NAME_MAX]; /* name */
	uint32_t	pd_caps;	/* capabilities */
	enum pmc_class	pd_class;	/* class of the PMC */
	uint32_t	pd_width;	/* width in bits */
};

/*
 * struct pmc_target
 *
 * This structure records all the target processes associated with a
 * PMC.
 */

struct pmc_target {
	LIST_ENTRY(pmc_target)	pt_next;
	struct pmc_process	*pt_process; /* target descriptor */
};

/*
 * struct pmc
 *
 * Describes each allocated PMC.
 *
 * Each PMC has precisely one owner, namely the process that allocated
 * the PMC.
 *
 * A PMC may be attached to multiple target processes.  The
 * 'pm_targets' field links all the target processes being monitored
 * by this PMC.
 *
 * The 'pm_savedvalue' field is protected by a mutex.
 *
 * On a multi-cpu machine, multiple target threads associated with a
 * process-virtual PMC could be concurrently executing on different
 * CPUs.  The 'pm_runcount' field is atomically incremented every time
 * the PMC gets scheduled on a CPU and atomically decremented when it
 * gets descheduled.  Deletion of a PMC is only permitted when this
 * field is '0'.
 */
struct pmc_pcpu_state {
	uint32_t	pps_overflowcnt;	/* count overflow interrupts */
	uint8_t		pps_stalled;
	uint8_t		pps_cpustate;
} __aligned(CACHE_LINE_SIZE);

struct pmc {
	LIST_HEAD(,pmc_target)	pm_targets;	/* list of target processes */
	LIST_ENTRY(pmc)		pm_next;	/* owner's list */

	/*
	 * System-wide PMCs are allocated on a CPU and are not moved
	 * around.  For system-wide PMCs we record the CPU the PMC was
	 * allocated on in the 'CPU' field of the pmc ID.
	 *
	 * Virtual PMCs run on whichever CPU is currently executing
	 * their targets' threads.  For these PMCs we need to save
	 * their current PMC counter values when they are taken off
	 * CPU.
	 */

	union {
		pmc_value_t	pm_savedvalue;	/* Virtual PMCS */
	} pm_gv;

	/*
	 * For sampling mode PMCs, we keep track of the PMC's "reload
	 * count", which is the counter value to be loaded in when
	 * arming the PMC for the next counting session.  For counting
	 * modes on PMCs that are read-only (e.g., the x86 TSC), we
	 * keep track of the initial value at the start of
	 * counting-mode operation.
	 */

	union {
		pmc_value_t	pm_reloadcount;	/* sampling PMC modes */
		pmc_value_t	pm_initial;	/* counting PMC modes */
	} pm_sc;

	struct pmc_pcpu_state *pm_pcpu_state;
	volatile cpuset_t pm_cpustate;	/* CPUs where PMC should be active */
	uint32_t	pm_caps;	/* PMC capabilities */
	enum pmc_event	pm_event;	/* event being measured */
	uint32_t	pm_flags;	/* additional flags PMC_F_... */
	struct pmc_owner *pm_owner;	/* owner thread state */
	counter_u64_t	pm_runcount;	/* #cpus currently on */
	enum pmc_state	pm_state;	/* current PMC state */

	/*
	 * The PMC ID field encodes the row-index for the PMC, its
	 * mode, class and the CPU# associated with the PMC.
	 */

	pmc_id_t	pm_id;		/* allocated PMC id */
	enum pmc_class	pm_class;

	/* md extensions */
	union pmc_md_pmc pm_md;
};

/*
 * Accessor macros for 'struct pmc'
 */

#define	PMC_TO_MODE(P)		PMC_ID_TO_MODE((P)->pm_id)
#define	PMC_TO_CLASS(P)		PMC_ID_TO_CLASS((P)->pm_id)
#define	PMC_TO_ROWINDEX(P)	PMC_ID_TO_ROWINDEX((P)->pm_id)
#define	PMC_TO_CPU(P)		PMC_ID_TO_CPU((P)->pm_id)

/*
 * struct pmc_threadpmcstate
 *
 * Record per-PMC, per-thread state.
 */
struct pmc_threadpmcstate {
	pmc_value_t	pt_pmcval;	/* per-thread reload count */
};

/*
 * struct pmc_thread
 *
 * Record a 'target' thread being profiled.
 */
struct pmc_thread {
	LIST_ENTRY(pmc_thread) pt_next;	/* linked list */
	struct thread	*pt_td;		/* target thread */
	struct pmc_threadpmcstate pt_pmcs[]; /* per-PMC state */
};

/*
 * struct pmc_process
 *
 * Record a 'target' process being profiled.
 *
 * The target process being profiled could be different from the owner
 * process which allocated the PMCs.  Each target process descriptor
 * is associated with NHWPMC 'struct pmc *' pointers.  Each PMC at a
 * given hardware row-index 'n' will use slot 'n' of the 'pp_pmcs[]'
 * array.  The size of this structure is thus PMC architecture
 * dependent.
 */

struct pmc_targetstate {
	struct pmc	*pp_pmc;	/* target PMC */
	pmc_value_t	pp_pmcval;	/* per-process value */
};

struct pmc_process {
	LIST_ENTRY(pmc_process) pp_next; /* hash chain */
	LIST_HEAD(,pmc_thread) pp_tds;	/* list of threads */
	struct mtx	*pp_tdslock;	/* lock on pp_tds thread list */
	int		pp_refcnt;	/* reference count */
	uint32_t	pp_flags;	/* flags PMC_PP_* */
	struct proc	*pp_proc;	/* target process */
	struct pmc_targetstate pp_pmcs[]; /* NHWPMCs */
};

#define	PMC_PP_ENABLE_MSR_ACCESS	0x00000001

/*
 * struct pmc_owner
 *
 * We associate a PMC with an 'owner' process.
 *
 * A process can be associated with 0..NCPUS*NHWPMC PMCs during its
 * lifetime, where NCPUS is the number of CPUs in the system and
 * NHWPMC is the number of hardware PMCs per CPU.  These are
 * maintained in the list headed by 'po_pmcs' to save on space.
 */
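
/*
 * Illustrative sketch (not part of this header): given a 'struct pmc_owner
 * *po' (defined below), the owner, PMC and target structures link up as
 * follows:
 *
 *	LIST_FOREACH(pm, &po->po_pmcs, pm_next)		-- PMCs owned by po
 *		LIST_FOREACH(pt, &pm->pm_targets, pt_next)
 *			-- pt->pt_process->pp_proc is a target of pm
 */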

struct pmc_owner {
	LIST_ENTRY(pmc_owner)	po_next;	/* hash chain */
	CK_LIST_ENTRY(pmc_owner) po_ssnext;	/* (g/p) list of SS PMC owners */
	LIST_HEAD(, pmc)	po_pmcs;	/* owned PMC list */
	TAILQ_HEAD(, pmclog_buffer) po_logbuffers; /* (o) logbuffer list */
	struct mtx		po_mtx;		/* spin lock for (o) */
	struct proc		*po_owner;	/* owner proc */
	uint32_t		po_flags;	/* (k) flags PMC_PO_* */
	struct proc		*po_kthread;	/* (k) helper kthread */
	struct file		*po_file;	/* file reference */
	int			po_error;	/* recorded error */
	short			po_sscount;	/* # SS PMCs owned */
	short			po_logprocmaps;	/* global mappings done */
	struct pmclog_buffer	*po_curbuf[MAXCPU]; /* current log buffer */
};

#define	PMC_PO_OWNS_LOGFILE		0x00000001 /* has a log file */
#define	PMC_PO_SHUTDOWN			0x00000010 /* in the process of shutdown */
#define	PMC_PO_INITIAL_MAPPINGS_DONE	0x00000020

/*
 * struct pmc_hw -- describe the state of the PMC hardware
 *
 * When in use, a HW PMC is associated with one allocated 'struct pmc'
 * pointed to by field 'phw_pmc'.  When inactive, this field is NULL.
 *
 * On an SMP box, one or more HW PMCs in process virtual mode with
 * the same 'phw_pmc' could be executing on different CPUs.  In order
 * to handle this case correctly, we need to ensure that only
 * incremental counts get added to the saved value in the associated
 * 'struct pmc'.  The 'phw_save' field is used to keep the saved PMC
 * value at the time the hardware is started during this context
 * switch (i.e., the difference between the new (hardware) count and
 * the saved count is atomically added to the count field in 'struct
 * pmc' at context switch time).
 */

struct pmc_hw {
	uint32_t	phw_state;	/* see PHW_* macros below */
	struct pmc	*phw_pmc;	/* current thread PMC */
};

#define	PMC_PHW_RI_MASK		0x000000FF
#define	PMC_PHW_CPU_SHIFT	8
#define	PMC_PHW_CPU_MASK	0x0000FF00
#define	PMC_PHW_FLAGS_SHIFT	16
#define	PMC_PHW_FLAGS_MASK	0xFFFF0000

#define	PMC_PHW_INDEX_TO_STATE(ri)	((ri) & PMC_PHW_RI_MASK)
#define	PMC_PHW_STATE_TO_INDEX(state)	((state) & PMC_PHW_RI_MASK)
#define	PMC_PHW_CPU_TO_STATE(cpu)	(((cpu) << PMC_PHW_CPU_SHIFT) & \
	PMC_PHW_CPU_MASK)
#define	PMC_PHW_STATE_TO_CPU(state)	(((state) & PMC_PHW_CPU_MASK) >> \
	PMC_PHW_CPU_SHIFT)
#define	PMC_PHW_FLAGS_TO_STATE(flags)	(((flags) << PMC_PHW_FLAGS_SHIFT) & \
	PMC_PHW_FLAGS_MASK)
#define	PMC_PHW_STATE_TO_FLAGS(state)	(((state) & PMC_PHW_FLAGS_MASK) >> \
	PMC_PHW_FLAGS_SHIFT)
#define	PMC_PHW_FLAG_IS_ENABLED		(PMC_PHW_FLAGS_TO_STATE(0x01))
#define	PMC_PHW_FLAG_IS_SHAREABLE	(PMC_PHW_FLAGS_TO_STATE(0x02))
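
/*
 * Illustrative sketch (not part of this header): 'phw_state' packs the row
 * index, CPU number and flag bits of a hardware PMC into a single word
 * using the macros above (for values that fit the respective masks):
 *
 *	state = PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(ri) |
 *	    PMC_PHW_FLAG_IS_ENABLED;
 *
 *	PMC_PHW_STATE_TO_CPU(state) == cpu
 *	PMC_PHW_STATE_TO_INDEX(state) == ri
 */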

/*
 * struct pmc_sample
 *
 * Space for N (tunable) PC samples and associated control data.
 */

struct pmc_sample {
	uint16_t	ps_nsamples;	/* callchain depth */
	uint16_t	ps_nsamples_actual;
	uint16_t	ps_cpu;		/* cpu number */
	uint16_t	ps_flags;	/* other flags */
	lwpid_t		ps_tid;		/* thread id */
	pid_t		ps_pid;		/* process PID or -1 */
	int		ps_ticks;	/* ticks at sample time */
	/* pad */
	struct thread	*ps_td;		/* which thread */
	struct pmc	*ps_pmc;	/* interrupting PMC */
	uintptr_t	*ps_pc;		/* (const) callchain start */
	uint64_t	ps_tsc;		/* tsc value */
};

#define	PMC_SAMPLE_FREE			((uint16_t) 0)
#define	PMC_USER_CALLCHAIN_PENDING	((uint16_t) 0xFFFF)

struct pmc_samplebuffer {
	volatile uint64_t	ps_prodidx;	/* producer index */
	volatile uint64_t	ps_considx;	/* consumer index */
	uintptr_t		*ps_callchains;	/* all saved call chains */
	struct pmc_sample	ps_samples[];	/* array of sample entries */
};

#define	PMC_CONS_SAMPLE(psb)						\
	(&(psb)->ps_samples[(psb)->ps_considx & pmc_sample_mask])

#define	PMC_CONS_SAMPLE_OFF(psb, off)					\
	(&(psb)->ps_samples[(off) & pmc_sample_mask])

#define	PMC_PROD_SAMPLE(psb)						\
	(&(psb)->ps_samples[(psb)->ps_prodidx & pmc_sample_mask])

/*
 * struct pmc_cpu
 *
 * A CPU is modelled as a collection of HW PMCs with space for additional
 * flags.
 */

struct pmc_cpu {
	uint32_t	pc_state;	/* physical cpu number + flags */
	struct pmc_samplebuffer *pc_sb[3]; /* space for samples */
	struct pmc_hw	*pc_hwpmcs[];	/* 'npmc' pointers */
};

#define	PMC_PCPU_CPU_MASK		0x000000FF
#define	PMC_PCPU_FLAGS_MASK		0xFFFFFF00
#define	PMC_PCPU_FLAGS_SHIFT		8
#define	PMC_PCPU_STATE_TO_CPU(S)	((S) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_STATE_TO_FLAGS(S)	(((S) & PMC_PCPU_FLAGS_MASK) >> PMC_PCPU_FLAGS_SHIFT)
#define	PMC_PCPU_FLAGS_TO_STATE(F)	(((F) << PMC_PCPU_FLAGS_SHIFT) & PMC_PCPU_FLAGS_MASK)
#define	PMC_PCPU_CPU_TO_STATE(C)	((C) & PMC_PCPU_CPU_MASK)
#define	PMC_PCPU_FLAG_HTT		(PMC_PCPU_FLAGS_TO_STATE(0x1))

/*
 * struct pmc_binding
 *
 * CPU binding information.
 */

struct pmc_binding {
	int	pb_bound;	/* is bound? */
	int	pb_cpu;		/* if so, to which CPU */
	u_char	pb_priority;	/* Thread active priority. */
};

struct pmc_mdep;

/*
 * struct pmc_classdep
 *
 * PMC class-dependent operations.
 */
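
/*
 * Illustrative sketch (hypothetical, not from this header): a class driver
 * would typically fill in one descriptor below per PMC class during machine
 * dependent initialization, e.g. for the n-th class of a 'struct pmc_mdep
 * *md' obtained from pmc_mdep_alloc():
 *
 *	struct pmc_classdep *pcd = &md->pmd_classdep[n];
 *
 *	pcd->pcd_class = PMC_CLASS_...;
 *	pcd->pcd_caps = PMC_CAP_READ | PMC_CAP_WRITE | ...;
 *	pcd->pcd_num = ...;			-- counters of this class per CPU
 *	pcd->pcd_read_pmc = ..._read_pmc;	-- and so on for the other
 *						   pcd_* methods
 */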
struct pmc_classdep {
	uint32_t	pcd_caps;	/* class capabilities */
	enum pmc_class	pcd_class;	/* class id */
	int		pcd_num;	/* number of PMCs */
	int		pcd_ri;		/* row index of the first PMC in class */
	int		pcd_width;	/* width of the PMC */

	/* configuring/reading/writing the hardware PMCs */
	int (*pcd_config_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_get_config)(int _cpu, int _ri, struct pmc **_ppm);
	int (*pcd_read_pmc)(int _cpu, int _ri, struct pmc *_pm,
	    pmc_value_t *_value);
	int (*pcd_write_pmc)(int _cpu, int _ri, struct pmc *_pm,
	    pmc_value_t _value);

	/* pmc allocation/release */
	int (*pcd_allocate_pmc)(int _cpu, int _ri, struct pmc *_t,
	    const struct pmc_op_pmcallocate *_a);
	int (*pcd_release_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* starting and stopping PMCs */
	int (*pcd_start_pmc)(int _cpu, int _ri, struct pmc *_pm);
	int (*pcd_stop_pmc)(int _cpu, int _ri, struct pmc *_pm);

	/* description */
	int (*pcd_describe)(int _cpu, int _ri, struct pmc_info *_pi,
	    struct pmc **_ppmc);

	/* class-dependent initialization & finalization */
	int (*pcd_pcpu_init)(struct pmc_mdep *_md, int _cpu);
	int (*pcd_pcpu_fini)(struct pmc_mdep *_md, int _cpu);

	/* machine-specific interface */
	int (*pcd_get_msr)(int _ri, uint32_t *_msr);
};

/*
 * struct pmc_mdep
 *
 * Machine dependent bits needed per CPU type.
 */

struct pmc_mdep {
	uint32_t	pmd_cputype;	/* from enum pmc_cputype */
	uint32_t	pmd_npmc;	/* number of PMCs per CPU */
	uint32_t	pmd_nclass;	/* number of PMC classes present */

	/*
	 * Machine dependent methods.
	 */

	/* thread context switch in/out */
	int (*pmd_switch_in)(struct pmc_cpu *_p, struct pmc_process *_pp);
	int (*pmd_switch_out)(struct pmc_cpu *_p, struct pmc_process *_pp);

	/* handle a PMC interrupt */
	int (*pmd_intr)(struct trapframe *_tf);

	/*
	 * PMC class dependent information.
	 */
	struct pmc_classdep pmd_classdep[];
};

/*
 * Per-CPU state.  This is an array of 'mp_ncpu' pointers
 * to struct pmc_cpu descriptors.
 */

extern struct pmc_cpu **pmc_pcpu;

/* driver statistics */
extern struct pmc_driverstats pmc_stats;

#if defined(HWPMC_DEBUG)

/* HWPMC_DEBUG without KTR will compile but is a no-op. */
#if !defined(KTR) || !defined(KTR_COMPILE) || ((KTR_COMPILE & KTR_SUBSYS) == 0)
#error "HWPMC_DEBUG requires KTR and KTR_COMPILE=KTR_SUBSYS -- see ktr(4)"
#endif

#include <sys/ktr.h>

#define	__pmcdbg_used		/* unused variable annotation */

/*
 * Debug flags, major flag groups.
 *
 * Please keep the DEBUGGING section of the hwpmc(4) man page in sync.
 */
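
/*
 * Illustrative sketch (not part of this header): a call such as
 *
 *	PMCDBG2(PMC, ALL, 1, "pm=%p mode=%d", pm, mode);
 *
 * only emits a CTR2() trace record when the PMC_DEBUG_MIN_ALL bit is set in
 * pmc_debugflags.pdb_PMC; see the PMCDBG* macros and the major/minor number
 * definitions below.  The format string and arguments here are hypothetical.
 */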
struct pmc_debugflags {
	int	pdb_CPU;
	int	pdb_CSW;
	int	pdb_LOG;
	int	pdb_MDP;
	int	pdb_MOD;
	int	pdb_OWN;
	int	pdb_PMC;
	int	pdb_PRC;
	int	pdb_SAM;
};

extern struct pmc_debugflags pmc_debugflags;

#define	KTR_PMC			KTR_SUBSYS

#define	PMC_DEBUG_STRSIZE	128
#define	PMC_DEBUG_DEFAULT_FLAGS	{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }

#define	PMCDBG0(M, N, L, F) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR0(KTR_PMC, #M ":" #N ":" #L ": " F);			\
} while (0)
#define	PMCDBG1(M, N, L, F, p1) do {					\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR1(KTR_PMC, #M ":" #N ":" #L ": " F, p1);		\
} while (0)
#define	PMCDBG2(M, N, L, F, p1, p2) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR2(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2);		\
} while (0)
#define	PMCDBG3(M, N, L, F, p1, p2, p3) do {				\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR3(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3);	\
} while (0)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR4(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4);	\
} while (0)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5) do {			\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR5(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,	\
		    p5);						\
} while (0)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6) do {		\
	if (pmc_debugflags.pdb_ ## M & (1 << PMC_DEBUG_MIN_ ## N))	\
		CTR6(KTR_PMC, #M ":" #N ":" #L ": " F, p1, p2, p3, p4,	\
		    p5, p6);						\
} while (0)

/* Major numbers */
#define	PMC_DEBUG_MAJ_CPU	0 /* cpu switches */
#define	PMC_DEBUG_MAJ_CSW	1 /* context switches */
#define	PMC_DEBUG_MAJ_LOG	2 /* logging */
#define	PMC_DEBUG_MAJ_MDP	3 /* machine dependent */
#define	PMC_DEBUG_MAJ_MOD	4 /* misc module infrastructure */
#define	PMC_DEBUG_MAJ_OWN	5 /* owner */
#define	PMC_DEBUG_MAJ_PMC	6 /* pmc management */
#define	PMC_DEBUG_MAJ_PRC	7 /* processes */
#define	PMC_DEBUG_MAJ_SAM	8 /* sampling */

/* Minor numbers */

/* Common (8 bits) */
#define	PMC_DEBUG_MIN_ALL	0 /* allocation */
#define	PMC_DEBUG_MIN_REL	1 /* release */
#define	PMC_DEBUG_MIN_OPS	2 /* ops: start, stop, ... */
#define	PMC_DEBUG_MIN_INI	3 /* init */
#define	PMC_DEBUG_MIN_FND	4 /* find */

/* MODULE */
#define	PMC_DEBUG_MIN_PMH	14 /* pmc_hook */
#define	PMC_DEBUG_MIN_PMS	15 /* pmc_syscall */

/* OWN */
#define	PMC_DEBUG_MIN_ORM	8 /* owner remove */
#define	PMC_DEBUG_MIN_OMR	9 /* owner maybe remove */

/* PROCESSES */
#define	PMC_DEBUG_MIN_TLK	8 /* link target */
#define	PMC_DEBUG_MIN_TUL	9 /* unlink target */
#define	PMC_DEBUG_MIN_EXT	10 /* process exit */
#define	PMC_DEBUG_MIN_EXC	11 /* process exec */
#define	PMC_DEBUG_MIN_FRK	12 /* process fork */
#define	PMC_DEBUG_MIN_ATT	13 /* attach/detach */
#define	PMC_DEBUG_MIN_SIG	14 /* signalling */

/* CONTEXT SWITCHES */
#define	PMC_DEBUG_MIN_SWI	8 /* switch in */
#define	PMC_DEBUG_MIN_SWO	9 /* switch out */

/* PMC */
#define	PMC_DEBUG_MIN_REG	8 /* pmc register */
#define	PMC_DEBUG_MIN_ALR	9 /* allocate row */

/* MACHINE DEPENDENT LAYER */
#define	PMC_DEBUG_MIN_REA	8 /* read */
#define	PMC_DEBUG_MIN_WRI	9 /* write */
#define	PMC_DEBUG_MIN_CFG	10 /* config */
#define	PMC_DEBUG_MIN_STA	11 /* start */
#define	PMC_DEBUG_MIN_STO	12 /* stop */
#define	PMC_DEBUG_MIN_INT	13 /* interrupts */

/* CPU */
#define	PMC_DEBUG_MIN_BND	8 /* bind */
#define	PMC_DEBUG_MIN_SEL	9 /* select */

/* LOG */
#define	PMC_DEBUG_MIN_GTB	8 /* get buf */
#define	PMC_DEBUG_MIN_SIO	9 /* schedule i/o */
#define	PMC_DEBUG_MIN_FLS	10 /* flush */
#define	PMC_DEBUG_MIN_SAM	11 /* sample */
#define	PMC_DEBUG_MIN_CLO	12 /* close */

#else
#define	__pmcdbg_used		__unused
#define	PMCDBG0(M, N, L, F)				/* nothing */
#define	PMCDBG1(M, N, L, F, p1)
#define	PMCDBG2(M, N, L, F, p1, p2)
#define	PMCDBG3(M, N, L, F, p1, p2, p3)
#define	PMCDBG4(M, N, L, F, p1, p2, p3, p4)
#define	PMCDBG5(M, N, L, F, p1, p2, p3, p4, p5)
#define	PMCDBG6(M, N, L, F, p1, p2, p3, p4, p5, p6)
#endif

/* declare a dedicated memory pool */
MALLOC_DECLARE(M_PMC);

/*
 * Functions
 */

struct pmc_mdep *pmc_md_initialize(void);	/* MD init function */
void	pmc_md_finalize(struct pmc_mdep *_md);	/* MD fini function */
int	pmc_getrowdisp(int _ri);
int	pmc_process_interrupt(int _ring, struct pmc *_pm, struct trapframe *_tf);
int	pmc_save_kernel_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
int	pmc_save_user_callchain(uintptr_t *_cc, int _maxsamples,
    struct trapframe *_tf);
void	pmc_restore_cpu_binding(struct pmc_binding *pb);
void	pmc_save_cpu_binding(struct pmc_binding *pb);
void	pmc_select_cpu(int cpu);
struct pmc_mdep *pmc_mdep_alloc(int nclasses);
void	pmc_mdep_free(struct pmc_mdep *md);
uint64_t pmc_rdtsc(void);
#endif /* _KERNEL */
#endif /* _SYS_PMC_H_ */