// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/debugreg.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	FIXED_EVENT_CONSTRAINT(0x0500, 4),
	FIXED_EVENT_CONSTRAINT(0x0600, 5),
	FIXED_EVENT_CONSTRAINT(0x0700, 6),
	FIXED_EVENT_CONSTRAINT(0x0800, 7),
	FIXED_EVENT_CONSTRAINT(0x0900, 8),
	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
	FIXED_EVENT_CONSTRAINT(0x1000, 15),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_arw_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
	INTEL_UEVENT_CONSTRAINT(0x01b7, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02b7, 0x2),
	INTEL_UEVENT_CONSTRAINT(0x04b7, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x08b7, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x01d4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0x2),
	INTEL_UEVENT_CONSTRAINT(0x04d4, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x08d4, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x0175, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x0275, 0x2),
	INTEL_UEVENT_CONSTRAINT(0x21d3, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x22d3, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note that the low 8 bits of the eventsel code do not form a
	 * continuous field; they contain some bits that would #GP if set.
	 * These are masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_glc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally, event codes < 0x90 are restricted to counters 0-3.
	 * Events 0x2E and 0x3C are the exceptions; they have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally, event codes >= 0x90 are likely to have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_lnc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x20, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),

	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),

	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),

	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_pnc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x20, 0xf),
	INTEL_EVENT_CONSTRAINT(0x79, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x0275, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0176, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x01cd, 0xfc),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),

	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd4, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xdf, 0xf),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),

	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x0847, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0446, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0846, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_pnc_extra_regs[] __read_mostly = {
	/* must define OMR_X first, see intel_alt_er() */
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OMR_0, 0x40ffffff0000ffffull, OMR_0),
	INTEL_UEVENT_EXTRA_REG(0x022a, MSR_OMR_1, 0x40ffffff0000ffffull, OMR_1),
	INTEL_UEVENT_EXTRA_REG(0x042a, MSR_OMR_2, 0x40ffffff0000ffffull, OMR_2),
	INTEL_UEVENT_EXTRA_REG(0x082a, MSR_OMR_3, 0x40ffffff0000ffffull, OMR_3),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all in slots, where a slot is a free issue slot in a
 * 4-wide pipeline. Some events are already reported in slots; for cycle
 * events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to the pipeline width,
 * so we use factor 2: (count_t0 + count_t1) / 2 * 4.
 */
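/*
 * Worked example of the scaling above (illustrative only, derived from the
 * comment and the td_total_slots_scale attribute defined below): with HT on,
 * total slots for a 4-wide core are computed from the summed per-thread
 * cycle counts as
 *
 *   slots = (count_t0 + count_t1) / 2 * 4 = (count_t0 + count_t1) * 2
 *
 * which is why td_total_slots_scale is "4" without HT and "2" with HT.
 */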
EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1"); /* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 glc_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS) ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS) ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
};

static __initconst const u64 glc_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS) ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS) ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS) ] = 0x3fb3000001,
	},
 },
};

static __initconst const u64 pnc_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS) ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS) ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 pnc_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4000000000000001,
		[ C(RESULT_MISS) ] = 0xFFFFF000000001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4000000000000002,
		[ C(RESULT_MISS) ] = 0xFFFFF000000002,
	},
 },
};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
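/*
 * Worked example (illustrative only, computed from the bit definitions that
 * follow): the LL read-miss entry in skl_hw_cache_extra_regs below is
 *
 *   SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE
 *
 * i.e. bit 0 | bit 17 | bits 26-29 | bits 30-37 = 0x3ffc020001, roughly: a
 * demand data read that missed the L3, counted for any snoop response and
 * any (or no) DRAM supplier.
 */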
#define SKL_DEMAND_DATA_RD BIT_ULL(0)
#define SKL_DEMAND_RFO BIT_ULL(1)
#define SKL_ANY_RESPONSE BIT_ULL(16)
#define SKL_SUPPLIER_NONE BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
#define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
		     SKL_L3_MISS_REMOTE_HOP0_DRAM| \
		     SKL_L3_MISS_REMOTE_HOP1_DRAM| \
		     SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT BIT_ULL(30)
#define SKL_SNOOP_NONE BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
#define SKL_SNOOP_MISS BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
#define SKL_SNOOP_HITM BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM BIT_ULL(37)
#define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
		       SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
		       SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
		       SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
			SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
			SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
			SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE SKL_DEMAND_RFO
#define SKL_LLC_ACCESS SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
			    SKL_L3_MISS_REMOTE_HOP1_DRAM| \
			    SKL_L3_MISS_REMOTE_HOP2P_DRAM)

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD (1ULL << 0)
#define SNB_DMND_RFO (1ULL << 1)
#define SNB_DMND_IFETCH (1ULL << 2)
#define SNB_DMND_WB (1ULL << 3)
#define SNB_PF_DATA_RD (1ULL << 4)
#define SNB_PF_RFO (1ULL << 5)
#define SNB_PF_IFETCH (1ULL << 6)
#define SNB_LLC_DATA_RD (1ULL << 7)
#define SNB_LLC_RFO (1ULL << 8)
#define SNB_LLC_IFETCH (1ULL << 9)
#define SNB_BUS_LOCKS (1ULL << 10)
#define SNB_STRM_ST (1ULL << 11)
#define SNB_OTHER (1ULL << 15)
#define SNB_RESP_ANY (1ULL << 16)
#define SNB_NO_SUPP (1ULL << 17)
#define SNB_LLC_HITM (1ULL << 18)
#define SNB_LLC_HITE (1ULL << 19)
#define SNB_LLC_HITS (1ULL << 20)
#define SNB_LLC_HITF (1ULL << 21)
#define SNB_LOCAL (1ULL << 22)
#define SNB_REMOTE (0xffULL << 23)
#define SNB_SNP_NONE (1ULL << 31)
#define SNB_SNP_NOT_NEEDED (1ULL << 32)
#define SNB_SNP_MISS (1ULL << 33)
#define SNB_NO_FWD (1ULL << 34)
#define SNB_SNP_FWD (1ULL << 35)
#define SNB_HITM (1ULL << 36)
#define SNB_NON_DRAM (1ULL << 37)

#define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
		     SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
		     SNB_HITM)

#define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS SNB_RESP_ANY
#define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD BIT_ULL(0)
#define HSW_DEMAND_RFO BIT_ULL(1)
#define HSW_ANY_RESPONSE BIT_ULL(16)
#define HSW_SUPPLIER_NONE BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
#define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
		     HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
		     HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
#define HSW_SNOOP_MISS BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
#define HSW_SNOOP_HITM BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM BIT_ULL(37)
#define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
		       HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
		       HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
		       HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
			    HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL BIT(26)
#define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
		     HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
		     HSW_L3_MISS_REMOTE_HOP2P)


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD (1 << 0)
#define NHM_DMND_RFO (1 << 1)
#define NHM_DMND_IFETCH (1 << 2)
#define NHM_DMND_WB (1 << 3)
#define NHM_PF_DATA_RD (1 << 4)
#define NHM_PF_DATA_RFO (1 << 5)
#define NHM_PF_IFETCH (1 << 6)
#define NHM_OFFCORE_OTHER (1 << 7)
#define NHM_UNCORE_HIT (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
#define NHM_OTHER_CORE_HITM (1 << 10)
			/* reserved */
#define NHM_REMOTE_CACHE_FWD (1 << 12)
#define NHM_REMOTE_DRAM (1 << 13)
#define NHM_LOCAL_DRAM (1 << 14)
#define NHM_NON_DRAM (1 << 15)

#define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE (NHM_REMOTE_DRAM)

#define NHM_DMND_READ (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
typically occur 1633 * on RFO. 1634 */ 1635 [ C(OP_WRITE) ] = { 1636 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1637 [ C(RESULT_ACCESS) ] = 0x01b7, 1638 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1639 [ C(RESULT_MISS) ] = 0x01b7, 1640 }, 1641 [ C(OP_PREFETCH) ] = { 1642 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1643 [ C(RESULT_ACCESS) ] = 0x01b7, 1644 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1645 [ C(RESULT_MISS) ] = 0x01b7, 1646 }, 1647 }, 1648 [ C(DTLB) ] = { 1649 [ C(OP_READ) ] = { 1650 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ 1651 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ 1652 }, 1653 [ C(OP_WRITE) ] = { 1654 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ 1655 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ 1656 }, 1657 [ C(OP_PREFETCH) ] = { 1658 [ C(RESULT_ACCESS) ] = 0x0, 1659 [ C(RESULT_MISS) ] = 0x0, 1660 }, 1661 }, 1662 [ C(ITLB) ] = { 1663 [ C(OP_READ) ] = { 1664 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ 1665 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ 1666 }, 1667 [ C(OP_WRITE) ] = { 1668 [ C(RESULT_ACCESS) ] = -1, 1669 [ C(RESULT_MISS) ] = -1, 1670 }, 1671 [ C(OP_PREFETCH) ] = { 1672 [ C(RESULT_ACCESS) ] = -1, 1673 [ C(RESULT_MISS) ] = -1, 1674 }, 1675 }, 1676 [ C(BPU ) ] = { 1677 [ C(OP_READ) ] = { 1678 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1679 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ 1680 }, 1681 [ C(OP_WRITE) ] = { 1682 [ C(RESULT_ACCESS) ] = -1, 1683 [ C(RESULT_MISS) ] = -1, 1684 }, 1685 [ C(OP_PREFETCH) ] = { 1686 [ C(RESULT_ACCESS) ] = -1, 1687 [ C(RESULT_MISS) ] = -1, 1688 }, 1689 }, 1690 [ C(NODE) ] = { 1691 [ C(OP_READ) ] = { 1692 [ C(RESULT_ACCESS) ] = 0x01b7, 1693 [ C(RESULT_MISS) ] = 0x01b7, 1694 }, 1695 [ C(OP_WRITE) ] = { 1696 [ C(RESULT_ACCESS) ] = 0x01b7, 1697 [ C(RESULT_MISS) ] = 0x01b7, 1698 }, 1699 [ C(OP_PREFETCH) ] = { 1700 [ C(RESULT_ACCESS) ] = 0x01b7, 1701 [ C(RESULT_MISS) ] = 0x01b7, 1702 }, 1703 }, 1704 }; 1705 1706 static __initconst const u64 core2_hw_cache_event_ids 1707 [PERF_COUNT_HW_CACHE_MAX] 1708 [PERF_COUNT_HW_CACHE_OP_MAX] 1709 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1710 { 1711 [ C(L1D) ] = { 1712 [ C(OP_READ) ] = { 1713 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ 1714 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ 1715 }, 1716 [ C(OP_WRITE) ] = { 1717 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ 1718 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ 1719 }, 1720 [ C(OP_PREFETCH) ] = { 1721 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ 1722 [ C(RESULT_MISS) ] = 0, 1723 }, 1724 }, 1725 [ C(L1I ) ] = { 1726 [ C(OP_READ) ] = { 1727 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ 1728 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ 1729 }, 1730 [ C(OP_WRITE) ] = { 1731 [ C(RESULT_ACCESS) ] = -1, 1732 [ C(RESULT_MISS) ] = -1, 1733 }, 1734 [ C(OP_PREFETCH) ] = { 1735 [ C(RESULT_ACCESS) ] = 0, 1736 [ C(RESULT_MISS) ] = 0, 1737 }, 1738 }, 1739 [ C(LL ) ] = { 1740 [ C(OP_READ) ] = { 1741 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ 1742 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ 1743 }, 1744 [ C(OP_WRITE) ] = { 1745 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ 1746 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ 1747 }, 1748 [ C(OP_PREFETCH) ] = { 1749 [ C(RESULT_ACCESS) ] = 0, 1750 [ C(RESULT_MISS) ] = 0, 1751 }, 1752 }, 1753 [ C(DTLB) ] = { 1754 [ C(OP_READ) ] = { 1755 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ 1756 [ C(RESULT_MISS) ] = 0x0208, 
/* DTLB_MISSES.MISS_LD */ 1757 }, 1758 [ C(OP_WRITE) ] = { 1759 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ 1760 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ 1761 }, 1762 [ C(OP_PREFETCH) ] = { 1763 [ C(RESULT_ACCESS) ] = 0, 1764 [ C(RESULT_MISS) ] = 0, 1765 }, 1766 }, 1767 [ C(ITLB) ] = { 1768 [ C(OP_READ) ] = { 1769 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1770 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ 1771 }, 1772 [ C(OP_WRITE) ] = { 1773 [ C(RESULT_ACCESS) ] = -1, 1774 [ C(RESULT_MISS) ] = -1, 1775 }, 1776 [ C(OP_PREFETCH) ] = { 1777 [ C(RESULT_ACCESS) ] = -1, 1778 [ C(RESULT_MISS) ] = -1, 1779 }, 1780 }, 1781 [ C(BPU ) ] = { 1782 [ C(OP_READ) ] = { 1783 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1784 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1785 }, 1786 [ C(OP_WRITE) ] = { 1787 [ C(RESULT_ACCESS) ] = -1, 1788 [ C(RESULT_MISS) ] = -1, 1789 }, 1790 [ C(OP_PREFETCH) ] = { 1791 [ C(RESULT_ACCESS) ] = -1, 1792 [ C(RESULT_MISS) ] = -1, 1793 }, 1794 }, 1795 }; 1796 1797 static __initconst const u64 atom_hw_cache_event_ids 1798 [PERF_COUNT_HW_CACHE_MAX] 1799 [PERF_COUNT_HW_CACHE_OP_MAX] 1800 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1801 { 1802 [ C(L1D) ] = { 1803 [ C(OP_READ) ] = { 1804 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ 1805 [ C(RESULT_MISS) ] = 0, 1806 }, 1807 [ C(OP_WRITE) ] = { 1808 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ 1809 [ C(RESULT_MISS) ] = 0, 1810 }, 1811 [ C(OP_PREFETCH) ] = { 1812 [ C(RESULT_ACCESS) ] = 0x0, 1813 [ C(RESULT_MISS) ] = 0, 1814 }, 1815 }, 1816 [ C(L1I ) ] = { 1817 [ C(OP_READ) ] = { 1818 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1819 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1820 }, 1821 [ C(OP_WRITE) ] = { 1822 [ C(RESULT_ACCESS) ] = -1, 1823 [ C(RESULT_MISS) ] = -1, 1824 }, 1825 [ C(OP_PREFETCH) ] = { 1826 [ C(RESULT_ACCESS) ] = 0, 1827 [ C(RESULT_MISS) ] = 0, 1828 }, 1829 }, 1830 [ C(LL ) ] = { 1831 [ C(OP_READ) ] = { 1832 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ 1833 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ 1834 }, 1835 [ C(OP_WRITE) ] = { 1836 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ 1837 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ 1838 }, 1839 [ C(OP_PREFETCH) ] = { 1840 [ C(RESULT_ACCESS) ] = 0, 1841 [ C(RESULT_MISS) ] = 0, 1842 }, 1843 }, 1844 [ C(DTLB) ] = { 1845 [ C(OP_READ) ] = { 1846 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ 1847 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ 1848 }, 1849 [ C(OP_WRITE) ] = { 1850 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ 1851 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ 1852 }, 1853 [ C(OP_PREFETCH) ] = { 1854 [ C(RESULT_ACCESS) ] = 0, 1855 [ C(RESULT_MISS) ] = 0, 1856 }, 1857 }, 1858 [ C(ITLB) ] = { 1859 [ C(OP_READ) ] = { 1860 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1861 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ 1862 }, 1863 [ C(OP_WRITE) ] = { 1864 [ C(RESULT_ACCESS) ] = -1, 1865 [ C(RESULT_MISS) ] = -1, 1866 }, 1867 [ C(OP_PREFETCH) ] = { 1868 [ C(RESULT_ACCESS) ] = -1, 1869 [ C(RESULT_MISS) ] = -1, 1870 }, 1871 }, 1872 [ C(BPU ) ] = { 1873 [ C(OP_READ) ] = { 1874 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1875 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1876 }, 1877 [ C(OP_WRITE) ] = { 1878 [ C(RESULT_ACCESS) ] = -1, 1879 [ C(RESULT_MISS) ] = -1, 1880 }, 1881 [ C(OP_PREFETCH) ] = { 1882 [ C(RESULT_ACCESS) ] = -1, 1883 [ C(RESULT_MISS) ] = -1, 1884 }, 1885 }, 1886 }; 1887 1888 
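/*
 * Illustrative note on the hw_cache_event_ids tables above (an added
 * reading aid, not taken from any single table entry): each non-zero
 * value is the low 16 bits of a raw PERFEVTSEL encoding, i.e. bits 0-7
 * hold the event select and bits 8-15 the unit mask, so for example
 * 0x0280 decodes to event 0x80 with umask 0x02 (L1I.MISSES). A value of
 * 0 means no suitable event is available for that generic cache
 * op/result combination, and -1 marks a combination that is not
 * supported at all.
 */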
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c"); 1889 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2"); 1890 /* no_alloc_cycles.not_delivered */ 1891 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm, 1892 "event=0xca,umask=0x50"); 1893 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2"); 1894 /* uops_retired.all */ 1895 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm, 1896 "event=0xc2,umask=0x10"); 1897 /* uops_retired.all */ 1898 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm, 1899 "event=0xc2,umask=0x10"); 1900 1901 static struct attribute *slm_events_attrs[] = { 1902 EVENT_PTR(td_total_slots_slm), 1903 EVENT_PTR(td_total_slots_scale_slm), 1904 EVENT_PTR(td_fetch_bubbles_slm), 1905 EVENT_PTR(td_fetch_bubbles_scale_slm), 1906 EVENT_PTR(td_slots_issued_slm), 1907 EVENT_PTR(td_slots_retired_slm), 1908 NULL 1909 }; 1910 1911 static struct extra_reg intel_slm_extra_regs[] __read_mostly = 1912 { 1913 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 1914 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0), 1915 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1), 1916 EVENT_EXTRA_END 1917 }; 1918 1919 #define SLM_DMND_READ SNB_DMND_DATA_RD 1920 #define SLM_DMND_WRITE SNB_DMND_RFO 1921 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 1922 1923 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM) 1924 #define SLM_LLC_ACCESS SNB_RESP_ANY 1925 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM) 1926 1927 static __initconst const u64 slm_hw_cache_extra_regs 1928 [PERF_COUNT_HW_CACHE_MAX] 1929 [PERF_COUNT_HW_CACHE_OP_MAX] 1930 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1931 { 1932 [ C(LL ) ] = { 1933 [ C(OP_READ) ] = { 1934 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS, 1935 [ C(RESULT_MISS) ] = 0, 1936 }, 1937 [ C(OP_WRITE) ] = { 1938 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS, 1939 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS, 1940 }, 1941 [ C(OP_PREFETCH) ] = { 1942 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS, 1943 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS, 1944 }, 1945 }, 1946 }; 1947 1948 static __initconst const u64 slm_hw_cache_event_ids 1949 [PERF_COUNT_HW_CACHE_MAX] 1950 [PERF_COUNT_HW_CACHE_OP_MAX] 1951 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1952 { 1953 [ C(L1D) ] = { 1954 [ C(OP_READ) ] = { 1955 [ C(RESULT_ACCESS) ] = 0, 1956 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */ 1957 }, 1958 [ C(OP_WRITE) ] = { 1959 [ C(RESULT_ACCESS) ] = 0, 1960 [ C(RESULT_MISS) ] = 0, 1961 }, 1962 [ C(OP_PREFETCH) ] = { 1963 [ C(RESULT_ACCESS) ] = 0, 1964 [ C(RESULT_MISS) ] = 0, 1965 }, 1966 }, 1967 [ C(L1I ) ] = { 1968 [ C(OP_READ) ] = { 1969 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */ 1970 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ 1971 }, 1972 [ C(OP_WRITE) ] = { 1973 [ C(RESULT_ACCESS) ] = -1, 1974 [ C(RESULT_MISS) ] = -1, 1975 }, 1976 [ C(OP_PREFETCH) ] = { 1977 [ C(RESULT_ACCESS) ] = 0, 1978 [ C(RESULT_MISS) ] = 0, 1979 }, 1980 }, 1981 [ C(LL ) ] = { 1982 [ C(OP_READ) ] = { 1983 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 1984 [ C(RESULT_ACCESS) ] = 0x01b7, 1985 [ C(RESULT_MISS) ] = 0, 1986 }, 1987 [ C(OP_WRITE) ] = { 1988 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1989 [ C(RESULT_ACCESS) ] = 0x01b7, 1990 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1991 [ C(RESULT_MISS) ] = 0x01b7, 1992 }, 1993 [ C(OP_PREFETCH) ] = { 1994 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1995 [ C(RESULT_ACCESS) ]
= 0x01b7, 1996 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1997 [ C(RESULT_MISS) ] = 0x01b7, 1998 }, 1999 }, 2000 [ C(DTLB) ] = { 2001 [ C(OP_READ) ] = { 2002 [ C(RESULT_ACCESS) ] = 0, 2003 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */ 2004 }, 2005 [ C(OP_WRITE) ] = { 2006 [ C(RESULT_ACCESS) ] = 0, 2007 [ C(RESULT_MISS) ] = 0, 2008 }, 2009 [ C(OP_PREFETCH) ] = { 2010 [ C(RESULT_ACCESS) ] = 0, 2011 [ C(RESULT_MISS) ] = 0, 2012 }, 2013 }, 2014 [ C(ITLB) ] = { 2015 [ C(OP_READ) ] = { 2016 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 2017 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */ 2018 }, 2019 [ C(OP_WRITE) ] = { 2020 [ C(RESULT_ACCESS) ] = -1, 2021 [ C(RESULT_MISS) ] = -1, 2022 }, 2023 [ C(OP_PREFETCH) ] = { 2024 [ C(RESULT_ACCESS) ] = -1, 2025 [ C(RESULT_MISS) ] = -1, 2026 }, 2027 }, 2028 [ C(BPU ) ] = { 2029 [ C(OP_READ) ] = { 2030 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 2031 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 2032 }, 2033 [ C(OP_WRITE) ] = { 2034 [ C(RESULT_ACCESS) ] = -1, 2035 [ C(RESULT_MISS) ] = -1, 2036 }, 2037 [ C(OP_PREFETCH) ] = { 2038 [ C(RESULT_ACCESS) ] = -1, 2039 [ C(RESULT_MISS) ] = -1, 2040 }, 2041 }, 2042 }; 2043 2044 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c"); 2045 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3"); 2046 /* UOPS_NOT_DELIVERED.ANY */ 2047 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c"); 2048 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */ 2049 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02"); 2050 /* UOPS_RETIRED.ANY */ 2051 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2"); 2052 /* UOPS_ISSUED.ANY */ 2053 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e"); 2054 2055 static struct attribute *glm_events_attrs[] = { 2056 EVENT_PTR(td_total_slots_glm), 2057 EVENT_PTR(td_total_slots_scale_glm), 2058 EVENT_PTR(td_fetch_bubbles_glm), 2059 EVENT_PTR(td_recovery_bubbles_glm), 2060 EVENT_PTR(td_slots_issued_glm), 2061 EVENT_PTR(td_slots_retired_glm), 2062 NULL 2063 }; 2064 2065 static struct extra_reg intel_glm_extra_regs[] __read_mostly = { 2066 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2067 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0), 2068 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1), 2069 EVENT_EXTRA_END 2070 }; 2071 2072 #define GLM_DEMAND_DATA_RD BIT_ULL(0) 2073 #define GLM_DEMAND_RFO BIT_ULL(1) 2074 #define GLM_ANY_RESPONSE BIT_ULL(16) 2075 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33) 2076 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD 2077 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO 2078 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 2079 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE 2080 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM) 2081 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM) 2082 2083 static __initconst const u64 glm_hw_cache_event_ids 2084 [PERF_COUNT_HW_CACHE_MAX] 2085 [PERF_COUNT_HW_CACHE_OP_MAX] 2086 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2087 [C(L1D)] = { 2088 [C(OP_READ)] = { 2089 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 2090 [C(RESULT_MISS)] = 0x0, 2091 }, 2092 [C(OP_WRITE)] = { 2093 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2094 [C(RESULT_MISS)] = 0x0, 2095 }, 2096 [C(OP_PREFETCH)] = { 2097 [C(RESULT_ACCESS)] = 0x0, 2098 [C(RESULT_MISS)] = 0x0, 2099 }, 2100 }, 2101 [C(L1I)] = { 2102 [C(OP_READ)] = 
{ 2103 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 2104 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 2105 }, 2106 [C(OP_WRITE)] = { 2107 [C(RESULT_ACCESS)] = -1, 2108 [C(RESULT_MISS)] = -1, 2109 }, 2110 [C(OP_PREFETCH)] = { 2111 [C(RESULT_ACCESS)] = 0x0, 2112 [C(RESULT_MISS)] = 0x0, 2113 }, 2114 }, 2115 [C(LL)] = { 2116 [C(OP_READ)] = { 2117 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2118 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2119 }, 2120 [C(OP_WRITE)] = { 2121 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2122 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2123 }, 2124 [C(OP_PREFETCH)] = { 2125 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2126 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2127 }, 2128 }, 2129 [C(DTLB)] = { 2130 [C(OP_READ)] = { 2131 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 2132 [C(RESULT_MISS)] = 0x0, 2133 }, 2134 [C(OP_WRITE)] = { 2135 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2136 [C(RESULT_MISS)] = 0x0, 2137 }, 2138 [C(OP_PREFETCH)] = { 2139 [C(RESULT_ACCESS)] = 0x0, 2140 [C(RESULT_MISS)] = 0x0, 2141 }, 2142 }, 2143 [C(ITLB)] = { 2144 [C(OP_READ)] = { 2145 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 2146 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 2147 }, 2148 [C(OP_WRITE)] = { 2149 [C(RESULT_ACCESS)] = -1, 2150 [C(RESULT_MISS)] = -1, 2151 }, 2152 [C(OP_PREFETCH)] = { 2153 [C(RESULT_ACCESS)] = -1, 2154 [C(RESULT_MISS)] = -1, 2155 }, 2156 }, 2157 [C(BPU)] = { 2158 [C(OP_READ)] = { 2159 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 2160 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 2161 }, 2162 [C(OP_WRITE)] = { 2163 [C(RESULT_ACCESS)] = -1, 2164 [C(RESULT_MISS)] = -1, 2165 }, 2166 [C(OP_PREFETCH)] = { 2167 [C(RESULT_ACCESS)] = -1, 2168 [C(RESULT_MISS)] = -1, 2169 }, 2170 }, 2171 }; 2172 2173 static __initconst const u64 glm_hw_cache_extra_regs 2174 [PERF_COUNT_HW_CACHE_MAX] 2175 [PERF_COUNT_HW_CACHE_OP_MAX] 2176 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2177 [C(LL)] = { 2178 [C(OP_READ)] = { 2179 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 2180 GLM_LLC_ACCESS, 2181 [C(RESULT_MISS)] = GLM_DEMAND_READ| 2182 GLM_LLC_MISS, 2183 }, 2184 [C(OP_WRITE)] = { 2185 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 2186 GLM_LLC_ACCESS, 2187 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 2188 GLM_LLC_MISS, 2189 }, 2190 [C(OP_PREFETCH)] = { 2191 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH| 2192 GLM_LLC_ACCESS, 2193 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH| 2194 GLM_LLC_MISS, 2195 }, 2196 }, 2197 }; 2198 2199 static __initconst const u64 glp_hw_cache_event_ids 2200 [PERF_COUNT_HW_CACHE_MAX] 2201 [PERF_COUNT_HW_CACHE_OP_MAX] 2202 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2203 [C(L1D)] = { 2204 [C(OP_READ)] = { 2205 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 2206 [C(RESULT_MISS)] = 0x0, 2207 }, 2208 [C(OP_WRITE)] = { 2209 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2210 [C(RESULT_MISS)] = 0x0, 2211 }, 2212 [C(OP_PREFETCH)] = { 2213 [C(RESULT_ACCESS)] = 0x0, 2214 [C(RESULT_MISS)] = 0x0, 2215 }, 2216 }, 2217 [C(L1I)] = { 2218 [C(OP_READ)] = { 2219 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 2220 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 2221 }, 2222 [C(OP_WRITE)] = { 2223 [C(RESULT_ACCESS)] = -1, 2224 [C(RESULT_MISS)] = -1, 2225 }, 2226 [C(OP_PREFETCH)] = { 2227 [C(RESULT_ACCESS)] = 0x0, 2228 [C(RESULT_MISS)] = 0x0, 2229 }, 2230 }, 2231 [C(LL)] = { 2232 [C(OP_READ)] = { 2233 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2234 [C(RESULT_MISS)] = 
0x1b7, /* OFFCORE_RESPONSE */ 2235 }, 2236 [C(OP_WRITE)] = { 2237 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2238 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 2239 }, 2240 [C(OP_PREFETCH)] = { 2241 [C(RESULT_ACCESS)] = 0x0, 2242 [C(RESULT_MISS)] = 0x0, 2243 }, 2244 }, 2245 [C(DTLB)] = { 2246 [C(OP_READ)] = { 2247 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 2248 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 2249 }, 2250 [C(OP_WRITE)] = { 2251 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2252 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 2253 }, 2254 [C(OP_PREFETCH)] = { 2255 [C(RESULT_ACCESS)] = 0x0, 2256 [C(RESULT_MISS)] = 0x0, 2257 }, 2258 }, 2259 [C(ITLB)] = { 2260 [C(OP_READ)] = { 2261 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 2262 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 2263 }, 2264 [C(OP_WRITE)] = { 2265 [C(RESULT_ACCESS)] = -1, 2266 [C(RESULT_MISS)] = -1, 2267 }, 2268 [C(OP_PREFETCH)] = { 2269 [C(RESULT_ACCESS)] = -1, 2270 [C(RESULT_MISS)] = -1, 2271 }, 2272 }, 2273 [C(BPU)] = { 2274 [C(OP_READ)] = { 2275 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 2276 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 2277 }, 2278 [C(OP_WRITE)] = { 2279 [C(RESULT_ACCESS)] = -1, 2280 [C(RESULT_MISS)] = -1, 2281 }, 2282 [C(OP_PREFETCH)] = { 2283 [C(RESULT_ACCESS)] = -1, 2284 [C(RESULT_MISS)] = -1, 2285 }, 2286 }, 2287 }; 2288 2289 static __initconst const u64 glp_hw_cache_extra_regs 2290 [PERF_COUNT_HW_CACHE_MAX] 2291 [PERF_COUNT_HW_CACHE_OP_MAX] 2292 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2293 [C(LL)] = { 2294 [C(OP_READ)] = { 2295 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 2296 GLM_LLC_ACCESS, 2297 [C(RESULT_MISS)] = GLM_DEMAND_READ| 2298 GLM_LLC_MISS, 2299 }, 2300 [C(OP_WRITE)] = { 2301 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 2302 GLM_LLC_ACCESS, 2303 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 2304 GLM_LLC_MISS, 2305 }, 2306 [C(OP_PREFETCH)] = { 2307 [C(RESULT_ACCESS)] = 0x0, 2308 [C(RESULT_MISS)] = 0x0, 2309 }, 2310 }, 2311 }; 2312 2313 #define TNT_LOCAL_DRAM BIT_ULL(26) 2314 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD 2315 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO 2316 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE 2317 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ 2318 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) 2319 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) 2320 2321 static __initconst const u64 tnt_hw_cache_extra_regs 2322 [PERF_COUNT_HW_CACHE_MAX] 2323 [PERF_COUNT_HW_CACHE_OP_MAX] 2324 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2325 [C(LL)] = { 2326 [C(OP_READ)] = { 2327 [C(RESULT_ACCESS)] = TNT_DEMAND_READ| 2328 TNT_LLC_ACCESS, 2329 [C(RESULT_MISS)] = TNT_DEMAND_READ| 2330 TNT_LLC_MISS, 2331 }, 2332 [C(OP_WRITE)] = { 2333 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE| 2334 TNT_LLC_ACCESS, 2335 [C(RESULT_MISS)] = TNT_DEMAND_WRITE| 2336 TNT_LLC_MISS, 2337 }, 2338 [C(OP_PREFETCH)] = { 2339 [C(RESULT_ACCESS)] = 0x0, 2340 [C(RESULT_MISS)] = 0x0, 2341 }, 2342 }, 2343 }; 2344 2345 static __initconst const u64 arw_hw_cache_extra_regs 2346 [PERF_COUNT_HW_CACHE_MAX] 2347 [PERF_COUNT_HW_CACHE_OP_MAX] 2348 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2349 [C(LL)] = { 2350 [C(OP_READ)] = { 2351 [C(RESULT_ACCESS)] = 0x4000000000000001, 2352 [C(RESULT_MISS)] = 0xFFFFF000000001, 2353 }, 2354 [C(OP_WRITE)] = { 2355 [C(RESULT_ACCESS)] = 0x4000000000000002, 2356 [C(RESULT_MISS)] = 0xFFFFF000000002, 2357 }, 2358 [C(OP_PREFETCH)] = { 2359 [C(RESULT_ACCESS)] = 0x0, 2360 [C(RESULT_MISS)] = 0x0, 2361 }, 2362 }, 
2363 }; 2364 2365 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0"); 2366 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0"); 2367 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6"); 2368 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0"); 2369 2370 static struct attribute *tnt_events_attrs[] = { 2371 EVENT_PTR(td_fe_bound_tnt), 2372 EVENT_PTR(td_retiring_tnt), 2373 EVENT_PTR(td_bad_spec_tnt), 2374 EVENT_PTR(td_be_bound_tnt), 2375 NULL, 2376 }; 2377 2378 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { 2379 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2380 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), 2381 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), 2382 EVENT_EXTRA_END 2383 }; 2384 2385 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3"); 2386 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6"); 2387 2388 static struct attribute *grt_mem_attrs[] = { 2389 EVENT_PTR(mem_ld_grt), 2390 EVENT_PTR(mem_st_grt), 2391 NULL 2392 }; 2393 2394 static struct extra_reg intel_grt_extra_regs[] __read_mostly = { 2395 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2396 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 2397 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 2398 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2399 EVENT_EXTRA_END 2400 }; 2401 2402 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0"); 2403 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0"); 2404 2405 static struct attribute *cmt_events_attrs[] = { 2406 EVENT_PTR(td_fe_bound_tnt), 2407 EVENT_PTR(td_retiring_cmt), 2408 EVENT_PTR(td_bad_spec_cmt), 2409 EVENT_PTR(td_be_bound_tnt), 2410 NULL 2411 }; 2412 2413 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = { 2414 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2415 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0), 2416 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1), 2417 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2418 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0), 2419 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1), 2420 EVENT_EXTRA_END 2421 }; 2422 2423 static struct extra_reg intel_arw_extra_regs[] __read_mostly = { 2424 /* must define OMR_X first, see intel_alt_er() */ 2425 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0), 2426 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1), 2427 INTEL_UEVENT_EXTRA_REG(0x04b7, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2), 2428 INTEL_UEVENT_EXTRA_REG(0x08b7, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3), 2429 INTEL_UEVENT_EXTRA_REG(0x01d4, MSR_OMR_0, 0xc0ffffffffffffffull, OMR_0), 2430 INTEL_UEVENT_EXTRA_REG(0x02d4, MSR_OMR_1, 0xc0ffffffffffffffull, OMR_1), 2431 INTEL_UEVENT_EXTRA_REG(0x04d4, MSR_OMR_2, 0xc0ffffffffffffffull, OMR_2), 2432 INTEL_UEVENT_EXTRA_REG(0x08d4, MSR_OMR_3, 0xc0ffffffffffffffull, OMR_3), 2433 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2434 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0), 2435 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1), 2436 EVENT_EXTRA_END 2437 }; 2438 2439 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_skt, "event=0x9c,umask=0x01"); 
2440 EVENT_ATTR_STR(topdown-retiring, td_retiring_skt, "event=0xc2,umask=0x02"); 2441 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_skt, "event=0xa4,umask=0x02"); 2442 2443 static struct attribute *skt_events_attrs[] = { 2444 EVENT_PTR(td_fe_bound_skt), 2445 EVENT_PTR(td_retiring_skt), 2446 EVENT_PTR(td_bad_spec_cmt), 2447 EVENT_PTR(td_be_bound_skt), 2448 NULL, 2449 }; 2450 2451 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ 2452 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ 2453 #define KNL_MCDRAM_LOCAL BIT_ULL(21) 2454 #define KNL_MCDRAM_FAR BIT_ULL(22) 2455 #define KNL_DDR_LOCAL BIT_ULL(23) 2456 #define KNL_DDR_FAR BIT_ULL(24) 2457 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \ 2458 KNL_DDR_LOCAL | KNL_DDR_FAR) 2459 #define KNL_L2_READ SLM_DMND_READ 2460 #define KNL_L2_WRITE SLM_DMND_WRITE 2461 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH 2462 #define KNL_L2_ACCESS SLM_LLC_ACCESS 2463 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \ 2464 KNL_DRAM_ANY | SNB_SNP_ANY | \ 2465 SNB_NON_DRAM) 2466 2467 static __initconst const u64 knl_hw_cache_extra_regs 2468 [PERF_COUNT_HW_CACHE_MAX] 2469 [PERF_COUNT_HW_CACHE_OP_MAX] 2470 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2471 [C(LL)] = { 2472 [C(OP_READ)] = { 2473 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS, 2474 [C(RESULT_MISS)] = 0, 2475 }, 2476 [C(OP_WRITE)] = { 2477 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS, 2478 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS, 2479 }, 2480 [C(OP_PREFETCH)] = { 2481 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS, 2482 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS, 2483 }, 2484 }, 2485 }; 2486 2487 /* 2488 * Used from PMIs where the LBRs are already disabled. 2489 * 2490 * This function could be called consecutively. It is required to remain in 2491 * disabled state if called consecutively. 2492 * 2493 * During consecutive calls, the same disable value will be written to related 2494 * registers, so the PMU state remains unchanged. 2495 * 2496 * intel_bts events don't coexist with intel PMU's BTS events because of 2497 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them 2498 * disabled around intel PMU's event batching etc, only inside the PMI handler. 2499 * 2500 * Avoid PEBS_ENABLE MSR access in PMIs. 2501 * The GLOBAL_CTRL has been disabled. All the counters do not count anymore. 2502 * It doesn't matter if the PEBS is enabled or not. 2503 * Usually, the PEBS status are not changed in PMIs. It's unnecessary to 2504 * access PEBS_ENABLE MSR in disable_all()/enable_all(). 2505 * However, there are some cases which may change PEBS status, e.g. PMI 2506 * throttle. The PEBS_ENABLE should be updated where the status changes. 
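 * (Cross-reference, added as a reading aid: handle_pmi_common() below does
 * exactly that; it snapshots cpuc->pebs_enabled before draining PEBS and
 * rewrites MSR_IA32_PEBS_ENABLE only if the value changed under the PMI.)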
2507 */ 2508 static __always_inline void __intel_pmu_disable_all(bool bts) 2509 { 2510 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2511 2512 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2513 2514 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2515 intel_pmu_disable_bts(); 2516 } 2517 2518 static __always_inline void intel_pmu_disable_all(void) 2519 { 2520 __intel_pmu_disable_all(true); 2521 static_call_cond(x86_pmu_pebs_disable_all)(); 2522 intel_pmu_lbr_disable_all(); 2523 } 2524 2525 static void __intel_pmu_enable_all(int added, bool pmi) 2526 { 2527 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2528 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2529 2530 intel_pmu_lbr_enable_all(pmi); 2531 2532 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { 2533 wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); 2534 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; 2535 } 2536 2537 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 2538 intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 2539 2540 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 2541 struct perf_event *event = 2542 cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 2543 2544 if (WARN_ON_ONCE(!event)) 2545 return; 2546 2547 intel_pmu_enable_bts(event->hw.config); 2548 } 2549 } 2550 2551 static void intel_pmu_enable_all(int added) 2552 { 2553 static_call_cond(x86_pmu_pebs_enable_all)(); 2554 __intel_pmu_enable_all(added, false); 2555 } 2556 2557 static noinline int 2558 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, 2559 unsigned int cnt, unsigned long flags) 2560 { 2561 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2562 2563 intel_pmu_lbr_read(); 2564 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); 2565 2566 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); 2567 intel_pmu_enable_all(0); 2568 local_irq_restore(flags); 2569 return cnt; 2570 } 2571 2572 static int 2573 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2574 { 2575 unsigned long flags; 2576 2577 /* must not have branches... */ 2578 local_irq_save(flags); 2579 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2580 __intel_pmu_lbr_disable(); 2581 /* ... until here */ 2582 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2583 } 2584 2585 static int 2586 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2587 { 2588 unsigned long flags; 2589 2590 /* must not have branches... */ 2591 local_irq_save(flags); 2592 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2593 __intel_pmu_arch_lbr_disable(); 2594 /* ... until here */ 2595 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2596 } 2597 2598 /* 2599 * Workaround for: 2600 * Intel Errata AAK100 (model 26) 2601 * Intel Errata AAP53 (model 30) 2602 * Intel Errata BD53 (model 44) 2603 * 2604 * The official story: 2605 * These chips need to be 'reset' when adding counters by programming the 2606 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either 2607 * in sequence on the same PMC or on different PMCs. 2608 * 2609 * In practice it appears some of these events do in fact count, and 2610 * we need to program all 4 events. 
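 *
 * (Decoding note, added as a reading aid: 0x4300B5 is a PERFEVTSELx value
 * with event select 0xB5, umask 0x00, and the USR (bit 16), OS (bit 17)
 * and EN (bit 22) bits set, i.e. an enabled user+kernel count of an
 * otherwise non-counting event; 0x4300D2 and 0x4300B1 follow the same
 * pattern.)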
2611 */ 2612 static void intel_pmu_nhm_workaround(void) 2613 { 2614 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2615 static const unsigned long nhm_magic[4] = { 2616 0x4300B5, 2617 0x4300D2, 2618 0x4300B1, 2619 0x4300B1 2620 }; 2621 struct perf_event *event; 2622 int i; 2623 2624 /* 2625 * The errata require the following steps: 2626 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; 2627 * 2) Configure 4 PERFEVTSELx with the magic events and clear 2628 * the corresponding PMCx; 2629 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; 2630 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; 2631 * 5) Clear 4 pairs of PERFEVTSELx and PMCx; 2632 */ 2633 2634 /* 2635 * The real steps we choose are a little different from above. 2636 * A) To reduce MSR operations, we don't run step 1) as they 2637 * are already cleared before this function is called; 2638 * B) Call x86_perf_event_update to save PMCx before configuring 2639 * PERFEVTSELx with the magic number; 2640 * C) With step 5), we only clear a PERFEVTSELx when it is 2641 * not currently in use. 2642 * D) Call x86_perf_event_set_period to restore PMCx; 2643 */ 2644 2645 /* We always operate on 4 pairs of PERF counters */ 2646 for (i = 0; i < 4; i++) { 2647 event = cpuc->events[i]; 2648 if (event) 2649 static_call(x86_pmu_update)(event); 2650 } 2651 2652 for (i = 0; i < 4; i++) { 2653 wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 2654 wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 2655 } 2656 2657 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 2658 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 2659 2660 for (i = 0; i < 4; i++) { 2661 event = cpuc->events[i]; 2662 2663 if (event) { 2664 static_call(x86_pmu_set_period)(event); 2665 __x86_pmu_enable_event(&event->hw, 2666 ARCH_PERFMON_EVENTSEL_ENABLE); 2667 } else 2668 wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 2669 } 2670 } 2671 2672 static void intel_pmu_nhm_enable_all(int added) 2673 { 2674 if (added) 2675 intel_pmu_nhm_workaround(); 2676 intel_pmu_enable_all(added); 2677 } 2678 2679 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) 2680 { 2681 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; 2682 2683 if (cpuc->tfa_shadow != val) { 2684 cpuc->tfa_shadow = val; 2685 wrmsrq(MSR_TSX_FORCE_ABORT, val); 2686 } 2687 } 2688 2689 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 2690 { 2691 /* 2692 * We're going to use PMC3; make sure TFA is set before we touch it. 2693 */ 2694 if (cntr == 3) 2695 intel_set_tfa(cpuc, true); 2696 } 2697 2698 static void intel_tfa_pmu_enable_all(int added) 2699 { 2700 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2701 2702 /* 2703 * If we find PMC3 is no longer used when we enable the PMU, we can 2704 * clear TFA.
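 * (Background note, added as a reading aid: TFA here is the
 * RTM_FORCE_ABORT bit of MSR_TSX_FORCE_ABORT. On the affected parts PMC3
 * only counts reliably while RTM transactions are forced to abort, so the
 * bit is set in intel_tfa_commit_scheduling() when PMC3 is handed out and
 * cleared here once PMC3 is idle, keeping the TSX penalty as short as
 * possible.)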
2705 */ 2706 if (!test_bit(3, cpuc->active_mask)) 2707 intel_set_tfa(cpuc, false); 2708 2709 intel_pmu_enable_all(added); 2710 } 2711 2712 static inline u64 intel_pmu_get_status(void) 2713 { 2714 u64 status; 2715 2716 rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); 2717 2718 return status; 2719 } 2720 2721 static inline void intel_pmu_ack_status(u64 ack) 2722 { 2723 wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2724 } 2725 2726 static inline bool event_is_checkpointed(struct perf_event *event) 2727 { 2728 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; 2729 } 2730 2731 static inline void intel_set_masks(struct perf_event *event, int idx) 2732 { 2733 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2734 2735 if (event->attr.exclude_host) 2736 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2737 if (event->attr.exclude_guest) 2738 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2739 if (event_is_checkpointed(event)) 2740 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2741 } 2742 2743 static inline void intel_clear_masks(struct perf_event *event, int idx) 2744 { 2745 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2746 2747 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2748 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2749 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2750 } 2751 2752 static void intel_pmu_disable_fixed(struct perf_event *event) 2753 { 2754 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2755 struct hw_perf_event *hwc = &event->hw; 2756 int idx = hwc->idx; 2757 u64 mask; 2758 2759 if (is_topdown_idx(idx)) { 2760 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2761 2762 /* 2763 * When there are other active TopDown events, 2764 * don't disable the fixed counter 3. 2765 */ 2766 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2767 return; 2768 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2769 } 2770 2771 intel_clear_masks(event, idx); 2772 2773 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK); 2774 cpuc->fixed_ctrl_val &= ~mask; 2775 } 2776 2777 static inline void __intel_pmu_update_event_ext(int idx, u64 ext) 2778 { 2779 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2780 u32 msr; 2781 2782 if (idx < INTEL_PMC_IDX_FIXED) { 2783 msr = MSR_IA32_PMC_V6_GP0_CFG_C + 2784 x86_pmu.addr_offset(idx, false); 2785 } else { 2786 msr = MSR_IA32_PMC_V6_FX0_CFG_C + 2787 x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false); 2788 } 2789 2790 cpuc->cfg_c_val[idx] = ext; 2791 wrmsrq(msr, ext); 2792 } 2793 2794 static void intel_pmu_disable_event_ext(struct perf_event *event) 2795 { 2796 /* 2797 * Only clear CFG_C MSR for PEBS counter group events, 2798 * it avoids the HW counter's value to be added into 2799 * other PEBS records incorrectly after PEBS counter 2800 * group events are disabled. 2801 * 2802 * For other events, it's unnecessary to clear CFG_C MSRs 2803 * since CFG_C doesn't take effect if counter is in 2804 * disabled state. That helps to reduce the WRMSR overhead 2805 * in context switches. 
2806 */ 2807 if (!is_pebs_counter_event_group(event)) 2808 return; 2809 2810 __intel_pmu_update_event_ext(event->hw.idx, 0); 2811 } 2812 2813 DEFINE_STATIC_CALL_NULL(intel_pmu_disable_event_ext, intel_pmu_disable_event_ext); 2814 2815 static void intel_pmu_disable_event(struct perf_event *event) 2816 { 2817 struct hw_perf_event *hwc = &event->hw; 2818 int idx = hwc->idx; 2819 2820 switch (idx) { 2821 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2822 intel_clear_masks(event, idx); 2823 static_call_cond(intel_pmu_disable_event_ext)(event); 2824 x86_pmu_disable_event(event); 2825 break; 2826 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2827 static_call_cond(intel_pmu_disable_event_ext)(event); 2828 fallthrough; 2829 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2830 intel_pmu_disable_fixed(event); 2831 break; 2832 case INTEL_PMC_IDX_FIXED_BTS: 2833 intel_pmu_disable_bts(); 2834 intel_pmu_drain_bts_buffer(); 2835 return; 2836 case INTEL_PMC_IDX_FIXED_VLBR: 2837 intel_clear_masks(event, idx); 2838 break; 2839 default: 2840 intel_clear_masks(event, idx); 2841 pr_warn("Failed to disable the event with invalid index %d\n", 2842 idx); 2843 return; 2844 } 2845 2846 /* 2847 * Needs to be called after x86_pmu_disable_event, 2848 * so we don't trigger the event without PEBS bit set. 2849 */ 2850 if (unlikely(event->attr.precise_ip)) 2851 static_call(x86_pmu_pebs_disable)(event); 2852 } 2853 2854 static void intel_pmu_assign_event(struct perf_event *event, int idx) 2855 { 2856 if (is_pebs_pt(event)) 2857 perf_report_aux_output_id(event, idx); 2858 } 2859 2860 static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) 2861 { 2862 return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; 2863 } 2864 2865 static void intel_pmu_del_event(struct perf_event *event) 2866 { 2867 if (intel_pmu_needs_branch_stack(event)) 2868 intel_pmu_lbr_del(event); 2869 if (event->attr.precise_ip) 2870 intel_pmu_pebs_del(event); 2871 if (is_pebs_counter_event_group(event) || 2872 is_acr_event_group(event)) 2873 this_cpu_ptr(&cpu_hw_events)->n_late_setup--; 2874 } 2875 2876 static int icl_set_topdown_event_period(struct perf_event *event) 2877 { 2878 struct hw_perf_event *hwc = &event->hw; 2879 s64 left = local64_read(&hwc->period_left); 2880 2881 /* 2882 * The values in PERF_METRICS MSR are derived from fixed counter 3. 2883 * Software should start both registers, PERF_METRICS and fixed 2884 * counter 3, from zero. 2885 * Clear PERF_METRICS and Fixed counter 3 in initialization. 2886 * After that, both MSRs will be cleared for each read. 2887 * Don't need to clear them again. 2888 */ 2889 if (left == x86_pmu.max_period) { 2890 wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); 2891 wrmsrq(MSR_PERF_METRICS, 0); 2892 hwc->saved_slots = 0; 2893 hwc->saved_metric = 0; 2894 } 2895 2896 if ((hwc->saved_slots) && is_slots_event(event)) { 2897 wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); 2898 wrmsrq(MSR_PERF_METRICS, hwc->saved_metric); 2899 } 2900 2901 perf_event_update_userpage(event); 2902 2903 return 0; 2904 } 2905 2906 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); 2907 2908 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) 2909 { 2910 u32 val; 2911 2912 /* 2913 * The metric is reported as an 8bit integer fraction 2914 * summing up to 0xff. 
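 * (Worked example, added as a reading aid: a metric byte of 0x40 with
 * slots = 1000 yields roughly 1000 * 0x40 / 0xff ~= 251 slots, per the
 * formula below.)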
2915 * slots-in-metric = (Metric / 0xff) * slots 2916 */ 2917 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; 2918 return mul_u64_u32_div(slots, val, 0xff); 2919 } 2920 2921 static u64 icl_get_topdown_value(struct perf_event *event, 2922 u64 slots, u64 metrics) 2923 { 2924 int idx = event->hw.idx; 2925 u64 delta; 2926 2927 if (is_metric_idx(idx)) 2928 delta = icl_get_metrics_event_value(metrics, slots, idx); 2929 else 2930 delta = slots; 2931 2932 return delta; 2933 } 2934 2935 static void __icl_update_topdown_event(struct perf_event *event, 2936 u64 slots, u64 metrics, 2937 u64 last_slots, u64 last_metrics) 2938 { 2939 u64 delta, last = 0; 2940 2941 delta = icl_get_topdown_value(event, slots, metrics); 2942 if (last_slots) 2943 last = icl_get_topdown_value(event, last_slots, last_metrics); 2944 2945 /* 2946 * The 8-bit integer fraction of the metric may not be accurate, 2947 * especially when the change is very small. 2948 * For example, if only a few bad_spec events happen, the fraction 2949 * may be reduced from 1 to 0. If so, the bad_spec event value 2950 * will be 0 which is definitely less than the last value. 2951 * Avoid updating event->count in this case. 2952 */ 2953 if (delta > last) { 2954 delta -= last; 2955 local64_add(delta, &event->count); 2956 } 2957 } 2958 2959 static void update_saved_topdown_regs(struct perf_event *event, u64 slots, 2960 u64 metrics, int metric_end) 2961 { 2962 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2963 struct perf_event *other; 2964 int idx; 2965 2966 event->hw.saved_slots = slots; 2967 event->hw.saved_metric = metrics; 2968 2969 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2970 if (!is_topdown_idx(idx)) 2971 continue; 2972 other = cpuc->events[idx]; 2973 other->hw.saved_slots = slots; 2974 other->hw.saved_metric = metrics; 2975 } 2976 } 2977 2978 /* 2979 * Update all active Topdown events. 2980 * 2981 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be 2982 * modified by an NMI. The PMU has to be disabled before calling this function. 2983 */ 2984 2985 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val) 2986 { 2987 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2988 struct perf_event *other; 2989 u64 slots, metrics; 2990 bool reset = true; 2991 int idx; 2992 2993 if (!val) { 2994 /* read Fixed counter 3 */ 2995 slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE); 2996 if (!slots) 2997 return 0; 2998 2999 /* read PERF_METRICS */ 3000 metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS); 3001 } else { 3002 slots = val[0]; 3003 metrics = val[1]; 3004 /* 3005 * Don't reset the PERF_METRICS and Fixed counter 3 3006 * for each PEBS record read. Utilize the RDPMC metrics 3007 * clear mode. 3008 */ 3009 reset = false; 3010 } 3011 3012 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 3013 if (!is_topdown_idx(idx)) 3014 continue; 3015 other = cpuc->events[idx]; 3016 __icl_update_topdown_event(other, slots, metrics, 3017 event ? event->hw.saved_slots : 0, 3018 event ? event->hw.saved_metric : 0); 3019 } 3020 3021 /* 3022 * Check and update this event, which may have been cleared 3023 * in active_mask, e.g. in
x86_pmu_stop() 3024 */ 3025 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { 3026 __icl_update_topdown_event(event, slots, metrics, 3027 event->hw.saved_slots, 3028 event->hw.saved_metric); 3029 3030 /* 3031 * In x86_pmu_stop(), the event is cleared in active_mask first, 3032 * then drain the delta, which indicates context switch for 3033 * counting. 3034 * Save metric and slots for context switch. 3035 * Don't need to reset the PERF_METRICS and Fixed counter 3. 3036 * Because the values will be restored in next schedule in. 3037 */ 3038 update_saved_topdown_regs(event, slots, metrics, metric_end); 3039 reset = false; 3040 } 3041 3042 if (reset) { 3043 /* The fixed counter 3 has to be written before the PERF_METRICS. */ 3044 wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); 3045 wrmsrq(MSR_PERF_METRICS, 0); 3046 if (event) 3047 update_saved_topdown_regs(event, 0, 0, metric_end); 3048 } 3049 3050 return slots; 3051 } 3052 3053 static u64 icl_update_topdown_event(struct perf_event *event, u64 *val) 3054 { 3055 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE + 3056 x86_pmu.num_topdown_events - 1, 3057 val); 3058 } 3059 3060 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update); 3061 3062 static void intel_pmu_read_event(struct perf_event *event) 3063 { 3064 if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) || 3065 is_pebs_counter_event_group(event)) { 3066 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3067 bool pmu_enabled = cpuc->enabled; 3068 3069 /* Only need to call update_topdown_event() once for group read. */ 3070 if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ)) 3071 return; 3072 3073 cpuc->enabled = 0; 3074 if (pmu_enabled) 3075 intel_pmu_disable_all(); 3076 3077 /* 3078 * If the PEBS counters snapshotting is enabled, 3079 * the topdown event is available in PEBS records. 3080 */ 3081 if (is_topdown_count(event) && !is_pebs_counter_event_group(event)) 3082 static_call(intel_pmu_update_topdown_event)(event, NULL); 3083 else 3084 intel_pmu_drain_pebs_buffer(); 3085 3086 cpuc->enabled = pmu_enabled; 3087 if (pmu_enabled) 3088 intel_pmu_enable_all(0); 3089 3090 return; 3091 } 3092 3093 x86_perf_event_update(event); 3094 } 3095 3096 static void intel_pmu_enable_fixed(struct perf_event *event) 3097 { 3098 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3099 struct hw_perf_event *hwc = &event->hw; 3100 int idx = hwc->idx; 3101 u64 bits = 0; 3102 3103 if (is_topdown_idx(idx)) { 3104 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3105 /* 3106 * When there are other active TopDown events, 3107 * don't enable the fixed counter 3 again. 
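 * (Added note: all topdown events share fixed counter 3 plus the
 * PERF_METRICS MSR, so once one member of the group has set the counter
 * up, the later members simply return below without touching it again.)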
3108 */ 3109 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 3110 return; 3111 3112 idx = INTEL_PMC_IDX_FIXED_SLOTS; 3113 3114 if (event->attr.config1 & INTEL_TD_CFG_METRIC_CLEAR) 3115 bits |= INTEL_FIXED_3_METRICS_CLEAR; 3116 } 3117 3118 intel_set_masks(event, idx); 3119 3120 /* 3121 * Enable IRQ generation (0x8), if not PEBS, 3122 * and enable ring-3 counting (0x2) and ring-0 counting (0x1) 3123 * if requested: 3124 */ 3125 if (!event->attr.precise_ip) 3126 bits |= INTEL_FIXED_0_ENABLE_PMI; 3127 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) 3128 bits |= INTEL_FIXED_0_USER; 3129 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) 3130 bits |= INTEL_FIXED_0_KERNEL; 3131 if (hwc->config & ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE) 3132 bits |= INTEL_FIXED_0_RDPMC_USER_DISABLE; 3133 3134 /* 3135 * ANY bit is supported in v3 and up 3136 */ 3137 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) 3138 bits |= INTEL_FIXED_0_ANYTHREAD; 3139 3140 idx -= INTEL_PMC_IDX_FIXED; 3141 bits = intel_fixed_bits_by_idx(idx, bits); 3142 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) 3143 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 3144 3145 cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); 3146 cpuc->fixed_ctrl_val |= bits; 3147 } 3148 3149 static void intel_pmu_config_acr(int idx, u64 mask, u32 reload) 3150 { 3151 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3152 int msr_b, msr_c; 3153 int msr_offset; 3154 3155 if (!mask && !cpuc->acr_cfg_b[idx]) 3156 return; 3157 3158 if (idx < INTEL_PMC_IDX_FIXED) { 3159 msr_b = MSR_IA32_PMC_V6_GP0_CFG_B; 3160 msr_c = MSR_IA32_PMC_V6_GP0_CFG_C; 3161 msr_offset = x86_pmu.addr_offset(idx, false); 3162 } else { 3163 msr_b = MSR_IA32_PMC_V6_FX0_CFG_B; 3164 msr_c = MSR_IA32_PMC_V6_FX0_CFG_C; 3165 msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false); 3166 } 3167 3168 if (cpuc->acr_cfg_b[idx] != mask) { 3169 wrmsrl(msr_b + msr_offset, mask); 3170 cpuc->acr_cfg_b[idx] = mask; 3171 } 3172 /* Only need to update the reload value when there is a valid config value. */ 3173 if (mask && cpuc->acr_cfg_c[idx] != reload) { 3174 wrmsrl(msr_c + msr_offset, reload); 3175 cpuc->acr_cfg_c[idx] = reload; 3176 } 3177 } 3178 3179 static void intel_pmu_enable_acr(struct perf_event *event) 3180 { 3181 struct hw_perf_event *hwc = &event->hw; 3182 3183 if (!is_acr_event_group(event) || !event->attr.config2) { 3184 /* 3185 * The disable doesn't clear the ACR CFG register. 3186 * Check and clear the ACR CFG register. 
3187 */ 3188 intel_pmu_config_acr(hwc->idx, 0, 0); 3189 return; 3190 } 3191 3192 intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period); 3193 } 3194 3195 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr); 3196 3197 static void intel_pmu_enable_event_ext(struct perf_event *event) 3198 { 3199 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3200 struct hw_perf_event *hwc = &event->hw; 3201 union arch_pebs_index old, new; 3202 struct arch_pebs_cap cap; 3203 u64 ext = 0; 3204 3205 cap = hybrid(cpuc->pmu, arch_pebs_cap); 3206 3207 if (event->attr.precise_ip) { 3208 u64 pebs_data_cfg = intel_get_arch_pebs_data_config(event); 3209 3210 ext |= ARCH_PEBS_EN; 3211 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) 3212 ext |= (-hwc->sample_period) & ARCH_PEBS_RELOAD; 3213 3214 if (pebs_data_cfg && cap.caps) { 3215 if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) 3216 ext |= ARCH_PEBS_AUX & cap.caps; 3217 3218 if (pebs_data_cfg & PEBS_DATACFG_GP) 3219 ext |= ARCH_PEBS_GPR & cap.caps; 3220 3221 if (pebs_data_cfg & PEBS_DATACFG_XMMS) 3222 ext |= ARCH_PEBS_VECR_XMM & cap.caps; 3223 3224 if (pebs_data_cfg & PEBS_DATACFG_LBRS) 3225 ext |= ARCH_PEBS_LBR & cap.caps; 3226 3227 if (pebs_data_cfg & 3228 (PEBS_DATACFG_CNTR_MASK << PEBS_DATACFG_CNTR_SHIFT)) 3229 ext |= ARCH_PEBS_CNTR_GP & cap.caps; 3230 3231 if (pebs_data_cfg & 3232 (PEBS_DATACFG_FIX_MASK << PEBS_DATACFG_FIX_SHIFT)) 3233 ext |= ARCH_PEBS_CNTR_FIXED & cap.caps; 3234 3235 if (pebs_data_cfg & PEBS_DATACFG_METRICS) 3236 ext |= ARCH_PEBS_CNTR_METRICS & cap.caps; 3237 } 3238 3239 if (cpuc->n_pebs == cpuc->n_large_pebs) 3240 new.thresh = ARCH_PEBS_THRESH_MULTI; 3241 else 3242 new.thresh = ARCH_PEBS_THRESH_SINGLE; 3243 3244 rdmsrq(MSR_IA32_PEBS_INDEX, old.whole); 3245 if (new.thresh != old.thresh || !old.en) { 3246 if (old.thresh == ARCH_PEBS_THRESH_MULTI && old.wr > 0) { 3247 /* 3248 * Large PEBS was enabled. 3249 * Drain PEBS buffer before applying the single PEBS. 3250 */ 3251 intel_pmu_drain_pebs_buffer(); 3252 } else { 3253 new.wr = 0; 3254 new.full = 0; 3255 new.en = 1; 3256 wrmsrq(MSR_IA32_PEBS_INDEX, new.whole); 3257 } 3258 } 3259 } 3260 3261 if (is_pebs_counter_event_group(event)) 3262 ext |= ARCH_PEBS_CNTR_ALLOW; 3263 3264 if (cpuc->cfg_c_val[hwc->idx] != ext) 3265 __intel_pmu_update_event_ext(hwc->idx, ext); 3266 } 3267 3268 static void intel_pmu_update_rdpmc_user_disable(struct perf_event *event) 3269 { 3270 if (!x86_pmu_has_rdpmc_user_disable(event->pmu)) 3271 return; 3272 3273 /* 3274 * Counter scope's user-space rdpmc is disabled by default 3275 * except two cases. 3276 * a. rdpmc = 2 (user space rdpmc enabled unconditionally) 3277 * b. rdpmc = 1 and the event is not a system-wide event. 3278 * The count of non-system-wide events would be cleared when 3279 * context switches, so no count data is leaked. 
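 * (Added note, assuming the usual sysfs layout: the rdpmc value referred
 * to here is x86_pmu.attr_rdpmc, i.e. the writable "rdpmc" attribute
 * exposed under the PMU's sysfs directory.)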
3280 */ 3281 if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE || 3282 (x86_pmu.attr_rdpmc == X86_USER_RDPMC_CONDITIONAL_ENABLE && 3283 event->ctx->task)) 3284 event->hw.config &= ~ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE; 3285 else 3286 event->hw.config |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE; 3287 } 3288 3289 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext); 3290 3291 static void intel_pmu_enable_event(struct perf_event *event) 3292 { 3293 u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; 3294 struct hw_perf_event *hwc = &event->hw; 3295 int idx = hwc->idx; 3296 3297 intel_pmu_update_rdpmc_user_disable(event); 3298 3299 if (unlikely(event->attr.precise_ip)) 3300 static_call(x86_pmu_pebs_enable)(event); 3301 3302 switch (idx) { 3303 case 0 ... INTEL_PMC_IDX_FIXED - 1: 3304 if (branch_sample_counters(event)) 3305 enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; 3306 intel_set_masks(event, idx); 3307 static_call_cond(intel_pmu_enable_acr_event)(event); 3308 static_call_cond(intel_pmu_enable_event_ext)(event); 3309 __x86_pmu_enable_event(hwc, enable_mask); 3310 break; 3311 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 3312 static_call_cond(intel_pmu_enable_acr_event)(event); 3313 static_call_cond(intel_pmu_enable_event_ext)(event); 3314 fallthrough; 3315 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 3316 intel_pmu_enable_fixed(event); 3317 break; 3318 case INTEL_PMC_IDX_FIXED_BTS: 3319 if (!__this_cpu_read(cpu_hw_events.enabled)) 3320 return; 3321 intel_pmu_enable_bts(hwc->config); 3322 break; 3323 case INTEL_PMC_IDX_FIXED_VLBR: 3324 intel_set_masks(event, idx); 3325 break; 3326 default: 3327 pr_warn("Failed to enable the event with invalid index %d\n", 3328 idx); 3329 } 3330 } 3331 3332 static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc) 3333 { 3334 struct perf_event *event, *leader; 3335 int i, j, idx; 3336 3337 for (i = 0; i < cpuc->n_events; i++) { 3338 leader = cpuc->event_list[i]; 3339 if (!is_acr_event_group(leader)) 3340 continue; 3341 3342 /* The ACR events must be contiguous. */ 3343 for (j = i; j < cpuc->n_events; j++) { 3344 event = cpuc->event_list[j]; 3345 if (event->group_leader != leader->group_leader) 3346 break; 3347 for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) { 3348 if (i + idx >= cpuc->n_events || 3349 !is_acr_event_group(cpuc->event_list[i + idx])) 3350 return; 3351 __set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1); 3352 } 3353 } 3354 i = j - 1; 3355 } 3356 } 3357 3358 void intel_pmu_late_setup(void) 3359 { 3360 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3361 3362 if (!cpuc->n_late_setup) 3363 return; 3364 3365 intel_pmu_pebs_late_setup(cpuc); 3366 intel_pmu_acr_late_setup(cpuc); 3367 } 3368 3369 static void intel_pmu_add_event(struct perf_event *event) 3370 { 3371 if (event->attr.precise_ip) 3372 intel_pmu_pebs_add(event); 3373 if (intel_pmu_needs_branch_stack(event)) 3374 intel_pmu_lbr_add(event); 3375 if (is_pebs_counter_event_group(event) || 3376 is_acr_event_group(event)) 3377 this_cpu_ptr(&cpu_hw_events)->n_late_setup++; 3378 } 3379 3380 /* 3381 * Save and restart an expired event. Called by NMI contexts, 3382 * so it has to be careful about preempting normal event ops: 3383 */ 3384 int intel_pmu_save_and_restart(struct perf_event *event) 3385 { 3386 static_call(x86_pmu_update)(event); 3387 /* 3388 * For a checkpointed counter always reset back to 0. 
This 3389 * avoids a situation where the counter overflows, aborts the 3390 * transaction and is then set back to shortly before the 3391 * overflow, and overflows and aborts again. 3392 */ 3393 if (unlikely(event_is_checkpointed(event))) { 3394 /* No race with NMIs because the counter should not be armed */ 3395 wrmsrq(event->hw.event_base, 0); 3396 local64_set(&event->hw.prev_count, 0); 3397 } 3398 return static_call(x86_pmu_set_period)(event); 3399 } 3400 3401 static int intel_pmu_set_period(struct perf_event *event) 3402 { 3403 if (unlikely(is_topdown_count(event))) 3404 return static_call(intel_pmu_set_topdown_event_period)(event); 3405 3406 return x86_perf_event_set_period(event); 3407 } 3408 3409 static u64 intel_pmu_update(struct perf_event *event) 3410 { 3411 if (unlikely(is_topdown_count(event))) 3412 return static_call(intel_pmu_update_topdown_event)(event, NULL); 3413 3414 return x86_perf_event_update(event); 3415 } 3416 3417 static void intel_pmu_reset(void) 3418 { 3419 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 3420 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3421 unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask); 3422 unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); 3423 unsigned long flags; 3424 int idx; 3425 3426 if (!*(u64 *)cntr_mask) 3427 return; 3428 3429 local_irq_save(flags); 3430 3431 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); 3432 3433 for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) { 3434 wrmsrq_safe(x86_pmu_config_addr(idx), 0ull); 3435 wrmsrq_safe(x86_pmu_event_addr(idx), 0ull); 3436 } 3437 for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) { 3438 if (fixed_counter_disabled(idx, cpuc->pmu)) 3439 continue; 3440 wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull); 3441 } 3442 3443 if (ds) 3444 ds->bts_index = ds->bts_buffer_base; 3445 3446 /* Ack all overflows and disable fixed counters */ 3447 if (x86_pmu.version >= 2) { 3448 intel_pmu_ack_status(intel_pmu_get_status()); 3449 wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); 3450 } 3451 3452 /* Reset LBRs and LBR freezing */ 3453 if (x86_pmu.lbr_nr) { 3454 update_debugctlmsr(get_debugctlmsr() & 3455 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR)); 3456 } 3457 3458 local_irq_restore(flags); 3459 } 3460 3461 /* 3462 * We may be running with guest PEBS events created by KVM, and the 3463 * PEBS records are logged into the guest's DS and invisible to host. 3464 * 3465 * In the case of guest PEBS overflow, we only trigger a fake event 3466 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM. 3467 * The guest will then vm-entry and check the guest DS area to read 3468 * the guest PEBS records. 3469 * 3470 * The contents and other behavior of the guest event do not matter. 
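 * (Added note, based on how KVM's vPMU is expected to consume this: the
 * "fake event" is just a zero-period perf_event_overflow() call on the
 * first guest-owned precise event found below; KVM's overflow callback is
 * then responsible for injecting the corresponding PMI into the guest.)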
 */
static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
				      struct perf_sample_data *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
	struct perf_event *event = NULL;
	int bit;

	if (!unlikely(perf_guest_state()))
		return;

	if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
	    !guest_pebs_idxs)
		return;

	for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, X86_PMC_IDX_MAX) {
		event = cpuc->events[bit];
		if (!event->attr.precise_ip)
			continue;

		perf_sample_data_init(data, 0, event->hw.last_period);
		perf_event_overflow(event, data, regs);

		/* Injecting one fake event is enough. */
		break;
	}
}

static int handle_pmi_common(struct pt_regs *regs, u64 status)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int bit;
	int handled = 0;

	inc_irq_stat(apic_perf_irqs);

	/*
	 * Ignore a range of extra bits in status that do not indicate
	 * overflow by themselves.
	 */
	status &= ~(GLOBAL_STATUS_COND_CHG |
		    GLOBAL_STATUS_ASIF |
		    GLOBAL_STATUS_LBRS_FROZEN);
	if (!status)
		return 0;
	/*
	 * In case multiple PEBS events are sampled at the same time,
	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
	 * PEBS buffer overflow and also seeing at most 3 PEBS counters
	 * having their bits set in the status register. This is a sign
	 * that there was at least one PEBS record pending at the time
	 * of the PMU interrupt. PEBS counters must only be processed
	 * via the drain_pebs() calls and not via the regular sample
	 * processing loop that follows; otherwise phony regular samples,
	 * not marked with the EXACT tag, may be generated in the sampling
	 * buffer. Another possibility is to have one PEBS event and at
	 * least one non-PEBS event which overflows while PEBS is armed.
	 * In this case, bit 62 of GLOBAL_STATUS will not be set, yet the
	 * overflow status bit for the PEBS counter will still be set on
	 * Skylake.
	 *
	 * To avoid this problem, we systematically ignore the PEBS-enabled
	 * counters from the GLOBAL_STATUS mask and we always process PEBS
	 * events via drain_pebs().
	 */
	status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
		u64 pebs_enabled = cpuc->pebs_enabled;

		handled++;
		x86_pmu_handle_guest_pebs(regs, &data);
		static_call(x86_pmu_drain_pebs)(regs, &data);

		/*
		 * A PMI throttle may be triggered, which stops the PEBS event.
		 * Although cpuc->pebs_enabled is updated accordingly,
		 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
		 * forced to 0 in the PMI.
		 * Update the MSR if pebs_enabled has changed.
		 */
		if (pebs_enabled != cpuc->pebs_enabled)
			wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

		/*
		 * The PEBS handler above (PEBS counter snapshotting) has
		 * already updated fixed counter 3 and the perf metrics
		 * counts if they are in a counter group; there is no need
		 * to update them again.
3564 */ 3565 if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] && 3566 is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS])) 3567 status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT; 3568 } 3569 3570 /* 3571 * Arch PEBS sets bit 54 in the global status register 3572 */ 3573 if (__test_and_clear_bit(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT, 3574 (unsigned long *)&status)) { 3575 handled++; 3576 static_call(x86_pmu_drain_pebs)(regs, &data); 3577 3578 if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] && 3579 is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS])) 3580 status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT; 3581 } 3582 3583 /* 3584 * Intel PT 3585 */ 3586 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { 3587 handled++; 3588 if (!perf_guest_handle_intel_pt_intr()) 3589 intel_pt_interrupt(); 3590 } 3591 3592 /* 3593 * Intel Perf metrics 3594 */ 3595 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { 3596 handled++; 3597 static_call(intel_pmu_update_topdown_event)(NULL, NULL); 3598 } 3599 3600 status &= hybrid(cpuc->pmu, intel_ctrl); 3601 3602 /* 3603 * Checkpointed counters can lead to 'spurious' PMIs because the 3604 * rollback caused by the PMI will have cleared the overflow status 3605 * bit. Therefore always force probe these counters. 3606 */ 3607 status |= cpuc->intel_cp_status; 3608 3609 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 3610 struct perf_event *event = cpuc->events[bit]; 3611 u64 last_period; 3612 3613 handled++; 3614 3615 if (!test_bit(bit, cpuc->active_mask)) 3616 continue; 3617 /* Event may have already been cleared: */ 3618 if (!event) 3619 continue; 3620 3621 /* 3622 * There may be unprocessed PEBS records in the PEBS buffer, 3623 * which still stores the previous values. 3624 * Process those records first before handling the latest value. 3625 * For example, 3626 * A is a regular counter 3627 * B is a PEBS event which reads A 3628 * C is a PEBS event 3629 * 3630 * The following can happen: 3631 * B-assist A=1 3632 * C A=2 3633 * B-assist A=3 3634 * A-overflow-PMI A=4 3635 * C-assist-PMI (PEBS buffer) A=5 3636 * 3637 * The PEBS buffer has to be drained before handling the A-PMI 3638 */ 3639 if (is_pebs_counter_event_group(event)) 3640 static_call(x86_pmu_drain_pebs)(regs, &data); 3641 3642 last_period = event->hw.last_period; 3643 3644 if (!intel_pmu_save_and_restart(event)) 3645 continue; 3646 3647 perf_sample_data_init(&data, 0, last_period); 3648 3649 if (has_branch_stack(event)) 3650 intel_pmu_lbr_save_brstack(&data, cpuc, event); 3651 3652 perf_event_overflow(event, &data, regs); 3653 } 3654 3655 return handled; 3656 } 3657 3658 /* 3659 * This handler is triggered by the local APIC, so the APIC IRQ handling 3660 * rules apply: 3661 */ 3662 static int intel_pmu_handle_irq(struct pt_regs *regs) 3663 { 3664 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3665 bool late_ack = hybrid_bit(cpuc->pmu, late_ack); 3666 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); 3667 int loops; 3668 u64 status; 3669 int handled; 3670 int pmu_enabled; 3671 3672 /* 3673 * Save the PMU state. 3674 * It needs to be restored when leaving the handler. 3675 */ 3676 pmu_enabled = cpuc->enabled; 3677 /* 3678 * In general, the early ACK is only applied for old platforms. 3679 * For the big core starts from Haswell, the late ACK should be 3680 * applied. 
3681 * For the small core after Tremont, we have to do the ACK right 3682 * before re-enabling counters, which is in the middle of the 3683 * NMI handler. 3684 */ 3685 if (!late_ack && !mid_ack) 3686 apic_write(APIC_LVTPC, APIC_DM_NMI); 3687 intel_bts_disable_local(); 3688 cpuc->enabled = 0; 3689 __intel_pmu_disable_all(true); 3690 handled = intel_pmu_drain_bts_buffer(); 3691 handled += intel_bts_interrupt(); 3692 status = intel_pmu_get_status(); 3693 if (!status) 3694 goto done; 3695 3696 loops = 0; 3697 again: 3698 intel_pmu_lbr_read(); 3699 intel_pmu_ack_status(status); 3700 if (++loops > 100) { 3701 static bool warned; 3702 3703 if (!warned) { 3704 WARN(1, "perfevents: irq loop stuck!\n"); 3705 perf_event_print_debug(); 3706 warned = true; 3707 } 3708 intel_pmu_reset(); 3709 goto done; 3710 } 3711 3712 handled += handle_pmi_common(regs, status); 3713 3714 /* 3715 * Repeat if there is more work to be done: 3716 */ 3717 status = intel_pmu_get_status(); 3718 if (status) 3719 goto again; 3720 3721 done: 3722 if (mid_ack) 3723 apic_write(APIC_LVTPC, APIC_DM_NMI); 3724 /* Only restore PMU state when it's active. See x86_pmu_disable(). */ 3725 cpuc->enabled = pmu_enabled; 3726 if (pmu_enabled) 3727 __intel_pmu_enable_all(0, true); 3728 intel_bts_enable_local(); 3729 3730 /* 3731 * Only unmask the NMI after the overflow counters 3732 * have been reset. This avoids spurious NMIs on 3733 * Haswell CPUs. 3734 */ 3735 if (late_ack) 3736 apic_write(APIC_LVTPC, APIC_DM_NMI); 3737 return handled; 3738 } 3739 3740 static struct event_constraint * 3741 intel_bts_constraints(struct perf_event *event) 3742 { 3743 if (unlikely(intel_pmu_has_bts(event))) 3744 return &bts_constraint; 3745 3746 return NULL; 3747 } 3748 3749 /* 3750 * Note: matches a fake event, like Fixed2. 3751 */ 3752 static struct event_constraint * 3753 intel_vlbr_constraints(struct perf_event *event) 3754 { 3755 struct event_constraint *c = &vlbr_constraint; 3756 3757 if (unlikely(constraint_match(c, event->hw.config))) { 3758 event->hw.flags |= c->flags; 3759 return c; 3760 } 3761 3762 return NULL; 3763 } 3764 3765 static int intel_alt_er(struct cpu_hw_events *cpuc, 3766 int idx, u64 config) 3767 { 3768 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); 3769 int alt_idx = idx; 3770 3771 switch (idx) { 3772 case EXTRA_REG_RSP_0 ... EXTRA_REG_RSP_1: 3773 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) 3774 return idx; 3775 if (++alt_idx > EXTRA_REG_RSP_1) 3776 alt_idx = EXTRA_REG_RSP_0; 3777 if (config & ~extra_regs[alt_idx].valid_mask) 3778 return idx; 3779 break; 3780 3781 case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3: 3782 if (!(x86_pmu.flags & PMU_FL_HAS_OMR)) 3783 return idx; 3784 if (++alt_idx > EXTRA_REG_OMR_3) 3785 alt_idx = EXTRA_REG_OMR_0; 3786 /* 3787 * Subtracting EXTRA_REG_OMR_0 ensures to get correct 3788 * OMR extra_reg entries which start from 0. 3789 */ 3790 if (config & ~extra_regs[alt_idx - EXTRA_REG_OMR_0].valid_mask) 3791 return idx; 3792 break; 3793 3794 default: 3795 break; 3796 } 3797 3798 return alt_idx; 3799 } 3800 3801 static void intel_fixup_er(struct perf_event *event, int idx) 3802 { 3803 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); 3804 int er_idx; 3805 3806 event->hw.extra_reg.idx = idx; 3807 switch (idx) { 3808 case EXTRA_REG_RSP_0 ... 
EXTRA_REG_RSP_1: 3809 er_idx = idx - EXTRA_REG_RSP_0; 3810 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3811 event->hw.config |= extra_regs[er_idx].event; 3812 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0 + er_idx; 3813 break; 3814 3815 case EXTRA_REG_OMR_0 ... EXTRA_REG_OMR_3: 3816 er_idx = idx - EXTRA_REG_OMR_0; 3817 event->hw.config &= ~ARCH_PERFMON_EVENTSEL_UMASK; 3818 event->hw.config |= 1ULL << (8 + er_idx); 3819 event->hw.extra_reg.reg = MSR_OMR_0 + er_idx; 3820 break; 3821 3822 default: 3823 pr_warn("The extra reg idx %d is not supported.\n", idx); 3824 } 3825 } 3826 3827 /* 3828 * manage allocation of shared extra msr for certain events 3829 * 3830 * sharing can be: 3831 * per-cpu: to be shared between the various events on a single PMU 3832 * per-core: per-cpu + shared by HT threads 3833 */ 3834 static struct event_constraint * 3835 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, 3836 struct perf_event *event, 3837 struct hw_perf_event_extra *reg) 3838 { 3839 struct event_constraint *c = &emptyconstraint; 3840 struct er_account *era; 3841 unsigned long flags; 3842 int idx = reg->idx; 3843 3844 /* 3845 * reg->alloc can be set due to existing state, so for fake cpuc we 3846 * need to ignore this, otherwise we might fail to allocate proper fake 3847 * state for this extra reg constraint. Also see the comment below. 3848 */ 3849 if (reg->alloc && !cpuc->is_fake) 3850 return NULL; /* call x86_get_event_constraint() */ 3851 3852 again: 3853 era = &cpuc->shared_regs->regs[idx]; 3854 /* 3855 * we use spin_lock_irqsave() to avoid lockdep issues when 3856 * passing a fake cpuc 3857 */ 3858 raw_spin_lock_irqsave(&era->lock, flags); 3859 3860 if (!atomic_read(&era->ref) || era->config == reg->config) { 3861 3862 /* 3863 * If its a fake cpuc -- as per validate_{group,event}() we 3864 * shouldn't touch event state and we can avoid doing so 3865 * since both will only call get_event_constraints() once 3866 * on each event, this avoids the need for reg->alloc. 3867 * 3868 * Not doing the ER fixup will only result in era->reg being 3869 * wrong, but since we won't actually try and program hardware 3870 * this isn't a problem either. 3871 */ 3872 if (!cpuc->is_fake) { 3873 if (idx != reg->idx) 3874 intel_fixup_er(event, idx); 3875 3876 /* 3877 * x86_schedule_events() can call get_event_constraints() 3878 * multiple times on events in the case of incremental 3879 * scheduling(). reg->alloc ensures we only do the ER 3880 * allocation once. 3881 */ 3882 reg->alloc = 1; 3883 } 3884 3885 /* lock in msr value */ 3886 era->config = reg->config; 3887 era->reg = reg->reg; 3888 3889 /* one more user */ 3890 atomic_inc(&era->ref); 3891 3892 /* 3893 * need to call x86_get_event_constraint() 3894 * to check if associated event has constraints 3895 */ 3896 c = NULL; 3897 } else { 3898 idx = intel_alt_er(cpuc, idx, reg->config); 3899 if (idx != reg->idx) { 3900 raw_spin_unlock_irqrestore(&era->lock, flags); 3901 goto again; 3902 } 3903 } 3904 raw_spin_unlock_irqrestore(&era->lock, flags); 3905 3906 return c; 3907 } 3908 3909 static void 3910 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, 3911 struct hw_perf_event_extra *reg) 3912 { 3913 struct er_account *era; 3914 3915 /* 3916 * Only put constraint if extra reg was actually allocated. Also takes 3917 * care of event which do not use an extra shared reg. 
3918 * 3919 * Also, if this is a fake cpuc we shouldn't touch any event state 3920 * (reg->alloc) and we don't care about leaving inconsistent cpuc state 3921 * either since it'll be thrown out. 3922 */ 3923 if (!reg->alloc || cpuc->is_fake) 3924 return; 3925 3926 era = &cpuc->shared_regs->regs[reg->idx]; 3927 3928 /* one fewer user */ 3929 atomic_dec(&era->ref); 3930 3931 /* allocate again next time */ 3932 reg->alloc = 0; 3933 } 3934 3935 static struct event_constraint * 3936 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, 3937 struct perf_event *event) 3938 { 3939 struct event_constraint *c = NULL, *d; 3940 struct hw_perf_event_extra *xreg, *breg; 3941 3942 xreg = &event->hw.extra_reg; 3943 if (xreg->idx != EXTRA_REG_NONE) { 3944 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); 3945 if (c == &emptyconstraint) 3946 return c; 3947 } 3948 breg = &event->hw.branch_reg; 3949 if (breg->idx != EXTRA_REG_NONE) { 3950 d = __intel_shared_reg_get_constraints(cpuc, event, breg); 3951 if (d == &emptyconstraint) { 3952 __intel_shared_reg_put_constraints(cpuc, xreg); 3953 c = d; 3954 } 3955 } 3956 return c; 3957 } 3958 3959 struct event_constraint * 3960 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3961 struct perf_event *event) 3962 { 3963 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); 3964 struct event_constraint *c; 3965 3966 if (event_constraints) { 3967 for_each_event_constraint(c, event_constraints) { 3968 if (constraint_match(c, event->hw.config)) { 3969 event->hw.flags |= c->flags; 3970 return c; 3971 } 3972 } 3973 } 3974 3975 return &hybrid_var(cpuc->pmu, unconstrained); 3976 } 3977 3978 static struct event_constraint * 3979 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3980 struct perf_event *event) 3981 { 3982 struct event_constraint *c; 3983 3984 c = intel_vlbr_constraints(event); 3985 if (c) 3986 return c; 3987 3988 c = intel_bts_constraints(event); 3989 if (c) 3990 return c; 3991 3992 c = intel_shared_regs_constraints(cpuc, event); 3993 if (c) 3994 return c; 3995 3996 c = intel_pebs_constraints(event); 3997 if (c) 3998 return c; 3999 4000 return x86_get_event_constraints(cpuc, idx, event); 4001 } 4002 4003 static void 4004 intel_start_scheduling(struct cpu_hw_events *cpuc) 4005 { 4006 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 4007 struct intel_excl_states *xl; 4008 int tid = cpuc->excl_thread_id; 4009 4010 /* 4011 * nothing needed if in group validation mode 4012 */ 4013 if (cpuc->is_fake || !is_ht_workaround_enabled()) 4014 return; 4015 4016 /* 4017 * no exclusion needed 4018 */ 4019 if (WARN_ON_ONCE(!excl_cntrs)) 4020 return; 4021 4022 xl = &excl_cntrs->states[tid]; 4023 4024 xl->sched_started = true; 4025 /* 4026 * lock shared state until we are done scheduling 4027 * in stop_event_scheduling() 4028 * makes scheduling appear as a transaction 4029 */ 4030 raw_spin_lock(&excl_cntrs->lock); 4031 } 4032 4033 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 4034 { 4035 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 4036 struct event_constraint *c = cpuc->event_constraint[idx]; 4037 struct intel_excl_states *xl; 4038 int tid = cpuc->excl_thread_id; 4039 4040 if (cpuc->is_fake || !is_ht_workaround_enabled()) 4041 return; 4042 4043 if (WARN_ON_ONCE(!excl_cntrs)) 4044 return; 4045 4046 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) 4047 return; 4048 4049 xl = &excl_cntrs->states[tid]; 4050 4051 lockdep_assert_held(&excl_cntrs->lock); 4052 4053 if 
(c->flags & PERF_X86_EVENT_EXCL) 4054 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE; 4055 else 4056 xl->state[cntr] = INTEL_EXCL_SHARED; 4057 } 4058 4059 static void 4060 intel_stop_scheduling(struct cpu_hw_events *cpuc) 4061 { 4062 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 4063 struct intel_excl_states *xl; 4064 int tid = cpuc->excl_thread_id; 4065 4066 /* 4067 * nothing needed if in group validation mode 4068 */ 4069 if (cpuc->is_fake || !is_ht_workaround_enabled()) 4070 return; 4071 /* 4072 * no exclusion needed 4073 */ 4074 if (WARN_ON_ONCE(!excl_cntrs)) 4075 return; 4076 4077 xl = &excl_cntrs->states[tid]; 4078 4079 xl->sched_started = false; 4080 /* 4081 * release shared state lock (acquired in intel_start_scheduling()) 4082 */ 4083 raw_spin_unlock(&excl_cntrs->lock); 4084 } 4085 4086 static struct event_constraint * 4087 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) 4088 { 4089 WARN_ON_ONCE(!cpuc->constraint_list); 4090 4091 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { 4092 struct event_constraint *cx; 4093 4094 /* 4095 * grab pre-allocated constraint entry 4096 */ 4097 cx = &cpuc->constraint_list[idx]; 4098 4099 /* 4100 * initialize dynamic constraint 4101 * with static constraint 4102 */ 4103 *cx = *c; 4104 4105 /* 4106 * mark constraint as dynamic 4107 */ 4108 cx->flags |= PERF_X86_EVENT_DYNAMIC; 4109 c = cx; 4110 } 4111 4112 return c; 4113 } 4114 4115 static struct event_constraint * 4116 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, 4117 int idx, struct event_constraint *c) 4118 { 4119 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 4120 struct intel_excl_states *xlo; 4121 int tid = cpuc->excl_thread_id; 4122 int is_excl, i, w; 4123 4124 /* 4125 * validating a group does not require 4126 * enforcing cross-thread exclusion 4127 */ 4128 if (cpuc->is_fake || !is_ht_workaround_enabled()) 4129 return c; 4130 4131 /* 4132 * no exclusion needed 4133 */ 4134 if (WARN_ON_ONCE(!excl_cntrs)) 4135 return c; 4136 4137 /* 4138 * because we modify the constraint, we need 4139 * to make a copy. Static constraints come 4140 * from static const tables. 4141 * 4142 * only needed when constraint has not yet 4143 * been cloned (marked dynamic) 4144 */ 4145 c = dyn_constraint(cpuc, c, idx); 4146 4147 /* 4148 * From here on, the constraint is dynamic. 
4149 * Either it was just allocated above, or it 4150 * was allocated during a earlier invocation 4151 * of this function 4152 */ 4153 4154 /* 4155 * state of sibling HT 4156 */ 4157 xlo = &excl_cntrs->states[tid ^ 1]; 4158 4159 /* 4160 * event requires exclusive counter access 4161 * across HT threads 4162 */ 4163 is_excl = c->flags & PERF_X86_EVENT_EXCL; 4164 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { 4165 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; 4166 if (!cpuc->n_excl++) 4167 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); 4168 } 4169 4170 /* 4171 * Modify static constraint with current dynamic 4172 * state of thread 4173 * 4174 * EXCLUSIVE: sibling counter measuring exclusive event 4175 * SHARED : sibling counter measuring non-exclusive event 4176 * UNUSED : sibling counter unused 4177 */ 4178 w = c->weight; 4179 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { 4180 /* 4181 * exclusive event in sibling counter 4182 * our corresponding counter cannot be used 4183 * regardless of our event 4184 */ 4185 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) { 4186 __clear_bit(i, c->idxmsk); 4187 w--; 4188 continue; 4189 } 4190 /* 4191 * if measuring an exclusive event, sibling 4192 * measuring non-exclusive, then counter cannot 4193 * be used 4194 */ 4195 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) { 4196 __clear_bit(i, c->idxmsk); 4197 w--; 4198 continue; 4199 } 4200 } 4201 4202 /* 4203 * if we return an empty mask, then switch 4204 * back to static empty constraint to avoid 4205 * the cost of freeing later on 4206 */ 4207 if (!w) 4208 c = &emptyconstraint; 4209 4210 c->weight = w; 4211 4212 return c; 4213 } 4214 4215 static struct event_constraint * 4216 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4217 struct perf_event *event) 4218 { 4219 struct event_constraint *c1, *c2; 4220 4221 c1 = cpuc->event_constraint[idx]; 4222 4223 /* 4224 * first time only 4225 * - static constraint: no change across incremental scheduling calls 4226 * - dynamic constraint: handled by intel_get_excl_constraints() 4227 */ 4228 c2 = __intel_get_event_constraints(cpuc, idx, event); 4229 if (c1) { 4230 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC)); 4231 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); 4232 c1->weight = c2->weight; 4233 c2 = c1; 4234 } 4235 4236 if (cpuc->excl_cntrs) 4237 return intel_get_excl_constraints(cpuc, event, idx, c2); 4238 4239 if (event->hw.dyn_constraint != ~0ULL) { 4240 c2 = dyn_constraint(cpuc, c2, idx); 4241 c2->idxmsk64 &= event->hw.dyn_constraint; 4242 c2->weight = hweight64(c2->idxmsk64); 4243 } 4244 4245 return c2; 4246 } 4247 4248 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, 4249 struct perf_event *event) 4250 { 4251 struct hw_perf_event *hwc = &event->hw; 4252 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 4253 int tid = cpuc->excl_thread_id; 4254 struct intel_excl_states *xl; 4255 4256 /* 4257 * nothing needed if in group validation mode 4258 */ 4259 if (cpuc->is_fake) 4260 return; 4261 4262 if (WARN_ON_ONCE(!excl_cntrs)) 4263 return; 4264 4265 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { 4266 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; 4267 if (!--cpuc->n_excl) 4268 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); 4269 } 4270 4271 /* 4272 * If event was actually assigned, then mark the counter state as 4273 * unused now. 
	 */
	if (hwc->idx >= 0) {
		xl = &excl_cntrs->states[tid];

		/*
		 * put_constraint may be called from x86_schedule_events(),
		 * which already holds the lock, so make the locking
		 * conditional here.
		 */
		if (!xl->sched_started)
			raw_spin_lock(&excl_cntrs->lock);

		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;

		if (!xl->sched_started)
			raw_spin_unlock(&excl_cntrs->lock);
	}
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);

	/*
	 * If the PMU has exclusive counter restrictions, then
	 * all events are subject to them and must call the
	 * put_excl_constraints() routine.
	 */
	if (cpuc->excl_cntrs)
		intel_put_excl_constraints(cpuc, event);
}

static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
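		 *
		 * As a rough illustration of the trick (using only the
		 * encoding this function already programs): the resulting
		 * event select is
		 *
		 *	X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16)
		 *
		 * i.e. "count cycles in which fewer than 16 micro-ops retire".
		 * Since at most 4 micro-ops can retire per cycle, that
		 * condition holds on every cycle, so the counter ticks once
		 * per cycle while remaining PEBS capable.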
4370 */ 4371 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); 4372 4373 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 4374 event->hw.config = alt_config; 4375 } 4376 } 4377 4378 static void intel_pebs_aliases_precdist(struct perf_event *event) 4379 { 4380 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 4381 /* 4382 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 4383 * (0x003c) so that we can use it with PEBS. 4384 * 4385 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 4386 * PEBS capable. However we can use INST_RETIRED.PREC_DIST 4387 * (0x01c0), which is a PEBS capable event, to get the same 4388 * count. 4389 * 4390 * The PREC_DIST event has special support to minimize sample 4391 * shadowing effects. One drawback is that it can be 4392 * only programmed on counter 1, but that seems like an 4393 * acceptable trade off. 4394 */ 4395 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); 4396 4397 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 4398 event->hw.config = alt_config; 4399 } 4400 } 4401 4402 static void intel_pebs_aliases_ivb(struct perf_event *event) 4403 { 4404 if (event->attr.precise_ip < 3) 4405 return intel_pebs_aliases_snb(event); 4406 return intel_pebs_aliases_precdist(event); 4407 } 4408 4409 static void intel_pebs_aliases_skl(struct perf_event *event) 4410 { 4411 if (event->attr.precise_ip < 3) 4412 return intel_pebs_aliases_core2(event); 4413 return intel_pebs_aliases_precdist(event); 4414 } 4415 4416 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) 4417 { 4418 unsigned long flags = x86_pmu.large_pebs_flags; 4419 4420 if (event->attr.use_clockid) 4421 flags &= ~PERF_SAMPLE_TIME; 4422 if (!event->attr.exclude_kernel) 4423 flags &= ~PERF_SAMPLE_REGS_USER; 4424 if (event->attr.sample_regs_user & ~PEBS_GP_REGS) 4425 flags &= ~PERF_SAMPLE_REGS_USER; 4426 if (event->attr.sample_regs_intr & ~PEBS_GP_REGS) 4427 flags &= ~PERF_SAMPLE_REGS_INTR; 4428 return flags; 4429 } 4430 4431 static int intel_pmu_bts_config(struct perf_event *event) 4432 { 4433 struct perf_event_attr *attr = &event->attr; 4434 4435 if (unlikely(intel_pmu_has_bts(event))) { 4436 /* BTS is not supported by this architecture. */ 4437 if (!x86_pmu.bts_active) 4438 return -EOPNOTSUPP; 4439 4440 /* BTS is currently only allowed for user-mode. */ 4441 if (!attr->exclude_kernel) 4442 return -EOPNOTSUPP; 4443 4444 /* BTS is not allowed for precise events. 
		 */
		if (attr->precise_ip)
			return -EOPNOTSUPP;

		/* disallow bts if conflicting events are present */
		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
			return -EBUSY;

		event->destroy = hw_perf_lbr_event_destroy;
	}

	return 0;
}

static int core_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	return intel_pmu_bts_config(event);
}

#define INTEL_TD_METRIC_AVAILABLE_MAX	(INTEL_TD_METRIC_RETIRING + \
					 ((x86_pmu.num_topdown_events - 1) << 8))

static bool is_available_metric_event(struct perf_event *event)
{
	return is_metric_event(event) &&
	       event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
}

static inline bool is_mem_loads_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
}

static inline bool is_mem_loads_aux_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
}

static inline bool require_mem_loads_aux_event(struct perf_event *event)
{
	if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
		return false;

	if (is_hybrid())
		return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;

	return true;
}

static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
{
	union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);

	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}

static u64 intel_pmu_freq_start_period(struct perf_event *event)
{
	int type = event->attr.type;
	u64 config, factor;
	s64 start;

	/*
	 * The 127 is the lowest possible recommended SAV (sample after value)
	 * for a freq of 4000 (the default freq), according to the event list
	 * JSON file. Also assume the workload is idle 50% of the time.
	 */
	factor = 64 * 4000;
	if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
		goto end;

	/*
	 * The estimation of the start period in freq mode is based on
	 * the assumptions below.
	 *
	 * For a cycles or an instructions event, assume a 1GHz platform
	 * and 1 IPC, with the workload idle 50% of the time.
	 * The start period = 1,000,000,000 * 1 / freq / 2
	 *		    = 500,000,000 / freq
	 *
	 * Usually, branch-related events occur less often than the
	 * instructions event. According to the Intel event list JSON
	 * file, the SAV (sample after value) of a branch-related event
	 * is usually 1/4 of that of an instructions event.
	 * The start period of branch-related events = 125,000,000 / freq.
	 *
	 * Cache-related events occur even less often. Their SAV is
	 * usually 1/20 of that of an instructions event.
	 * The start period of cache-related events = 25,000,000 / freq.
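	 *
	 * A worked example (illustrative only, with the default
	 * sample_freq of 4000):
	 *
	 *	cycles/instructions:	500,000,000 / 4000 = 125,000
	 *	branch events:		125,000,000 / 4000 =  31,250
	 *	cache events:		 25,000,000 / 4000 =   6,250
	 *
	 * Each value is then decremented by one and clamped against
	 * x86_pmu.max_period and limit_period() below.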
4538 */ 4539 config = event->attr.config & PERF_HW_EVENT_MASK; 4540 if (type == PERF_TYPE_HARDWARE) { 4541 switch (config) { 4542 case PERF_COUNT_HW_CPU_CYCLES: 4543 case PERF_COUNT_HW_INSTRUCTIONS: 4544 case PERF_COUNT_HW_BUS_CYCLES: 4545 case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: 4546 case PERF_COUNT_HW_STALLED_CYCLES_BACKEND: 4547 case PERF_COUNT_HW_REF_CPU_CYCLES: 4548 factor = 500000000; 4549 break; 4550 case PERF_COUNT_HW_BRANCH_INSTRUCTIONS: 4551 case PERF_COUNT_HW_BRANCH_MISSES: 4552 factor = 125000000; 4553 break; 4554 case PERF_COUNT_HW_CACHE_REFERENCES: 4555 case PERF_COUNT_HW_CACHE_MISSES: 4556 factor = 25000000; 4557 break; 4558 default: 4559 goto end; 4560 } 4561 } 4562 4563 if (type == PERF_TYPE_HW_CACHE) 4564 factor = 25000000; 4565 end: 4566 /* 4567 * Usually, a prime or a number with less factors (close to prime) 4568 * is chosen as an SAV, which makes it less likely that the sampling 4569 * period synchronizes with some periodic event in the workload. 4570 * Minus 1 to make it at least avoiding values near power of twos 4571 * for the default freq. 4572 */ 4573 start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1; 4574 4575 if (start > x86_pmu.max_period) 4576 start = x86_pmu.max_period; 4577 4578 if (x86_pmu.limit_period) 4579 x86_pmu.limit_period(event, &start); 4580 4581 return start; 4582 } 4583 4584 static inline bool intel_pmu_has_acr(struct pmu *pmu) 4585 { 4586 return !!hybrid(pmu, acr_cause_mask64); 4587 } 4588 4589 static bool intel_pmu_is_acr_group(struct perf_event *event) 4590 { 4591 /* The group leader has the ACR flag set */ 4592 if (is_acr_event_group(event)) 4593 return true; 4594 4595 /* The acr_mask is set */ 4596 if (event->attr.config2) 4597 return true; 4598 4599 return false; 4600 } 4601 4602 static inline bool intel_pmu_has_pebs_counter_group(struct pmu *pmu) 4603 { 4604 u64 caps; 4605 4606 if (x86_pmu.intel_cap.pebs_format >= 6 && x86_pmu.intel_cap.pebs_baseline) 4607 return true; 4608 4609 caps = hybrid(pmu, arch_pebs_cap).caps; 4610 if (x86_pmu.arch_pebs && (caps & ARCH_PEBS_CNTR_MASK)) 4611 return true; 4612 4613 return false; 4614 } 4615 4616 static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event, 4617 u64 *cause_mask, int *num) 4618 { 4619 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64); 4620 *cause_mask |= event->attr.config2; 4621 *num += 1; 4622 } 4623 4624 static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event, 4625 int idx, u64 cause_mask) 4626 { 4627 if (test_bit(idx, (unsigned long *)&cause_mask)) 4628 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64); 4629 } 4630 4631 static int intel_pmu_hw_config(struct perf_event *event) 4632 { 4633 int ret = x86_pmu_hw_config(event); 4634 4635 if (ret) 4636 return ret; 4637 4638 ret = intel_pmu_bts_config(event); 4639 if (ret) 4640 return ret; 4641 4642 if (event->attr.freq && event->attr.sample_freq) { 4643 event->hw.sample_period = intel_pmu_freq_start_period(event); 4644 event->hw.last_period = event->hw.sample_period; 4645 local64_set(&event->hw.period_left, event->hw.sample_period); 4646 } 4647 4648 if (event->attr.precise_ip) { 4649 struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap); 4650 4651 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) 4652 return -EINVAL; 4653 4654 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { 4655 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 4656 if (!(event->attr.sample_type & 
~intel_pmu_large_pebs_flags(event)) && 4657 !has_aux_action(event)) { 4658 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; 4659 event->attach_state |= PERF_ATTACH_SCHED_CB; 4660 } 4661 } 4662 if (x86_pmu.pebs_aliases) 4663 x86_pmu.pebs_aliases(event); 4664 4665 if (x86_pmu.arch_pebs) { 4666 u64 cntr_mask = hybrid(event->pmu, intel_ctrl) & 4667 ~GLOBAL_CTRL_EN_PERF_METRICS; 4668 u64 pebs_mask = event->attr.precise_ip >= 3 ? 4669 pebs_cap.pdists : pebs_cap.counters; 4670 if (cntr_mask != pebs_mask) 4671 event->hw.dyn_constraint &= pebs_mask; 4672 } 4673 } 4674 4675 if (needs_branch_stack(event)) { 4676 /* Avoid branch stack setup for counting events in SAMPLE READ */ 4677 if (is_sampling_event(event) || 4678 !(event->attr.sample_type & PERF_SAMPLE_READ)) 4679 event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; 4680 } 4681 4682 if (branch_sample_counters(event)) { 4683 struct perf_event *leader, *sibling; 4684 int num = 0; 4685 4686 if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || 4687 (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) 4688 return -EINVAL; 4689 4690 /* 4691 * The branch counter logging is not supported in the call stack 4692 * mode yet, since we cannot simply flush the LBR during e.g., 4693 * multiplexing. Also, there is no obvious usage with the call 4694 * stack mode. Simply forbids it for now. 4695 * 4696 * If any events in the group enable the branch counter logging 4697 * feature, the group is treated as a branch counter logging 4698 * group, which requires the extra space to store the counters. 4699 */ 4700 leader = event->group_leader; 4701 if (branch_sample_call_stack(leader)) 4702 return -EINVAL; 4703 if (branch_sample_counters(leader)) { 4704 num++; 4705 leader->hw.dyn_constraint &= x86_pmu.lbr_counters; 4706 } 4707 leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; 4708 4709 for_each_sibling_event(sibling, leader) { 4710 if (branch_sample_call_stack(sibling)) 4711 return -EINVAL; 4712 if (branch_sample_counters(sibling)) { 4713 num++; 4714 sibling->hw.dyn_constraint &= x86_pmu.lbr_counters; 4715 } 4716 } 4717 4718 if (num > fls(x86_pmu.lbr_counters)) 4719 return -EINVAL; 4720 /* 4721 * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't 4722 * require any branch stack setup. 4723 * Clear the bit to avoid unnecessary branch stack setup. 4724 */ 4725 if (0 == (event->attr.branch_sample_type & 4726 ~(PERF_SAMPLE_BRANCH_PLM_ALL | 4727 PERF_SAMPLE_BRANCH_COUNTERS))) 4728 event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; 4729 4730 /* 4731 * Force the leader to be a LBR event. So LBRs can be reset 4732 * with the leader event. See intel_pmu_lbr_del() for details. 
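		 *
		 * A hypothetical group shape that reaches this point (purely
		 * illustrative, not a name used elsewhere in this file): the
		 * leader samples with some PERF_SAMPLE_BRANCH_* filter set in
		 * attr.branch_sample_type, and a sibling additionally sets
		 * PERF_SAMPLE_BRANCH_COUNTERS so that its counts are logged
		 * alongside each LBR record; such a sibling was constrained
		 * to the counters in x86_pmu.lbr_counters above.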
		 */
		if (!intel_pmu_needs_branch_stack(leader))
			return -EINVAL;
	}

	if (intel_pmu_needs_branch_stack(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
		event->attach_state |= PERF_ATTACH_SCHED_CB;

		/*
		 * BTS is set up earlier in this path, so don't account twice
		 */
		if (!unlikely(intel_pmu_has_bts(event))) {
			/* disallow lbr if conflicting events are present */
			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
				return -EBUSY;

			event->destroy = hw_perf_lbr_event_destroy;
		}
	}

	if (event->attr.aux_output) {
		if (!event->attr.precise_ip)
			return -EINVAL;

		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
	}

	if ((event->attr.sample_type & PERF_SAMPLE_READ) &&
	    intel_pmu_has_pebs_counter_group(event->pmu) &&
	    is_sampling_event(event) &&
	    event->attr.precise_ip)
		event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;

	if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) {
		struct perf_event *sibling, *leader = event->group_leader;
		struct pmu *pmu = event->pmu;
		bool has_sw_event = false;
		int num = 0, idx = 0;
		u64 cause_mask = 0;

		/* Perf metrics events are not supported */
		if (is_metric_event(event))
			return -EINVAL;

		/* Freq mode is not supported */
		if (event->attr.freq)
			return -EINVAL;

		/* PDist is not supported */
		if (event->attr.config2 && event->attr.precise_ip > 2)
			return -EINVAL;

		/* The reload value cannot exceed the max period */
		if (event->attr.sample_period > x86_pmu.max_period)
			return -EINVAL;
		/*
		 * The counter constraints of each event cannot be finalized
		 * until the whole group has been scanned. However, it's hard
		 * to know whether an event is the last one of the group.
		 * Recalculate the counter constraints for each event when
		 * adding a new event.
		 *
		 * The group is traversed twice, which may be optimized later.
		 * In the first round,
		 * - Find all events which do reload when other events
		 *   overflow and set the corresponding counter constraints
		 * - Add all events which can cause other events to reload
		 *   to the cause_mask
		 * - Error out if the number of events exceeds the HW limit
		 * - The ACR events must be contiguous.
		 *   Error out if there are non-X86 events between ACR events.
		 *   This is not a HW limit, but a SW limit.
		 *   With this assumption, intel_pmu_acr_late_setup() can
		 *   easily convert an event idx to a counter idx without
		 *   traversing the whole event list.
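		 *
		 * A small illustrative example (not code from this file): in a
		 * two-event ACR group where only the second event sets
		 * attr.config2 = 0x1, bit 0 refers to the first event of the
		 * group, i.e. "reload the second event whenever the first one
		 * overflows"; the first event therefore lands in cause_mask
		 * and both events have their counter constraints narrowed
		 * below.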
4811 */ 4812 if (!is_x86_event(leader)) 4813 return -EINVAL; 4814 4815 if (leader->attr.config2) 4816 intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num); 4817 4818 if (leader->nr_siblings) { 4819 for_each_sibling_event(sibling, leader) { 4820 if (!is_x86_event(sibling)) { 4821 has_sw_event = true; 4822 continue; 4823 } 4824 if (!sibling->attr.config2) 4825 continue; 4826 if (has_sw_event) 4827 return -EINVAL; 4828 intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num); 4829 } 4830 } 4831 if (leader != event && event->attr.config2) { 4832 if (has_sw_event) 4833 return -EINVAL; 4834 intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num); 4835 } 4836 4837 if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) || 4838 num > hweight64(hybrid(event->pmu, acr_cntr_mask64))) 4839 return -EINVAL; 4840 /* 4841 * In the second round, apply the counter-constraints for 4842 * the events which can cause other events reload. 4843 */ 4844 intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask); 4845 4846 if (leader->nr_siblings) { 4847 for_each_sibling_event(sibling, leader) 4848 intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask); 4849 } 4850 4851 if (leader != event) 4852 intel_pmu_set_acr_caused_constr(event, idx, cause_mask); 4853 4854 leader->hw.flags |= PERF_X86_EVENT_ACR; 4855 } 4856 4857 if ((event->attr.type == PERF_TYPE_HARDWARE) || 4858 (event->attr.type == PERF_TYPE_HW_CACHE)) 4859 return 0; 4860 4861 /* 4862 * Config Topdown slots and metric events 4863 * 4864 * The slots event on Fixed Counter 3 can support sampling, 4865 * which will be handled normally in x86_perf_event_update(). 4866 * 4867 * Metric events don't support sampling and require being paired 4868 * with a slots event as group leader. When the slots event 4869 * is used in a metrics group, it too cannot support sampling. 4870 */ 4871 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) { 4872 /* The metrics_clear can only be set for the slots event */ 4873 if (event->attr.config1 && 4874 (!is_slots_event(event) || (event->attr.config1 & ~INTEL_TD_CFG_METRIC_CLEAR))) 4875 return -EINVAL; 4876 4877 if (event->attr.config2) 4878 return -EINVAL; 4879 4880 /* 4881 * The TopDown metrics events and slots event don't 4882 * support any filters. 4883 */ 4884 if (event->attr.config & X86_ALL_EVENT_FLAGS) 4885 return -EINVAL; 4886 4887 if (is_available_metric_event(event)) { 4888 struct perf_event *leader = event->group_leader; 4889 4890 /* The metric events don't support sampling. */ 4891 if (is_sampling_event(event)) 4892 return -EINVAL; 4893 4894 /* The metric events require a slots group leader. */ 4895 if (!is_slots_event(leader)) 4896 return -EINVAL; 4897 4898 /* 4899 * The leader/SLOTS must not be a sampling event for 4900 * metric use; hardware requires it starts at 0 when used 4901 * in conjunction with MSR_PERF_METRICS. 4902 */ 4903 if (is_sampling_event(leader)) 4904 return -EINVAL; 4905 4906 event->event_caps |= PERF_EV_CAP_SIBLING; 4907 /* 4908 * Only once we have a METRICs sibling do we 4909 * need TopDown magic. 4910 */ 4911 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; 4912 event->hw.flags |= PERF_X86_EVENT_TOPDOWN; 4913 } 4914 } 4915 4916 /* 4917 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR 4918 * doesn't function quite right. As a work-around it needs to always be 4919 * co-scheduled with a auxiliary event X86_CONFIG(.event=0x03, .umask=0x82). 
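	 * (For example, illustrative only: a raw perf group of the form
	 * {cpu/event=0x03,umask=0x82/,cpu/event=0xcd,umask=0x01/} keeps the
	 * auxiliary event in front of the load latency event, which is what
	 * the check below insists on.)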
	 * The actual count of this second event is irrelevant; it just needs
	 * to be active to make the first event function correctly.
	 *
	 * In a group, the auxiliary event must come before the load latency
	 * event. This rule keeps the check below simple, since perf cannot
	 * see the complete group at this point.
	 */
	if (require_mem_loads_aux_event(event) &&
	    (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
	    is_mem_loads_event(event)) {
		struct perf_event *leader = event->group_leader;
		struct perf_event *sibling = NULL;

		/*
		 * When this memload event is also the first event (no group
		 * exists yet), then there is no aux event before it.
		 */
		if (leader == event)
			return -ENODATA;

		if (!is_mem_loads_aux_event(leader)) {
			for_each_sibling_event(sibling, leader) {
				if (is_mem_loads_aux_event(sibling))
					break;
			}
			if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
				return -ENODATA;
		}
	}

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	ret = perf_allow_cpu();
	if (ret)
		return ret;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

/*
 * Currently, the only caller of this function is atomic_switch_perf_msrs().
 * The host perf context helps to prepare the values of the real hardware for
 * a set of MSRs that need to be switched atomically in a VMX transaction.
 *
 * For example, the pseudocode needed to add a new MSR should look like:
 *
 *	arr[(*nr)++] = (struct perf_guest_switch_msr){
 *		.msr = the hardware msr address,
 *		.host = the value the hardware has when it doesn't run a guest,
 *		.guest = the value the hardware has when it runs a guest,
 *	};
 *
 * These values have nothing to do with the emulated values the guest sees
 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
 * specifically in intel_pmu_{get,set}_msr().
 */
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
	u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
	int global_ctrl, pebs_enable;

	/*
	 * In addition to obeying exclude_guest/exclude_host, remove bits being
	 * used for PEBS when running a guest, because PEBS writes to virtual
	 * addresses (not physical addresses).
	 */
	*nr = 0;
	global_ctrl = (*nr)++;
	arr[global_ctrl] = (struct perf_guest_switch_msr){
		.msr = MSR_CORE_PERF_GLOBAL_CTRL,
		.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
		.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
	};

	if (!x86_pmu.ds_pebs)
		return arr;

	/*
	 * If a PMU counter has PEBS enabled, it is not enough to
	 * disable the counter on a guest entry, since a PEBS memory
	 * write can overshoot guest entry and corrupt guest
	 * memory. Disabling PEBS solves the problem.
	 *
	 * Don't do this if the CPU already enforces it.
5014 */ 5015 if (x86_pmu.pebs_no_isolation) { 5016 arr[(*nr)++] = (struct perf_guest_switch_msr){ 5017 .msr = MSR_IA32_PEBS_ENABLE, 5018 .host = cpuc->pebs_enabled, 5019 .guest = 0, 5020 }; 5021 return arr; 5022 } 5023 5024 if (!kvm_pmu || !x86_pmu.pebs_ept) 5025 return arr; 5026 5027 arr[(*nr)++] = (struct perf_guest_switch_msr){ 5028 .msr = MSR_IA32_DS_AREA, 5029 .host = (unsigned long)cpuc->ds, 5030 .guest = kvm_pmu->ds_area, 5031 }; 5032 5033 if (x86_pmu.intel_cap.pebs_baseline) { 5034 arr[(*nr)++] = (struct perf_guest_switch_msr){ 5035 .msr = MSR_PEBS_DATA_CFG, 5036 .host = cpuc->active_pebs_data_cfg, 5037 .guest = kvm_pmu->pebs_data_cfg, 5038 }; 5039 } 5040 5041 pebs_enable = (*nr)++; 5042 arr[pebs_enable] = (struct perf_guest_switch_msr){ 5043 .msr = MSR_IA32_PEBS_ENABLE, 5044 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, 5045 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable, 5046 }; 5047 5048 if (arr[pebs_enable].host) { 5049 /* Disable guest PEBS if host PEBS is enabled. */ 5050 arr[pebs_enable].guest = 0; 5051 } else { 5052 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */ 5053 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask; 5054 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask; 5055 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */ 5056 arr[global_ctrl].guest |= arr[pebs_enable].guest; 5057 } 5058 5059 return arr; 5060 } 5061 5062 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) 5063 { 5064 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 5065 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 5066 int idx; 5067 5068 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { 5069 struct perf_event *event = cpuc->events[idx]; 5070 5071 arr[idx].msr = x86_pmu_config_addr(idx); 5072 arr[idx].host = arr[idx].guest = 0; 5073 5074 if (!test_bit(idx, cpuc->active_mask)) 5075 continue; 5076 5077 arr[idx].host = arr[idx].guest = 5078 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; 5079 5080 if (event->attr.exclude_host) 5081 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 5082 else if (event->attr.exclude_guest) 5083 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 5084 } 5085 5086 *nr = x86_pmu_max_num_counters(cpuc->pmu); 5087 return arr; 5088 } 5089 5090 static void core_pmu_enable_event(struct perf_event *event) 5091 { 5092 if (!event->attr.exclude_host) 5093 x86_pmu_enable_event(event); 5094 } 5095 5096 static void core_pmu_enable_all(int added) 5097 { 5098 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 5099 int idx; 5100 5101 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { 5102 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; 5103 5104 if (!test_bit(idx, cpuc->active_mask) || 5105 cpuc->events[idx]->attr.exclude_host) 5106 continue; 5107 5108 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 5109 } 5110 } 5111 5112 static int hsw_hw_config(struct perf_event *event) 5113 { 5114 int ret = intel_pmu_hw_config(event); 5115 5116 if (ret) 5117 return ret; 5118 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) 5119 return 0; 5120 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); 5121 5122 /* 5123 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with 5124 * PEBS or in ANY thread mode. Since the results are non-sensical forbid 5125 * this combination. 
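	 * For instance (illustrative): a raw event such as
	 * cpu/event=0x3c,in_tx=1/ combined with precise_ip > 0 or with the
	 * ANY-thread bit set is rejected below with -EOPNOTSUPP.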
5126 */ 5127 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && 5128 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || 5129 event->attr.precise_ip > 0)) 5130 return -EOPNOTSUPP; 5131 5132 if (event_is_checkpointed(event)) { 5133 /* 5134 * Sampling of checkpointed events can cause situations where 5135 * the CPU constantly aborts because of a overflow, which is 5136 * then checkpointed back and ignored. Forbid checkpointing 5137 * for sampling. 5138 * 5139 * But still allow a long sampling period, so that perf stat 5140 * from KVM works. 5141 */ 5142 if (event->attr.sample_period > 0 && 5143 event->attr.sample_period < 0x7fffffff) 5144 return -EOPNOTSUPP; 5145 } 5146 return 0; 5147 } 5148 5149 static struct event_constraint counter0_constraint = 5150 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1); 5151 5152 static struct event_constraint counter1_constraint = 5153 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2); 5154 5155 static struct event_constraint counter0_1_constraint = 5156 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3); 5157 5158 static struct event_constraint counter2_constraint = 5159 EVENT_CONSTRAINT(0, 0x4, 0); 5160 5161 static struct event_constraint fixed0_constraint = 5162 FIXED_EVENT_CONSTRAINT(0x00c0, 0); 5163 5164 static struct event_constraint fixed0_counter0_constraint = 5165 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); 5166 5167 static struct event_constraint fixed0_counter0_1_constraint = 5168 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL); 5169 5170 static struct event_constraint counters_1_7_constraint = 5171 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL); 5172 5173 static struct event_constraint * 5174 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5175 struct perf_event *event) 5176 { 5177 struct event_constraint *c; 5178 5179 c = intel_get_event_constraints(cpuc, idx, event); 5180 5181 /* Handle special quirk on in_tx_checkpointed only in counter 2 */ 5182 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { 5183 if (c->idxmsk64 & (1U << 2)) 5184 return &counter2_constraint; 5185 return &emptyconstraint; 5186 } 5187 5188 return c; 5189 } 5190 5191 static struct event_constraint * 5192 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5193 struct perf_event *event) 5194 { 5195 /* 5196 * Fixed counter 0 has less skid. 5197 * Force instruction:ppp in Fixed counter 0 5198 */ 5199 if ((event->attr.precise_ip == 3) && 5200 constraint_match(&fixed0_constraint, event->hw.config)) 5201 return &fixed0_constraint; 5202 5203 return hsw_get_event_constraints(cpuc, idx, event); 5204 } 5205 5206 static struct event_constraint * 5207 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5208 struct perf_event *event) 5209 { 5210 struct event_constraint *c; 5211 5212 c = icl_get_event_constraints(cpuc, idx, event); 5213 5214 /* 5215 * The :ppp indicates the Precise Distribution (PDist) facility, which 5216 * is only supported on the GP counter 0. If a :ppp event which is not 5217 * available on the GP counter 0, error out. 5218 * Exception: Instruction PDIR is only available on the fixed counter 0. 
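	 * To illustrate (not an exhaustive list): an instructions:ppp event
	 * matches fixed0_constraint and keeps fixed counter 0, while any
	 * other :ppp event is narrowed to GP counter 0 below, or rejected
	 * with the empty constraint if counter 0 is not in its mask.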
5219 */ 5220 if ((event->attr.precise_ip == 3) && 5221 !constraint_match(&fixed0_constraint, event->hw.config)) { 5222 if (c->idxmsk64 & BIT_ULL(0)) 5223 return &counter0_constraint; 5224 5225 return &emptyconstraint; 5226 } 5227 5228 return c; 5229 } 5230 5231 static struct event_constraint * 5232 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5233 struct perf_event *event) 5234 { 5235 struct event_constraint *c; 5236 5237 /* :ppp means to do reduced skid PEBS which is PMC0 only. */ 5238 if (event->attr.precise_ip == 3) 5239 return &counter0_constraint; 5240 5241 c = intel_get_event_constraints(cpuc, idx, event); 5242 5243 return c; 5244 } 5245 5246 static struct event_constraint * 5247 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5248 struct perf_event *event) 5249 { 5250 struct event_constraint *c; 5251 5252 c = intel_get_event_constraints(cpuc, idx, event); 5253 5254 /* 5255 * :ppp means to do reduced skid PEBS, 5256 * which is available on PMC0 and fixed counter 0. 5257 */ 5258 if (event->attr.precise_ip == 3) { 5259 /* Force instruction:ppp on PMC0 and Fixed counter 0 */ 5260 if (constraint_match(&fixed0_constraint, event->hw.config)) 5261 return &fixed0_counter0_constraint; 5262 5263 return &counter0_constraint; 5264 } 5265 5266 return c; 5267 } 5268 5269 static bool allow_tsx_force_abort = true; 5270 5271 static struct event_constraint * 5272 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5273 struct perf_event *event) 5274 { 5275 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); 5276 5277 /* 5278 * Without TFA we must not use PMC3. 5279 */ 5280 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { 5281 c = dyn_constraint(cpuc, c, idx); 5282 c->idxmsk64 &= ~(1ULL << 3); 5283 c->weight--; 5284 } 5285 5286 return c; 5287 } 5288 5289 static struct event_constraint * 5290 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5291 struct perf_event *event) 5292 { 5293 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 5294 5295 if (pmu->pmu_type == hybrid_big) 5296 return glc_get_event_constraints(cpuc, idx, event); 5297 else if (pmu->pmu_type == hybrid_small) 5298 return tnt_get_event_constraints(cpuc, idx, event); 5299 5300 WARN_ON(1); 5301 return &emptyconstraint; 5302 } 5303 5304 static struct event_constraint * 5305 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5306 struct perf_event *event) 5307 { 5308 struct event_constraint *c; 5309 5310 c = intel_get_event_constraints(cpuc, idx, event); 5311 5312 /* 5313 * The :ppp indicates the Precise Distribution (PDist) facility, which 5314 * is only supported on the GP counter 0 & 1 and Fixed counter 0. 5315 * If a :ppp event which is not available on the above eligible counters, 5316 * error out. 5317 */ 5318 if (event->attr.precise_ip == 3) { 5319 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ 5320 if (constraint_match(&fixed0_constraint, event->hw.config)) { 5321 /* The fixed counter 0 doesn't support LBR event logging. 
*/ 5322 if (branch_sample_counters(event)) 5323 return &counter0_1_constraint; 5324 else 5325 return &fixed0_counter0_1_constraint; 5326 } 5327 5328 switch (c->idxmsk64 & 0x3ull) { 5329 case 0x1: 5330 return &counter0_constraint; 5331 case 0x2: 5332 return &counter1_constraint; 5333 case 0x3: 5334 return &counter0_1_constraint; 5335 } 5336 return &emptyconstraint; 5337 } 5338 5339 return c; 5340 } 5341 5342 static struct event_constraint * 5343 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5344 struct perf_event *event) 5345 { 5346 struct event_constraint *c; 5347 5348 c = glc_get_event_constraints(cpuc, idx, event); 5349 5350 /* The Retire Latency is not supported by the fixed counter 0. */ 5351 if (event->attr.precise_ip && 5352 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) && 5353 constraint_match(&fixed0_constraint, event->hw.config)) { 5354 /* 5355 * The Instruction PDIR is only available 5356 * on the fixed counter 0. Error out for this case. 5357 */ 5358 if (event->attr.precise_ip == 3) 5359 return &emptyconstraint; 5360 return &counters_1_7_constraint; 5361 } 5362 5363 return c; 5364 } 5365 5366 static struct event_constraint * 5367 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5368 struct perf_event *event) 5369 { 5370 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 5371 5372 if (pmu->pmu_type == hybrid_big) 5373 return rwc_get_event_constraints(cpuc, idx, event); 5374 if (pmu->pmu_type == hybrid_small) 5375 return cmt_get_event_constraints(cpuc, idx, event); 5376 5377 WARN_ON(1); 5378 return &emptyconstraint; 5379 } 5380 5381 static int adl_hw_config(struct perf_event *event) 5382 { 5383 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 5384 5385 if (pmu->pmu_type == hybrid_big) 5386 return hsw_hw_config(event); 5387 else if (pmu->pmu_type == hybrid_small) 5388 return intel_pmu_hw_config(event); 5389 5390 WARN_ON(1); 5391 return -EOPNOTSUPP; 5392 } 5393 5394 static enum intel_cpu_type adl_get_hybrid_cpu_type(void) 5395 { 5396 return INTEL_CPU_TYPE_CORE; 5397 } 5398 5399 static inline bool erratum_hsw11(struct perf_event *event) 5400 { 5401 return (event->hw.config & INTEL_ARCH_EVENT_MASK) == 5402 X86_CONFIG(.event=0xc0, .umask=0x01); 5403 } 5404 5405 static struct event_constraint * 5406 arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 5407 struct perf_event *event) 5408 { 5409 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 5410 5411 if (pmu->pmu_type == hybrid_tiny) 5412 return cmt_get_event_constraints(cpuc, idx, event); 5413 5414 return mtl_get_event_constraints(cpuc, idx, event); 5415 } 5416 5417 static int arl_h_hw_config(struct perf_event *event) 5418 { 5419 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 5420 5421 if (pmu->pmu_type == hybrid_tiny) 5422 return intel_pmu_hw_config(event); 5423 5424 return adl_hw_config(event); 5425 } 5426 5427 /* 5428 * The HSW11 requires a period larger than 100 which is the same as the BDM11. 5429 * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL. 5430 * 5431 * The message 'interrupt took too long' can be observed on any counter which 5432 * was armed with a period < 32 and two events expired in the same NMI. 5433 * A minimum period of 32 is enforced for the rest of the events. 5434 */ 5435 static void hsw_limit_period(struct perf_event *event, s64 *left) 5436 { 5437 *left = max(*left, erratum_hsw11(event) ? 
128 : 32); 5438 } 5439 5440 /* 5441 * Broadwell: 5442 * 5443 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared 5444 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine 5445 * the two to enforce a minimum period of 128 (the smallest value that has bits 5446 * 0-5 cleared and >= 100). 5447 * 5448 * Because of how the code in x86_perf_event_set_period() works, the truncation 5449 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period 5450 * to make up for the 'lost' events due to carrying the 'error' in period_left. 5451 * 5452 * Therefore the effective (average) period matches the requested period, 5453 * despite coarser hardware granularity. 5454 */ 5455 static void bdw_limit_period(struct perf_event *event, s64 *left) 5456 { 5457 if (erratum_hsw11(event)) { 5458 if (*left < 128) 5459 *left = 128; 5460 *left &= ~0x3fULL; 5461 } 5462 } 5463 5464 static void nhm_limit_period(struct perf_event *event, s64 *left) 5465 { 5466 *left = max(*left, 32LL); 5467 } 5468 5469 static void glc_limit_period(struct perf_event *event, s64 *left) 5470 { 5471 if (event->attr.precise_ip == 3) 5472 *left = max(*left, 128LL); 5473 } 5474 5475 PMU_FORMAT_ATTR(event, "config:0-7" ); 5476 PMU_FORMAT_ATTR(umask, "config:8-15" ); 5477 PMU_FORMAT_ATTR(edge, "config:18" ); 5478 PMU_FORMAT_ATTR(pc, "config:19" ); 5479 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ 5480 PMU_FORMAT_ATTR(inv, "config:23" ); 5481 PMU_FORMAT_ATTR(cmask, "config:24-31" ); 5482 PMU_FORMAT_ATTR(in_tx, "config:32" ); 5483 PMU_FORMAT_ATTR(in_tx_cp, "config:33" ); 5484 PMU_FORMAT_ATTR(eq, "config:36" ); /* v6 + */ 5485 5486 PMU_FORMAT_ATTR(metrics_clear, "config1:0"); /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */ 5487 5488 static ssize_t umask2_show(struct device *dev, 5489 struct device_attribute *attr, 5490 char *page) 5491 { 5492 u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2; 5493 5494 if (mask == ARCH_PERFMON_EVENTSEL_UMASK2) 5495 return sprintf(page, "config:8-15,40-47\n"); 5496 5497 /* Roll back to the old format if umask2 is not supported. */ 5498 return sprintf(page, "config:8-15\n"); 5499 } 5500 5501 static struct device_attribute format_attr_umask2 = 5502 __ATTR(umask, 0444, umask2_show, NULL); 5503 5504 static struct attribute *format_evtsel_ext_attrs[] = { 5505 &format_attr_umask2.attr, 5506 &format_attr_eq.attr, 5507 &format_attr_metrics_clear.attr, 5508 NULL 5509 }; 5510 5511 static umode_t 5512 evtsel_ext_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5513 { 5514 struct device *dev = kobj_to_dev(kobj); 5515 u64 mask; 5516 5517 /* 5518 * The umask and umask2 have different formats but share the 5519 * same attr name. In update mode, the previous value of the 5520 * umask is unconditionally removed before is_visible. If 5521 * umask2 format is not enumerated, it's impossible to roll 5522 * back to the old format. 5523 * Does the check in umask2_show rather than is_visible. 5524 */ 5525 if (i == 0) 5526 return attr->mode; 5527 5528 mask = hybrid(dev_get_drvdata(dev), config_mask); 5529 if (i == 1) 5530 return (mask & ARCH_PERFMON_EVENTSEL_EQ) ? attr->mode : 0; 5531 5532 /* PERF_CAPABILITIES.RDPMC_METRICS_CLEAR */ 5533 if (i == 2) { 5534 union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap); 5535 5536 return intel_cap.rdpmc_metrics_clear ? 
attr->mode : 0; 5537 } 5538 5539 return 0; 5540 } 5541 5542 static struct attribute *intel_arch_formats_attr[] = { 5543 &format_attr_event.attr, 5544 &format_attr_umask.attr, 5545 &format_attr_edge.attr, 5546 &format_attr_pc.attr, 5547 &format_attr_inv.attr, 5548 &format_attr_cmask.attr, 5549 NULL, 5550 }; 5551 5552 ssize_t intel_event_sysfs_show(char *page, u64 config) 5553 { 5554 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); 5555 5556 return x86_event_sysfs_show(page, config, event); 5557 } 5558 5559 static struct intel_shared_regs *allocate_shared_regs(int cpu) 5560 { 5561 struct intel_shared_regs *regs; 5562 int i; 5563 5564 regs = kzalloc_node(sizeof(struct intel_shared_regs), 5565 GFP_KERNEL, cpu_to_node(cpu)); 5566 if (regs) { 5567 /* 5568 * initialize the locks to keep lockdep happy 5569 */ 5570 for (i = 0; i < EXTRA_REG_MAX; i++) 5571 raw_spin_lock_init(®s->regs[i].lock); 5572 5573 regs->core_id = -1; 5574 } 5575 return regs; 5576 } 5577 5578 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) 5579 { 5580 struct intel_excl_cntrs *c; 5581 5582 c = kzalloc_node(sizeof(struct intel_excl_cntrs), 5583 GFP_KERNEL, cpu_to_node(cpu)); 5584 if (c) { 5585 raw_spin_lock_init(&c->lock); 5586 c->core_id = -1; 5587 } 5588 return c; 5589 } 5590 5591 5592 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) 5593 { 5594 cpuc->pebs_record_size = x86_pmu.pebs_record_size; 5595 5596 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 5597 cpuc->shared_regs = allocate_shared_regs(cpu); 5598 if (!cpuc->shared_regs) 5599 goto err; 5600 } 5601 5602 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) { 5603 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); 5604 5605 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); 5606 if (!cpuc->constraint_list) 5607 goto err_shared_regs; 5608 } 5609 5610 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 5611 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 5612 if (!cpuc->excl_cntrs) 5613 goto err_constraint_list; 5614 5615 cpuc->excl_thread_id = 0; 5616 } 5617 5618 return 0; 5619 5620 err_constraint_list: 5621 kfree(cpuc->constraint_list); 5622 cpuc->constraint_list = NULL; 5623 5624 err_shared_regs: 5625 kfree(cpuc->shared_regs); 5626 cpuc->shared_regs = NULL; 5627 5628 err: 5629 return -ENOMEM; 5630 } 5631 5632 static int intel_pmu_cpu_prepare(int cpu) 5633 { 5634 int ret; 5635 5636 ret = intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); 5637 if (ret) 5638 return ret; 5639 5640 return alloc_arch_pebs_buf_on_cpu(cpu); 5641 } 5642 5643 static void flip_smm_bit(void *data) 5644 { 5645 unsigned long set = *(unsigned long *)data; 5646 5647 if (set > 0) { 5648 msr_set_bit(MSR_IA32_DEBUGCTLMSR, 5649 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 5650 } else { 5651 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, 5652 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 5653 } 5654 } 5655 5656 static void intel_pmu_check_counters_mask(u64 *cntr_mask, 5657 u64 *fixed_cntr_mask, 5658 u64 *intel_ctrl) 5659 { 5660 unsigned int bit; 5661 5662 bit = fls64(*cntr_mask); 5663 if (bit > INTEL_PMC_MAX_GENERIC) { 5664 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", 5665 bit, INTEL_PMC_MAX_GENERIC); 5666 *cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); 5667 } 5668 *intel_ctrl = *cntr_mask; 5669 5670 bit = fls64(*fixed_cntr_mask); 5671 if (bit > INTEL_PMC_MAX_FIXED) { 5672 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", 5673 bit, INTEL_PMC_MAX_FIXED); 5674 *fixed_cntr_mask &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 
0);
	}

	*intel_ctrl |= *fixed_cntr_mask << INTEL_PMC_IDX_FIXED;
}

static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
					      u64 cntr_mask,
					      u64 fixed_cntr_mask,
					      u64 intel_ctrl);

enum dyn_constr_type {
	DYN_CONSTR_NONE,
	DYN_CONSTR_BR_CNTR,
	DYN_CONSTR_ACR_CNTR,
	DYN_CONSTR_ACR_CAUSE,
	DYN_CONSTR_PEBS,
	DYN_CONSTR_PDIST,

	DYN_CONSTR_MAX,
};

static const char * const dyn_constr_type_name[] = {
	[DYN_CONSTR_NONE]	= "a normal event",
	[DYN_CONSTR_BR_CNTR]	= "a branch counter logging event",
	[DYN_CONSTR_ACR_CNTR]	= "an auto-counter reload event",
	[DYN_CONSTR_ACR_CAUSE]	= "an auto-counter reload cause event",
	[DYN_CONSTR_PEBS]	= "a PEBS event",
	[DYN_CONSTR_PDIST]	= "a PEBS PDIST event",
};

static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
					 enum dyn_constr_type type, u64 mask)
{
	struct event_constraint *c1, *c2;
	int new_weight, check_weight;
	u64 new_mask, check_mask;

	for_each_event_constraint(c1, constr) {
		new_mask = c1->idxmsk64 & mask;
		new_weight = hweight64(new_mask);

		/* ignore topdown perf metrics events */
		if (c1->idxmsk64 & INTEL_PMC_MSK_TOPDOWN)
			continue;

		if (!new_weight && fls64(c1->idxmsk64) < INTEL_PMC_IDX_FIXED) {
			pr_info("The event 0x%llx is not supported as %s.\n",
				c1->code, dyn_constr_type_name[type]);
		}

		if (new_weight <= 1)
			continue;

		for_each_event_constraint(c2, c1 + 1) {
			bool check_fail = false;

			check_mask = c2->idxmsk64 & mask;
			check_weight = hweight64(check_mask);

			if (c2->idxmsk64 & INTEL_PMC_MSK_TOPDOWN ||
			    !check_weight)
				continue;

			/* The same constraints or no overlap */
			if (new_mask == check_mask ||
			    (new_mask ^ check_mask) == (new_mask | check_mask))
				continue;

			/*
			 * A scheduler issue may be triggered in the following cases.
			 * - Two overlapping constraints have the same weight.
			 *   E.g., A constraints: 0x3, B constraints: 0x6
			 *	event	counter		failure case
			 *	B	PMC[2:1]	1
			 *	A	PMC[1:0]	0
			 *	A	PMC[1:0]	FAIL
			 * - Two overlapping constraints have different weights, and the
			 *   constraint with the lower weight has the higher last (most
			 *   significant) bit.
5753 * E.g., A constraints: 0x7, B constraints: 0xC 5754 * event counter failure case 5755 * B PMC[3:2] 2 5756 * A PMC[2:0] 0 5757 * A PMC[2:0] 1 5758 * A PMC[2:0] FAIL 5759 */ 5760 if (new_weight == check_weight) { 5761 check_fail = true; 5762 } else if (new_weight < check_weight) { 5763 if ((new_mask | check_mask) != check_mask && 5764 fls64(new_mask) > fls64(check_mask)) 5765 check_fail = true; 5766 } else { 5767 if ((new_mask | check_mask) != new_mask && 5768 fls64(new_mask) < fls64(check_mask)) 5769 check_fail = true; 5770 } 5771 5772 if (check_fail) { 5773 pr_info("The two events 0x%llx and 0x%llx may not be " 5774 "fully scheduled under some circumstances as " 5775 "%s.\n", 5776 c1->code, c2->code, dyn_constr_type_name[type]); 5777 } 5778 } 5779 } 5780 } 5781 5782 static void intel_pmu_check_dyn_constr(struct pmu *pmu, 5783 struct event_constraint *constr, 5784 u64 cntr_mask) 5785 { 5786 enum dyn_constr_type i; 5787 u64 mask; 5788 5789 for (i = DYN_CONSTR_NONE; i < DYN_CONSTR_MAX; i++) { 5790 mask = 0; 5791 switch (i) { 5792 case DYN_CONSTR_NONE: 5793 mask = cntr_mask; 5794 break; 5795 case DYN_CONSTR_BR_CNTR: 5796 if (x86_pmu.flags & PMU_FL_BR_CNTR) 5797 mask = x86_pmu.lbr_counters; 5798 break; 5799 case DYN_CONSTR_ACR_CNTR: 5800 mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); 5801 break; 5802 case DYN_CONSTR_ACR_CAUSE: 5803 if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64)) 5804 continue; 5805 mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); 5806 break; 5807 case DYN_CONSTR_PEBS: 5808 if (x86_pmu.arch_pebs) 5809 mask = hybrid(pmu, arch_pebs_cap).counters; 5810 break; 5811 case DYN_CONSTR_PDIST: 5812 if (x86_pmu.arch_pebs) 5813 mask = hybrid(pmu, arch_pebs_cap).pdists; 5814 break; 5815 default: 5816 pr_warn("Unsupported dynamic constraint type %d\n", i); 5817 } 5818 5819 if (mask) 5820 __intel_pmu_check_dyn_constr(constr, i, mask); 5821 } 5822 } 5823 5824 static void intel_pmu_check_event_constraints_all(struct pmu *pmu) 5825 { 5826 struct event_constraint *event_constraints = hybrid(pmu, event_constraints); 5827 struct event_constraint *pebs_constraints = hybrid(pmu, pebs_constraints); 5828 u64 cntr_mask = hybrid(pmu, cntr_mask64); 5829 u64 fixed_cntr_mask = hybrid(pmu, fixed_cntr_mask64); 5830 u64 intel_ctrl = hybrid(pmu, intel_ctrl); 5831 5832 intel_pmu_check_event_constraints(event_constraints, cntr_mask, 5833 fixed_cntr_mask, intel_ctrl); 5834 5835 if (event_constraints) 5836 intel_pmu_check_dyn_constr(pmu, event_constraints, cntr_mask); 5837 5838 if (pebs_constraints) 5839 intel_pmu_check_dyn_constr(pmu, pebs_constraints, cntr_mask); 5840 } 5841 5842 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); 5843 5844 static inline bool intel_pmu_broken_perf_cap(void) 5845 { 5846 /* The Perf Metric (Bit 15) is always cleared */ 5847 if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE || 5848 boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L) 5849 return true; 5850 5851 return false; 5852 } 5853 5854 static inline void __intel_update_pmu_caps(struct pmu *pmu) 5855 { 5856 struct pmu *dest_pmu = pmu ? 
pmu : x86_get_pmu(smp_processor_id()); 5857 5858 if (hybrid(pmu, arch_pebs_cap).caps & ARCH_PEBS_VECR_XMM) 5859 dest_pmu->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; 5860 } 5861 5862 static inline void __intel_update_large_pebs_flags(struct pmu *pmu) 5863 { 5864 u64 caps = hybrid(pmu, arch_pebs_cap).caps; 5865 5866 x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; 5867 if (caps & ARCH_PEBS_LBR) 5868 x86_pmu.large_pebs_flags |= PERF_SAMPLE_BRANCH_STACK; 5869 if (caps & ARCH_PEBS_CNTR_MASK) 5870 x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ; 5871 5872 if (!(caps & ARCH_PEBS_AUX)) 5873 x86_pmu.large_pebs_flags &= ~PERF_SAMPLE_DATA_SRC; 5874 if (!(caps & ARCH_PEBS_GPR)) { 5875 x86_pmu.large_pebs_flags &= 5876 ~(PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER); 5877 } 5878 } 5879 5880 #define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED)) 5881 5882 static void update_pmu_cap(struct pmu *pmu) 5883 { 5884 unsigned int eax, ebx, ecx, edx; 5885 union cpuid35_eax eax_0; 5886 union cpuid35_ebx ebx_0; 5887 u64 cntrs_mask = 0; 5888 u64 pebs_mask = 0; 5889 u64 pdists_mask = 0; 5890 5891 cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx); 5892 5893 if (ebx_0.split.umask2) 5894 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2; 5895 if (ebx_0.split.eq) 5896 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ; 5897 if (ebx_0.split.rdpmc_user_disable) 5898 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_RDPMC_USER_DISABLE; 5899 5900 if (eax_0.split.cntr_subleaf) { 5901 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, 5902 &eax, &ebx, &ecx, &edx); 5903 hybrid(pmu, cntr_mask64) = eax; 5904 hybrid(pmu, fixed_cntr_mask64) = ebx; 5905 cntrs_mask = counter_mask(eax, ebx); 5906 } 5907 5908 if (eax_0.split.acr_subleaf) { 5909 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF, 5910 &eax, &ebx, &ecx, &edx); 5911 /* The mask of the counters which can be reloaded */ 5912 hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx); 5913 /* The mask of the counters which can cause a reload of reloadable counters */ 5914 hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx); 5915 } 5916 5917 /* Bits[5:4] should be set simultaneously if arch-PEBS is supported */ 5918 if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) { 5919 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF, 5920 &eax, &ebx, &ecx, &edx); 5921 hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32; 5922 5923 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF, 5924 &eax, &ebx, &ecx, &edx); 5925 pebs_mask = counter_mask(eax, ecx); 5926 pdists_mask = counter_mask(ebx, edx); 5927 hybrid(pmu, arch_pebs_cap).counters = pebs_mask; 5928 hybrid(pmu, arch_pebs_cap).pdists = pdists_mask; 5929 5930 if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask)) { 5931 x86_pmu.arch_pebs = 0; 5932 } else { 5933 __intel_update_pmu_caps(pmu); 5934 __intel_update_large_pebs_flags(pmu); 5935 } 5936 } else { 5937 WARN_ON(x86_pmu.arch_pebs == 1); 5938 x86_pmu.arch_pebs = 0; 5939 } 5940 5941 if (!intel_pmu_broken_perf_cap()) { 5942 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ 5943 rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities); 5944 } 5945 } 5946 5947 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) 5948 { 5949 intel_pmu_check_counters_mask(&pmu->cntr_mask64, &pmu->fixed_cntr_mask64, 5950 &pmu->intel_ctrl); 5951 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); 5952 pmu->unconstrained = (struct 
event_constraint) 5953 __EVENT_CONSTRAINT(0, pmu->cntr_mask64, 5954 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); 5955 5956 if (pmu->intel_cap.perf_metrics) 5957 pmu->intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS; 5958 else 5959 pmu->intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS; 5960 5961 pmu->pmu.capabilities |= PERF_PMU_CAP_MEDIATED_VPMU; 5962 5963 intel_pmu_check_event_constraints_all(&pmu->pmu); 5964 5965 intel_pmu_check_extra_regs(pmu->extra_regs); 5966 } 5967 5968 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) 5969 { 5970 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 5971 enum intel_cpu_type cpu_type = c->topo.intel_type; 5972 int i; 5973 5974 /* 5975 * This is running on a CPU model that is known to have hybrid 5976 * configurations. But the CPU told us it is not hybrid, shame 5977 * on it. There should be a fixup function provided for these 5978 * troublesome CPUs (->get_hybrid_cpu_type). 5979 */ 5980 if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) { 5981 if (x86_pmu.get_hybrid_cpu_type) 5982 cpu_type = x86_pmu.get_hybrid_cpu_type(); 5983 else 5984 return NULL; 5985 } 5986 5987 /* 5988 * This essentially just maps between the 'hybrid_cpu_type' 5989 * and 'hybrid_pmu_type' enums except for ARL-H processor 5990 * which needs to compare atom uarch native id since ARL-H 5991 * contains two different atom uarchs. 5992 */ 5993 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { 5994 enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; 5995 u32 native_id; 5996 5997 if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big) 5998 return &x86_pmu.hybrid_pmu[i]; 5999 if (cpu_type == INTEL_CPU_TYPE_ATOM) { 6000 if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small) 6001 return &x86_pmu.hybrid_pmu[i]; 6002 6003 native_id = c->topo.intel_native_model_id; 6004 if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small) 6005 return &x86_pmu.hybrid_pmu[i]; 6006 if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny) 6007 return &x86_pmu.hybrid_pmu[i]; 6008 } 6009 } 6010 6011 return NULL; 6012 } 6013 6014 static bool init_hybrid_pmu(int cpu) 6015 { 6016 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 6017 struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu(); 6018 6019 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { 6020 cpuc->pmu = NULL; 6021 return false; 6022 } 6023 6024 /* Only check and dump the PMU information for the first CPU */ 6025 if (!cpumask_empty(&pmu->supported_cpus)) 6026 goto end; 6027 6028 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) 6029 update_pmu_cap(&pmu->pmu); 6030 6031 intel_pmu_check_hybrid_pmus(pmu); 6032 6033 if (!check_hw_exists(&pmu->pmu, pmu->cntr_mask, pmu->fixed_cntr_mask)) 6034 return false; 6035 6036 pr_info("%s PMU driver: ", pmu->name); 6037 6038 pr_cont("\n"); 6039 6040 x86_pmu_show_pmu_cap(&pmu->pmu); 6041 6042 end: 6043 cpumask_set_cpu(cpu, &pmu->supported_cpus); 6044 cpuc->pmu = &pmu->pmu; 6045 6046 return true; 6047 } 6048 6049 static void intel_pmu_cpu_starting(int cpu) 6050 { 6051 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 6052 int core_id = topology_core_id(cpu); 6053 int i; 6054 6055 if (is_hybrid() && !init_hybrid_pmu(cpu)) 6056 return; 6057 6058 init_debug_store_on_cpu(cpu); 6059 init_arch_pebs_on_cpu(cpu); 6060 /* 6061 * Deal with CPUs that don't clear their LBRs on power-up, and that may 6062 * even boot with LBRs enabled. 
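	 *
	 * Added note (descriptive, not from the original comment): for the
	 * legacy, non-architectural LBR case this boils down to
	 *
	 *	msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
	 *	intel_pmu_lbr_reset();
	 *
	 * i.e. stop any recording that may already be running, then clear
	 * out whatever stale records the stack still holds - exactly the
	 * two steps performed below.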
6063 */ 6064 if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr) 6065 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT); 6066 intel_pmu_lbr_reset(); 6067 6068 cpuc->lbr_sel = NULL; 6069 6070 if (x86_pmu.flags & PMU_FL_TFA) { 6071 WARN_ON_ONCE(cpuc->tfa_shadow); 6072 cpuc->tfa_shadow = ~0ULL; 6073 intel_set_tfa(cpuc, false); 6074 } 6075 6076 if (x86_pmu.version > 1) 6077 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 6078 6079 /* 6080 * Disable perf metrics if any added CPU doesn't support it. 6081 * 6082 * Turn off the check for a hybrid architecture, because the 6083 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate 6084 * the architecture features. The perf metrics is a model-specific 6085 * feature for now. The corresponding bit should always be 0 on 6086 * a hybrid platform, e.g., Alder Lake. 6087 */ 6088 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { 6089 union perf_capabilities perf_cap; 6090 6091 rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); 6092 if (!perf_cap.perf_metrics) { 6093 x86_pmu.intel_cap.perf_metrics = 0; 6094 x86_pmu.intel_ctrl &= ~GLOBAL_CTRL_EN_PERF_METRICS; 6095 } 6096 } 6097 6098 __intel_update_pmu_caps(cpuc->pmu); 6099 6100 if (!cpuc->shared_regs) 6101 return; 6102 6103 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 6104 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 6105 struct intel_shared_regs *pc; 6106 6107 pc = per_cpu(cpu_hw_events, i).shared_regs; 6108 if (pc && pc->core_id == core_id) { 6109 cpuc->kfree_on_online[0] = cpuc->shared_regs; 6110 cpuc->shared_regs = pc; 6111 break; 6112 } 6113 } 6114 cpuc->shared_regs->core_id = core_id; 6115 cpuc->shared_regs->refcnt++; 6116 } 6117 6118 if (x86_pmu.lbr_sel_map) 6119 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 6120 6121 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 6122 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 6123 struct cpu_hw_events *sibling; 6124 struct intel_excl_cntrs *c; 6125 6126 sibling = &per_cpu(cpu_hw_events, i); 6127 c = sibling->excl_cntrs; 6128 if (c && c->core_id == core_id) { 6129 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 6130 cpuc->excl_cntrs = c; 6131 if (!sibling->excl_thread_id) 6132 cpuc->excl_thread_id = 1; 6133 break; 6134 } 6135 } 6136 cpuc->excl_cntrs->core_id = core_id; 6137 cpuc->excl_cntrs->refcnt++; 6138 } 6139 } 6140 6141 static void free_excl_cntrs(struct cpu_hw_events *cpuc) 6142 { 6143 struct intel_excl_cntrs *c; 6144 6145 c = cpuc->excl_cntrs; 6146 if (c) { 6147 if (c->core_id == -1 || --c->refcnt == 0) 6148 kfree(c); 6149 cpuc->excl_cntrs = NULL; 6150 } 6151 6152 kfree(cpuc->constraint_list); 6153 cpuc->constraint_list = NULL; 6154 } 6155 6156 static void intel_pmu_cpu_dying(int cpu) 6157 { 6158 fini_debug_store_on_cpu(cpu); 6159 fini_arch_pebs_on_cpu(cpu); 6160 } 6161 6162 void intel_cpuc_finish(struct cpu_hw_events *cpuc) 6163 { 6164 struct intel_shared_regs *pc; 6165 6166 pc = cpuc->shared_regs; 6167 if (pc) { 6168 if (pc->core_id == -1 || --pc->refcnt == 0) 6169 kfree(pc); 6170 cpuc->shared_regs = NULL; 6171 } 6172 6173 free_excl_cntrs(cpuc); 6174 } 6175 6176 static void intel_pmu_cpu_dead(int cpu) 6177 { 6178 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 6179 6180 release_arch_pebs_buf_on_cpu(cpu); 6181 intel_cpuc_finish(cpuc); 6182 6183 if (is_hybrid() && cpuc->pmu) 6184 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); 6185 } 6186 6187 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, 6188 struct task_struct *task, bool sched_in) 6189 { 6190 
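	/*
	 * Added note (descriptive, not from the original source): both the
	 * PEBS and the LBR code keep per-context state that must follow the
	 * task across a context switch (e.g. buffered PEBS records to flush,
	 * the LBR call stack to save/restore), so this simply fans out to
	 * the two subsystem-specific sched_task() handlers.
	 */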
intel_pmu_pebs_sched_task(pmu_ctx, sched_in); 6191 intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in); 6192 } 6193 6194 static int intel_pmu_check_period(struct perf_event *event, u64 value) 6195 { 6196 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; 6197 } 6198 6199 static void intel_aux_output_init(void) 6200 { 6201 /* Refer also intel_pmu_aux_output_match() */ 6202 if (x86_pmu.intel_cap.pebs_output_pt_available) 6203 x86_pmu.assign = intel_pmu_assign_event; 6204 } 6205 6206 static int intel_pmu_aux_output_match(struct perf_event *event) 6207 { 6208 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */ 6209 if (!x86_pmu.intel_cap.pebs_output_pt_available) 6210 return 0; 6211 6212 return is_intel_pt_event(event); 6213 } 6214 6215 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret) 6216 { 6217 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu); 6218 6219 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus); 6220 } 6221 6222 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 6223 6224 PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 6225 6226 PMU_FORMAT_ATTR(frontend, "config1:0-23"); 6227 6228 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63"); 6229 6230 static struct attribute *intel_arch3_formats_attr[] = { 6231 &format_attr_event.attr, 6232 &format_attr_umask.attr, 6233 &format_attr_edge.attr, 6234 &format_attr_pc.attr, 6235 &format_attr_any.attr, 6236 &format_attr_inv.attr, 6237 &format_attr_cmask.attr, 6238 NULL, 6239 }; 6240 6241 static struct attribute *hsw_format_attr[] = { 6242 &format_attr_in_tx.attr, 6243 &format_attr_in_tx_cp.attr, 6244 &format_attr_offcore_rsp.attr, 6245 &format_attr_ldlat.attr, 6246 NULL 6247 }; 6248 6249 static struct attribute *nhm_format_attr[] = { 6250 &format_attr_offcore_rsp.attr, 6251 &format_attr_ldlat.attr, 6252 NULL 6253 }; 6254 6255 static struct attribute *slm_format_attr[] = { 6256 &format_attr_offcore_rsp.attr, 6257 NULL 6258 }; 6259 6260 static struct attribute *cmt_format_attr[] = { 6261 &format_attr_offcore_rsp.attr, 6262 &format_attr_ldlat.attr, 6263 &format_attr_snoop_rsp.attr, 6264 NULL 6265 }; 6266 6267 static struct attribute *skl_format_attr[] = { 6268 &format_attr_frontend.attr, 6269 NULL, 6270 }; 6271 6272 static __initconst const struct x86_pmu core_pmu = { 6273 .name = "core", 6274 .handle_irq = x86_pmu_handle_irq, 6275 .disable_all = x86_pmu_disable_all, 6276 .enable_all = core_pmu_enable_all, 6277 .enable = core_pmu_enable_event, 6278 .disable = x86_pmu_disable_event, 6279 .hw_config = core_pmu_hw_config, 6280 .schedule_events = x86_schedule_events, 6281 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 6282 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 6283 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0, 6284 .event_map = intel_pmu_event_map, 6285 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 6286 .apic = 1, 6287 .large_pebs_flags = LARGE_PEBS_FLAGS, 6288 6289 /* 6290 * Intel PMCs cannot be accessed sanely above 32-bit width, 6291 * so we install an artificial 1<<31 period regardless of 6292 * the generic event period: 6293 */ 6294 .max_period = (1ULL<<31) - 1, 6295 .get_event_constraints = intel_get_event_constraints, 6296 .put_event_constraints = intel_put_event_constraints, 6297 .event_constraints = intel_core_event_constraints, 6298 .guest_get_msrs = core_guest_get_msrs, 6299 .format_attrs = intel_arch_formats_attr, 6300 .events_sysfs_show = intel_event_sysfs_show, 6301 6302 /* 6303 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs 6304 * together with PMU version 1 and thus be using core_pmu with 6305 * shared_regs. 
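	 * (Added hypothetical example, not from the original comment: a
	 * hypervisor that advertises only architectural perfmon v1 but still
	 * passes the OFFCORE_RSP extra MSRs through to the guest would take
	 * this path.)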
We need following callbacks here to allocate 6306 * it properly. 6307 */ 6308 .cpu_prepare = intel_pmu_cpu_prepare, 6309 .cpu_starting = intel_pmu_cpu_starting, 6310 .cpu_dying = intel_pmu_cpu_dying, 6311 .cpu_dead = intel_pmu_cpu_dead, 6312 6313 .check_period = intel_pmu_check_period, 6314 6315 .lbr_reset = intel_pmu_lbr_reset_64, 6316 .lbr_read = intel_pmu_lbr_read_64, 6317 .lbr_save = intel_pmu_lbr_save, 6318 .lbr_restore = intel_pmu_lbr_restore, 6319 }; 6320 6321 static __initconst const struct x86_pmu intel_pmu = { 6322 .name = "Intel", 6323 .handle_irq = intel_pmu_handle_irq, 6324 .disable_all = intel_pmu_disable_all, 6325 .enable_all = intel_pmu_enable_all, 6326 .enable = intel_pmu_enable_event, 6327 .disable = intel_pmu_disable_event, 6328 .add = intel_pmu_add_event, 6329 .del = intel_pmu_del_event, 6330 .read = intel_pmu_read_event, 6331 .set_period = intel_pmu_set_period, 6332 .update = intel_pmu_update, 6333 .hw_config = intel_pmu_hw_config, 6334 .schedule_events = x86_schedule_events, 6335 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 6336 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 6337 .fixedctr = MSR_ARCH_PERFMON_FIXED_CTR0, 6338 .event_map = intel_pmu_event_map, 6339 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 6340 .apic = 1, 6341 .large_pebs_flags = LARGE_PEBS_FLAGS, 6342 /* 6343 * Intel PMCs cannot be accessed sanely above 32 bit width, 6344 * so we install an artificial 1<<31 period regardless of 6345 * the generic event period: 6346 */ 6347 .max_period = (1ULL << 31) - 1, 6348 .get_event_constraints = intel_get_event_constraints, 6349 .put_event_constraints = intel_put_event_constraints, 6350 .pebs_aliases = intel_pebs_aliases_core2, 6351 6352 .format_attrs = intel_arch3_formats_attr, 6353 .events_sysfs_show = intel_event_sysfs_show, 6354 6355 .cpu_prepare = intel_pmu_cpu_prepare, 6356 .cpu_starting = intel_pmu_cpu_starting, 6357 .cpu_dying = intel_pmu_cpu_dying, 6358 .cpu_dead = intel_pmu_cpu_dead, 6359 6360 .guest_get_msrs = intel_guest_get_msrs, 6361 .sched_task = intel_pmu_sched_task, 6362 6363 .check_period = intel_pmu_check_period, 6364 6365 .aux_output_match = intel_pmu_aux_output_match, 6366 6367 .lbr_reset = intel_pmu_lbr_reset_64, 6368 .lbr_read = intel_pmu_lbr_read_64, 6369 .lbr_save = intel_pmu_lbr_save, 6370 .lbr_restore = intel_pmu_lbr_restore, 6371 6372 /* 6373 * SMM has access to all 4 rings and while traditionally SMM code only 6374 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. 6375 * 6376 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction 6377 * between SMM or not, this results in what should be pure userspace 6378 * counters including SMM data. 6379 * 6380 * This is a clear privilege issue, therefore globally disable 6381 * counting SMM by default. 6382 */ 6383 .attr_freeze_on_smi = 1, 6384 }; 6385 6386 static __init void intel_clovertown_quirk(void) 6387 { 6388 /* 6389 * PEBS is unreliable due to: 6390 * 6391 * AJ67 - PEBS may experience CPL leaks 6392 * AJ68 - PEBS PMI may be delayed by one event 6393 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] 6394 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS 6395 * 6396 * AJ67 could be worked around by restricting the OS/USR flags. 6397 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. 6398 * 6399 * AJ106 could possibly be worked around by not allowing LBR 6400 * usage from PEBS, including the fixup. 
6401 * AJ68 could possibly be worked around by always programming 6402 * a pebs_event_reset[0] value and coping with the lost events. 6403 * 6404 * But taken together it might just make sense to not enable PEBS on 6405 * these chips. 6406 */ 6407 pr_warn("PEBS disabled due to CPU errata\n"); 6408 x86_pmu.ds_pebs = 0; 6409 x86_pmu.pebs_constraints = NULL; 6410 } 6411 6412 static const struct x86_cpu_id isolation_ucodes[] = { 6413 X86_MATCH_VFM_STEPS(INTEL_HASWELL, 3, 3, 0x0000001f), 6414 X86_MATCH_VFM_STEPS(INTEL_HASWELL_L, 1, 1, 0x0000001e), 6415 X86_MATCH_VFM_STEPS(INTEL_HASWELL_G, 1, 1, 0x00000015), 6416 X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 2, 2, 0x00000037), 6417 X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 4, 4, 0x0000000a), 6418 X86_MATCH_VFM_STEPS(INTEL_BROADWELL, 4, 4, 0x00000023), 6419 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G, 1, 1, 0x00000014), 6420 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 2, 2, 0x00000010), 6421 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 3, 3, 0x07000009), 6422 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 4, 4, 0x0f000009), 6423 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 5, 5, 0x0e000002), 6424 X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X, 1, 1, 0x0b000014), 6425 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 3, 3, 0x00000021), 6426 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 4, 7, 0x00000000), 6427 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 11, 11, 0x00000000), 6428 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L, 3, 3, 0x0000007c), 6429 X86_MATCH_VFM_STEPS(INTEL_SKYLAKE, 3, 3, 0x0000007c), 6430 X86_MATCH_VFM_STEPS(INTEL_KABYLAKE, 9, 13, 0x0000004e), 6431 X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L, 9, 12, 0x0000004e), 6432 {} 6433 }; 6434 6435 static void intel_check_pebs_isolation(void) 6436 { 6437 x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes); 6438 } 6439 6440 static __init void intel_pebs_isolation_quirk(void) 6441 { 6442 WARN_ON_ONCE(x86_pmu.check_microcode); 6443 x86_pmu.check_microcode = intel_check_pebs_isolation; 6444 intel_check_pebs_isolation(); 6445 } 6446 6447 static const struct x86_cpu_id pebs_ucodes[] = { 6448 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE, 7, 7, 0x00000028), 6449 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 6, 6, 0x00000618), 6450 X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 7, 7, 0x0000070c), 6451 {} 6452 }; 6453 6454 static bool intel_snb_pebs_broken(void) 6455 { 6456 return !x86_match_min_microcode_rev(pebs_ucodes); 6457 } 6458 6459 static void intel_snb_check_microcode(void) 6460 { 6461 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) 6462 return; 6463 6464 /* 6465 * Serialized by the microcode lock.. 6466 */ 6467 if (x86_pmu.pebs_broken) { 6468 pr_info("PEBS enabled due to microcode update\n"); 6469 x86_pmu.pebs_broken = 0; 6470 } else { 6471 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); 6472 x86_pmu.pebs_broken = 1; 6473 } 6474 } 6475 6476 static bool is_lbr_from(unsigned long msr) 6477 { 6478 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; 6479 6480 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr; 6481 } 6482 6483 /* 6484 * Under certain circumstances, access certain MSR may cause #GP. 6485 * The function tests if the input MSR can be safely accessed. 
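 *
 * Added sketch (illustrative, slightly simplified) of the probe sequence
 * implemented below:
 *
 *	rdmsrq_safe(msr, &old);			// must not fault
 *	wrmsrq_safe(msr, old ^ mask);		// flip the probe bits
 *	rdmsrq_safe(msr, &new);			// read the value back
 *	ok = (new == (old ^ mask));		// catches emulators that
 *						// silently return 0s
 *	wrmsrq(msr, old);			// restore the original value
 *
 * The LBR_FROM sign-extension quirk is additionally applied to the probe
 * and restore values where it applies.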
6486 */ 6487 static bool check_msr(unsigned long msr, u64 mask) 6488 { 6489 u64 val_old, val_new, val_tmp; 6490 6491 /* 6492 * Disable the check for real HW, so we don't 6493 * mess with potentially enabled registers: 6494 */ 6495 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 6496 return true; 6497 6498 /* 6499 * Read the current value, change it and read it back to see if it 6500 * matches, this is needed to detect certain hardware emulators 6501 * (qemu/kvm) that don't trap on the MSR access and always return 0s. 6502 */ 6503 if (rdmsrq_safe(msr, &val_old)) 6504 return false; 6505 6506 /* 6507 * Only change the bits which can be updated by wrmsrq. 6508 */ 6509 val_tmp = val_old ^ mask; 6510 6511 if (is_lbr_from(msr)) 6512 val_tmp = lbr_from_signext_quirk_wr(val_tmp); 6513 6514 if (wrmsrq_safe(msr, val_tmp) || 6515 rdmsrq_safe(msr, &val_new)) 6516 return false; 6517 6518 /* 6519 * Quirk only affects validation in wrmsr(), so wrmsrq()'s value 6520 * should equal rdmsrq()'s even with the quirk. 6521 */ 6522 if (val_new != val_tmp) 6523 return false; 6524 6525 if (is_lbr_from(msr)) 6526 val_old = lbr_from_signext_quirk_wr(val_old); 6527 6528 /* Here it's sure that the MSR can be safely accessed. 6529 * Restore the old value and return. 6530 */ 6531 wrmsrq(msr, val_old); 6532 6533 return true; 6534 } 6535 6536 static __init void intel_sandybridge_quirk(void) 6537 { 6538 x86_pmu.check_microcode = intel_snb_check_microcode; 6539 cpus_read_lock(); 6540 intel_snb_check_microcode(); 6541 cpus_read_unlock(); 6542 } 6543 6544 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { 6545 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, 6546 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, 6547 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, 6548 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, 6549 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, 6550 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, 6551 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, 6552 }; 6553 6554 static __init void intel_arch_events_quirk(void) 6555 { 6556 int bit; 6557 6558 /* disable event that reported as not present by cpuid */ 6559 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { 6560 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; 6561 pr_warn("CPUID marked event: \'%s\' unavailable\n", 6562 intel_arch_events_map[bit].name); 6563 } 6564 } 6565 6566 static __init void intel_nehalem_quirk(void) 6567 { 6568 union cpuid10_ebx ebx; 6569 6570 ebx.full = x86_pmu.events_maskl; 6571 if (ebx.split.no_branch_misses_retired) { 6572 /* 6573 * Erratum AAJ80 detected, we work it around by using 6574 * the BR_MISP_EXEC.ANY event. This will over-count 6575 * branch-misses, but it's still much better than the 6576 * architectural event which is often completely bogus: 6577 */ 6578 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; 6579 ebx.split.no_branch_misses_retired = 0; 6580 x86_pmu.events_maskl = ebx.full; 6581 pr_info("CPU erratum AAJ80 worked around\n"); 6582 } 6583 } 6584 6585 /* 6586 * enable software workaround for errata: 6587 * SNB: BJ122 6588 * IVB: BV98 6589 * HSW: HSD29 6590 * 6591 * Only needed when HT is enabled. However detecting 6592 * if HT is enabled is difficult (model specific). 
So instead, 6593 * we enable the workaround in the early boot, and verify if 6594 * it is needed in a later initcall phase once we have valid 6595 * topology information to check if HT is actually enabled 6596 */ 6597 static __init void intel_ht_bug(void) 6598 { 6599 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED; 6600 6601 x86_pmu.start_scheduling = intel_start_scheduling; 6602 x86_pmu.commit_scheduling = intel_commit_scheduling; 6603 x86_pmu.stop_scheduling = intel_stop_scheduling; 6604 } 6605 6606 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); 6607 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82") 6608 6609 /* Haswell special events */ 6610 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1"); 6611 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2"); 6612 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4"); 6613 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2"); 6614 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1"); 6615 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1"); 6616 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2"); 6617 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4"); 6618 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2"); 6619 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1"); 6620 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); 6621 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); 6622 6623 static struct attribute *hsw_events_attrs[] = { 6624 EVENT_PTR(td_slots_issued), 6625 EVENT_PTR(td_slots_retired), 6626 EVENT_PTR(td_fetch_bubbles), 6627 EVENT_PTR(td_total_slots), 6628 EVENT_PTR(td_total_slots_scale), 6629 EVENT_PTR(td_recovery_bubbles), 6630 EVENT_PTR(td_recovery_bubbles_scale), 6631 NULL 6632 }; 6633 6634 static struct attribute *hsw_mem_events_attrs[] = { 6635 EVENT_PTR(mem_ld_hsw), 6636 EVENT_PTR(mem_st_hsw), 6637 NULL, 6638 }; 6639 6640 static struct attribute *hsw_tsx_events_attrs[] = { 6641 EVENT_PTR(tx_start), 6642 EVENT_PTR(tx_commit), 6643 EVENT_PTR(tx_abort), 6644 EVENT_PTR(tx_capacity), 6645 EVENT_PTR(tx_conflict), 6646 EVENT_PTR(el_start), 6647 EVENT_PTR(el_commit), 6648 EVENT_PTR(el_abort), 6649 EVENT_PTR(el_capacity), 6650 EVENT_PTR(el_conflict), 6651 EVENT_PTR(cycles_t), 6652 EVENT_PTR(cycles_ct), 6653 NULL 6654 }; 6655 6656 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80"); 6657 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); 6658 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80"); 6659 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); 6660 6661 static struct attribute *icl_events_attrs[] = { 6662 EVENT_PTR(mem_ld_hsw), 6663 EVENT_PTR(mem_st_hsw), 6664 NULL, 6665 }; 6666 6667 static struct attribute *icl_td_events_attrs[] = { 6668 EVENT_PTR(slots), 6669 EVENT_PTR(td_retiring), 6670 EVENT_PTR(td_bad_spec), 6671 EVENT_PTR(td_fe_bound), 6672 EVENT_PTR(td_be_bound), 6673 NULL, 6674 }; 6675 6676 static struct attribute *icl_tsx_events_attrs[] = { 6677 EVENT_PTR(tx_start), 6678 EVENT_PTR(tx_abort), 6679 EVENT_PTR(tx_commit), 6680 EVENT_PTR(tx_capacity_read), 6681 EVENT_PTR(tx_capacity_write), 6682 EVENT_PTR(tx_conflict), 6683 EVENT_PTR(el_start), 6684 EVENT_PTR(el_abort), 6685 EVENT_PTR(el_commit), 6686 EVENT_PTR(el_capacity_read), 6687 EVENT_PTR(el_capacity_write), 6688 EVENT_PTR(el_conflict), 6689 EVENT_PTR(cycles_t), 6690 EVENT_PTR(cycles_ct), 6691 NULL, 6692 
}; 6693 6694 6695 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); 6696 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); 6697 6698 static struct attribute *glc_events_attrs[] = { 6699 EVENT_PTR(mem_ld_hsw), 6700 EVENT_PTR(mem_st_spr), 6701 EVENT_PTR(mem_ld_aux), 6702 NULL, 6703 }; 6704 6705 static struct attribute *glc_td_events_attrs[] = { 6706 EVENT_PTR(slots), 6707 EVENT_PTR(td_retiring), 6708 EVENT_PTR(td_bad_spec), 6709 EVENT_PTR(td_fe_bound), 6710 EVENT_PTR(td_be_bound), 6711 EVENT_PTR(td_heavy_ops), 6712 EVENT_PTR(td_br_mispredict), 6713 EVENT_PTR(td_fetch_lat), 6714 EVENT_PTR(td_mem_bound), 6715 NULL, 6716 }; 6717 6718 static struct attribute *glc_tsx_events_attrs[] = { 6719 EVENT_PTR(tx_start), 6720 EVENT_PTR(tx_abort), 6721 EVENT_PTR(tx_commit), 6722 EVENT_PTR(tx_capacity_read), 6723 EVENT_PTR(tx_capacity_write), 6724 EVENT_PTR(tx_conflict), 6725 EVENT_PTR(cycles_t), 6726 EVENT_PTR(cycles_ct), 6727 NULL, 6728 }; 6729 6730 static ssize_t freeze_on_smi_show(struct device *cdev, 6731 struct device_attribute *attr, 6732 char *buf) 6733 { 6734 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi); 6735 } 6736 6737 static DEFINE_MUTEX(freeze_on_smi_mutex); 6738 6739 static ssize_t freeze_on_smi_store(struct device *cdev, 6740 struct device_attribute *attr, 6741 const char *buf, size_t count) 6742 { 6743 unsigned long val; 6744 ssize_t ret; 6745 6746 ret = kstrtoul(buf, 0, &val); 6747 if (ret) 6748 return ret; 6749 6750 if (val > 1) 6751 return -EINVAL; 6752 6753 mutex_lock(&freeze_on_smi_mutex); 6754 6755 if (x86_pmu.attr_freeze_on_smi == val) 6756 goto done; 6757 6758 x86_pmu.attr_freeze_on_smi = val; 6759 6760 cpus_read_lock(); 6761 on_each_cpu(flip_smm_bit, &val, 1); 6762 cpus_read_unlock(); 6763 done: 6764 mutex_unlock(&freeze_on_smi_mutex); 6765 6766 return count; 6767 } 6768 6769 static void update_tfa_sched(void *ignored) 6770 { 6771 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 6772 6773 /* 6774 * check if PMC3 is used 6775 * and if so force schedule out for all event types all contexts 6776 */ 6777 if (test_bit(3, cpuc->active_mask)) 6778 perf_pmu_resched(x86_get_pmu(smp_processor_id())); 6779 } 6780 6781 static ssize_t show_sysctl_tfa(struct device *cdev, 6782 struct device_attribute *attr, 6783 char *buf) 6784 { 6785 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort); 6786 } 6787 6788 static ssize_t set_sysctl_tfa(struct device *cdev, 6789 struct device_attribute *attr, 6790 const char *buf, size_t count) 6791 { 6792 bool val; 6793 ssize_t ret; 6794 6795 ret = kstrtobool(buf, &val); 6796 if (ret) 6797 return ret; 6798 6799 /* no change */ 6800 if (val == allow_tsx_force_abort) 6801 return count; 6802 6803 allow_tsx_force_abort = val; 6804 6805 cpus_read_lock(); 6806 on_each_cpu(update_tfa_sched, NULL, 1); 6807 cpus_read_unlock(); 6808 6809 return count; 6810 } 6811 6812 6813 static DEVICE_ATTR_RW(freeze_on_smi); 6814 6815 static ssize_t branches_show(struct device *cdev, 6816 struct device_attribute *attr, 6817 char *buf) 6818 { 6819 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); 6820 } 6821 6822 static DEVICE_ATTR_RO(branches); 6823 6824 static ssize_t branch_counter_nr_show(struct device *cdev, 6825 struct device_attribute *attr, 6826 char *buf) 6827 { 6828 return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); 6829 } 6830 6831 static DEVICE_ATTR_RO(branch_counter_nr); 6832 6833 static ssize_t branch_counter_width_show(struct device *cdev, 6834 struct device_attribute *attr, 6835 char *buf) 6836 
{ 6837 return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS); 6838 } 6839 6840 static DEVICE_ATTR_RO(branch_counter_width); 6841 6842 static struct attribute *lbr_attrs[] = { 6843 &dev_attr_branches.attr, 6844 &dev_attr_branch_counter_nr.attr, 6845 &dev_attr_branch_counter_width.attr, 6846 NULL 6847 }; 6848 6849 static umode_t 6850 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6851 { 6852 /* branches */ 6853 if (i == 0) 6854 return x86_pmu.lbr_nr ? attr->mode : 0; 6855 6856 return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0; 6857 } 6858 6859 static char pmu_name_str[30]; 6860 6861 static DEVICE_STRING_ATTR_RO(pmu_name, 0444, pmu_name_str); 6862 6863 static struct attribute *intel_pmu_caps_attrs[] = { 6864 &dev_attr_pmu_name.attr.attr, 6865 NULL 6866 }; 6867 6868 static DEVICE_ATTR(allow_tsx_force_abort, 0644, 6869 show_sysctl_tfa, 6870 set_sysctl_tfa); 6871 6872 static struct attribute *intel_pmu_attrs[] = { 6873 &dev_attr_freeze_on_smi.attr, 6874 &dev_attr_allow_tsx_force_abort.attr, 6875 NULL, 6876 }; 6877 6878 static umode_t 6879 default_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6880 { 6881 if (attr == &dev_attr_allow_tsx_force_abort.attr) 6882 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; 6883 6884 return attr->mode; 6885 } 6886 6887 static umode_t 6888 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6889 { 6890 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0; 6891 } 6892 6893 static umode_t 6894 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6895 { 6896 return intel_pmu_has_pebs() ? attr->mode : 0; 6897 } 6898 6899 static umode_t 6900 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6901 { 6902 if (attr == &event_attr_mem_ld_aux.attr.attr) 6903 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0; 6904 6905 return pebs_is_visible(kobj, attr, i); 6906 } 6907 6908 static umode_t 6909 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6910 { 6911 return x86_pmu.version >= 2 ? attr->mode : 0; 6912 } 6913 6914 static umode_t 6915 td_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6916 { 6917 /* 6918 * Hide the perf metrics topdown events 6919 * if the feature is not enumerated. 6920 */ 6921 if (x86_pmu.num_topdown_events) 6922 return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0; 6923 6924 return attr->mode; 6925 } 6926 6927 PMU_FORMAT_ATTR(acr_mask, "config2:0-63"); 6928 6929 static struct attribute *format_acr_attrs[] = { 6930 &format_attr_acr_mask.attr, 6931 NULL 6932 }; 6933 6934 static umode_t 6935 acr_is_visible(struct kobject *kobj, struct attribute *attr, int i) 6936 { 6937 struct device *dev = kobj_to_dev(kobj); 6938 6939 return intel_pmu_has_acr(dev_get_drvdata(dev)) ? 
attr->mode : 0; 6940 } 6941 6942 static struct attribute_group group_events_td = { 6943 .name = "events", 6944 .is_visible = td_is_visible, 6945 }; 6946 6947 static struct attribute_group group_events_mem = { 6948 .name = "events", 6949 .is_visible = mem_is_visible, 6950 }; 6951 6952 static struct attribute_group group_events_tsx = { 6953 .name = "events", 6954 .is_visible = tsx_is_visible, 6955 }; 6956 6957 static struct attribute_group group_caps_gen = { 6958 .name = "caps", 6959 .attrs = intel_pmu_caps_attrs, 6960 }; 6961 6962 static struct attribute_group group_caps_lbr = { 6963 .name = "caps", 6964 .attrs = lbr_attrs, 6965 .is_visible = lbr_is_visible, 6966 }; 6967 6968 static struct attribute_group group_format_extra = { 6969 .name = "format", 6970 .is_visible = exra_is_visible, 6971 }; 6972 6973 static struct attribute_group group_format_extra_skl = { 6974 .name = "format", 6975 .is_visible = exra_is_visible, 6976 }; 6977 6978 static struct attribute_group group_format_evtsel_ext = { 6979 .name = "format", 6980 .attrs = format_evtsel_ext_attrs, 6981 .is_visible = evtsel_ext_is_visible, 6982 }; 6983 6984 static struct attribute_group group_format_acr = { 6985 .name = "format", 6986 .attrs = format_acr_attrs, 6987 .is_visible = acr_is_visible, 6988 }; 6989 6990 static struct attribute_group group_default = { 6991 .attrs = intel_pmu_attrs, 6992 .is_visible = default_is_visible, 6993 }; 6994 6995 static const struct attribute_group *attr_update[] = { 6996 &group_events_td, 6997 &group_events_mem, 6998 &group_events_tsx, 6999 &group_caps_gen, 7000 &group_caps_lbr, 7001 &group_format_extra, 7002 &group_format_extra_skl, 7003 &group_format_evtsel_ext, 7004 &group_format_acr, 7005 &group_default, 7006 NULL, 7007 }; 7008 7009 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big); 7010 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small); 7011 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small); 7012 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small); 7013 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small); 7014 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big); 7015 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big); 7016 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big); 7017 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big); 7018 7019 static struct attribute *adl_hybrid_events_attrs[] = { 7020 EVENT_PTR(slots_adl), 7021 EVENT_PTR(td_retiring_adl), 7022 EVENT_PTR(td_bad_spec_adl), 7023 EVENT_PTR(td_fe_bound_adl), 7024 EVENT_PTR(td_be_bound_adl), 7025 EVENT_PTR(td_heavy_ops_adl), 7026 EVENT_PTR(td_br_mis_adl), 7027 EVENT_PTR(td_fetch_lat_adl), 7028 EVENT_PTR(td_mem_bound_adl), 7029 NULL, 7030 }; 7031 7032 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small); 7033 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small); 7034 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small); 7035 7036 static struct attribute 
*lnl_hybrid_events_attrs[] = { 7037 EVENT_PTR(slots_adl), 7038 EVENT_PTR(td_retiring_lnl), 7039 EVENT_PTR(td_bad_spec_adl), 7040 EVENT_PTR(td_fe_bound_lnl), 7041 EVENT_PTR(td_be_bound_lnl), 7042 EVENT_PTR(td_heavy_ops_adl), 7043 EVENT_PTR(td_br_mis_adl), 7044 EVENT_PTR(td_fetch_lat_adl), 7045 EVENT_PTR(td_mem_bound_adl), 7046 NULL 7047 }; 7048 7049 /* The event string must be in PMU IDX order. */ 7050 EVENT_ATTR_STR_HYBRID(topdown-retiring, 7051 td_retiring_arl_h, 7052 "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0", 7053 hybrid_big_small_tiny); 7054 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, 7055 td_bad_spec_arl_h, 7056 "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0", 7057 hybrid_big_small_tiny); 7058 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, 7059 td_fe_bound_arl_h, 7060 "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0", 7061 hybrid_big_small_tiny); 7062 EVENT_ATTR_STR_HYBRID(topdown-be-bound, 7063 td_be_bound_arl_h, 7064 "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0", 7065 hybrid_big_small_tiny); 7066 7067 static struct attribute *arl_h_hybrid_events_attrs[] = { 7068 EVENT_PTR(slots_adl), 7069 EVENT_PTR(td_retiring_arl_h), 7070 EVENT_PTR(td_bad_spec_arl_h), 7071 EVENT_PTR(td_fe_bound_arl_h), 7072 EVENT_PTR(td_be_bound_arl_h), 7073 EVENT_PTR(td_heavy_ops_adl), 7074 EVENT_PTR(td_br_mis_adl), 7075 EVENT_PTR(td_fetch_lat_adl), 7076 EVENT_PTR(td_mem_bound_adl), 7077 NULL, 7078 }; 7079 7080 /* Must be in IDX order */ 7081 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small); 7082 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small); 7083 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big); 7084 7085 static struct attribute *adl_hybrid_mem_attrs[] = { 7086 EVENT_PTR(mem_ld_adl), 7087 EVENT_PTR(mem_st_adl), 7088 EVENT_PTR(mem_ld_aux_adl), 7089 NULL, 7090 }; 7091 7092 static struct attribute *mtl_hybrid_mem_attrs[] = { 7093 EVENT_PTR(mem_ld_adl), 7094 EVENT_PTR(mem_st_adl), 7095 NULL 7096 }; 7097 7098 EVENT_ATTR_STR_HYBRID(mem-loads, 7099 mem_ld_arl_h, 7100 "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3", 7101 hybrid_big_small_tiny); 7102 EVENT_ATTR_STR_HYBRID(mem-stores, 7103 mem_st_arl_h, 7104 "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6", 7105 hybrid_big_small_tiny); 7106 7107 static struct attribute *arl_h_hybrid_mem_attrs[] = { 7108 EVENT_PTR(mem_ld_arl_h), 7109 EVENT_PTR(mem_st_arl_h), 7110 NULL, 7111 }; 7112 7113 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big); 7114 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big); 7115 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big); 7116 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big); 7117 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big); 7118 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big); 7119 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big); 7120 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big); 7121 7122 static struct attribute *adl_hybrid_tsx_attrs[] = { 7123 EVENT_PTR(tx_start_adl), 7124 EVENT_PTR(tx_abort_adl), 7125 EVENT_PTR(tx_commit_adl), 
7126 EVENT_PTR(tx_capacity_read_adl), 7127 EVENT_PTR(tx_capacity_write_adl), 7128 EVENT_PTR(tx_conflict_adl), 7129 EVENT_PTR(cycles_t_adl), 7130 EVENT_PTR(cycles_ct_adl), 7131 NULL, 7132 }; 7133 7134 FORMAT_ATTR_HYBRID(in_tx, hybrid_big); 7135 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big); 7136 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny); 7137 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small_tiny); 7138 FORMAT_ATTR_HYBRID(frontend, hybrid_big); 7139 7140 #define ADL_HYBRID_RTM_FORMAT_ATTR \ 7141 FORMAT_HYBRID_PTR(in_tx), \ 7142 FORMAT_HYBRID_PTR(in_tx_cp) 7143 7144 #define ADL_HYBRID_FORMAT_ATTR \ 7145 FORMAT_HYBRID_PTR(offcore_rsp), \ 7146 FORMAT_HYBRID_PTR(ldlat), \ 7147 FORMAT_HYBRID_PTR(frontend) 7148 7149 static struct attribute *adl_hybrid_extra_attr_rtm[] = { 7150 ADL_HYBRID_RTM_FORMAT_ATTR, 7151 ADL_HYBRID_FORMAT_ATTR, 7152 NULL 7153 }; 7154 7155 static struct attribute *adl_hybrid_extra_attr[] = { 7156 ADL_HYBRID_FORMAT_ATTR, 7157 NULL 7158 }; 7159 7160 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small_tiny); 7161 7162 static struct attribute *mtl_hybrid_extra_attr_rtm[] = { 7163 ADL_HYBRID_RTM_FORMAT_ATTR, 7164 ADL_HYBRID_FORMAT_ATTR, 7165 FORMAT_HYBRID_PTR(snoop_rsp), 7166 NULL 7167 }; 7168 7169 static struct attribute *mtl_hybrid_extra_attr[] = { 7170 ADL_HYBRID_FORMAT_ATTR, 7171 FORMAT_HYBRID_PTR(snoop_rsp), 7172 NULL 7173 }; 7174 7175 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) 7176 { 7177 struct device *dev = kobj_to_dev(kobj); 7178 struct x86_hybrid_pmu *pmu = 7179 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 7180 struct perf_pmu_events_hybrid_attr *pmu_attr = 7181 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); 7182 7183 return pmu->pmu_type & pmu_attr->pmu_type; 7184 } 7185 7186 static umode_t hybrid_events_is_visible(struct kobject *kobj, 7187 struct attribute *attr, int i) 7188 { 7189 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0; 7190 } 7191 7192 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu) 7193 { 7194 int cpu = cpumask_first(&pmu->supported_cpus); 7195 7196 return (cpu >= nr_cpu_ids) ? -1 : cpu; 7197 } 7198 7199 static umode_t hybrid_tsx_is_visible(struct kobject *kobj, 7200 struct attribute *attr, int i) 7201 { 7202 struct device *dev = kobj_to_dev(kobj); 7203 struct x86_hybrid_pmu *pmu = 7204 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 7205 int cpu = hybrid_find_supported_cpu(pmu); 7206 7207 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0; 7208 } 7209 7210 static umode_t hybrid_format_is_visible(struct kobject *kobj, 7211 struct attribute *attr, int i) 7212 { 7213 struct device *dev = kobj_to_dev(kobj); 7214 struct x86_hybrid_pmu *pmu = 7215 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 7216 struct perf_pmu_format_hybrid_attr *pmu_attr = 7217 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); 7218 int cpu = hybrid_find_supported_cpu(pmu); 7219 7220 return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? 
attr->mode : 0; 7221 } 7222 7223 static umode_t hybrid_td_is_visible(struct kobject *kobj, 7224 struct attribute *attr, int i) 7225 { 7226 struct device *dev = kobj_to_dev(kobj); 7227 struct x86_hybrid_pmu *pmu = 7228 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 7229 7230 if (!is_attr_for_this_pmu(kobj, attr)) 7231 return 0; 7232 7233 7234 /* Only the big core supports perf metrics */ 7235 if (pmu->pmu_type == hybrid_big) 7236 return pmu->intel_cap.perf_metrics ? attr->mode : 0; 7237 7238 return attr->mode; 7239 } 7240 7241 static struct attribute_group hybrid_group_events_td = { 7242 .name = "events", 7243 .is_visible = hybrid_td_is_visible, 7244 }; 7245 7246 static struct attribute_group hybrid_group_events_mem = { 7247 .name = "events", 7248 .is_visible = hybrid_events_is_visible, 7249 }; 7250 7251 static struct attribute_group hybrid_group_events_tsx = { 7252 .name = "events", 7253 .is_visible = hybrid_tsx_is_visible, 7254 }; 7255 7256 static struct attribute_group hybrid_group_format_extra = { 7257 .name = "format", 7258 .is_visible = hybrid_format_is_visible, 7259 }; 7260 7261 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev, 7262 struct device_attribute *attr, 7263 char *buf) 7264 { 7265 struct x86_hybrid_pmu *pmu = 7266 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 7267 7268 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus); 7269 } 7270 7271 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL); 7272 static struct attribute *intel_hybrid_cpus_attrs[] = { 7273 &dev_attr_cpus.attr, 7274 NULL, 7275 }; 7276 7277 static struct attribute_group hybrid_group_cpus = { 7278 .attrs = intel_hybrid_cpus_attrs, 7279 }; 7280 7281 static const struct attribute_group *hybrid_attr_update[] = { 7282 &hybrid_group_events_td, 7283 &hybrid_group_events_mem, 7284 &hybrid_group_events_tsx, 7285 &group_caps_gen, 7286 &group_caps_lbr, 7287 &hybrid_group_format_extra, 7288 &group_format_evtsel_ext, 7289 &group_format_acr, 7290 &group_default, 7291 &hybrid_group_cpus, 7292 NULL, 7293 }; 7294 7295 static struct attribute *empty_attrs; 7296 7297 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, 7298 u64 cntr_mask, 7299 u64 fixed_cntr_mask, 7300 u64 intel_ctrl) 7301 { 7302 struct event_constraint *c; 7303 7304 if (!event_constraints) 7305 return; 7306 7307 /* 7308 * event on fixed counter2 (REF_CYCLES) only works on this 7309 * counter, so do not extend mask to generic counters 7310 */ 7311 for_each_event_constraint(c, event_constraints) { 7312 /* 7313 * Don't extend the topdown slots and metrics 7314 * events to the generic counters. 7315 */ 7316 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { 7317 /* 7318 * Disable topdown slots and metrics events, 7319 * if slots event is not in CPUID. 
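			 *
			 * Added summary (descriptive, not from the original
			 * comment) of the three cases handled by this loop:
			 * topdown constraints are zeroed unless the SLOTS
			 * fixed counter is enumerated in intel_ctrl;
			 * fixed-counter constraints are masked by intel_ctrl
			 * and, for non-pseudo encodings, widened with the
			 * generic counters; and every constraint is finally
			 * clipped to the enumerated counter masks, with
			 * c->weight recomputed from the resulting idxmsk64.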
7320 */ 7321 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl)) 7322 c->idxmsk64 = 0; 7323 c->weight = hweight64(c->idxmsk64); 7324 continue; 7325 } 7326 7327 if (c->cmask == FIXED_EVENT_FLAGS) { 7328 /* Disabled fixed counters which are not in CPUID */ 7329 c->idxmsk64 &= intel_ctrl; 7330 7331 /* 7332 * Don't extend the pseudo-encoding to the 7333 * generic counters 7334 */ 7335 if (!use_fixed_pseudo_encoding(c->code)) 7336 c->idxmsk64 |= cntr_mask; 7337 } 7338 c->idxmsk64 &= cntr_mask | (fixed_cntr_mask << INTEL_PMC_IDX_FIXED); 7339 c->weight = hweight64(c->idxmsk64); 7340 } 7341 } 7342 7343 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) 7344 { 7345 struct extra_reg *er; 7346 7347 /* 7348 * Access extra MSR may cause #GP under certain circumstances. 7349 * E.g. KVM doesn't support offcore event 7350 * Check all extra_regs here. 7351 */ 7352 if (!extra_regs) 7353 return; 7354 7355 for (er = extra_regs; er->msr; er++) { 7356 er->extra_msr_access = check_msr(er->msr, 0x11UL); 7357 /* Disable LBR select mapping */ 7358 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) 7359 x86_pmu.lbr_sel_map = NULL; 7360 } 7361 } 7362 7363 static inline int intel_pmu_v6_addr_offset(int index, bool eventsel) 7364 { 7365 return MSR_IA32_PMC_V6_STEP * index; 7366 } 7367 7368 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { 7369 { hybrid_small, "cpu_atom" }, 7370 { hybrid_big, "cpu_core" }, 7371 { hybrid_tiny, "cpu_lowpower" }, 7372 }; 7373 7374 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) 7375 { 7376 unsigned long pmus_mask = pmus; 7377 struct x86_hybrid_pmu *pmu; 7378 int idx = 0, bit; 7379 7380 x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); 7381 x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus, 7382 sizeof(struct x86_hybrid_pmu), 7383 GFP_KERNEL); 7384 if (!x86_pmu.hybrid_pmu) 7385 return -ENOMEM; 7386 7387 static_branch_enable(&perf_is_hybrid); 7388 x86_pmu.filter = intel_pmu_filter; 7389 7390 for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) { 7391 pmu = &x86_pmu.hybrid_pmu[idx++]; 7392 pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id; 7393 pmu->name = intel_hybrid_pmu_type_map[bit].name; 7394 7395 pmu->cntr_mask64 = x86_pmu.cntr_mask64; 7396 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; 7397 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); 7398 pmu->config_mask = X86_RAW_EVENT_MASK; 7399 pmu->unconstrained = (struct event_constraint) 7400 __EVENT_CONSTRAINT(0, pmu->cntr_mask64, 7401 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); 7402 7403 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; 7404 if (pmu->pmu_type & hybrid_small_tiny) { 7405 pmu->intel_cap.perf_metrics = 0; 7406 pmu->mid_ack = true; 7407 } else if (pmu->pmu_type & hybrid_big) { 7408 pmu->intel_cap.perf_metrics = 1; 7409 pmu->late_ack = true; 7410 } 7411 } 7412 7413 return 0; 7414 } 7415 7416 static __always_inline void intel_pmu_ref_cycles_ext(void) 7417 { 7418 if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED))) 7419 intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c; 7420 } 7421 7422 static __always_inline void intel_pmu_init_glc(struct pmu *pmu) 7423 { 7424 x86_pmu.late_ack = true; 7425 x86_pmu.limit_period = glc_limit_period; 7426 x86_pmu.pebs_aliases = NULL; 7427 x86_pmu.pebs_prec_dist = true; 7428 x86_pmu.pebs_block = true; 7429 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7430 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 7431 
x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 7432 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); 7433 x86_pmu.lbr_pt_coexist = true; 7434 x86_pmu.num_topdown_events = 8; 7435 static_call_update(intel_pmu_update_topdown_event, 7436 &icl_update_topdown_event); 7437 static_call_update(intel_pmu_set_topdown_event_period, 7438 &icl_set_topdown_event_period); 7439 7440 memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 7441 memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 7442 hybrid(pmu, event_constraints) = intel_glc_event_constraints; 7443 hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints; 7444 7445 intel_pmu_ref_cycles_ext(); 7446 } 7447 7448 static __always_inline void intel_pmu_init_grt(struct pmu *pmu) 7449 { 7450 x86_pmu.mid_ack = true; 7451 x86_pmu.limit_period = glc_limit_period; 7452 x86_pmu.pebs_aliases = NULL; 7453 x86_pmu.pebs_prec_dist = true; 7454 x86_pmu.pebs_block = true; 7455 x86_pmu.lbr_pt_coexist = true; 7456 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7457 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 7458 7459 memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 7460 memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 7461 hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 7462 hybrid(pmu, event_constraints) = intel_grt_event_constraints; 7463 hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints; 7464 hybrid(pmu, extra_regs) = intel_grt_extra_regs; 7465 7466 intel_pmu_ref_cycles_ext(); 7467 } 7468 7469 static __always_inline void intel_pmu_init_lnc(struct pmu *pmu) 7470 { 7471 intel_pmu_init_glc(pmu); 7472 hybrid(pmu, event_constraints) = intel_lnc_event_constraints; 7473 hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints; 7474 hybrid(pmu, extra_regs) = intel_lnc_extra_regs; 7475 } 7476 7477 static __always_inline void intel_pmu_init_pnc(struct pmu *pmu) 7478 { 7479 intel_pmu_init_glc(pmu); 7480 x86_pmu.flags &= ~PMU_FL_HAS_RSP_1; 7481 x86_pmu.flags |= PMU_FL_HAS_OMR; 7482 memcpy(hybrid_var(pmu, hw_cache_event_ids), 7483 pnc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 7484 memcpy(hybrid_var(pmu, hw_cache_extra_regs), 7485 pnc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 7486 hybrid(pmu, event_constraints) = intel_pnc_event_constraints; 7487 hybrid(pmu, pebs_constraints) = intel_pnc_pebs_event_constraints; 7488 hybrid(pmu, extra_regs) = intel_pnc_extra_regs; 7489 } 7490 7491 static __always_inline void intel_pmu_init_skt(struct pmu *pmu) 7492 { 7493 intel_pmu_init_grt(pmu); 7494 hybrid(pmu, event_constraints) = intel_skt_event_constraints; 7495 hybrid(pmu, extra_regs) = intel_cmt_extra_regs; 7496 static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr); 7497 } 7498 7499 static __always_inline void intel_pmu_init_arw(struct pmu *pmu) 7500 { 7501 intel_pmu_init_grt(pmu); 7502 x86_pmu.flags &= ~PMU_FL_HAS_RSP_1; 7503 x86_pmu.flags |= PMU_FL_HAS_OMR; 7504 memcpy(hybrid_var(pmu, hw_cache_extra_regs), 7505 arw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 7506 hybrid(pmu, event_constraints) = intel_arw_event_constraints; 7507 hybrid(pmu, pebs_constraints) = intel_arw_pebs_event_constraints; 7508 hybrid(pmu, extra_regs) = intel_arw_extra_regs; 7509 static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr); 7510 } 7511 7512 __init int intel_pmu_init(void) 7513 { 7514 struct attribute 
**extra_skl_attr = &empty_attrs; 7515 struct attribute **extra_attr = &empty_attrs; 7516 struct attribute **td_attr = &empty_attrs; 7517 struct attribute **mem_attr = &empty_attrs; 7518 struct attribute **tsx_attr = &empty_attrs; 7519 union cpuid10_edx edx; 7520 union cpuid10_eax eax; 7521 union cpuid10_ebx ebx; 7522 unsigned int fixed_mask; 7523 bool pmem = false; 7524 int version, i; 7525 char *name; 7526 struct x86_hybrid_pmu *pmu; 7527 7528 /* Architectural Perfmon was introduced starting with Core "Yonah" */ 7529 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 7530 switch (boot_cpu_data.x86) { 7531 case 6: 7532 if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH) 7533 return p6_pmu_init(); 7534 break; 7535 case 11: 7536 return knc_pmu_init(); 7537 case 15: 7538 return p4_pmu_init(); 7539 } 7540 7541 pr_cont("unsupported CPU family %d model %d ", 7542 boot_cpu_data.x86, boot_cpu_data.x86_model); 7543 return -ENODEV; 7544 } 7545 7546 /* 7547 * Check whether the Architectural PerfMon supports 7548 * Branch Misses Retired hw_event or not. 7549 */ 7550 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full); 7551 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) 7552 return -ENODEV; 7553 7554 version = eax.split.version_id; 7555 if (version < 2) 7556 x86_pmu = core_pmu; 7557 else 7558 x86_pmu = intel_pmu; 7559 7560 x86_pmu.version = version; 7561 x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0); 7562 x86_pmu.cntval_bits = eax.split.bit_width; 7563 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; 7564 7565 x86_pmu.events_maskl = ebx.full; 7566 x86_pmu.events_mask_len = eax.split.mask_length; 7567 7568 x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64); 7569 x86_pmu.pebs_capable = PEBS_COUNTER_MASK; 7570 x86_pmu.config_mask = X86_RAW_EVENT_MASK; 7571 7572 /* 7573 * Quirk: v2 perfmon does not report fixed-purpose events, so 7574 * assume at least 3 events, when not running in a hypervisor: 7575 */ 7576 if (version > 1 && version < 5) { 7577 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR); 7578 7579 x86_pmu.fixed_cntr_mask64 = 7580 GENMASK_ULL(max((int)edx.split.num_counters_fixed, assume) - 1, 0); 7581 } else if (version >= 5) 7582 x86_pmu.fixed_cntr_mask64 = fixed_mask; 7583 7584 if (boot_cpu_has(X86_FEATURE_PDCM)) { 7585 u64 capabilities; 7586 7587 rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities); 7588 x86_pmu.intel_cap.capabilities = capabilities; 7589 } 7590 7591 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) { 7592 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32; 7593 x86_pmu.lbr_read = intel_pmu_lbr_read_32; 7594 } 7595 7596 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) 7597 intel_pmu_arch_lbr_init(); 7598 7599 intel_pebs_init(); 7600 7601 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ 7602 7603 if (version >= 5) { 7604 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated; 7605 if (x86_pmu.intel_cap.anythread_deprecated) 7606 pr_cont(" AnyThread deprecated, "); 7607 } 7608 7609 /* The perf side of core PMU is ready to support the mediated vPMU. */ 7610 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_MEDIATED_VPMU; 7611 7612 /* 7613 * Many features on and after V6 require dynamic constraint, 7614 * e.g., Arch PEBS, ACR. 
7615 */ 7616 if (version >= 6) { 7617 x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT; 7618 x86_pmu.late_setup = intel_pmu_late_setup; 7619 } 7620 7621 /* 7622 * Install the hw-cache-events table: 7623 */ 7624 switch (boot_cpu_data.x86_vfm) { 7625 case INTEL_CORE_YONAH: 7626 pr_cont("Core events, "); 7627 name = "core"; 7628 break; 7629 7630 case INTEL_CORE2_MEROM: 7631 x86_add_quirk(intel_clovertown_quirk); 7632 fallthrough; 7633 7634 case INTEL_CORE2_MEROM_L: 7635 case INTEL_CORE2_PENRYN: 7636 case INTEL_CORE2_DUNNINGTON: 7637 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, 7638 sizeof(hw_cache_event_ids)); 7639 7640 intel_pmu_lbr_init_core(); 7641 7642 x86_pmu.event_constraints = intel_core2_event_constraints; 7643 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; 7644 pr_cont("Core2 events, "); 7645 name = "core2"; 7646 break; 7647 7648 case INTEL_NEHALEM: 7649 case INTEL_NEHALEM_EP: 7650 case INTEL_NEHALEM_EX: 7651 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, 7652 sizeof(hw_cache_event_ids)); 7653 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 7654 sizeof(hw_cache_extra_regs)); 7655 7656 intel_pmu_lbr_init_nhm(); 7657 7658 x86_pmu.event_constraints = intel_nehalem_event_constraints; 7659 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 7660 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 7661 x86_pmu.extra_regs = intel_nehalem_extra_regs; 7662 x86_pmu.limit_period = nhm_limit_period; 7663 7664 mem_attr = nhm_mem_events_attrs; 7665 7666 /* UOPS_ISSUED.STALLED_CYCLES */ 7667 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 7668 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 7669 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 7670 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 7671 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 7672 7673 intel_pmu_pebs_data_source_nhm(); 7674 x86_add_quirk(intel_nehalem_quirk); 7675 x86_pmu.pebs_no_tlb = 1; 7676 extra_attr = nhm_format_attr; 7677 7678 pr_cont("Nehalem events, "); 7679 name = "nehalem"; 7680 break; 7681 7682 case INTEL_ATOM_BONNELL: 7683 case INTEL_ATOM_BONNELL_MID: 7684 case INTEL_ATOM_SALTWELL: 7685 case INTEL_ATOM_SALTWELL_MID: 7686 case INTEL_ATOM_SALTWELL_TABLET: 7687 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 7688 sizeof(hw_cache_event_ids)); 7689 7690 intel_pmu_lbr_init_atom(); 7691 7692 x86_pmu.event_constraints = intel_gen_event_constraints; 7693 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; 7694 x86_pmu.pebs_aliases = intel_pebs_aliases_core2; 7695 pr_cont("Atom events, "); 7696 name = "bonnell"; 7697 break; 7698 7699 case INTEL_ATOM_SILVERMONT: 7700 case INTEL_ATOM_SILVERMONT_D: 7701 case INTEL_ATOM_SILVERMONT_MID: 7702 case INTEL_ATOM_AIRMONT: 7703 case INTEL_ATOM_AIRMONT_NP: 7704 case INTEL_ATOM_SILVERMONT_MID2: 7705 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 7706 sizeof(hw_cache_event_ids)); 7707 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 7708 sizeof(hw_cache_extra_regs)); 7709 7710 intel_pmu_lbr_init_slm(); 7711 7712 x86_pmu.event_constraints = intel_slm_event_constraints; 7713 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 7714 x86_pmu.extra_regs = intel_slm_extra_regs; 7715 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7716 td_attr = slm_events_attrs; 7717 extra_attr = slm_format_attr; 7718 pr_cont("Silvermont events, "); 7719 name = "silvermont"; 7720 break; 7721 7722 case INTEL_ATOM_GOLDMONT: 7723 case INTEL_ATOM_GOLDMONT_D: 7724 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, 
7725 sizeof(hw_cache_event_ids)); 7726 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, 7727 sizeof(hw_cache_extra_regs)); 7728 7729 intel_pmu_lbr_init_skl(); 7730 7731 x86_pmu.event_constraints = intel_slm_event_constraints; 7732 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints; 7733 x86_pmu.extra_regs = intel_glm_extra_regs; 7734 /* 7735 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 7736 * for precise cycles. 7737 * :pp is identical to :ppp 7738 */ 7739 x86_pmu.pebs_aliases = NULL; 7740 x86_pmu.pebs_prec_dist = true; 7741 x86_pmu.lbr_pt_coexist = true; 7742 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7743 td_attr = glm_events_attrs; 7744 extra_attr = slm_format_attr; 7745 pr_cont("Goldmont events, "); 7746 name = "goldmont"; 7747 break; 7748 7749 case INTEL_ATOM_GOLDMONT_PLUS: 7750 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 7751 sizeof(hw_cache_event_ids)); 7752 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, 7753 sizeof(hw_cache_extra_regs)); 7754 7755 intel_pmu_lbr_init_skl(); 7756 7757 x86_pmu.event_constraints = intel_slm_event_constraints; 7758 x86_pmu.extra_regs = intel_glm_extra_regs; 7759 /* 7760 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 7761 * for precise cycles. 7762 */ 7763 x86_pmu.pebs_aliases = NULL; 7764 x86_pmu.pebs_prec_dist = true; 7765 x86_pmu.lbr_pt_coexist = true; 7766 x86_pmu.pebs_capable = ~0ULL; 7767 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7768 x86_pmu.flags |= PMU_FL_PEBS_ALL; 7769 x86_pmu.get_event_constraints = glp_get_event_constraints; 7770 td_attr = glm_events_attrs; 7771 /* Goldmont Plus has 4-wide pipeline */ 7772 event_attr_td_total_slots_scale_glm.event_str = "4"; 7773 extra_attr = slm_format_attr; 7774 pr_cont("Goldmont plus events, "); 7775 name = "goldmont_plus"; 7776 break; 7777 7778 case INTEL_ATOM_TREMONT_D: 7779 case INTEL_ATOM_TREMONT: 7780 case INTEL_ATOM_TREMONT_L: 7781 x86_pmu.late_ack = true; 7782 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 7783 sizeof(hw_cache_event_ids)); 7784 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 7785 sizeof(hw_cache_extra_regs)); 7786 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 7787 7788 intel_pmu_lbr_init_skl(); 7789 7790 x86_pmu.event_constraints = intel_slm_event_constraints; 7791 x86_pmu.extra_regs = intel_tnt_extra_regs; 7792 /* 7793 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 7794 * for precise cycles. 
7795 */ 7796 x86_pmu.pebs_aliases = NULL; 7797 x86_pmu.pebs_prec_dist = true; 7798 x86_pmu.lbr_pt_coexist = true; 7799 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7800 x86_pmu.get_event_constraints = tnt_get_event_constraints; 7801 td_attr = tnt_events_attrs; 7802 extra_attr = slm_format_attr; 7803 pr_cont("Tremont events, "); 7804 name = "Tremont"; 7805 break; 7806 7807 case INTEL_ATOM_GRACEMONT: 7808 intel_pmu_init_grt(NULL); 7809 intel_pmu_pebs_data_source_grt(); 7810 x86_pmu.pebs_latency_data = grt_latency_data; 7811 x86_pmu.get_event_constraints = tnt_get_event_constraints; 7812 td_attr = tnt_events_attrs; 7813 mem_attr = grt_mem_attrs; 7814 extra_attr = nhm_format_attr; 7815 pr_cont("Gracemont events, "); 7816 name = "gracemont"; 7817 break; 7818 7819 case INTEL_ATOM_CRESTMONT: 7820 case INTEL_ATOM_CRESTMONT_X: 7821 intel_pmu_init_grt(NULL); 7822 x86_pmu.extra_regs = intel_cmt_extra_regs; 7823 intel_pmu_pebs_data_source_cmt(); 7824 x86_pmu.pebs_latency_data = cmt_latency_data; 7825 x86_pmu.get_event_constraints = cmt_get_event_constraints; 7826 td_attr = cmt_events_attrs; 7827 mem_attr = grt_mem_attrs; 7828 extra_attr = cmt_format_attr; 7829 pr_cont("Crestmont events, "); 7830 name = "crestmont"; 7831 break; 7832 7833 case INTEL_ATOM_DARKMONT_X: 7834 intel_pmu_init_skt(NULL); 7835 intel_pmu_pebs_data_source_cmt(); 7836 x86_pmu.pebs_latency_data = cmt_latency_data; 7837 x86_pmu.get_event_constraints = cmt_get_event_constraints; 7838 td_attr = skt_events_attrs; 7839 mem_attr = grt_mem_attrs; 7840 extra_attr = cmt_format_attr; 7841 pr_cont("Darkmont events, "); 7842 name = "darkmont"; 7843 break; 7844 7845 case INTEL_WESTMERE: 7846 case INTEL_WESTMERE_EP: 7847 case INTEL_WESTMERE_EX: 7848 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 7849 sizeof(hw_cache_event_ids)); 7850 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 7851 sizeof(hw_cache_extra_regs)); 7852 7853 intel_pmu_lbr_init_nhm(); 7854 7855 x86_pmu.event_constraints = intel_westmere_event_constraints; 7856 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 7857 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; 7858 x86_pmu.extra_regs = intel_westmere_extra_regs; 7859 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7860 7861 mem_attr = nhm_mem_events_attrs; 7862 7863 /* UOPS_ISSUED.STALLED_CYCLES */ 7864 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 7865 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 7866 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 7867 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 7868 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 7869 7870 intel_pmu_pebs_data_source_nhm(); 7871 extra_attr = nhm_format_attr; 7872 pr_cont("Westmere events, "); 7873 name = "westmere"; 7874 break; 7875 7876 case INTEL_SANDYBRIDGE: 7877 case INTEL_SANDYBRIDGE_X: 7878 x86_add_quirk(intel_sandybridge_quirk); 7879 x86_add_quirk(intel_ht_bug); 7880 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 7881 sizeof(hw_cache_event_ids)); 7882 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 7883 sizeof(hw_cache_extra_regs)); 7884 7885 intel_pmu_lbr_init_snb(); 7886 7887 x86_pmu.event_constraints = intel_snb_event_constraints; 7888 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 7889 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 7890 if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X) 7891 x86_pmu.extra_regs = intel_snbep_extra_regs; 7892 else 7893 x86_pmu.extra_regs = intel_snb_extra_regs; 7894 7895 7896 /* all extra regs are per-cpu when HT is on */ 7897 
x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7898 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 7899 7900 td_attr = snb_events_attrs; 7901 mem_attr = snb_mem_events_attrs; 7902 7903 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 7904 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 7905 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 7906 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ 7907 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 7908 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); 7909 7910 extra_attr = nhm_format_attr; 7911 7912 pr_cont("SandyBridge events, "); 7913 name = "sandybridge"; 7914 break; 7915 7916 case INTEL_IVYBRIDGE: 7917 case INTEL_IVYBRIDGE_X: 7918 x86_add_quirk(intel_ht_bug); 7919 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 7920 sizeof(hw_cache_event_ids)); 7921 /* dTLB-load-misses on IVB is different than SNB */ 7922 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ 7923 7924 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 7925 sizeof(hw_cache_extra_regs)); 7926 7927 intel_pmu_lbr_init_snb(); 7928 7929 x86_pmu.event_constraints = intel_ivb_event_constraints; 7930 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 7931 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 7932 x86_pmu.pebs_prec_dist = true; 7933 if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X) 7934 x86_pmu.extra_regs = intel_snbep_extra_regs; 7935 else 7936 x86_pmu.extra_regs = intel_snb_extra_regs; 7937 /* all extra regs are per-cpu when HT is on */ 7938 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7939 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 7940 7941 td_attr = snb_events_attrs; 7942 mem_attr = snb_mem_events_attrs; 7943 7944 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 7945 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 7946 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 7947 7948 extra_attr = nhm_format_attr; 7949 7950 pr_cont("IvyBridge events, "); 7951 name = "ivybridge"; 7952 break; 7953 7954 7955 case INTEL_HASWELL: 7956 case INTEL_HASWELL_X: 7957 case INTEL_HASWELL_L: 7958 case INTEL_HASWELL_G: 7959 x86_add_quirk(intel_ht_bug); 7960 x86_add_quirk(intel_pebs_isolation_quirk); 7961 x86_pmu.late_ack = true; 7962 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 7963 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 7964 7965 intel_pmu_lbr_init_hsw(); 7966 7967 x86_pmu.event_constraints = intel_hsw_event_constraints; 7968 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; 7969 x86_pmu.extra_regs = intel_snbep_extra_regs; 7970 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 7971 x86_pmu.pebs_prec_dist = true; 7972 /* all extra regs are per-cpu when HT is on */ 7973 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 7974 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 7975 7976 x86_pmu.hw_config = hsw_hw_config; 7977 x86_pmu.get_event_constraints = hsw_get_event_constraints; 7978 x86_pmu.limit_period = hsw_limit_period; 7979 x86_pmu.lbr_double_abort = true; 7980 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
7981 hsw_format_attr : nhm_format_attr; 7982 td_attr = hsw_events_attrs; 7983 mem_attr = hsw_mem_events_attrs; 7984 tsx_attr = hsw_tsx_events_attrs; 7985 pr_cont("Haswell events, "); 7986 name = "haswell"; 7987 break; 7988 7989 case INTEL_BROADWELL: 7990 case INTEL_BROADWELL_D: 7991 case INTEL_BROADWELL_G: 7992 case INTEL_BROADWELL_X: 7993 x86_add_quirk(intel_pebs_isolation_quirk); 7994 x86_pmu.late_ack = true; 7995 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 7996 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 7997 7998 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ 7999 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | 8000 BDW_L3_MISS|HSW_SNOOP_DRAM; 8001 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| 8002 HSW_SNOOP_DRAM; 8003 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| 8004 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 8005 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| 8006 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 8007 8008 intel_pmu_lbr_init_hsw(); 8009 8010 x86_pmu.event_constraints = intel_bdw_event_constraints; 8011 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints; 8012 x86_pmu.extra_regs = intel_snbep_extra_regs; 8013 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 8014 x86_pmu.pebs_prec_dist = true; 8015 /* all extra regs are per-cpu when HT is on */ 8016 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 8017 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 8018 8019 x86_pmu.hw_config = hsw_hw_config; 8020 x86_pmu.get_event_constraints = hsw_get_event_constraints; 8021 x86_pmu.limit_period = bdw_limit_period; 8022 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8023 hsw_format_attr : nhm_format_attr; 8024 td_attr = hsw_events_attrs; 8025 mem_attr = hsw_mem_events_attrs; 8026 tsx_attr = hsw_tsx_events_attrs; 8027 pr_cont("Broadwell events, "); 8028 name = "broadwell"; 8029 break; 8030 8031 case INTEL_XEON_PHI_KNL: 8032 case INTEL_XEON_PHI_KNM: 8033 memcpy(hw_cache_event_ids, 8034 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 8035 memcpy(hw_cache_extra_regs, 8036 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 8037 intel_pmu_lbr_init_knl(); 8038 8039 x86_pmu.event_constraints = intel_slm_event_constraints; 8040 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 8041 x86_pmu.extra_regs = intel_knl_extra_regs; 8042 8043 /* all extra regs are per-cpu when HT is on */ 8044 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 8045 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 8046 extra_attr = slm_format_attr; 8047 pr_cont("Knights Landing/Mill events, "); 8048 name = "knights-landing"; 8049 break; 8050 8051 case INTEL_SKYLAKE_X: 8052 pmem = true; 8053 fallthrough; 8054 case INTEL_SKYLAKE_L: 8055 case INTEL_SKYLAKE: 8056 case INTEL_KABYLAKE_L: 8057 case INTEL_KABYLAKE: 8058 case INTEL_COMETLAKE_L: 8059 case INTEL_COMETLAKE: 8060 x86_add_quirk(intel_pebs_isolation_quirk); 8061 x86_pmu.late_ack = true; 8062 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 8063 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 8064 intel_pmu_lbr_init_skl(); 8065 8066 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */ 8067 event_attr_td_recovery_bubbles.event_str_noht = 8068 "event=0xd,umask=0x1,cmask=1"; 8069 event_attr_td_recovery_bubbles.event_str_ht = 8070 "event=0xd,umask=0x1,cmask=1,any=1"; 8071 8072 x86_pmu.event_constraints = intel_skl_event_constraints; 8073 
x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; 8074 x86_pmu.extra_regs = intel_skl_extra_regs; 8075 x86_pmu.pebs_aliases = intel_pebs_aliases_skl; 8076 x86_pmu.pebs_prec_dist = true; 8077 /* all extra regs are per-cpu when HT is on */ 8078 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 8079 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 8080 8081 x86_pmu.hw_config = hsw_hw_config; 8082 x86_pmu.get_event_constraints = hsw_get_event_constraints; 8083 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8084 hsw_format_attr : nhm_format_attr; 8085 extra_skl_attr = skl_format_attr; 8086 td_attr = hsw_events_attrs; 8087 mem_attr = hsw_mem_events_attrs; 8088 tsx_attr = hsw_tsx_events_attrs; 8089 intel_pmu_pebs_data_source_skl(pmem); 8090 8091 /* 8092 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default. 8093 * TSX force abort hooks are not required on these systems. Only deploy 8094 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT. 8095 */ 8096 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) && 8097 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) { 8098 x86_pmu.flags |= PMU_FL_TFA; 8099 x86_pmu.get_event_constraints = tfa_get_event_constraints; 8100 x86_pmu.enable_all = intel_tfa_pmu_enable_all; 8101 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; 8102 } 8103 8104 pr_cont("Skylake events, "); 8105 name = "skylake"; 8106 break; 8107 8108 case INTEL_ICELAKE_X: 8109 case INTEL_ICELAKE_D: 8110 x86_pmu.pebs_ept = 1; 8111 pmem = true; 8112 fallthrough; 8113 case INTEL_ICELAKE_L: 8114 case INTEL_ICELAKE: 8115 case INTEL_TIGERLAKE_L: 8116 case INTEL_TIGERLAKE: 8117 case INTEL_ROCKETLAKE: 8118 x86_pmu.late_ack = true; 8119 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 8120 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 8121 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 8122 intel_pmu_lbr_init_skl(); 8123 8124 x86_pmu.event_constraints = intel_icl_event_constraints; 8125 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints; 8126 x86_pmu.extra_regs = intel_icl_extra_regs; 8127 x86_pmu.pebs_aliases = NULL; 8128 x86_pmu.pebs_prec_dist = true; 8129 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 8130 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 8131 8132 x86_pmu.hw_config = hsw_hw_config; 8133 x86_pmu.get_event_constraints = icl_get_event_constraints; 8134 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
8135 hsw_format_attr : nhm_format_attr; 8136 extra_skl_attr = skl_format_attr; 8137 mem_attr = icl_events_attrs; 8138 td_attr = icl_td_events_attrs; 8139 tsx_attr = icl_tsx_events_attrs; 8140 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); 8141 x86_pmu.lbr_pt_coexist = true; 8142 intel_pmu_pebs_data_source_skl(pmem); 8143 x86_pmu.num_topdown_events = 4; 8144 static_call_update(intel_pmu_update_topdown_event, 8145 &icl_update_topdown_event); 8146 static_call_update(intel_pmu_set_topdown_event_period, 8147 &icl_set_topdown_event_period); 8148 pr_cont("Icelake events, "); 8149 name = "icelake"; 8150 break; 8151 8152 case INTEL_SAPPHIRERAPIDS_X: 8153 case INTEL_EMERALDRAPIDS_X: 8154 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 8155 x86_pmu.extra_regs = intel_glc_extra_regs; 8156 pr_cont("Sapphire Rapids events, "); 8157 name = "sapphire_rapids"; 8158 goto glc_common; 8159 8160 case INTEL_GRANITERAPIDS_X: 8161 case INTEL_GRANITERAPIDS_D: 8162 x86_pmu.extra_regs = intel_rwc_extra_regs; 8163 pr_cont("Granite Rapids events, "); 8164 name = "granite_rapids"; 8165 goto glc_common; 8166 8167 case INTEL_DIAMONDRAPIDS_X: 8168 intel_pmu_init_pnc(NULL); 8169 x86_pmu.pebs_latency_data = pnc_latency_data; 8170 8171 pr_cont("Panthercove events, "); 8172 name = "panthercove"; 8173 goto glc_base; 8174 8175 glc_common: 8176 intel_pmu_init_glc(NULL); 8177 intel_pmu_pebs_data_source_skl(true); 8178 8179 glc_base: 8180 x86_pmu.pebs_ept = 1; 8181 x86_pmu.hw_config = hsw_hw_config; 8182 x86_pmu.get_event_constraints = glc_get_event_constraints; 8183 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8184 hsw_format_attr : nhm_format_attr; 8185 extra_skl_attr = skl_format_attr; 8186 mem_attr = glc_events_attrs; 8187 td_attr = glc_td_events_attrs; 8188 tsx_attr = glc_tsx_events_attrs; 8189 break; 8190 8191 case INTEL_ALDERLAKE: 8192 case INTEL_ALDERLAKE_L: 8193 case INTEL_RAPTORLAKE: 8194 case INTEL_RAPTORLAKE_P: 8195 case INTEL_RAPTORLAKE_S: 8196 /* 8197 * Alder Lake has 2 types of CPU, core and atom. 8198 * 8199 * Initialize the common PerfMon capabilities here. 8200 */ 8201 intel_pmu_init_hybrid(hybrid_big_small); 8202 8203 x86_pmu.pebs_latency_data = grt_latency_data; 8204 x86_pmu.get_event_constraints = adl_get_event_constraints; 8205 x86_pmu.hw_config = adl_hw_config; 8206 x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; 8207 8208 td_attr = adl_hybrid_events_attrs; 8209 mem_attr = adl_hybrid_mem_attrs; 8210 tsx_attr = adl_hybrid_tsx_attrs; 8211 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8212 adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr; 8213 8214 /* Initialize big core specific PerfMon capabilities.*/ 8215 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; 8216 intel_pmu_init_glc(&pmu->pmu); 8217 if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { 8218 pmu->cntr_mask64 <<= 2; 8219 pmu->cntr_mask64 |= 0x3; 8220 pmu->fixed_cntr_mask64 <<= 1; 8221 pmu->fixed_cntr_mask64 |= 0x1; 8222 } else { 8223 pmu->cntr_mask64 = x86_pmu.cntr_mask64; 8224 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; 8225 } 8226 8227 /* 8228 * Quirk: For some Alder Lake machine, when all E-cores are disabled in 8229 * a BIOS, the leaf 0xA will enumerate all counters of P-cores. However, 8230 * the X86_FEATURE_HYBRID_CPU is still set. The above codes will 8231 * mistakenly add extra counters for P-cores. Correct the number of 8232 * counters here. 
8233 */ 8234 if ((x86_pmu_num_counters(&pmu->pmu) > 8) || (x86_pmu_num_counters_fixed(&pmu->pmu) > 4)) { 8235 pmu->cntr_mask64 = x86_pmu.cntr_mask64; 8236 pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64; 8237 } 8238 8239 pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64); 8240 pmu->unconstrained = (struct event_constraint) 8241 __EVENT_CONSTRAINT(0, pmu->cntr_mask64, 8242 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); 8243 8244 pmu->extra_regs = intel_glc_extra_regs; 8245 8246 /* Initialize Atom core specific PerfMon capabilities.*/ 8247 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 8248 intel_pmu_init_grt(&pmu->pmu); 8249 8250 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 8251 intel_pmu_pebs_data_source_adl(); 8252 pr_cont("Alderlake Hybrid events, "); 8253 name = "alderlake_hybrid"; 8254 break; 8255 8256 case INTEL_METEORLAKE: 8257 case INTEL_METEORLAKE_L: 8258 case INTEL_ARROWLAKE_U: 8259 intel_pmu_init_hybrid(hybrid_big_small); 8260 8261 x86_pmu.pebs_latency_data = cmt_latency_data; 8262 x86_pmu.get_event_constraints = mtl_get_event_constraints; 8263 x86_pmu.hw_config = adl_hw_config; 8264 8265 td_attr = adl_hybrid_events_attrs; 8266 mem_attr = mtl_hybrid_mem_attrs; 8267 tsx_attr = adl_hybrid_tsx_attrs; 8268 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8269 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; 8270 8271 /* Initialize big core specific PerfMon capabilities.*/ 8272 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; 8273 intel_pmu_init_glc(&pmu->pmu); 8274 pmu->extra_regs = intel_rwc_extra_regs; 8275 8276 /* Initialize Atom core specific PerfMon capabilities.*/ 8277 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 8278 intel_pmu_init_grt(&pmu->pmu); 8279 pmu->extra_regs = intel_cmt_extra_regs; 8280 8281 intel_pmu_pebs_data_source_mtl(); 8282 pr_cont("Meteorlake Hybrid events, "); 8283 name = "meteorlake_hybrid"; 8284 break; 8285 8286 case INTEL_PANTHERLAKE_L: 8287 case INTEL_WILDCATLAKE_L: 8288 pr_cont("Pantherlake Hybrid events, "); 8289 name = "pantherlake_hybrid"; 8290 goto lnl_common; 8291 8292 case INTEL_LUNARLAKE_M: 8293 case INTEL_ARROWLAKE: 8294 pr_cont("Lunarlake Hybrid events, "); 8295 name = "lunarlake_hybrid"; 8296 8297 lnl_common: 8298 intel_pmu_init_hybrid(hybrid_big_small); 8299 8300 x86_pmu.pebs_latency_data = lnl_latency_data; 8301 x86_pmu.get_event_constraints = mtl_get_event_constraints; 8302 x86_pmu.hw_config = adl_hw_config; 8303 8304 td_attr = lnl_hybrid_events_attrs; 8305 mem_attr = mtl_hybrid_mem_attrs; 8306 tsx_attr = adl_hybrid_tsx_attrs; 8307 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8308 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; 8309 8310 /* Initialize big core specific PerfMon capabilities.*/ 8311 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; 8312 intel_pmu_init_lnc(&pmu->pmu); 8313 8314 /* Initialize Atom core specific PerfMon capabilities.*/ 8315 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 8316 intel_pmu_init_skt(&pmu->pmu); 8317 8318 intel_pmu_pebs_data_source_lnl(); 8319 break; 8320 8321 case INTEL_ARROWLAKE_H: 8322 intel_pmu_init_hybrid(hybrid_big_small_tiny); 8323 8324 x86_pmu.pebs_latency_data = arl_h_latency_data; 8325 x86_pmu.get_event_constraints = arl_h_get_event_constraints; 8326 x86_pmu.hw_config = arl_h_hw_config; 8327 8328 td_attr = arl_h_hybrid_events_attrs; 8329 mem_attr = arl_h_hybrid_mem_attrs; 8330 tsx_attr = adl_hybrid_tsx_attrs; 8331 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
8332 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; 8333 8334 /* Initialize big core specific PerfMon capabilities. */ 8335 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; 8336 intel_pmu_init_lnc(&pmu->pmu); 8337 8338 /* Initialize Atom core specific PerfMon capabilities. */ 8339 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 8340 intel_pmu_init_skt(&pmu->pmu); 8341 8342 /* Initialize Lower Power Atom specific PerfMon capabilities. */ 8343 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX]; 8344 intel_pmu_init_grt(&pmu->pmu); 8345 pmu->extra_regs = intel_cmt_extra_regs; 8346 8347 intel_pmu_pebs_data_source_arl_h(); 8348 pr_cont("ArrowLake-H Hybrid events, "); 8349 name = "arrowlake_h_hybrid"; 8350 break; 8351 8352 case INTEL_NOVALAKE: 8353 case INTEL_NOVALAKE_L: 8354 pr_cont("Novalake Hybrid events, "); 8355 name = "novalake_hybrid"; 8356 intel_pmu_init_hybrid(hybrid_big_small); 8357 8358 x86_pmu.pebs_latency_data = nvl_latency_data; 8359 x86_pmu.get_event_constraints = mtl_get_event_constraints; 8360 x86_pmu.hw_config = adl_hw_config; 8361 8362 td_attr = lnl_hybrid_events_attrs; 8363 mem_attr = mtl_hybrid_mem_attrs; 8364 tsx_attr = adl_hybrid_tsx_attrs; 8365 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 8366 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; 8367 8368 /* Initialize big core specific PerfMon capabilities.*/ 8369 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; 8370 intel_pmu_init_pnc(&pmu->pmu); 8371 8372 /* Initialize Atom core specific PerfMon capabilities.*/ 8373 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 8374 intel_pmu_init_arw(&pmu->pmu); 8375 8376 intel_pmu_pebs_data_source_lnl(); 8377 break; 8378 8379 default: 8380 switch (x86_pmu.version) { 8381 case 1: 8382 x86_pmu.event_constraints = intel_v1_event_constraints; 8383 pr_cont("generic architected perfmon v1, "); 8384 name = "generic_arch_v1"; 8385 break; 8386 case 2: 8387 case 3: 8388 case 4: 8389 /* 8390 * default constraints for v2 and up 8391 */ 8392 x86_pmu.event_constraints = intel_gen_event_constraints; 8393 pr_cont("generic architected perfmon, "); 8394 name = "generic_arch_v2+"; 8395 break; 8396 default: 8397 /* 8398 * The default constraints for v5 and up can support up to 8399 * 16 fixed counters. For the fixed counters 4 and later, 8400 * the pseudo-encoding is applied. 8401 * The constraints may be cut according to the CPUID enumeration 8402 * by inserting the EVENT_CONSTRAINT_END. 
8403 */ 8404 if (fls64(x86_pmu.fixed_cntr_mask64) > INTEL_PMC_MAX_FIXED) 8405 x86_pmu.fixed_cntr_mask64 &= GENMASK_ULL(INTEL_PMC_MAX_FIXED - 1, 0); 8406 intel_v5_gen_event_constraints[fls64(x86_pmu.fixed_cntr_mask64)].weight = -1; 8407 x86_pmu.event_constraints = intel_v5_gen_event_constraints; 8408 pr_cont("generic architected perfmon, "); 8409 name = "generic_arch_v5+"; 8410 break; 8411 } 8412 } 8413 8414 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); 8415 8416 if (!is_hybrid()) { 8417 group_events_td.attrs = td_attr; 8418 group_events_mem.attrs = mem_attr; 8419 group_events_tsx.attrs = tsx_attr; 8420 group_format_extra.attrs = extra_attr; 8421 group_format_extra_skl.attrs = extra_skl_attr; 8422 8423 x86_pmu.attr_update = attr_update; 8424 } else { 8425 hybrid_group_events_td.attrs = td_attr; 8426 hybrid_group_events_mem.attrs = mem_attr; 8427 hybrid_group_events_tsx.attrs = tsx_attr; 8428 hybrid_group_format_extra.attrs = extra_attr; 8429 8430 x86_pmu.attr_update = hybrid_attr_update; 8431 } 8432 8433 /* 8434 * The archPerfmonExt (0x23) includes an enhanced enumeration of 8435 * PMU architectural features with a per-core view. For non-hybrid, 8436 * each core has the same PMU capabilities. It's good enough to 8437 * update the x86_pmu from the booting CPU. For hybrid, the x86_pmu 8438 * is used to keep the common capabilities. Still keep the values 8439 * from the leaf 0xa. The core specific update will be done later 8440 * when a new type is online. 8441 */ 8442 if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) 8443 update_pmu_cap(NULL); 8444 8445 if (x86_pmu.arch_pebs) { 8446 static_call_update(intel_pmu_disable_event_ext, 8447 intel_pmu_disable_event_ext); 8448 static_call_update(intel_pmu_enable_event_ext, 8449 intel_pmu_enable_event_ext); 8450 pr_cont("Architectural PEBS, "); 8451 } 8452 8453 intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64, 8454 &x86_pmu.fixed_cntr_mask64, 8455 &x86_pmu.intel_ctrl); 8456 8457 /* AnyThread may be deprecated on arch perfmon v5 or later */ 8458 if (x86_pmu.intel_cap.anythread_deprecated) 8459 x86_pmu.format_attrs = intel_arch_formats_attr; 8460 8461 intel_pmu_check_event_constraints_all(NULL); 8462 8463 /* 8464 * Access LBR MSR may cause #GP under certain circumstances. 8465 * Check all LBR MSR here. 8466 * Disable LBR access if any LBR MSRs can not be accessed. 
8467 */ 8468 if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL)) 8469 x86_pmu.lbr_nr = 0; 8470 for (i = 0; i < x86_pmu.lbr_nr; i++) { 8471 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && 8472 check_msr(x86_pmu.lbr_to + i, 0xffffUL))) 8473 x86_pmu.lbr_nr = 0; 8474 } 8475 8476 if (x86_pmu.lbr_nr) { 8477 intel_pmu_lbr_init(); 8478 8479 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); 8480 8481 /* only support branch_stack snapshot for perfmon >= v2 */ 8482 if (x86_pmu.disable_all == intel_pmu_disable_all) { 8483 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) { 8484 static_call_update(perf_snapshot_branch_stack, 8485 intel_pmu_snapshot_arch_branch_stack); 8486 } else { 8487 static_call_update(perf_snapshot_branch_stack, 8488 intel_pmu_snapshot_branch_stack); 8489 } 8490 } 8491 } 8492 8493 intel_pmu_check_extra_regs(x86_pmu.extra_regs); 8494 8495 /* Support full width counters using alternative MSR range */ 8496 if (x86_pmu.intel_cap.full_width_write) { 8497 x86_pmu.max_period = x86_pmu.cntval_mask >> 1; 8498 x86_pmu.perfctr = MSR_IA32_PMC0; 8499 pr_cont("full-width counters, "); 8500 } 8501 8502 /* Support V6+ MSR Aliasing */ 8503 if (x86_pmu.version >= 6) { 8504 x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR; 8505 x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A; 8506 x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR; 8507 x86_pmu.addr_offset = intel_pmu_v6_addr_offset; 8508 } 8509 8510 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) 8511 x86_pmu.intel_ctrl |= GLOBAL_CTRL_EN_PERF_METRICS; 8512 8513 if (x86_pmu.intel_cap.pebs_timing_info) 8514 x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; 8515 8516 intel_aux_output_init(); 8517 8518 return 0; 8519 } 8520 8521 /* 8522 * HT bug: phase 2 init 8523 * Called once we have valid topology information to check 8524 * whether or not HT is enabled 8525 * If HT is off, then we disable the workaround 8526 */ 8527 static __init int fixup_ht_bug(void) 8528 { 8529 int c; 8530 /* 8531 * problem not present on this CPU model, nothing to do 8532 */ 8533 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED)) 8534 return 0; 8535 8536 if (topology_max_smt_threads() > 1) { 8537 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n"); 8538 return 0; 8539 } 8540 8541 cpus_read_lock(); 8542 8543 hardlockup_detector_perf_stop(); 8544 8545 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); 8546 8547 x86_pmu.start_scheduling = NULL; 8548 x86_pmu.commit_scheduling = NULL; 8549 x86_pmu.stop_scheduling = NULL; 8550 8551 hardlockup_detector_perf_restart(); 8552 8553 for_each_online_cpu(c) 8554 free_excl_cntrs(&per_cpu(cpu_hw_events, c)); 8555 8556 cpus_read_unlock(); 8557 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); 8558 return 0; 8559 } 8560 subsys_initcall(fixup_ht_bug) 8561