// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
// CMN-600 Coherent Mesh Network PMU driver

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Common register stuff */
#define CMN_NODE_INFO			0x0000
#define CMN_NI_NODE_TYPE		GENMASK_ULL(15, 0)
#define CMN_NI_NODE_ID			GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID		GENMASK_ULL(47, 32)

#define CMN_NODEID_DEVID(reg)		((reg) & 3)
#define CMN_NODEID_EXT_DEVID(reg)	((reg) & 1)
#define CMN_NODEID_PID(reg)		(((reg) >> 2) & 1)
#define CMN_NODEID_EXT_PID(reg)		(((reg) >> 1) & 3)
#define CMN_NODEID_1x1_PID(reg)		(((reg) >> 2) & 7)
#define CMN_NODEID_X(reg, bits)		((reg) >> (3 + (bits)))
#define CMN_NODEID_Y(reg, bits)		(((reg) >> 3) & ((1U << (bits)) - 1))

#define CMN_CHILD_INFO			0x0080
#define CMN_CI_CHILD_COUNT		GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET		GENMASK_ULL(31, 16)

#define CMN_CHILD_NODE_ADDR		GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL		BIT(31)

#define CMN_MAX_DIMENSION		12
#define CMN_MAX_XPS			(CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS			(CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)

/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01		0x0008
#define CMN_CFGM_PID0_PART_0		GENMASK_ULL(7, 0)
#define CMN_CFGM_PID1_PART_1		GENMASK_ULL(35, 32)
#define CMN_CFGM_PERIPH_ID_23		0x0010
#define CMN_CFGM_PID2_REVISION		GENMASK_ULL(7, 4)

#define CMN_CFGM_INFO_GLOBAL		0x900
#define CMN_INFO_MULTIPLE_DTM_EN	BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM		GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM		GENMASK_ULL(51, 50)

#define CMN_CFGM_INFO_GLOBAL_1		0x908
#define CMN_INFO_SNP_VC_NUM		GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM		GENMASK_ULL(1, 0)

/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p)	(0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE	GENMASK_ULL(4, 0)

#define CMN_MAX_PORTS			6
#define CI700_CONNECT_INFO_P2_5_OFFSET	0x10

/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET			0x2000

/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL		0x000
#define CMN__PMU_CBUSY_SNTHROTTLE_SEL	GENMASK_ULL(44, 42)
#define CMN__PMU_SN_HOME_SEL		GENMASK_ULL(40, 39)
#define CMN__PMU_HBT_LBT_SEL		GENMASK_ULL(38, 37)
#define CMN__PMU_CLASS_OCCUP_ID		GENMASK_ULL(36, 35)
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID		GENMASK_ULL(34, 32)
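
/*
 * A worked example of the layout above (illustrative values, not from
 * any particular implementation): an HN-F counting event 0x01 on DTM
 * counter 0 and event 0x03 on counter 1, with an occup1_id filter of 2,
 * would have its CMN_PMU_EVENT_SEL register programmed to
 * (2ULL << 32) | (0x03 << 8) | 0x01 = 0x200000301.
 */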
/* HN-Ps are weird... */
#define CMN_HNP_PMU_EVENT_SEL		0x008

/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n)			(0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n)		(CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM	GENMASK_ULL(20, 19)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2	GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE	BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE	BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE	BIT(6)
#define CMN600_WPn_CONFIG_WP_EXCLUSIVE	BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP	GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL	GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL	BIT(0)
#define CMN_DTM_WPn_VAL(n)		(CMN_DTM_WPn(n) + 0x08)
#define CMN_DTM_WPn_MASK(n)		(CMN_DTM_WPn(n) + 0x10)

#define CMN_DTM_PMU_CONFIG		0x210
#define CMN__PMEVCNT0_INPUT_SEL		GENMASK_ULL(37, 32)
#define CMN__PMEVCNT0_INPUT_SEL_WP	0x00
#define CMN__PMEVCNT0_INPUT_SEL_XP	0x04
#define CMN__PMEVCNT0_INPUT_SEL_DEV	0x10
#define CMN__PMEVCNT0_GLOBAL_NUM	GENMASK_ULL(18, 16)
#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n)	((n) * 4)
#define CMN__PMEVCNT_PAIRED(n)		BIT(4 + (n))
#define CMN__PMEVCNT23_COMBINED		BIT(2)
#define CMN__PMEVCNT01_COMBINED		BIT(1)
#define CMN_DTM_PMU_CONFIG_PMU_EN	BIT(0)

#define CMN_DTM_PMEVCNT			0x220

#define CMN_DTM_PMEVCNTSR		0x240

#define CMN650_DTM_UNIT_INFO		0x0910
#define CMN_DTM_UNIT_INFO		0x0960
#define CMN_DTM_UNIT_INFO_DTC_DOMAIN	GENMASK_ULL(1, 0)

#define CMN_DTM_NUM_COUNTERS		4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
#define CMN_DTM_OFFSET(n)		((n) * 0x200)

/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL			0x0a00
#define CMN_DT_DTC_CTL_DT_EN		BIT(0)

/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n)		((((n) / 2) * 4 + (n) % 2) * 4)
#define CMN_DT_PMEVCNT(n)		(CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTR			(CMN_PMU_OFFSET + 0x40)

#define CMN_DT_PMEVCNTSR(n)		(CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTRSR		(CMN_PMU_OFFSET + 0x90)

#define CMN_DT_PMCR			(CMN_PMU_OFFSET + 0x100)
#define CMN_DT_PMCR_PMU_EN		BIT(0)
#define CMN_DT_PMCR_CNTR_RST		BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN	BIT(6)

#define CMN_DT_PMOVSR			(CMN_PMU_OFFSET + 0x118)
#define CMN_DT_PMOVSR_CLR		(CMN_PMU_OFFSET + 0x120)

#define CMN_DT_PMSSR			(CMN_PMU_OFFSET + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n)	BIT(n)

#define CMN_DT_PMSRR			(CMN_PMU_OFFSET + 0x130)
#define CMN_DT_PMSRR_SS_REQ		BIT(0)

#define CMN_DT_NUM_COUNTERS		8
#define CMN_MAX_DTCS			4

/*
 * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
 * so throwing away one bit to make overflow handling easy is no big deal.
 */
#define CMN_COUNTER_INIT		0x80000000
/* Similarly for the 40-bit cycle counter */
#define CMN_CC_INIT			0x8000000000ULL
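
/*
 * Sanity check of the scheme, assuming a freshly-primed 32-bit DTC
 * event counter: it takes another 2^31 events to overflow, and
 * (readout - CMN_COUNTER_INIT) is exactly the count accumulated since
 * priming, which is how arm_cmn_read_counter() computes its delta.
 */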


/* Event attributes */
#define CMN_CONFIG_TYPE			GENMASK_ULL(15, 0)
#define CMN_CONFIG_EVENTID		GENMASK_ULL(26, 16)
#define CMN_CONFIG_OCCUPID		GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID		BIT_ULL(31)
#define CMN_CONFIG_NODEID		GENMASK_ULL(47, 32)

#define CMN_EVENT_TYPE(event)		FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event)	FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
#define CMN_EVENT_OCCUPID(event)	FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
#define CMN_EVENT_BYNODEID(event)	FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event)		FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL		GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL		GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP		BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE		BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL		GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK		GENMASK_ULL(63, 0)

#define CMN_EVENT_WP_COMBINE(event)	FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event)	FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
#define CMN_EVENT_WP_CHN_SEL(event)	FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
#define CMN_EVENT_WP_GRP(event)		FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
#define CMN_EVENT_WP_EXCLUSIVE(event)	FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
#define CMN_EVENT_WP_VAL(event)		FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
#define CMN_EVENT_WP_MASK(event)	FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)

/* Made-up event IDs for watchpoint direction */
#define CMN_WP_UP			0
#define CMN_WP_DOWN			2


/* Internal values for encoding event support */
enum cmn_model {
	CMN600 = 1,
	CMN650 = 2,
	CMN700 = 4,
	CI700 = 8,
	/* ...and then we can use bitmap tricks for commonality */
	CMN_ANY = -1,
	NOT_CMN600 = -2,
	CMN_650ON = CMN650 | CMN700,
};

/* Actual part numbers and revision IDs defined by the hardware */
enum cmn_part {
	PART_CMN600 = 0x434,
	PART_CMN650 = 0x436,
	PART_CMN700 = 0x43c,
	PART_CI700 = 0x43a,
};

/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
	REV_CMN600_R1P0,
	REV_CMN600_R1P1,
	REV_CMN600_R1P2,
	REV_CMN600_R1P3,
	REV_CMN600_R2P0,
	REV_CMN600_R3P0,
	REV_CMN600_R3P1,
	REV_CMN650_R0P0 = 0,
	REV_CMN650_R1P0,
	REV_CMN650_R1P1,
	REV_CMN650_R2P0,
	REV_CMN650_R1P2,
	REV_CMN700_R0P0 = 0,
	REV_CMN700_R1P0,
	REV_CMN700_R2P0,
	REV_CMN700_R3P0,
	REV_CI700_R0P0 = 0,
	REV_CI700_R1P0,
	REV_CI700_R2P0,
};
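
/*
 * Note that these values deliberately overlap: REV_CMN600_R1P0,
 * REV_CMN650_R0P0, REV_CMN700_R0P0 and REV_CI700_R0P0 are all 0, so a
 * raw revision value is only meaningful in combination with the part.
 */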

enum cmn_node_type {
	CMN_TYPE_INVALID,
	CMN_TYPE_DVM,
	CMN_TYPE_CFG,
	CMN_TYPE_DTC,
	CMN_TYPE_HNI,
	CMN_TYPE_HNF,
	CMN_TYPE_XP,
	CMN_TYPE_SBSX,
	CMN_TYPE_MPAM_S,
	CMN_TYPE_MPAM_NS,
	CMN_TYPE_RNI,
	CMN_TYPE_RND = 0xd,
	CMN_TYPE_RNSAM = 0xf,
	CMN_TYPE_MTSX,
	CMN_TYPE_HNP,
	CMN_TYPE_CXRA = 0x100,
	CMN_TYPE_CXHA,
	CMN_TYPE_CXLA,
	CMN_TYPE_CCRA,
	CMN_TYPE_CCHA,
	CMN_TYPE_CCLA,
	CMN_TYPE_CCLA_RNI,
	CMN_TYPE_HNS = 0x200,
	CMN_TYPE_HNS_MPAM_S,
	CMN_TYPE_HNS_MPAM_NS,
	/* Not a real node type */
	CMN_TYPE_WP = 0x7770
};

enum cmn_filter_select {
	SEL_NONE = -1,
	SEL_OCCUP1ID,
	SEL_CLASS_OCCUP_ID,
	SEL_CBUSY_SNTHROTTLE_SEL,
	SEL_HBT_LBT_SEL,
	SEL_SN_HOME_SEL,
	SEL_MAX
};

struct arm_cmn_node {
	void __iomem *pmu_base;
	u16 id, logid;
	enum cmn_node_type type;

	u8 dtm;
	s8 dtc;
	/* DN/HN-F/CXHA */
	struct {
		u8 val : 4;
		u8 count : 4;
	} occupid[SEL_MAX];
	union {
		u8 event[4];
		__le32 event_sel;
		u16 event_w[4];
		__le64 event_sel_w;
	};
};

struct arm_cmn_dtm {
	void __iomem *base;
	u32 pmu_config_low;
	union {
		u8 input_sel[4];
		__le32 pmu_config_high;
	};
	s8 wp_event[4];
};

struct arm_cmn_dtc {
	void __iomem *base;
	int irq;
	int irq_friend;
	bool cc_active;

	struct perf_event *counters[CMN_DT_NUM_COUNTERS];
	struct perf_event *cycles;
};

#define CMN_STATE_DISABLED	BIT(0)
#define CMN_STATE_TXN		BIT(1)

struct arm_cmn {
	struct device *dev;
	void __iomem *base;
	unsigned int state;

	enum cmn_revision rev;
	enum cmn_part part;
	u8 mesh_x;
	u8 mesh_y;
	u16 num_xps;
	u16 num_dns;
	bool multi_dtm;
	u8 ports_used;
	struct {
		unsigned int rsp_vc_num : 2;
		unsigned int dat_vc_num : 2;
		unsigned int snp_vc_num : 2;
		unsigned int req_vc_num : 2;
	};

	struct arm_cmn_node *xps;
	struct arm_cmn_node *dns;

	struct arm_cmn_dtm *dtms;
	struct arm_cmn_dtc *dtc;
	unsigned int num_dtcs;

	int cpu;
	struct hlist_node cpuhp_node;

	struct pmu pmu;
	struct dentry *debug;
};

#define to_cmn(p)	container_of(p, struct arm_cmn, pmu)

static int arm_cmn_hp_state;

struct arm_cmn_nodeid {
	u8 x;
	u8 y;
	u8 port;
	u8 dev;
};

static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
	return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
}

static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
{
	struct arm_cmn_nodeid nid;

	if (cmn->num_xps == 1) {
		nid.x = 0;
		nid.y = 0;
		nid.port = CMN_NODEID_1x1_PID(id);
		nid.dev = CMN_NODEID_DEVID(id);
	} else {
		int bits = arm_cmn_xyidbits(cmn);

		nid.x = CMN_NODEID_X(id, bits);
		nid.y = CMN_NODEID_Y(id, bits);
		if (cmn->ports_used & 0xc) {
			nid.port = CMN_NODEID_EXT_PID(id);
			nid.dev = CMN_NODEID_EXT_DEVID(id);
		} else {
			nid.port = CMN_NODEID_PID(id);
			nid.dev = CMN_NODEID_DEVID(id);
		}
	}
	return nid;
}

static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
					       const struct arm_cmn_node *dn)
{
	struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
	int xp_idx = cmn->mesh_x * nid.y + nid.x;

	return cmn->xps + xp_idx;
}
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
					 enum cmn_node_type type)
{
	struct arm_cmn_node *dn;

	for (dn = cmn->dns; dn->type; dn++)
		if (dn->type == type)
			return dn;
	return NULL;
}

static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
{
	switch (cmn->part) {
	case PART_CMN600:
		return CMN600;
	case PART_CMN650:
		return CMN650;
	case PART_CMN700:
		return CMN700;
	case PART_CI700:
		return CI700;
	default:
		return 0;
	}
}
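
/*
 * Usage sketch of the bitmap trick: NOT_CMN600 is ~CMN600 as a bitmask,
 * so "eattr->model & arm_cmn_model(cmn)" is nonzero for an attribute
 * declared with NOT_CMN600 on CMN-650, CMN-700 and CI-700, but zero on
 * CMN-600 - exactly the test arm_cmn_event_attr_is_visible() performs.
 */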

static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
				       const struct arm_cmn_node *xp, int port)
{
	int offset = CMN_MXP__CONNECT_INFO(port);

	if (port >= 2) {
		if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
			return 0;
		/*
		 * CI-700 may have extra ports, but still has the
		 * mesh_port_connect_info registers in the way.
		 */
		if (cmn->part == PART_CI700)
			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
	}

	return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
}

static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
	switch (FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
	case 0x00: return "        |";
	case 0x01: return "  RN-I  |";
	case 0x02: return "  RN-D  |";
	case 0x04: return " RN-F_B |";
	case 0x05: return "RN-F_B_E|";
	case 0x06: return " RN-F_A |";
	case 0x07: return "RN-F_A_E|";
	case 0x08: return "  HN-T  |";
	case 0x09: return "  HN-I  |";
	case 0x0a: return "  HN-D  |";
	case 0x0b: return "  HN-P  |";
	case 0x0c: return "  SN-F  |";
	case 0x0d: return "  SBSX  |";
	case 0x0e: return "  HN-F  |";
	case 0x0f: return " SN-F_E |";
	case 0x10: return " SN-F_D |";
	case 0x11: return "  CXHA  |";
	case 0x12: return "  CXRA  |";
	case 0x13: return "  CXRH  |";
	case 0x14: return " RN-F_D |";
	case 0x15: return "RN-F_D_E|";
	case 0x16: return " RN-F_C |";
	case 0x17: return "RN-F_C_E|";
	case 0x18: return " RN-F_E |";
	case 0x19: return "RN-F_E_E|";
	case 0x1c: return "  MTSX  |";
	case 0x1d: return "  HN-V  |";
	case 0x1e: return "  CCG   |";
	default:   return "  ????  |";
	}
}

static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
{
	struct arm_cmn *cmn = s->private;
	struct arm_cmn_node *dn;

	for (dn = cmn->dns; dn->type; dn++) {
		struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
		int pad = dn->logid < 10;

		if (dn->type == CMN_TYPE_XP)
			continue;
		/* Ignore the extra components that will overlap on some ports */
		if (dn->type < CMN_TYPE_HNI)
			continue;

		if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
			continue;

		seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
		return;
	}
	seq_puts(s, "        |");
}
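
/*
 * The debugfs "map" file rendered below draws the mesh one row of XPs
 * at a time: a header of X coordinates, then for each Y row the XP
 * number, its DTC domain, and per-port rows naming each connected
 * device type along with the logical IDs of devices 0 and 1.
 */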
static int arm_cmn_map_show(struct seq_file *s, void *data)
{
	struct arm_cmn *cmn = s->private;
	int x, y, p, pmax = fls(cmn->ports_used);

	seq_puts(s, "     X");
	for (x = 0; x < cmn->mesh_x; x++)
		seq_printf(s, "    %-2d    ", x);
	seq_puts(s, "\nY P D+");
	y = cmn->mesh_y;
	while (y--) {
		int xp_base = cmn->mesh_x * y;
		u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];

		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "--------+");

		seq_printf(s, "\n%-2d   |", y);
		for (x = 0; x < cmn->mesh_x; x++) {
			struct arm_cmn_node *xp = cmn->xps + xp_base + x;

			for (p = 0; p < CMN_MAX_PORTS; p++)
				port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
			seq_printf(s, " XP #%-3d|", xp_base + x);
		}

		seq_puts(s, "\n     |");
		for (x = 0; x < cmn->mesh_x; x++) {
			s8 dtc = cmn->xps[xp_base + x].dtc;

			if (dtc < 0)
				seq_puts(s, " DTC ?? |");
			else
				seq_printf(s, " DTC %d  |", dtc);
		}
		seq_puts(s, "\n     |");
		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "........|");

		for (p = 0; p < pmax; p++) {
			seq_printf(s, "\n  %d  |", p);
			for (x = 0; x < cmn->mesh_x; x++)
				seq_puts(s, arm_cmn_device_type(port[p][x]));
			seq_puts(s, "\n    0|");
			for (x = 0; x < cmn->mesh_x; x++)
				arm_cmn_show_logid(s, x, y, p, 0);
			seq_puts(s, "\n    1|");
			for (x = 0; x < cmn->mesh_x; x++)
				arm_cmn_show_logid(s, x, y, p, 1);
		}
		seq_puts(s, "\n-----+");
	}
	for (x = 0; x < cmn->mesh_x; x++)
		seq_puts(s, "--------+");
	seq_puts(s, "\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);

static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
{
	const char *name = "map";

	if (id > 0)
		name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
	if (!name)
		return;

	cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
}
#else
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
#endif

struct arm_cmn_hw_event {
	struct arm_cmn_node *dn;
	u64 dtm_idx[4];
	s8 dtc_idx[CMN_MAX_DTCS];
	u8 num_dns;
	u8 dtm_offset;
	bool wide_sel;
	enum cmn_filter_select filter_sel;
};

#define for_each_hw_dn(hw, dn, i) \
	for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)

/* @i is the DTC number, @idx is the counter index on that DTC */
#define for_each_hw_dtc_idx(hw, i, idx) \
	for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0)

static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
	BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
	return (struct arm_cmn_hw_event *)&event->hw;
}

static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
{
	x[pos / 32] |= (u64)val << ((pos % 32) * 2);
}

static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
{
	return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}

struct arm_cmn_event_attr {
	struct device_attribute attr;
	enum cmn_model model;
	enum cmn_node_type type;
	enum cmn_filter_select fsel;
	u16 eventid;
	u8 occupid;
};

struct arm_cmn_format_attr {
	struct device_attribute attr;
	u64 field;
	int config;
};

#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
	(&((struct arm_cmn_event_attr[]) {{				\
		.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL),	\
		.model = _model,					\
		.type = _type,						\
		.eventid = _eventid,					\
		.occupid = _occupid,					\
		.fsel = _fsel,						\
	}})[0].attr.attr)
#define CMN_EVENT_ATTR(_model, _name, _type, _eventid)			\
	_CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)
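
/*
 * These strings are what make e.g. "perf stat -e arm_cmn_0/hnf_cache_miss/"
 * work: the named sysfs event resolves to "type=0x5,eventid=0x1", which
 * perf then feeds back in via the config fields described by the
 * "format" attribute group further down.
 */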

static ssize_t arm_cmn_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct arm_cmn_event_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr);

	if (eattr->type == CMN_TYPE_DTC)
		return sysfs_emit(buf, "type=0x%x\n", eattr->type);

	if (eattr->type == CMN_TYPE_WP)
		return sysfs_emit(buf,
				  "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
				  eattr->type, eattr->eventid);

	if (eattr->fsel > SEL_NONE)
		return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
				  eattr->type, eattr->eventid, eattr->occupid);

	return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
			  eattr->eventid);
}

static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
					     struct attribute *attr,
					     int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
	struct arm_cmn_event_attr *eattr;
	enum cmn_node_type type;
	u16 eventid;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	if (!(eattr->model & arm_cmn_model(cmn)))
		return 0;

	type = eattr->type;
	eventid = eattr->eventid;

	/* Watchpoints aren't nodes, so avoid confusion */
	if (type == CMN_TYPE_WP)
		return attr->mode;

	/* Hide XP events for unused interfaces/channels */
	if (type == CMN_TYPE_XP) {
		unsigned int intf = (eventid >> 2) & 7;
		unsigned int chan = eventid >> 5;

		if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
			return 0;

		if (chan == 4 && cmn->part == PART_CMN600)
			return 0;

		if ((chan == 5 && cmn->rsp_vc_num < 2) ||
		    (chan == 6 && cmn->dat_vc_num < 2) ||
		    (chan == 7 && cmn->snp_vc_num < 2) ||
		    (chan == 8 && cmn->req_vc_num < 2))
			return 0;
	}

	/* Revision-specific differences */
	if (cmn->part == PART_CMN600) {
		if (cmn->rev < REV_CMN600_R1P3) {
			if (type == CMN_TYPE_CXRA && eventid > 0x10)
				return 0;
		}
		if (cmn->rev < REV_CMN600_R1P2) {
			if (type == CMN_TYPE_HNF && eventid == 0x1b)
				return 0;
			if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
				return 0;
		}
	} else if (cmn->part == PART_CMN650) {
		if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
			if (type == CMN_TYPE_HNF && eventid > 0x22)
				return 0;
			if (type == CMN_TYPE_SBSX && eventid == 0x17)
				return 0;
			if (type == CMN_TYPE_RNI && eventid > 0x10)
				return 0;
		}
	} else if (cmn->part == PART_CMN700) {
		if (cmn->rev < REV_CMN700_R2P0) {
			if (type == CMN_TYPE_HNF && eventid > 0x2c)
				return 0;
			if (type == CMN_TYPE_CCHA && eventid > 0x74)
				return 0;
			if (type == CMN_TYPE_CCLA && eventid > 0x27)
				return 0;
		}
		if (cmn->rev < REV_CMN700_R1P0) {
			if (type == CMN_TYPE_HNF && eventid > 0x2b)
				return 0;
		}
	}

	if (!arm_cmn_node(cmn, type))
		return 0;

	return attr->mode;
}
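
/*
 * For reference when reading the XP checks above and the XP macros
 * below: an XP eventid packs three fields - the base event in bits
 * [1:0], the interface in bits [4:2] (0-3 for the mesh E/W/N/S links,
 * 4-7 for device ports), and the channel in bits [8:5] (REQ, RSP, SNP,
 * DAT, PUB, then the secondary channels).
 */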

#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel)		\
	_CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name)						\
	CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
#define CMN_EVENT_HNF(_model, _name, _event)				\
	CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNI(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
#define CMN_EVENT_HNP(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event)				\
	CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event)				\
	CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
#define CMN_EVENT_CXRA(_model, _name, _event)				\
	CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
#define CMN_EVENT_CXHA(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
#define CMN_EVENT_CCHA(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event)					\
	CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)

#define CMN_EVENT_DVM(_model, _name, _event)				\
	_CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
#define CMN_EVENT_DVM_OCC(_model, _name, _event)			\
	_CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID),	\
	_CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID),	\
	_CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)

#define CMN_EVENT_HN_OCC(_model, _name, _type, _event)			\
	_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 1, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 2, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_atomic, _type, _event, 3, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_stash, _type, _event, 4, SEL_OCCUP1ID)
#define CMN_EVENT_HN_CLS(_model, _name, _type, _event)			\
	_CMN_EVENT_ATTR(_model, _name##_class0, _type, _event, 0, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class1, _type, _event, 1, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class2, _type, _event, 2, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class3, _type, _event, 3, SEL_CLASS_OCCUP_ID)
#define CMN_EVENT_HN_SNT(_model, _name, _type, _event)			\
	_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group0_read, _type, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group0_write, _type, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group1_read, _type, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group1_write, _type, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)

#define CMN_EVENT_HNF_OCC(_model, _name, _event)			\
	CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event)			\
	CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event)			\
	CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)

#define CMN_EVENT_HNS_OCC(_name, _event)				\
	CMN_EVENT_HN_OCC(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event),	\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_rxsnp, CMN_TYPE_HNS, _event, 5, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 6, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 7, SEL_OCCUP1ID)
#define CMN_EVENT_HNS_CLS(_name, _event)				\
	CMN_EVENT_HN_CLS(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_SNT(_name, _event)				\
	CMN_EVENT_HN_SNT(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
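
/*
 * All events expanded from one of the *_OCC/*_CLS/*_SNT/*_HBT/*_SNH
 * helpers share their node's single filter field, so only siblings
 * programming the same filter value can count at the same time;
 * arm_cmn_validate_group() and arm_cmn_set_event_sel_hi() below enforce
 * this at group validation and counter allocation time respectively.
 */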
#define CMN_EVENT_HNS_HBT(_name, _event)				\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_HBT_LBT_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 1, SEL_HBT_LBT_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 2, SEL_HBT_LBT_SEL)
#define CMN_EVENT_HNS_SNH(_name, _event)				\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_SN_HOME_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_sn, CMN_TYPE_HNS, _event, 1, SEL_SN_HOME_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_home, CMN_TYPE_HNS, _event, 2, SEL_SN_HOME_SEL)

#define _CMN_EVENT_XP_MESH(_name, _event)			\
	__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)),		\
	__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)),		\
	__CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)),		\
	__CMN_EVENT_XP(s_##_name, (_event) | (3 << 2))

#define _CMN_EVENT_XP_PORT(_name, _event)			\
	__CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)),	\
	__CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)),	\
	__CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)),	\
	__CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))

#define _CMN_EVENT_XP(_name, _event)				\
	_CMN_EVENT_XP_MESH(_name, _event),			\
	_CMN_EVENT_XP_PORT(_name, _event)

/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event)				\
	_CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)),	\
	_CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)),	\
	_CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)),	\
	_CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)),	\
	_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)),	\
	_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)),	\
	_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)),	\
	_CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)),	\
	_CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))

#define CMN_EVENT_XP_DAT(_name, _event)				\
	_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)),	\
	_CMN_EVENT_XP_PORT(dat2_##_name, (_event) | (6 << 5))
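
/*
 * Worked example of the expansion: CMN_EVENT_XP(txflit_valid, 0x01)
 * emits 72 attributes - 9 channels x (4 mesh directions + 4 ports) -
 * so e.g. mxp_n_req_txflit_valid gets eventid
 * 0x01 | (2 << 2) | (0 << 5) = 0x09.
 */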

static struct attribute *arm_cmn_event_attrs[] = {
	CMN_EVENT_DTC(cycles),

	/*
	 * DVM node events conflict with HN-I events in the equivalent PMU
	 * slot, but our lazy short-cut of using the DTM counter index for
	 * the PMU index as well happens to avoid that by construction.
	 */
	CMN_EVENT_DVM(CMN600, rxreq_dvmop,		0x01),
	CMN_EVENT_DVM(CMN600, rxreq_dvmsync,		0x02),
	CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
	CMN_EVENT_DVM(CMN600, rxreq_retried,		0x04),
	CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy,	0x05),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi,		0x01),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi,		0x02),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_pici,		0x03),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_vici,		0x04),
	CMN_EVENT_DVM(NOT_CMN600, dvmsync,		0x05),
	CMN_EVENT_DVM(NOT_CMN600, vmid_filtered,	0x06),
	CMN_EVENT_DVM(NOT_CMN600, rndop_filtered,	0x07),
	CMN_EVENT_DVM(NOT_CMN600, retry,		0x08),
	CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv,		0x09),
	CMN_EVENT_DVM(NOT_CMN600, txsnp_stall,		0x0a),
	CMN_EVENT_DVM(NOT_CMN600, trkfull,		0x0b),
	CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy,	0x0c),
	CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha,	0x0d),
	CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn,	0x0e),
	CMN_EVENT_DVM(CMN700, trk_alloc,		0x0f),
	CMN_EVENT_DVM(CMN700, trk_cxha_alloc,		0x10),
	CMN_EVENT_DVM(CMN700, trk_pdn_alloc,		0x11),
	CMN_EVENT_DVM(CMN700, txsnp_stall_limit,	0x12),
	CMN_EVENT_DVM(CMN700, rxsnp_stall_starv,	0x13),
	CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op,	0x14),

	CMN_EVENT_HNF(CMN_ANY, cache_miss,		0x01),
	CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access,	0x02),
	CMN_EVENT_HNF(CMN_ANY, cache_fill,		0x03),
	CMN_EVENT_HNF(CMN_ANY, pocq_retry,		0x04),
	CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd,		0x05),
	CMN_EVENT_HNF(CMN_ANY, sf_hit,			0x06),
	CMN_EVENT_HNF(CMN_ANY, sf_evictions,		0x07),
	CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent,		0x08),
	CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent,		0x09),
	CMN_EVENT_HNF(CMN_ANY, slc_eviction,		0x0a),
	CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way,	0x0b),
	CMN_EVENT_HNF(CMN_ANY, mc_retries,		0x0c),
	CMN_EVENT_HNF(CMN_ANY, mc_reqs,			0x0d),
	CMN_EVENT_HNF(CMN_ANY, qos_hh_retry,		0x0e),
	CMN_EVENT_HNF_OCC(CMN_ANY, qos_pocq_occupancy,	0x0f),
	CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz,		0x10),
	CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz,	0x11),
	CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full,	0x12),
	CMN_EVENT_HNF(CMN_ANY, cmp_adq_full,		0x13),
	CMN_EVENT_HNF(CMN_ANY, txdat_stall,		0x14),
	CMN_EVENT_HNF(CMN_ANY, txrsp_stall,		0x15),
	CMN_EVENT_HNF(CMN_ANY, seq_full,		0x16),
	CMN_EVENT_HNF(CMN_ANY, seq_hit,			0x17),
	CMN_EVENT_HNF(CMN_ANY, snp_sent,		0x18),
	CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent,	0x19),
	CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent,	0x1a),
	CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk,		0x1b),
	CMN_EVENT_HNF(CMN_ANY, intv_dirty,		0x1c),
	CMN_EVENT_HNF(CMN_ANY, stash_snp_sent,		0x1d),
	CMN_EVENT_HNF(CMN_ANY, stash_data_pull,		0x1e),
	CMN_EVENT_HNF(CMN_ANY, snp_fwded,		0x1f),
	CMN_EVENT_HNF(NOT_CMN600, atomic_fwd,		0x20),
	CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim,		0x21),
	CMN_EVENT_HNF(NOT_CMN600, mpam_softlim,		0x22),
	CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster,	0x23),
	CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict,	0x24),
	CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line,	0x25),
	CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup,	0x26),
	CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry,	0x27),
	CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs,	0x28),
	CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin,	0x29),
	CMN_EVENT_HNF_SNT(CMN700, sn_throttle,		0x2a),
	CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min,	0x2b),
	CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise,	0x2c),
	CMN_EVENT_HNF(CMN700, snp_intv_cln,		0x2d),
	CMN_EVENT_HNF(CMN700, nc_excl,			0x2e),
	CMN_EVENT_HNF(CMN700, excl_mon_ovfl,		0x2f),

	CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl,		0x20),
	CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl,		0x21),
	CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl,		0x22),
	CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl,		0x23),
	CMN_EVENT_HNI(wdb_occ_cnt_ovfl,			0x24),
	CMN_EVENT_HNI(rrt_rd_alloc,			0x25),
	CMN_EVENT_HNI(rrt_wr_alloc,			0x26),
	CMN_EVENT_HNI(rdt_rd_alloc,			0x27),
	CMN_EVENT_HNI(rdt_wr_alloc,			0x28),
	CMN_EVENT_HNI(wdb_alloc,			0x29),
	CMN_EVENT_HNI(txrsp_retryack,			0x2a),
	CMN_EVENT_HNI(arvalid_no_arready,		0x2b),
	CMN_EVENT_HNI(arready_no_arvalid,		0x2c),
	CMN_EVENT_HNI(awvalid_no_awready,		0x2d),
	CMN_EVENT_HNI(awready_no_awvalid,		0x2e),
	CMN_EVENT_HNI(wvalid_no_wready,			0x2f),
	CMN_EVENT_HNI(txdat_stall,			0x30),
	CMN_EVENT_HNI(nonpcie_serialization,		0x31),
	CMN_EVENT_HNI(pcie_serialization,		0x32),

	/*
	 * HN-P events squat on top of the HN-I similarly to DVM events, except
	 * for being crammed into the same physical node as well. And of course
	 * where would the fun be if the same events were in the same order...
	 */
	CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl,		0x01),
	CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl,		0x02),
	CMN_EVENT_HNP(wdb_occ_cnt_ovfl,			0x03),
	CMN_EVENT_HNP(rrt_wr_alloc,			0x04),
	CMN_EVENT_HNP(rdt_wr_alloc,			0x05),
	CMN_EVENT_HNP(wdb_alloc,			0x06),
	CMN_EVENT_HNP(awvalid_no_awready,		0x07),
	CMN_EVENT_HNP(awready_no_awvalid,		0x08),
	CMN_EVENT_HNP(wvalid_no_wready,			0x09),
	CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl,		0x11),
	CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl,		0x12),
	CMN_EVENT_HNP(rrt_rd_alloc,			0x13),
	CMN_EVENT_HNP(rdt_rd_alloc,			0x14),
	CMN_EVENT_HNP(arvalid_no_arready,		0x15),
	CMN_EVENT_HNP(arready_no_arvalid,		0x16),

	CMN_EVENT_XP(txflit_valid,			0x01),
	CMN_EVENT_XP(txflit_stall,			0x02),
	CMN_EVENT_XP_DAT(partial_dat_flit,		0x03),
	/* We treat watchpoints as a special made-up class of XP events */
	CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
	CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),

	CMN_EVENT_SBSX(CMN_ANY, rd_req,			0x01),
	CMN_EVENT_SBSX(CMN_ANY, wr_req,			0x02),
	CMN_EVENT_SBSX(CMN_ANY, cmo_req,		0x03),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack,		0x04),
	CMN_EVENT_SBSX(CMN_ANY, txdat_flitv,		0x05),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv,		0x06),
	CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
	CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
	CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
	CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl,	0x14),
	CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
	CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
	CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl,	0x17),
	CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready,	0x21),
	CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready,	0x22),
	CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready,	0x23),
	CMN_EVENT_SBSX(CMN_ANY, txdat_stall,		0x24),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_stall,		0x25),

	CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats,		0x01),
	CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats,		0x02),
	CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats,		0x03),
	CMN_EVENT_RNID(CMN_ANY, rxdat_flits,		0x04),
	CMN_EVENT_RNID(CMN_ANY, txdat_flits,		0x05),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_total,	0x06),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried,	0x07),
	CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl,		0x08),
	CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl,		0x09),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed,	0x0a),
	CMN_EVENT_RNID(CMN_ANY, wrcancel_sent,		0x0b),
	CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats,		0x0c),
	CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats,		0x0d),
	CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats,		0x0e),
	CMN_EVENT_RNID(CMN_ANY, rrt_alloc,		0x0f),
	CMN_EVENT_RNID(CMN_ANY, wrt_alloc,		0x10),
	CMN_EVENT_RNID(CMN600, rdb_unord,		0x11),
	CMN_EVENT_RNID(CMN600, rdb_replay,		0x12),
	CMN_EVENT_RNID(CMN600, rdb_hybrid,		0x13),
	CMN_EVENT_RNID(CMN600, rdb_ord,			0x14),
	CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl,	0x11),
	CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl,	0x12),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
	CMN_EVENT_RNID(NOT_CMN600, wrt_throttled,	0x16),
	CMN_EVENT_RNID(CMN700, ldb_full,		0x17),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
	CMN_EVENT_RNID(CMN700, rrt_burst_alloc,		0x20),
	CMN_EVENT_RNID(CMN700, awid_hash,		0x21),
	CMN_EVENT_RNID(CMN700, atomic_alloc,		0x22),
	CMN_EVENT_RNID(CMN700, atomic_occ_ovfl,		0x23),

	CMN_EVENT_MTSX(tc_lookup,			0x01),
	CMN_EVENT_MTSX(tc_fill,				0x02),
	CMN_EVENT_MTSX(tc_miss,				0x03),
	CMN_EVENT_MTSX(tdb_forward,			0x04),
	CMN_EVENT_MTSX(tcq_hazard,			0x05),
	CMN_EVENT_MTSX(tcq_rd_alloc,			0x06),
	CMN_EVENT_MTSX(tcq_wr_alloc,			0x07),
	CMN_EVENT_MTSX(tcq_cmo_alloc,			0x08),
	CMN_EVENT_MTSX(axi_rd_req,			0x09),
	CMN_EVENT_MTSX(axi_wr_req,			0x0a),
	CMN_EVENT_MTSX(tcq_occ_cnt_ovfl,		0x0b),
	CMN_EVENT_MTSX(tdb_occ_cnt_ovfl,		0x0c),

	CMN_EVENT_CXRA(CMN_ANY, rht_occ,		0x01),
	CMN_EVENT_CXRA(CMN_ANY, sht_occ,		0x02),
	CMN_EVENT_CXRA(CMN_ANY, rdb_occ,		0x03),
	CMN_EVENT_CXRA(CMN_ANY, wdb_occ,		0x04),
	CMN_EVENT_CXRA(CMN_ANY, ssb_occ,		0x05),
	CMN_EVENT_CXRA(CMN_ANY, snp_bcasts,		0x06),
	CMN_EVENT_CXRA(CMN_ANY, req_chains,		0x07),
	CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen,	0x08),
	CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls,		0x09),
	CMN_EVENT_CXRA(CMN_ANY, chidat_stalls,		0x0a),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
	CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls,	0x11),
	CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls,	0x12),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),

	CMN_EVENT_CXHA(rddatbyp,			0x21),
	CMN_EVENT_CXHA(chirsp_up_stall,			0x22),
	CMN_EVENT_CXHA(chidat_up_stall,			0x23),
	CMN_EVENT_CXHA(snppcrd_link0_stall,		0x24),
	CMN_EVENT_CXHA(snppcrd_link1_stall,		0x25),
	CMN_EVENT_CXHA(snppcrd_link2_stall,		0x26),
	CMN_EVENT_CXHA(reqtrk_occ,			0x27),
	CMN_EVENT_CXHA(rdb_occ,				0x28),
	CMN_EVENT_CXHA(rdbyp_occ,			0x29),
	CMN_EVENT_CXHA(wdb_occ,				0x2a),
	CMN_EVENT_CXHA(snptrk_occ,			0x2b),
	CMN_EVENT_CXHA(sdb_occ,				0x2c),
	CMN_EVENT_CXHA(snphaz_occ,			0x2d),

	CMN_EVENT_CCRA(rht_occ,				0x41),
	CMN_EVENT_CCRA(sht_occ,				0x42),
	CMN_EVENT_CCRA(rdb_occ,				0x43),
	CMN_EVENT_CCRA(wdb_occ,				0x44),
	CMN_EVENT_CCRA(ssb_occ,				0x45),
	CMN_EVENT_CCRA(snp_bcasts,			0x46),
	CMN_EVENT_CCRA(req_chains,			0x47),
	CMN_EVENT_CCRA(req_chain_avglen,		0x48),
	CMN_EVENT_CCRA(chirsp_stalls,			0x49),
	CMN_EVENT_CCRA(chidat_stalls,			0x4a),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0,		0x4b),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1,		0x4c),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2,		0x4d),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0,		0x4e),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1,		0x4f),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2,		0x50),
	CMN_EVENT_CCRA(external_chirsp_stalls,		0x51),
	CMN_EVENT_CCRA(external_chidat_stalls,		0x52),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0,	0x53),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1,	0x54),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2,	0x55),
	CMN_EVENT_CCRA(rht_alloc,			0x56),
	CMN_EVENT_CCRA(sht_alloc,			0x57),
	CMN_EVENT_CCRA(rdb_alloc,			0x58),
	CMN_EVENT_CCRA(wdb_alloc,			0x59),
	CMN_EVENT_CCRA(ssb_alloc,			0x5a),

	CMN_EVENT_CCHA(rddatbyp,			0x61),
	CMN_EVENT_CCHA(chirsp_up_stall,			0x62),
	CMN_EVENT_CCHA(chidat_up_stall,			0x63),
	CMN_EVENT_CCHA(snppcrd_link0_stall,		0x64),
	CMN_EVENT_CCHA(snppcrd_link1_stall,		0x65),
	CMN_EVENT_CCHA(snppcrd_link2_stall,		0x66),
	CMN_EVENT_CCHA(reqtrk_occ,			0x67),
	CMN_EVENT_CCHA(rdb_occ,				0x68),
	CMN_EVENT_CCHA(rdbyp_occ,			0x69),
	CMN_EVENT_CCHA(wdb_occ,				0x6a),
	CMN_EVENT_CCHA(snptrk_occ,			0x6b),
	CMN_EVENT_CCHA(sdb_occ,				0x6c),
	CMN_EVENT_CCHA(snphaz_occ,			0x6d),
	CMN_EVENT_CCHA(reqtrk_alloc,			0x6e),
	CMN_EVENT_CCHA(rdb_alloc,			0x6f),
	CMN_EVENT_CCHA(rdbyp_alloc,			0x70),
	CMN_EVENT_CCHA(wdb_alloc,			0x71),
	CMN_EVENT_CCHA(snptrk_alloc,			0x72),
	CMN_EVENT_CCHA(sdb_alloc,			0x73),
	CMN_EVENT_CCHA(snphaz_alloc,			0x74),
	CMN_EVENT_CCHA(pb_rhu_req_occ,			0x75),
	CMN_EVENT_CCHA(pb_rhu_req_alloc,		0x76),
	CMN_EVENT_CCHA(pb_rhu_pcie_req_occ,		0x77),
	CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc,		0x78),
	CMN_EVENT_CCHA(pb_pcie_wr_req_occ,		0x79),
	CMN_EVENT_CCHA(pb_pcie_wr_req_alloc,		0x7a),
	CMN_EVENT_CCHA(pb_pcie_reg_req_occ,		0x7b),
	CMN_EVENT_CCHA(pb_pcie_reg_req_alloc,		0x7c),
	CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ,		0x7d),
	CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc,		0x7e),
	CMN_EVENT_CCHA(pb_rhu_dat_occ,			0x7f),
	CMN_EVENT_CCHA(pb_rhu_dat_alloc,		0x80),
	CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ,		0x81),
	CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc,		0x82),
	CMN_EVENT_CCHA(pb_pcie_wr_dat_occ,		0x83),
	CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc,		0x84),

	CMN_EVENT_CCLA(rx_cxs,				0x21),
	CMN_EVENT_CCLA(tx_cxs,				0x22),
	CMN_EVENT_CCLA(rx_cxs_avg_size,			0x23),
	CMN_EVENT_CCLA(tx_cxs_avg_size,			0x24),
	CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure,	0x25),
	CMN_EVENT_CCLA(link_crdbuf_occ,			0x26),
	CMN_EVENT_CCLA(link_crdbuf_alloc,		0x27),
	CMN_EVENT_CCLA(pfwd_rcvr_cxs,			0x28),
	CMN_EVENT_CCLA(pfwd_sndr_num_flits,		0x29),
	CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd,	0x2a),
	CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd,	0x2b),
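
	/*
	 * The HN-S events below largely mirror the HN-F set above, with
	 * extra HBT/LBT and SN/home filter variants plus a block of
	 * snoop- and CCG-related additions from 0x30 onwards.
	 */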
	CMN_EVENT_HNS_HBT(cache_miss,			0x01),
	CMN_EVENT_HNS_HBT(slc_sf_cache_access,		0x02),
	CMN_EVENT_HNS_HBT(cache_fill,			0x03),
	CMN_EVENT_HNS_HBT(pocq_retry,			0x04),
	CMN_EVENT_HNS_HBT(pocq_reqs_recvd,		0x05),
	CMN_EVENT_HNS_HBT(sf_hit,			0x06),
	CMN_EVENT_HNS_HBT(sf_evictions,			0x07),
	CMN_EVENT_HNS(dir_snoops_sent,			0x08),
	CMN_EVENT_HNS(brd_snoops_sent,			0x09),
	CMN_EVENT_HNS_HBT(slc_eviction,			0x0a),
	CMN_EVENT_HNS_HBT(slc_fill_invalid_way,		0x0b),
	CMN_EVENT_HNS(mc_retries_local,			0x0c),
	CMN_EVENT_HNS_SNH(mc_reqs_local,		0x0d),
	CMN_EVENT_HNS(qos_hh_retry,			0x0e),
	CMN_EVENT_HNS_OCC(qos_pocq_occupancy,		0x0f),
	CMN_EVENT_HNS(pocq_addrhaz,			0x10),
	CMN_EVENT_HNS(pocq_atomic_addrhaz,		0x11),
	CMN_EVENT_HNS(ld_st_swp_adq_full,		0x12),
	CMN_EVENT_HNS(cmp_adq_full,			0x13),
	CMN_EVENT_HNS(txdat_stall,			0x14),
	CMN_EVENT_HNS(txrsp_stall,			0x15),
	CMN_EVENT_HNS(seq_full,				0x16),
	CMN_EVENT_HNS(seq_hit,				0x17),
	CMN_EVENT_HNS(snp_sent,				0x18),
	CMN_EVENT_HNS(sfbi_dir_snp_sent,		0x19),
	CMN_EVENT_HNS(sfbi_brd_snp_sent,		0x1a),
	CMN_EVENT_HNS(intv_dirty,			0x1c),
	CMN_EVENT_HNS(stash_snp_sent,			0x1d),
	CMN_EVENT_HNS(stash_data_pull,			0x1e),
	CMN_EVENT_HNS(snp_fwded,			0x1f),
	CMN_EVENT_HNS(atomic_fwd,			0x20),
	CMN_EVENT_HNS(mpam_hardlim,			0x21),
	CMN_EVENT_HNS(mpam_softlim,			0x22),
	CMN_EVENT_HNS(snp_sent_cluster,			0x23),
	CMN_EVENT_HNS(sf_imprecise_evict,		0x24),
	CMN_EVENT_HNS(sf_evict_shared_line,		0x25),
	CMN_EVENT_HNS_CLS(pocq_class_occup,		0x26),
	CMN_EVENT_HNS_CLS(pocq_class_retry,		0x27),
	CMN_EVENT_HNS_CLS(class_mc_reqs_local,		0x28),
	CMN_EVENT_HNS_CLS(class_cgnt_cmin,		0x29),
	CMN_EVENT_HNS_SNT(sn_throttle,			0x2a),
	CMN_EVENT_HNS_SNT(sn_throttle_min,		0x2b),
	CMN_EVENT_HNS(sf_precise_to_imprecise,		0x2c),
	CMN_EVENT_HNS(snp_intv_cln,			0x2d),
	CMN_EVENT_HNS(nc_excl,				0x2e),
	CMN_EVENT_HNS(excl_mon_ovfl,			0x2f),
	CMN_EVENT_HNS(snp_req_recvd,			0x30),
	CMN_EVENT_HNS(snp_req_byp_pocq,			0x31),
	CMN_EVENT_HNS(dir_ccgha_snp_sent,		0x32),
	CMN_EVENT_HNS(brd_ccgha_snp_sent,		0x33),
	CMN_EVENT_HNS(ccgha_snp_stall,			0x34),
	CMN_EVENT_HNS(lbt_req_hardlim,			0x35),
	CMN_EVENT_HNS(hbt_req_hardlim,			0x36),
	CMN_EVENT_HNS(sf_reupdate,			0x37),
	CMN_EVENT_HNS(excl_sf_imprecise,		0x38),
	CMN_EVENT_HNS(snp_pocq_addrhaz,			0x39),
	CMN_EVENT_HNS(mc_retries_remote,		0x3a),
	CMN_EVENT_HNS_SNH(mc_reqs_remote,		0x3b),
	CMN_EVENT_HNS_CLS(class_mc_reqs_remote,		0x3c),

	NULL
};

static const struct attribute_group arm_cmn_event_attrs_group = {
	.name = "events",
	.attrs = arm_cmn_event_attrs,
	.is_visible = arm_cmn_event_attr_is_visible,
};

static ssize_t arm_cmn_format_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
	int lo = __ffs(fmt->field), hi = __fls(fmt->field);

	if (lo == hi)
		return sysfs_emit(buf, "config:%d\n", lo);

	if (!fmt->config)
		return sysfs_emit(buf, "config:%d-%d\n", lo, hi);

	return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}

#define _CMN_FORMAT_ATTR(_name, _cfg, _fld)				\
	(&((struct arm_cmn_format_attr[]) {{				\
		.attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL),	\
		.config = _cfg,						\
		.field = _fld,						\
	}})[0].attr.attr)
#define CMN_FORMAT_ATTR(_name, _fld)	_CMN_FORMAT_ATTR(_name, 0, _fld)
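
/*
 * Reading these attributes back gives the bitfield spec for each
 * parameter: e.g. "format/bynodeid" prints "config:31" and
 * "format/wp_val" prints "config1:0-63", per arm_cmn_format_show().
 */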

static struct attribute *arm_cmn_format_attrs[] = {
	CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
	CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
	CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
	CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
	CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),

	CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
	CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
	CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
	CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
	CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),

	_CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
	_CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),

	NULL
};

static const struct attribute_group arm_cmn_format_attrs_group = {
	.name = "format",
	.attrs = arm_cmn_format_attrs,
};

static ssize_t arm_cmn_cpumask_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
}

static struct device_attribute arm_cmn_cpumask_attr =
		__ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);

static ssize_t arm_cmn_identifier_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev);
}

static struct device_attribute arm_cmn_identifier_attr =
		__ATTR(identifier, 0444, arm_cmn_identifier_show, NULL);

static struct attribute *arm_cmn_other_attrs[] = {
	&arm_cmn_cpumask_attr.attr,
	&arm_cmn_identifier_attr.attr,
	NULL,
};

static const struct attribute_group arm_cmn_other_attrs_group = {
	.attrs = arm_cmn_other_attrs,
};

static const struct attribute_group *arm_cmn_attr_groups[] = {
	&arm_cmn_event_attrs_group,
	&arm_cmn_format_attrs_group,
	&arm_cmn_other_attrs_group,
	NULL
};

static int arm_cmn_wp_idx(struct perf_event *event)
{
	return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
}
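
/*
 * The watchpoint index follows directly from the encoding: direction
 * (CMN_WP_UP = 0, CMN_WP_DOWN = 2) plus match group (0 or 1) selects
 * one of the four per-DTM watchpoints, so each direction owns an
 * adjacent pair, and combining uses both halves of that pair.
 */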

static u32 arm_cmn_wp_config(struct perf_event *event)
{
	u32 config;
	u32 dev = CMN_EVENT_WP_DEV_SEL(event);
	u32 chn = CMN_EVENT_WP_CHN_SEL(event);
	u32 grp = CMN_EVENT_WP_GRP(event);
	u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
	u32 combine = CMN_EVENT_WP_COMBINE(event);
	bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;

	config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
	if (exc)
		config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
				      CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
	if (combine && !grp)
		config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
				      CMN_DTM_WPn_CONFIG_WP_COMBINE;
	return config;
}

static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
	if (!cmn->state)
		writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
	cmn->state |= state;
}

static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
{
	cmn->state &= ~state;
	if (!cmn->state)
		writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
			       cmn->dtc[0].base + CMN_DT_PMCR);
}

static void arm_cmn_pmu_enable(struct pmu *pmu)
{
	arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static void arm_cmn_pmu_disable(struct pmu *pmu)
{
	arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
			    bool snapshot)
{
	struct arm_cmn_dtm *dtm = NULL;
	struct arm_cmn_node *dn;
	unsigned int i, offset, dtm_idx;
	u64 reg, count = 0;

	offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
	for_each_hw_dn(hw, dn, i) {
		if (dtm != &cmn->dtms[dn->dtm]) {
			dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
			reg = readq_relaxed(dtm->base + offset);
		}
		dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
		count += (u16)(reg >> (dtm_idx * 16));
	}
	return count;
}

static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
	u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);

	writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
	return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}

static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
	u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);

	val = readl_relaxed(dtc->base + pmevcnt);
	writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
	return val - CMN_COUNTER_INIT;
}

static void arm_cmn_init_counter(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	u64 count;

	for_each_hw_dtc_idx(hw, i, idx) {
		writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx));
		cmn->dtc[i].counters[idx] = event;
	}

	count = arm_cmn_read_dtm(cmn, hw, false);
	local64_set(&event->hw.prev_count, count);
}

static void arm_cmn_event_read(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	u64 delta, new, prev;
	unsigned long flags;

	if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) {
		delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]);
		local64_add(delta, &event->count);
		return;
	}
	new = arm_cmn_read_dtm(cmn, hw, false);
	prev = local64_xchg(&event->hw.prev_count, new);

	delta = new - prev;

	local_irq_save(flags);
	for_each_hw_dtc_idx(hw, i, idx) {
		new = arm_cmn_read_counter(cmn->dtc + i, idx);
		delta += new << 16;
	}
	local_irq_restore(flags);
	local64_add(delta, &event->count);
}
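
/*
 * Note on the arithmetic above: the 16-bit DTM counters hold the
 * low-order bits and overflow into a wider DTC counter, so the total
 * delta is the DTM delta plus each DTC delta shifted up by 16 - e.g.
 * 3 DTC increments and 0x120 fresh DTM counts represent
 * (3 << 16) + 0x120 events.
 */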

static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
				    enum cmn_filter_select fsel, u8 occupid)
{
	u64 reg;

	if (fsel == SEL_NONE)
		return 0;

	if (!dn->occupid[fsel].count) {
		dn->occupid[fsel].val = occupid;
		reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
				 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
		      FIELD_PREP(CMN__PMU_SN_HOME_SEL,
				 dn->occupid[SEL_SN_HOME_SEL].val) |
		      FIELD_PREP(CMN__PMU_HBT_LBT_SEL,
				 dn->occupid[SEL_HBT_LBT_SEL].val) |
		      FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
				 dn->occupid[SEL_CLASS_OCCUP_ID].val) |
		      FIELD_PREP(CMN__PMU_OCCUP1_ID,
				 dn->occupid[SEL_OCCUP1ID].val);
		writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
	} else if (dn->occupid[fsel].val != occupid) {
		return -EBUSY;
	}
	dn->occupid[fsel].count++;
	return 0;
}

static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
				     int eventid, bool wide_sel)
{
	if (wide_sel) {
		dn->event_w[dtm_idx] = eventid;
		writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
	} else {
		dn->event[dtm_idx] = eventid;
		writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
	}
}

static void arm_cmn_event_start(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	int i;

	if (type == CMN_TYPE_DTC) {
		i = hw->dtc_idx[0];
		writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
		cmn->dtc[i].cc_active = true;
	} else if (type == CMN_TYPE_WP) {
		int wp_idx = arm_cmn_wp_idx(event);
		u64 val = CMN_EVENT_WP_VAL(event);
		u64 mask = CMN_EVENT_WP_MASK(event);

		for_each_hw_dn(hw, dn, i) {
			void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);

			writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
			writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
		}
	} else for_each_hw_dn(hw, dn, i) {
		int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
					 hw->wide_sel);
	}
}

static void arm_cmn_event_stop(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	int i;

	if (type == CMN_TYPE_DTC) {
		i = hw->dtc_idx[0];
		cmn->dtc[i].cc_active = false;
	} else if (type == CMN_TYPE_WP) {
		int wp_idx = arm_cmn_wp_idx(event);

		for_each_hw_dn(hw, dn, i) {
			void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);

			writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
			writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
		}
	} else for_each_hw_dn(hw, dn, i) {
		int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
	}

	arm_cmn_event_read(event);
}
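
/*
 * Group validation works on a scratch tally of the DTM counters, DTC
 * counters, filter values and watchpoints that a proposed group would
 * consume, so an impossible group can be refused at event_init time
 * rather than mysteriously failing to schedule later.
 */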
1596 val->dtc_count[dtc]++; 1597 1598 for_each_hw_dn(hw, dn, i) { 1599 int wp_idx, dtm = dn->dtm, sel = hw->filter_sel; 1600 1601 val->dtm_count[dtm]++; 1602 1603 if (sel > SEL_NONE) 1604 val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1; 1605 1606 if (type != CMN_TYPE_WP) 1607 continue; 1608 1609 wp_idx = arm_cmn_wp_idx(event); 1610 val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; 1611 } 1612 } 1613 1614 static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) 1615 { 1616 struct arm_cmn_hw_event *hw = to_cmn_hw(event); 1617 struct arm_cmn_node *dn; 1618 struct perf_event *sibling, *leader = event->group_leader; 1619 enum cmn_node_type type; 1620 struct arm_cmn_val *val; 1621 int i, ret = -EINVAL; 1622 1623 if (leader == event) 1624 return 0; 1625 1626 if (event->pmu != leader->pmu && !is_software_event(leader)) 1627 return -EINVAL; 1628 1629 val = kzalloc(sizeof(*val), GFP_KERNEL); 1630 if (!val) 1631 return -ENOMEM; 1632 1633 arm_cmn_val_add_event(cmn, val, leader); 1634 for_each_sibling_event(sibling, leader) 1635 arm_cmn_val_add_event(cmn, val, sibling); 1636 1637 type = CMN_EVENT_TYPE(event); 1638 if (type == CMN_TYPE_DTC) { 1639 ret = val->cycles ? -EINVAL : 0; 1640 goto done; 1641 } 1642 1643 for (i = 0; i < CMN_MAX_DTCS; i++) 1644 if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS) 1645 goto done; 1646 1647 for_each_hw_dn(hw, dn, i) { 1648 int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel; 1649 1650 if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) 1651 goto done; 1652 1653 if (sel > SEL_NONE && val->occupid[dtm][sel] && 1654 val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1) 1655 goto done; 1656 1657 if (type != CMN_TYPE_WP) 1658 continue; 1659 1660 wp_idx = arm_cmn_wp_idx(event); 1661 if (val->wp[dtm][wp_idx]) 1662 goto done; 1663 1664 wp_cmb = val->wp[dtm][wp_idx ^ 1]; 1665 if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1) 1666 goto done; 1667 } 1668 1669 ret = 0; 1670 done: 1671 kfree(val); 1672 return ret; 1673 } 1674 1675 static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn, 1676 enum cmn_node_type type, 1677 unsigned int eventid) 1678 { 1679 struct arm_cmn_event_attr *e; 1680 enum cmn_model model = arm_cmn_model(cmn); 1681 1682 for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) { 1683 e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr); 1684 if (e->model & model && e->type == type && e->eventid == eventid) 1685 return e->fsel; 1686 } 1687 return SEL_NONE; 1688 } 1689 1690 1691 static int arm_cmn_event_init(struct perf_event *event) 1692 { 1693 struct arm_cmn *cmn = to_cmn(event->pmu); 1694 struct arm_cmn_hw_event *hw = to_cmn_hw(event); 1695 struct arm_cmn_node *dn; 1696 enum cmn_node_type type; 1697 bool bynodeid; 1698 u16 nodeid, eventid; 1699 1700 if (event->attr.type != event->pmu->type) 1701 return -ENOENT; 1702 1703 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) 1704 return -EINVAL; 1705 1706 event->cpu = cmn->cpu; 1707 if (event->cpu < 0) 1708 return -EINVAL; 1709 1710 type = CMN_EVENT_TYPE(event); 1711 /* DTC events (i.e. 
cycles) already have everything they need */ 1712 if (type == CMN_TYPE_DTC) 1713 return arm_cmn_validate_group(cmn, event); 1714 1715 eventid = CMN_EVENT_EVENTID(event); 1716 /* For watchpoints we need the actual XP node here */ 1717 if (type == CMN_TYPE_WP) { 1718 type = CMN_TYPE_XP; 1719 /* ...and we need a "real" direction */ 1720 if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN) 1721 return -EINVAL; 1722 /* ...but the DTM may depend on which port we're watching */ 1723 if (cmn->multi_dtm) 1724 hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; 1725 } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) { 1726 hw->wide_sel = true; 1727 } 1728 1729 /* This is sufficiently annoying to recalculate, so cache it */ 1730 hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid); 1731 1732 bynodeid = CMN_EVENT_BYNODEID(event); 1733 nodeid = CMN_EVENT_NODEID(event); 1734 1735 hw->dn = arm_cmn_node(cmn, type); 1736 if (!hw->dn) 1737 return -EINVAL; 1738 1739 memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx)); 1740 for (dn = hw->dn; dn->type == type; dn++) { 1741 if (bynodeid && dn->id != nodeid) { 1742 hw->dn++; 1743 continue; 1744 } 1745 hw->num_dns++; 1746 if (dn->dtc < 0) 1747 memset(hw->dtc_idx, 0, cmn->num_dtcs); 1748 else 1749 hw->dtc_idx[dn->dtc] = 0; 1750 1751 if (bynodeid) 1752 break; 1753 } 1754 1755 if (!hw->num_dns) { 1756 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); 1757 1758 dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", 1759 nodeid, nid.x, nid.y, nid.port, nid.dev, type); 1760 return -EINVAL; 1761 } 1762 1763 return arm_cmn_validate_group(cmn, event); 1764 } 1765 1766 static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, 1767 int i) 1768 { 1769 struct arm_cmn_hw_event *hw = to_cmn_hw(event); 1770 enum cmn_node_type type = CMN_EVENT_TYPE(event); 1771 1772 while (i--) { 1773 struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; 1774 unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); 1775 1776 if (type == CMN_TYPE_WP) 1777 dtm->wp_event[arm_cmn_wp_idx(event)] = -1; 1778 1779 if (hw->filter_sel > SEL_NONE) 1780 hw->dn[i].occupid[hw->filter_sel].count--; 1781 1782 dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); 1783 writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); 1784 } 1785 memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); 1786 1787 for_each_hw_dtc_idx(hw, j, idx) 1788 cmn->dtc[j].counters[idx] = NULL; 1789 } 1790 1791 static int arm_cmn_event_add(struct perf_event *event, int flags) 1792 { 1793 struct arm_cmn *cmn = to_cmn(event->pmu); 1794 struct arm_cmn_hw_event *hw = to_cmn_hw(event); 1795 struct arm_cmn_node *dn; 1796 enum cmn_node_type type = CMN_EVENT_TYPE(event); 1797 unsigned int input_sel, i = 0; 1798 1799 if (type == CMN_TYPE_DTC) { 1800 while (cmn->dtc[i].cycles) 1801 if (++i == cmn->num_dtcs) 1802 return -ENOSPC; 1803 1804 cmn->dtc[i].cycles = event; 1805 hw->dtc_idx[0] = i; 1806 1807 if (flags & PERF_EF_START) 1808 arm_cmn_event_start(event, 0); 1809 return 0; 1810 } 1811 1812 /* Grab the global counters first... 
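(on CMN-600 we mostly can't tell which DTC domain a node sits in, so every DTC reserves the same index as DTC0)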
*/ 1813 for_each_hw_dtc_idx(hw, j, idx) { 1814 if (cmn->part == PART_CMN600 && j > 0) { 1815 idx = hw->dtc_idx[0]; 1816 } else { 1817 idx = 0; 1818 while (cmn->dtc[j].counters[idx]) 1819 if (++idx == CMN_DT_NUM_COUNTERS) 1820 return -ENOSPC; 1821 } 1822 hw->dtc_idx[j] = idx; 1823 } 1824 1825 /* ...then the local counters to feed them */ 1826 for_each_hw_dn(hw, dn, i) { 1827 struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; 1828 unsigned int dtm_idx, shift, d = max_t(int, dn->dtc, 0); 1829 u64 reg; 1830 1831 dtm_idx = 0; 1832 while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) 1833 if (++dtm_idx == CMN_DTM_NUM_COUNTERS) 1834 goto free_dtms; 1835 1836 if (type == CMN_TYPE_XP) { 1837 input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx; 1838 } else if (type == CMN_TYPE_WP) { 1839 int tmp, wp_idx = arm_cmn_wp_idx(event); 1840 u32 cfg = arm_cmn_wp_config(event); 1841 1842 if (dtm->wp_event[wp_idx] >= 0) 1843 goto free_dtms; 1844 1845 tmp = dtm->wp_event[wp_idx ^ 1]; 1846 if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != 1847 CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp])) 1848 goto free_dtms; 1849 1850 input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; 1851 dtm->wp_event[wp_idx] = hw->dtc_idx[d]; 1852 writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); 1853 } else { 1854 struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); 1855 1856 if (cmn->multi_dtm) 1857 nid.port %= 2; 1858 1859 input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx + 1860 (nid.port << 4) + (nid.dev << 2); 1861 1862 if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event))) 1863 goto free_dtms; 1864 } 1865 1866 arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); 1867 1868 dtm->input_sel[dtm_idx] = input_sel; 1869 shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx); 1870 dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); 1871 dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift; 1872 dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); 1873 reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; 1874 writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); 1875 } 1876 1877 /* Go go go! */ 1878 arm_cmn_init_counter(event); 1879 1880 if (flags & PERF_EF_START) 1881 arm_cmn_event_start(event, 0); 1882 1883 return 0; 1884 1885 free_dtms: 1886 arm_cmn_event_clear(cmn, event, i); 1887 return -ENOSPC; 1888 } 1889 1890 static void arm_cmn_event_del(struct perf_event *event, int flags) 1891 { 1892 struct arm_cmn *cmn = to_cmn(event->pmu); 1893 struct arm_cmn_hw_event *hw = to_cmn_hw(event); 1894 enum cmn_node_type type = CMN_EVENT_TYPE(event); 1895 1896 arm_cmn_event_stop(event, PERF_EF_UPDATE); 1897 1898 if (type == CMN_TYPE_DTC) 1899 cmn->dtc[hw->dtc_idx[0]].cycles = NULL; 1900 else 1901 arm_cmn_event_clear(cmn, event, hw->num_dns); 1902 } 1903 1904 /* 1905 * We stop the PMU for both add and read, to avoid skew across DTM counters. 
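* (the transaction hooks below set CMN_STATE_TXN, which should hold the whole PMU disabled until the transaction ends)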
1906 * In theory we could use snapshots to read without stopping, but then it 1907 * becomes a lot trickier to deal with overflow and racing against interrupts, 1908 * plus it seems they don't work properly on some hardware anyway :( 1909 */ 1910 static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags) 1911 { 1912 arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN); 1913 } 1914 1915 static void arm_cmn_end_txn(struct pmu *pmu) 1916 { 1917 arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN); 1918 } 1919 1920 static int arm_cmn_commit_txn(struct pmu *pmu) 1921 { 1922 arm_cmn_end_txn(pmu); 1923 return 0; 1924 } 1925 1926 static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu) 1927 { 1928 unsigned int i; 1929 1930 perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu); 1931 for (i = 0; i < cmn->num_dtcs; i++) 1932 irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu)); 1933 cmn->cpu = cpu; 1934 } 1935 1936 static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) 1937 { 1938 struct arm_cmn *cmn; 1939 int node; 1940 1941 cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); 1942 node = dev_to_node(cmn->dev); 1943 if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) 1944 arm_cmn_migrate(cmn, cpu); 1945 return 0; 1946 } 1947 1948 static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) 1949 { 1950 struct arm_cmn *cmn; 1951 unsigned int target; 1952 int node; 1953 1954 cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); 1955 if (cpu != cmn->cpu) 1956 return 0; 1957 1958 node = dev_to_node(cmn->dev); 1959 1960 target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu); 1961 if (target >= nr_cpu_ids) 1962 target = cpumask_any_but(cpu_online_mask, cpu); 1963 1964 if (target < nr_cpu_ids) 1965 arm_cmn_migrate(cmn, target); 1966 1967 return 0; 1968 } 1969 1970 static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) 1971 { 1972 struct arm_cmn_dtc *dtc = dev_id; 1973 irqreturn_t ret = IRQ_NONE; 1974 1975 for (;;) { 1976 u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR); 1977 u64 delta; 1978 int i; 1979 1980 for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) { 1981 if (status & (1U << i)) { 1982 ret = IRQ_HANDLED; 1983 if (WARN_ON(!dtc->counters[i])) 1984 continue; 1985 delta = (u64)arm_cmn_read_counter(dtc, i) << 16; 1986 local64_add(delta, &dtc->counters[i]->count); 1987 } 1988 } 1989 1990 if (status & (1U << CMN_DT_NUM_COUNTERS)) { 1991 ret = IRQ_HANDLED; 1992 if (dtc->cc_active && !WARN_ON(!dtc->cycles)) { 1993 delta = arm_cmn_read_cc(dtc); 1994 local64_add(delta, &dtc->cycles->count); 1995 } 1996 } 1997 1998 writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR); 1999 2000 if (!dtc->irq_friend) 2001 return ret; 2002 dtc += dtc->irq_friend; 2003 } 2004 } 2005 2006 /* We can reasonably accommodate DTCs of the same CMN sharing IRQs */ 2007 static int arm_cmn_init_irqs(struct arm_cmn *cmn) 2008 { 2009 int i, j, irq, err; 2010 2011 for (i = 0; i < cmn->num_dtcs; i++) { 2012 irq = cmn->dtc[i].irq; 2013 for (j = i; j--; ) { 2014 if (cmn->dtc[j].irq == irq) { 2015 cmn->dtc[j].irq_friend = i - j; 2016 goto next; 2017 } 2018 } 2019 err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq, 2020 IRQF_NOBALANCING | IRQF_NO_THREAD, 2021 dev_name(cmn->dev), &cmn->dtc[i]); 2022 if (err) 2023 return err; 2024 2025 err = irq_set_affinity(irq, cpumask_of(cmn->cpu)); 2026 if (err) 2027 return err; 2028 next: 2029 ; /* isn't C great?
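(pre-C23, a label must be followed by a statement, hence the lone semicolon)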
*/ 2030 } 2031 return 0; 2032 } 2033 2034 static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx) 2035 { 2036 int i; 2037 2038 dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx); 2039 dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; 2040 writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); 2041 for (i = 0; i < 4; i++) { 2042 dtm->wp_event[i] = -1; 2043 writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i)); 2044 writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i)); 2045 } 2046 } 2047 2048 static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx) 2049 { 2050 struct arm_cmn_dtc *dtc = cmn->dtc + idx; 2051 2052 dtc->base = dn->pmu_base - CMN_PMU_OFFSET; 2053 dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); 2054 if (dtc->irq < 0) 2055 return dtc->irq; 2056 2057 writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); 2058 writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); 2059 writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR); 2060 writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); 2061 2062 return 0; 2063 } 2064 2065 static int arm_cmn_node_cmp(const void *a, const void *b) 2066 { 2067 const struct arm_cmn_node *dna = a, *dnb = b; 2068 int cmp; 2069 2070 cmp = dna->type - dnb->type; 2071 if (!cmp) 2072 cmp = dna->logid - dnb->logid; 2073 return cmp; 2074 } 2075 2076 static int arm_cmn_init_dtcs(struct arm_cmn *cmn) 2077 { 2078 struct arm_cmn_node *dn, *xp; 2079 int dtc_idx = 0; 2080 2081 cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); 2082 if (!cmn->dtc) 2083 return -ENOMEM; 2084 2085 sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL); 2086 2087 cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); 2088 2089 if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) { 2090 /* We do at least know that a DTC's XP must be in that DTC's domain */ 2091 dn = arm_cmn_node(cmn, CMN_TYPE_DTC); 2092 for (int i = 0; i < cmn->num_dtcs; i++) 2093 arm_cmn_node_to_xp(cmn, dn + i)->dtc = i; 2094 } 2095 2096 for (dn = cmn->dns; dn->type; dn++) { 2097 if (dn->type == CMN_TYPE_XP) 2098 continue; 2099 2100 xp = arm_cmn_node_to_xp(cmn, dn); 2101 dn->dtc = xp->dtc; 2102 dn->dtm = xp->dtm; 2103 if (cmn->multi_dtm) 2104 dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; 2105 2106 if (dn->type == CMN_TYPE_DTC) { 2107 int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); 2108 2109 if (err) 2110 return err; 2111 } 2112 2113 /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ 2114 if (dn->type == CMN_TYPE_RND) 2115 dn->type = CMN_TYPE_RNI; 2116 2117 /* We split the RN-I off already, so let the CCLA part match CCLA events */ 2118 if (dn->type == CMN_TYPE_CCLA_RNI) 2119 dn->type = CMN_TYPE_CCLA; 2120 } 2121 2122 arm_cmn_set_state(cmn, CMN_STATE_DISABLED); 2123 2124 return 0; 2125 } 2126 2127 static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region) 2128 { 2129 int offset = CMN_DTM_UNIT_INFO; 2130 2131 if (cmn->part == PART_CMN650 || cmn->part == PART_CI700) 2132 offset = CMN650_DTM_UNIT_INFO; 2133 2134 return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset)); 2135 } 2136 2137 static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node) 2138 { 2139 int level; 2140 u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO); 2141 2142 node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg); 2143 node->id = FIELD_GET(CMN_NI_NODE_ID, reg); 2144 node->logid = 
FIELD_GET(CMN_NI_LOGICAL_ID, reg); 2145 2146 node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET; 2147 2148 if (node->type == CMN_TYPE_CFG) 2149 level = 0; 2150 else if (node->type == CMN_TYPE_XP) 2151 level = 1; 2152 else 2153 level = 2; 2154 2155 dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n", 2156 (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ', 2157 node->type, node->logid, offset); 2158 } 2159 2160 static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type) 2161 { 2162 switch (type) { 2163 case CMN_TYPE_HNP: 2164 return CMN_TYPE_HNI; 2165 case CMN_TYPE_CCLA_RNI: 2166 return CMN_TYPE_RNI; 2167 default: 2168 return CMN_TYPE_INVALID; 2169 } 2170 } 2171 2172 static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) 2173 { 2174 void __iomem *cfg_region; 2175 struct arm_cmn_node cfg, *dn; 2176 struct arm_cmn_dtm *dtm; 2177 enum cmn_part part; 2178 u16 child_count, child_poff; 2179 u32 xp_offset[CMN_MAX_XPS]; 2180 u64 reg; 2181 int i, j; 2182 size_t sz; 2183 2184 arm_cmn_init_node_info(cmn, rgn_offset, &cfg); 2185 if (cfg.type != CMN_TYPE_CFG) 2186 return -ENODEV; 2187 2188 cfg_region = cmn->base + rgn_offset; 2189 2190 reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01); 2191 part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg); 2192 part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8; 2193 if (cmn->part && cmn->part != part) 2194 dev_warn(cmn->dev, 2195 "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n", 2196 cmn->part, part); 2197 cmn->part = part; 2198 if (!arm_cmn_model(cmn)) 2199 dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part); 2200 2201 reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23); 2202 cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); 2203 2204 reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL); 2205 cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN; 2206 cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); 2207 cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); 2208 2209 reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1); 2210 cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg); 2211 cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg); 2212 2213 reg = readq_relaxed(cfg_region + CMN_CHILD_INFO); 2214 child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); 2215 child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); 2216 2217 cmn->num_xps = child_count; 2218 cmn->num_dns = cmn->num_xps; 2219 2220 /* Pass 1: visit the XPs, enumerate their children */ 2221 for (i = 0; i < cmn->num_xps; i++) { 2222 reg = readq_relaxed(cfg_region + child_poff + i * 8); 2223 xp_offset[i] = reg & CMN_CHILD_NODE_ADDR; 2224 2225 reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO); 2226 cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); 2227 } 2228 2229 /* 2230 * Some nodes effectively have two separate types, which we'll handle 2231 * by creating one of each internally. For a (very) safe initial upper 2232 * bound, account for double the number of non-XP nodes. 
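* (at this point num_dns counts the XPs plus all their children, so 2 * num_dns - num_xps comes out as 2 * (num_dns - num_xps) non-XP slots plus num_xps XP slots)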
2233 */ 2234 dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps, 2235 sizeof(*dn), GFP_KERNEL); 2236 if (!dn) 2237 return -ENOMEM; 2238 2239 /* Initial safe upper bound on DTMs for any possible mesh layout */ 2240 i = cmn->num_xps; 2241 if (cmn->multi_dtm) 2242 i += cmn->num_xps + 1; 2243 dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL); 2244 if (!dtm) 2245 return -ENOMEM; 2246 2247 /* Pass 2: now we can actually populate the nodes */ 2248 cmn->dns = dn; 2249 cmn->dtms = dtm; 2250 for (i = 0; i < cmn->num_xps; i++) { 2251 void __iomem *xp_region = cmn->base + xp_offset[i]; 2252 struct arm_cmn_node *xp = dn++; 2253 unsigned int xp_ports = 0; 2254 2255 arm_cmn_init_node_info(cmn, xp_offset[i], xp); 2256 /* 2257 * Thanks to the order in which XP logical IDs seem to be 2258 * assigned, we can handily infer the mesh X dimension by 2259 * looking out for the XP at (0,1) without needing to know 2260 * the exact node ID format, which we can later derive. 2261 */ 2262 if (xp->id == (1 << 3)) 2263 cmn->mesh_x = xp->logid; 2264 2265 if (cmn->part == PART_CMN600) 2266 xp->dtc = -1; 2267 else 2268 xp->dtc = arm_cmn_dtc_domain(cmn, xp_region); 2269 2270 xp->dtm = dtm - cmn->dtms; 2271 arm_cmn_init_dtm(dtm++, xp, 0); 2272 /* 2273 * Keeping track of connected ports will let us filter out 2274 * unnecessary XP events easily. We can also reliably infer the 2275 * "extra device ports" configuration for the node ID format 2276 * from this, since in that case we will see at least one XP 2277 * with port 2 connected, for the HN-D. 2278 */ 2279 for (int p = 0; p < CMN_MAX_PORTS; p++) 2280 if (arm_cmn_device_connect_info(cmn, xp, p)) 2281 xp_ports |= BIT(p); 2282 2283 if (cmn->multi_dtm && (xp_ports & 0xc)) 2284 arm_cmn_init_dtm(dtm++, xp, 1); 2285 if (cmn->multi_dtm && (xp_ports & 0x30)) 2286 arm_cmn_init_dtm(dtm++, xp, 2); 2287 2288 cmn->ports_used |= xp_ports; 2289 2290 reg = readq_relaxed(xp_region + CMN_CHILD_INFO); 2291 child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); 2292 child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); 2293 2294 for (j = 0; j < child_count; j++) { 2295 reg = readq_relaxed(xp_region + child_poff + j * 8); 2296 /* 2297 * Don't even try to touch anything external, since in general 2298 * we haven't a clue how to power up arbitrary CHI requesters. 2299 * As of CMN-600r1 these could only be RN-SAMs or CXLAs, 2300 * neither of which have any PMU events anyway. 2301 * (Actually, CXLAs do seem to have grown some events in r1p2, 2302 * but they don't go to regular XP DTMs, and they depend on 2303 * secure configuration which we can't easily deal with) 2304 */ 2305 if (reg & CMN_CHILD_NODE_EXTERNAL) { 2306 dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); 2307 continue; 2308 } 2309 /* 2310 * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus 2311 * child count larger than the number of valid child pointers. 2312 * A child offset of 0 can only occur on CMN-600; otherwise it 2313 * would imply the root node being its own grandchild, which 2314 * we can safely dismiss in general. 
*/ 2316 if (reg == 0 && cmn->part != PART_CMN600) { 2317 dev_dbg(cmn->dev, "bogus child pointer?\n"); 2318 continue; 2319 } 2320 2321 arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); 2322 2323 switch (dn->type) { 2324 case CMN_TYPE_DTC: 2325 cmn->num_dtcs++; 2326 dn++; 2327 break; 2328 /* These guys have PMU events */ 2329 case CMN_TYPE_DVM: 2330 case CMN_TYPE_HNI: 2331 case CMN_TYPE_HNF: 2332 case CMN_TYPE_SBSX: 2333 case CMN_TYPE_RNI: 2334 case CMN_TYPE_RND: 2335 case CMN_TYPE_MTSX: 2336 case CMN_TYPE_CXRA: 2337 case CMN_TYPE_CXHA: 2338 case CMN_TYPE_CCRA: 2339 case CMN_TYPE_CCHA: 2340 case CMN_TYPE_CCLA: 2341 case CMN_TYPE_HNS: 2342 dn++; 2343 break; 2344 /* Nothing to see here */ 2345 case CMN_TYPE_MPAM_S: 2346 case CMN_TYPE_MPAM_NS: 2347 case CMN_TYPE_RNSAM: 2348 case CMN_TYPE_CXLA: 2349 case CMN_TYPE_HNS_MPAM_S: 2350 case CMN_TYPE_HNS_MPAM_NS: 2351 break; 2352 /* 2353 * Split "optimised" combination nodes into separate 2354 * types for the different event sets. Offsetting the 2355 * base address lets us handle the second pmu_event_sel 2356 * register via the normal mechanism later. 2357 */ 2358 case CMN_TYPE_HNP: 2359 case CMN_TYPE_CCLA_RNI: 2360 dn[1] = dn[0]; 2361 dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; 2362 dn[1].type = arm_cmn_subtype(dn->type); 2363 dn += 2; 2364 break; 2365 /* Something has gone horribly wrong */ 2366 default: 2367 dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type); 2368 return -ENODEV; 2369 } 2370 } 2371 } 2372 2373 /* Correct for any nodes we added or skipped */ 2374 cmn->num_dns = dn - cmn->dns; 2375 2376 /* Cheeky +1 to help terminate pointer-based iteration later */ 2377 sz = (void *)(dn + 1) - (void *)cmn->dns; 2378 dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); 2379 if (dn) 2380 cmn->dns = dn; 2381 2382 sz = (void *)dtm - (void *)cmn->dtms; 2383 dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL); 2384 if (dtm) 2385 cmn->dtms = dtm; 2386 2387 /* 2388 * If mesh_x wasn't set during discovery then we never saw 2389 * an XP at (0,1), thus we must have an Nx1 configuration. 2390 */ 2391 if (!cmn->mesh_x) 2392 cmn->mesh_x = cmn->num_xps; 2393 cmn->mesh_y = cmn->num_xps / cmn->mesh_x; 2394 2395 /* 1x1 config plays havoc with XP event encodings */ 2396 if (cmn->num_xps == 1) 2397 dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n"); 2398 2399 dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev); 2400 reg = cmn->ports_used; 2401 dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n", 2402 cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg, 2403 cmn->multi_dtm ? ", multi-DTM" : ""); 2404 2405 return 0; 2406 } 2407 2408 static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) 2409 { 2410 struct resource *cfg, *root; 2411 2412 cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2413 if (!cfg) 2414 return -EINVAL; 2415 2416 root = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2417 if (!root) 2418 return -EINVAL; 2419 2420 if (!resource_contains(cfg, root)) 2421 swap(cfg, root); 2422 /* 2423 * Note that devm_ioremap_resource() is dumb and won't let the platform 2424 * device claim cfg when the ACPI companion device has already claimed 2425 * root within it. But since they *are* already both claimed in the 2426 * appropriate name, we don't really need to do it again here anyway.
2427 */ 2428 cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg)); 2429 if (!cmn->base) 2430 return -ENOMEM; 2431 2432 return root->start - cfg->start; 2433 } 2434 2435 static int arm_cmn600_of_probe(struct device_node *np) 2436 { 2437 u32 rootnode; 2438 2439 return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode; 2440 } 2441 2442 static int arm_cmn_probe(struct platform_device *pdev) 2443 { 2444 struct arm_cmn *cmn; 2445 const char *name; 2446 static atomic_t id; 2447 int err, rootnode, this_id; 2448 2449 cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); 2450 if (!cmn) 2451 return -ENOMEM; 2452 2453 cmn->dev = &pdev->dev; 2454 cmn->part = (unsigned long)device_get_match_data(cmn->dev); 2455 platform_set_drvdata(pdev, cmn); 2456 2457 if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) { 2458 rootnode = arm_cmn600_acpi_probe(pdev, cmn); 2459 } else { 2460 rootnode = 0; 2461 cmn->base = devm_platform_ioremap_resource(pdev, 0); 2462 if (IS_ERR(cmn->base)) 2463 return PTR_ERR(cmn->base); 2464 if (cmn->part == PART_CMN600) 2465 rootnode = arm_cmn600_of_probe(pdev->dev.of_node); 2466 } 2467 if (rootnode < 0) 2468 return rootnode; 2469 2470 err = arm_cmn_discover(cmn, rootnode); 2471 if (err) 2472 return err; 2473 2474 err = arm_cmn_init_dtcs(cmn); 2475 if (err) 2476 return err; 2477 2478 err = arm_cmn_init_irqs(cmn); 2479 if (err) 2480 return err; 2481 2482 cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); 2483 cmn->pmu = (struct pmu) { 2484 .module = THIS_MODULE, 2485 .parent = cmn->dev, 2486 .attr_groups = arm_cmn_attr_groups, 2487 .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 2488 .task_ctx_nr = perf_invalid_context, 2489 .pmu_enable = arm_cmn_pmu_enable, 2490 .pmu_disable = arm_cmn_pmu_disable, 2491 .event_init = arm_cmn_event_init, 2492 .add = arm_cmn_event_add, 2493 .del = arm_cmn_event_del, 2494 .start = arm_cmn_event_start, 2495 .stop = arm_cmn_event_stop, 2496 .read = arm_cmn_event_read, 2497 .start_txn = arm_cmn_start_txn, 2498 .commit_txn = arm_cmn_commit_txn, 2499 .cancel_txn = arm_cmn_end_txn, 2500 }; 2501 2502 this_id = atomic_fetch_inc(&id); 2503 name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id); 2504 if (!name) 2505 return -ENOMEM; 2506 2507 err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node); 2508 if (err) 2509 return err; 2510 2511 err = perf_pmu_register(&cmn->pmu, name, -1); 2512 if (err) 2513 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); 2514 else 2515 arm_cmn_debugfs_init(cmn, this_id); 2516 2517 return err; 2518 } 2519 2520 static void arm_cmn_remove(struct platform_device *pdev) 2521 { 2522 struct arm_cmn *cmn = platform_get_drvdata(pdev); 2523 2524 writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); 2525 2526 perf_pmu_unregister(&cmn->pmu); 2527 cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); 2528 debugfs_remove(cmn->debug); 2529 } 2530 2531 #ifdef CONFIG_OF 2532 static const struct of_device_id arm_cmn_of_match[] = { 2533 { .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 }, 2534 { .compatible = "arm,cmn-650" }, 2535 { .compatible = "arm,cmn-700" }, 2536 { .compatible = "arm,ci-700" }, 2537 {} 2538 }; 2539 MODULE_DEVICE_TABLE(of, arm_cmn_of_match); 2540 #endif 2541 2542 #ifdef CONFIG_ACPI 2543 static const struct acpi_device_id arm_cmn_acpi_match[] = { 2544 { "ARMHC600", PART_CMN600 }, 2545 { "ARMHC650" }, 2546 { "ARMHC700" }, 2547 {} 2548 }; 2549 MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); 2550 #endif 2551 2552 static 
struct platform_driver arm_cmn_driver = { 2553 .driver = { 2554 .name = "arm-cmn", 2555 .of_match_table = of_match_ptr(arm_cmn_of_match), 2556 .acpi_match_table = ACPI_PTR(arm_cmn_acpi_match), 2557 }, 2558 .probe = arm_cmn_probe, 2559 .remove_new = arm_cmn_remove, 2560 }; 2561 2562 static int __init arm_cmn_init(void) 2563 { 2564 int ret; 2565 2566 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 2567 "perf/arm/cmn:online", 2568 arm_cmn_pmu_online_cpu, 2569 arm_cmn_pmu_offline_cpu); 2570 if (ret < 0) 2571 return ret; 2572 2573 arm_cmn_hp_state = ret; 2574 arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL); 2575 2576 ret = platform_driver_register(&arm_cmn_driver); 2577 if (ret) { 2578 cpuhp_remove_multi_state(arm_cmn_hp_state); 2579 debugfs_remove(arm_cmn_debugfs); 2580 } 2581 return ret; 2582 } 2583 2584 static void __exit arm_cmn_exit(void) 2585 { 2586 platform_driver_unregister(&arm_cmn_driver); 2587 cpuhp_remove_multi_state(arm_cmn_hp_state); 2588 debugfs_remove(arm_cmn_debugfs); 2589 } 2590 2591 module_init(arm_cmn_init); 2592 module_exit(arm_cmn_exit); 2593 2594 MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>"); 2595 MODULE_DESCRIPTION("Arm CMN-600 PMU driver"); 2596 MODULE_LICENSE("GPL v2"); 2597
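/*
 * Illustrative usage, not part of the driver: the PMU registers as
 * "arm_cmn_<n>", with its events and config format described under
 * /sys/bus/event_source/devices/arm_cmn_<n>/. Task contexts are rejected
 * in arm_cmn_event_init(), so counting must be system-wide, e.g. (event
 * names and the example nodeid are platform-dependent):
 *
 *   perf stat -a -e arm_cmn_0/hnf_cache_miss/ -- sleep 1
 *   perf stat -a -e arm_cmn_0/hnf_cache_miss,bynodeid=1,nodeid=0x68/ -- sleep 1
 *
 * The first counts across every HN-F in the mesh; the second restricts the
 * count to the single node selected by nodeid.
 */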