// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * APM X-Gene SoC PMU (Performance Monitor Unit)
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Hoan Tran <hotran@apm.com>
 *         Tai Nguyen <ttnguyen@apm.com>
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define CSW_CSWCR				0x0000
#define  CSW_CSWCR_DUALMCB_MASK			BIT(0)
#define  CSW_CSWCR_MCB0_ROUTING(x)		(((x) & 0x0C) >> 2)
#define  CSW_CSWCR_MCB1_ROUTING(x)		(((x) & 0x30) >> 4)
#define MCBADDRMR				0x0000
#define  MCBADDRMR_DUALMCU_MODE_MASK		BIT(2)

#define PCPPMU_INTSTATUS_REG	0x000
#define PCPPMU_INTMASK_REG	0x004
#define  PCPPMU_INTMASK		0x0000000F
#define  PCPPMU_INTENMASK	0xFFFFFFFF
#define  PCPPMU_INTCLRMASK	0xFFFFFFF0
#define  PCPPMU_INT_MCU		BIT(0)
#define  PCPPMU_INT_MCB		BIT(1)
#define  PCPPMU_INT_L3C		BIT(2)
#define  PCPPMU_INT_IOB		BIT(3)

#define  PCPPMU_V3_INTMASK	0x00FF33FF
#define  PCPPMU_V3_INTENMASK	0xFFFFFFFF
#define  PCPPMU_V3_INTCLRMASK	0xFF00CC00
#define  PCPPMU_V3_INT_MCU	0x000000FF
#define  PCPPMU_V3_INT_MCB	0x00000300
#define  PCPPMU_V3_INT_L3C	0x00FF0000
#define  PCPPMU_V3_INT_IOB	0x00003000

#define PMU_MAX_COUNTERS	4
#define PMU_CNT_MAX_PERIOD	0xFFFFFFFFULL
#define PMU_V3_CNT_MAX_PERIOD	0xFFFFFFFFFFFFFFFFULL
#define PMU_OVERFLOW_MASK	0xF
#define PMU_PMCR_E		BIT(0)
#define PMU_PMCR_P		BIT(1)

#define PMU_PMEVCNTR0		0x000
#define PMU_PMEVCNTR1		0x004
#define PMU_PMEVCNTR2		0x008
#define PMU_PMEVCNTR3		0x00C
#define PMU_PMEVTYPER0		0x400
#define PMU_PMEVTYPER1		0x404
#define PMU_PMEVTYPER2		0x408
#define PMU_PMEVTYPER3		0x40C
#define PMU_PMAMR0		0xA00
#define PMU_PMAMR1		0xA04
#define PMU_PMCNTENSET		0xC00
#define PMU_PMCNTENCLR		0xC20
#define PMU_PMINTENSET		0xC40
#define PMU_PMINTENCLR		0xC60
#define PMU_PMOVSR		0xC80
#define PMU_PMCR		0xE04

/* PMU registers for V3 */
#define PMU_PMOVSCLR		0xC80
#define PMU_PMOVSSET		0xCC0

#define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev)      (ev->hw.idx)
#define GET_EVENTID(ev)   (ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev)   (ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev)  ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)

struct hw_pmu_info {
	u32 type;
	u32 enable_mask;
	void __iomem *csr;
};

struct xgene_pmu_dev {
	struct hw_pmu_info *inf;
	struct xgene_pmu *parent;
	struct pmu pmu;
	u8 max_counters;
	DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
	u64 max_period;
	const struct attribute_group **attr_groups;
	struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};

struct xgene_pmu_ops {
	void (*mask_int)(struct xgene_pmu *pmu);
	void (*unmask_int)(struct xgene_pmu *pmu);
	u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
	void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
	void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
	void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
};

struct xgene_pmu {
	struct device *dev;
	struct hlist_node node;
	int version;
	void __iomem *pcppmu_csr;
	u32 mcb_active_mask;
	u32 mc_active_mask;
	u32 l3c_active_mask;
	cpumask_t cpu;
	int irq;
	raw_spinlock_t lock;
	const struct xgene_pmu_ops *ops;
	struct list_head l3cpmus;
	struct list_head iobpmus;
	struct list_head mcbpmus;
	struct list_head mcpmus;
};

struct xgene_pmu_dev_ctx {
	char *name;
	struct list_head next;
	struct xgene_pmu_dev *pmu_dev;
	struct hw_pmu_info inf;
};

struct xgene_pmu_data {
	int id;
	u32 data;
};

enum xgene_pmu_version {
	PCP_PMU_V1 = 1,
	PCP_PMU_V2,
	PCP_PMU_V3,
};

enum xgene_pmu_dev_type {
	PMU_TYPE_L3C = 0,
	PMU_TYPE_IOB,
	PMU_TYPE_IOB_SLOW,
	PMU_TYPE_MCB,
	PMU_TYPE_MC,
};

/*
 * sysfs format attributes
 */
#define XGENE_PMU_FORMAT_ATTR(_name, _config)		\
	(&((struct dev_ext_attribute[]) {		\
		{ .attr = __ATTR(_name, S_IRUGO, device_show_string, NULL), \
		  .var = (void *) _config, }		\
	})[0].attr.attr)

static struct attribute *l3c_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *iob_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
	NULL,
};

static struct attribute *mcb_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
	XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *mc_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
	NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group iob_pmu_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_format_attrs,
};

static const struct attribute_group mcb_pmu_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_format_attrs,
};

static const struct attribute_group mc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_format_attrs,
};

static struct attribute *l3c_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
	NULL,
};

static struct attribute *iob_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
	NULL,
};

static struct attribute *mcb_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
	NULL,
};

static struct attribute *mc_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_v3_format_attrs,
};

static const struct attribute_group iob_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_v3_format_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_slow_pmu_v3_format_attrs,
};

static const struct attribute_group mcb_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_v3_format_attrs,
};

static const struct attribute_group mc_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_v3_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t xgene_pmu_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
}

#define XGENE_PMU_EVENT_ATTR(_name, _config)	\
	PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config)

static struct attribute *l3c_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
	XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
	XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
	XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
	XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
	NULL,
};

static struct attribute *iob_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
	XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
	XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
	XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
	XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
	XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
	XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
	XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
	XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
	XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
	XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
	NULL,
};

static struct attribute *mcb_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
	XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
	XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
	XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
	NULL,
};

static struct attribute *mc_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
	XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
	XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
	XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
	XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
	XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
	XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
	XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
	XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
	XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
	XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
	XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
	NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group iob_pmu_events_attr_group = {
	.name = "events",
	.attrs = iob_pmu_events_attrs,
};

static const struct attribute_group mcb_pmu_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_events_attrs,
};

static const struct attribute_group mc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_events_attrs,
};

static struct attribute *l3c_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
	XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
	XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
	XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
	XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
	XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
	XGENE_PMU_EVENT_ATTR(read, 0x08),
	XGENE_PMU_EVENT_ATTR(write, 0x09),
	XGENE_PMU_EVENT_ATTR(request, 0x0a),
	XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
	XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
	XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
	XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
	XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
	XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
	XGENE_PMU_EVENT_ATTR(egression, 0x1b),
	XGENE_PMU_EVENT_ATTR(replacement, 0x1c),
	XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
	XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
	XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
	XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
	XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
	XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
	XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
	XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
	XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
	XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
	XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
	NULL,
};

static struct attribute *iob_fast_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
	XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
	XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
	XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
	XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
	XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
	XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
	XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
	XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
	XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
	XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
	XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
	XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
	XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
	NULL,
};

static struct attribute *mcb_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
	XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
	XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
	XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
	XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
	XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
	XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
	XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
	XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
	XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
	XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
	XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
	XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
	XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
	XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
	XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
	XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
	XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
	XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
	XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
	NULL,
};

static struct attribute *mc_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
	XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
	XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
	XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
	XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
	XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
	XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
	XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
	XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
	XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
	XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
	XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
	XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
	XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
	XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
	XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
	XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
	XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
	XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
	XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
	XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
	XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
	XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
	XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
	XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
	XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
	XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
	XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
	XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
	XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
	XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
	XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
	XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_v3_events_attrs,
};

static const struct attribute_group iob_fast_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_fast_pmu_v3_events_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_slow_pmu_v3_events_attrs,
};

static const struct attribute_group mcb_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_v3_events_attrs,
};

static const struct attribute_group mc_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_v3_events_attrs,
};

/*
 * sysfs cpumask attributes
 */
static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *xgene_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
	.attrs = xgene_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups of PMU v1 and v2
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
	&l3c_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *iob_pmu_attr_groups[] = {
	&iob_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_attr_groups[] = {
	&mcb_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_attr_groups[] = {
	&mc_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_events_attr_group,
	NULL
};

/*
 * Per PMU device attribute groups of PMU v3
 */
static const struct attribute_group *l3c_pmu_v3_attr_groups[] = {
	&l3c_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = {
	&iob_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_fast_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = {
	&iob_slow_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_slow_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_v3_attr_groups[] = {
	&mcb_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_v3_attr_groups[] = {
	&mc_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_v3_events_attr_group,
	NULL
};

static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
	int cntr;

	cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
				   pmu_dev->max_counters);
	if (cntr == pmu_dev->max_counters)
		return -ENOSPC;
	set_bit(cntr, pmu_dev->cntr_assign_mask);

	return cntr;
}

static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
	clear_bit(cntr, pmu_dev->cntr_assign_mask);
}

static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

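/*
 * The v3 PCP interrupt controller uses a different mask layout (see the
 * PCPPMU_V3_INT* definitions above), so v3 gets its own mask/unmask helpers;
 * the per-version variant is selected at probe time via struct xgene_pmu_ops.
 */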
static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTCLRMASK,
	       xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	u32 lo, hi;

	/*
	 * v3 has 64-bit counter registers composed of two 32-bit registers.
	 * This can be a problem if the counter increases and carries
	 * out of bit [31] between the two reads. The extra reads help
	 * to prevent this issue.
	 */
	do {
		hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1);
		lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx);
	} while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1));

	return (((u64)hi << 32) | lo);
}

static inline void
xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	u32 cnt_lo, cnt_hi;

	cnt_hi = upper_32_bits(val);
	cnt_lo = lower_32_bits(val);

	/* v3 has 64-bit counter registers composed of two 32-bit registers */
	xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo);
	xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi);
}

static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}

static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}

static inline void
xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}

static inline void
xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}

static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}

static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}

static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}

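/*
 * The helpers below act on the per-PMU control register (PMU_PMCR): setting
 * PMU_PMCR_P resets the event counters, while PMU_PMCR_E starts and stops
 * counting for the whole PMU instance.
 */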
static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_P;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val &= ~PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static void xgene_perf_pmu_enable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask,
				     pmu_dev->max_counters);

	if (!enabled)
		return;

	xgene_pmu->ops->start_counters(pmu_dev);
}

static void xgene_perf_pmu_disable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->stop_counters(pmu_dev);
}

static int xgene_perf_event_init(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct perf_event *sibling;

	/* Test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * SOC PMU counters are shared across all cores.
	 * Therefore, it does not support per-process mode.
	 * Also, it does not support event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;
	/*
	 * Many perf core operations (eg. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor (the one described in the "cpumask" attribute).
	 */
	event->cpu = cpumask_first(&pmu_dev->parent->cpu);

	hw->config = event->attr.config;
	/*
	 * Each bit of the config1 field represents an agent from which the
	 * request of the event comes. The event is counted only if it's
	 * caused by a request of an agent that has the bit cleared.
	 * By default, the event is counted for all agents.
	 */
	hw->config_base = event->attr.config1;

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	return 0;
}

static void xgene_perf_enable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
				      GET_EVENTID(event));
	xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
	if (pmu_dev->inf->type == PMU_TYPE_IOB)
		xgene_pmu->ops->write_agent1msk(pmu_dev,
						~((u32)GET_AGENT1ID(event)));

	xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_disable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_event_set_period(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	/*
	 * For a 32-bit counter, it has a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully, we can handle the interrupt before another
	 * 2^31 events occur and the counter overtakes its previous value.
	 * For a 64-bit counter, we don't expect it to overflow.
	 */
	u64 val = 1ULL << 31;

	local64_set(&hw->prev_count, val);
	xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
}

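/*
 * Fold the hardware counter delta into the perf event count. The cmpxchg
 * retry loop tolerates a concurrent update of prev_count, for example from
 * the overflow interrupt path, which also calls this function.
 */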
static void xgene_perf_event_update(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hw->prev_count);
	new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));

	if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;

	local64_add(delta, &event->count);
}

static void xgene_perf_read(struct perf_event *event)
{
	xgene_perf_event_update(event);
}

static void xgene_perf_start(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
	hw->state = 0;

	xgene_perf_event_set_period(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hw->prev_count);

		xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
					      prev_raw_count);
	}

	xgene_perf_enable_event(event);
	perf_event_update_userpage(event);
}

static void xgene_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_disable_event(event);
	WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
	hw->state |= PERF_HES_STOPPED;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_read(event);
	hw->state |= PERF_HES_UPTODATE;
}

static int xgene_perf_add(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Allocate an event counter */
	hw->idx = get_next_avail_cntr(pmu_dev);
	if (hw->idx < 0)
		return -EAGAIN;

	/* Update counter event pointer for Interrupt handler */
	pmu_dev->pmu_counter_event[hw->idx] = event;

	if (flags & PERF_EF_START)
		xgene_perf_start(event, PERF_EF_RELOAD);

	return 0;
}

static void xgene_perf_del(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	xgene_perf_stop(event, PERF_EF_UPDATE);

	/* clear the assigned counter */
	clear_avail_cntr(pmu_dev, GET_CNTR(event));

	perf_event_update_userpage(event);
	pmu_dev->pmu_counter_event[hw->idx] = NULL;
}

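/*
 * Register one PMU instance with the perf core. Once registered, the
 * instance is visible under /sys/bus/event_source/devices/<name> (for
 * example "l3c0" or "mc0", from xgene_pmu_dev_name()) and its named events
 * can be used with the perf tool, e.g.:
 *   perf stat -a -e l3c0/read-miss/ sleep 1
 */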
static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
	struct xgene_pmu *xgene_pmu;

	if (pmu_dev->parent->version == PCP_PMU_V3)
		pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD;
	else
		pmu_dev->max_period = PMU_CNT_MAX_PERIOD;
	/* First version PMU supports only single event counter */
	xgene_pmu = pmu_dev->parent;
	if (xgene_pmu->version == PCP_PMU_V1)
		pmu_dev->max_counters = 1;
	else
		pmu_dev->max_counters = PMU_MAX_COUNTERS;

	/* Perf driver registration */
	pmu_dev->pmu = (struct pmu) {
		.parent		= pmu_dev->parent->dev,
		.attr_groups	= pmu_dev->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= xgene_perf_pmu_enable,
		.pmu_disable	= xgene_perf_pmu_disable,
		.event_init	= xgene_perf_event_init,
		.add		= xgene_perf_add,
		.del		= xgene_perf_del,
		.start		= xgene_perf_start,
		.stop		= xgene_perf_stop,
		.read		= xgene_perf_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	/* Hardware counter init */
	xgene_pmu->ops->stop_counters(pmu_dev);
	xgene_pmu->ops->reset_counters(pmu_dev);

	return perf_pmu_register(&pmu_dev->pmu, name, -1);
}

static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev *pmu;

	pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;
	pmu->parent = xgene_pmu;
	pmu->inf = &ctx->inf;
	ctx->pmu_dev = pmu;

	switch (pmu->inf->type) {
	case PMU_TYPE_L3C:
		if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = l3c_pmu_v3_attr_groups;
		else
			pmu->attr_groups = l3c_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
		else
			pmu->attr_groups = iob_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB_SLOW:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
		break;
	case PMU_TYPE_MCB:
		if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mcb_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mcb_pmu_attr_groups;
		break;
	case PMU_TYPE_MC:
		if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mc_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mc_pmu_attr_groups;
		break;
	default:
		return -EINVAL;
	}

	if (xgene_init_perf(pmu, ctx->name)) {
		dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
		return -ENODEV;
	}

	dev_info(dev, "%s PMU registered\n", ctx->name);

	return 0;
}

static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	void __iomem *csr = pmu_dev->inf->csr;
	u32 pmovsr;
	int idx;

	xgene_pmu->ops->stop_counters(pmu_dev);

	if (xgene_pmu->version == PCP_PMU_V3)
		pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK;
	else
		pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;

	if (!pmovsr)
		goto out;

	/* Clear interrupt flag */
	if (xgene_pmu->version == PCP_PMU_V1)
		writel(0x0, csr + PMU_PMOVSR);
	else if (xgene_pmu->version == PCP_PMU_V2)
		writel(pmovsr, csr + PMU_PMOVSR);
	else
		writel(pmovsr, csr + PMU_PMOVSCLR);

	for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
		struct perf_event *event = pmu_dev->pmu_counter_event[idx];
		int overflowed = pmovsr & BIT(idx);

		/* Ignore if we don't have an event. */
		if (!event || !overflowed)
			continue;
		xgene_perf_event_update(event);
		xgene_perf_event_set_period(event);
	}

out:
	xgene_pmu->ops->start_counters(pmu_dev);
}

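/*
 * Top-level PCP PMU interrupt handler: the interrupt status register
 * indicates which class of PMU (MCU, MCB, L3C or IOB) raised the overflow,
 * and every registered instance of that class is then serviced.
 */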
static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
	u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
	struct xgene_pmu_dev_ctx *ctx;
	struct xgene_pmu *xgene_pmu = dev_id;
	u32 val;

	raw_spin_lock(&xgene_pmu->lock);

	/* Get Interrupt PMU source */
	val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
	if (xgene_pmu->version == PCP_PMU_V3) {
		intr_mcu = PCPPMU_V3_INT_MCU;
		intr_mcb = PCPPMU_V3_INT_MCB;
		intr_l3c = PCPPMU_V3_INT_L3C;
		intr_iob = PCPPMU_V3_INT_IOB;
	} else {
		intr_mcu = PCPPMU_INT_MCU;
		intr_mcb = PCPPMU_INT_MCB;
		intr_l3c = PCPPMU_INT_L3C;
		intr_iob = PCPPMU_INT_IOB;
	}
	if (val & intr_mcu) {
		list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_mcb) {
		list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_l3c) {
		list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_iob) {
		list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}

	raw_spin_unlock(&xgene_pmu->lock);

	return IRQ_HANDLED;
}

static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					     struct platform_device *pdev)
{
	void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
	unsigned int reg;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	mcba_csr = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(mcba_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
		return PTR_ERR(mcba_csr);
	}

	mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(mcbb_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
		return PTR_ERR(mcbb_csr);
	}

	xgene_pmu->l3c_active_mask = 0x1;

	reg = readl(csw_csr + CSW_CSWCR);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		reg = readl(mcbb_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		reg = readl(mcba_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

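/*
 * For v3 hardware the active MCU mask is derived from the MCB routing mode
 * read from CSW_CSWCR: routing 0x2 implies four MCUs per MCB, 0x1 implies
 * two, anything else one (mc_active_mask carries one bit per MCU).
 */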
static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
						struct platform_device *pdev)
{
	void __iomem *csw_csr;
	unsigned int reg;
	u32 mcb0routing;
	u32 mcb1routing;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	reg = readl(csw_csr + CSW_CSWCR);
	mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg);
	mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active L3C(s), maximum is 8 */
		xgene_pmu->l3c_active_mask = 0xFF;
		/* Probe all active MC(s), maximum is 8 */
		if ((mcb0routing == 0x2) && (mcb1routing == 0x2))
			xgene_pmu->mc_active_mask = 0xFF;
		else if ((mcb0routing == 0x1) && (mcb1routing == 0x1))
			xgene_pmu->mc_active_mask = 0x33;
		else
			xgene_pmu->mc_active_mask = 0x11;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active L3C(s), maximum is 4 */
		xgene_pmu->l3c_active_mask = 0x0F;
		/* Probe all active MC(s), maximum is 4 */
		if (mcb0routing == 0x2)
			xgene_pmu->mc_active_mask = 0x0F;
		else if (mcb0routing == 0x1)
			xgene_pmu->mc_active_mask = 0x03;
		else
			xgene_pmu->mc_active_mask = 0x01;
	}

	return 0;
}

static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					    struct platform_device *pdev)
{
	struct regmap *csw_map, *mcba_map, *mcbb_map;
	struct device_node *np = pdev->dev.of_node;
	unsigned int reg;

	csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
	if (IS_ERR(csw_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
		return PTR_ERR(csw_map);
	}

	mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
	if (IS_ERR(mcba_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
		return PTR_ERR(mcba_map);
	}

	mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
	if (IS_ERR(mcbb_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
		return PTR_ERR(mcbb_map);
	}

	xgene_pmu->l3c_active_mask = 0x1;
	if (regmap_read(csw_map, CSW_CSWCR, &reg))
		return -EINVAL;

	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		if (regmap_read(mcbb_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		if (regmap_read(mcba_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					      struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev)) {
		if (xgene_pmu->version == PCP_PMU_V3)
			return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu,
								    pdev);
		else
			return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu,
								 pdev);
	}
	return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
}

static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
	switch (type) {
	case PMU_TYPE_L3C:
		return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
	case PMU_TYPE_IOB:
		return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
	case PMU_TYPE_IOB_SLOW:
		return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
	case PMU_TYPE_MCB:
		return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
	case PMU_TYPE_MC:
		return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
	default:
		return devm_kasprintf(dev, GFP_KERNEL, "unknown");
	}
}

#if defined(CONFIG_ACPI)
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
					struct acpi_device *adev, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct list_head resource_list;
	struct xgene_pmu_dev_ctx *ctx;
	const union acpi_object *obj;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	struct resource_entry *rentry;
	int enable_bit;
	int rc;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&resource_list);
	rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (rc <= 0) {
		dev_err(dev, "PMU type %d: No resources found\n", type);
		return NULL;
	}

	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			res = *rentry->res;
			rentry = NULL;
			break;
		}
	}
	acpi_dev_free_resource_list(&resource_list);

	if (rentry) {
		dev_err(dev, "PMU type %d: No memory resource found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	rc = acpi_dev_get_property(adev, "enable-bit-index",
				   ACPI_TYPE_INTEGER, &obj);
	if (rc < 0)
		enable_bit = 0;
	else
		enable_bit = (int) obj->integer.value;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}
	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
	{"APMC0D5D", PMU_TYPE_L3C},
	{"APMC0D5E", PMU_TYPE_IOB},
	{"APMC0D5F", PMU_TYPE_MCB},
	{"APMC0D60", PMU_TYPE_MC},
	{"APMC0D84", PMU_TYPE_L3C},
	{"APMC0D85", PMU_TYPE_IOB},
	{"APMC0D86", PMU_TYPE_IOB_SLOW},
	{"APMC0D87", PMU_TYPE_MCB},
	{"APMC0D88", PMU_TYPE_MC},
	{},
};

static const struct acpi_device_id *xgene_pmu_acpi_match_type(
					const struct acpi_device_id *ids,
					struct acpi_device *adev)
{
	const struct acpi_device_id *match_id = NULL;
	const struct acpi_device_id *id;

	for (id = ids; id->id[0] || id->cls; id++) {
		if (!acpi_match_device_ids(adev, id))
			match_id = id;
		else if (match_id)
			break;
	}

	return match_id;
}

static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
				    void *data, void **return_value)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	const struct acpi_device_id *acpi_id;
	struct xgene_pmu *xgene_pmu = data;
	struct xgene_pmu_dev_ctx *ctx;

	if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
	if (!acpi_id)
		return AE_OK;

	ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data);
	if (!ctx)
		return AE_OK;

	if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
		/* Can't add the PMU device, skip it */
		devm_kfree(xgene_pmu->dev, ctx);
		return AE_OK;
	}

	switch (ctx->inf.type) {
	case PMU_TYPE_L3C:
		list_add(&ctx->next, &xgene_pmu->l3cpmus);
		break;
	case PMU_TYPE_IOB:
		list_add(&ctx->next, &xgene_pmu->iobpmus);
		break;
	case PMU_TYPE_IOB_SLOW:
		list_add(&ctx->next, &xgene_pmu->iobpmus);
		break;
	case PMU_TYPE_MCB:
		list_add(&ctx->next, &xgene_pmu->mcbpmus);
		break;
	case PMU_TYPE_MC:
		list_add(&ctx->next, &xgene_pmu->mcpmus);
		break;
	}
	return AE_OK;
}

static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				  struct platform_device *pdev)
{
	struct device *dev = xgene_pmu->dev;
	acpi_handle handle;
	acpi_status status;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -EINVAL;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to probe PMU devices\n");
		return -ENODEV;
	}

	return 0;
}
#else
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				  struct platform_device *pdev)
{
	return 0;
}
#endif

static struct
xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
				      struct device_node *np, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev_ctx *ctx;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	int enable_bit;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	if (of_address_to_resource(np, 0, &res) < 0) {
		dev_err(dev, "PMU type %d: No resource address found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
		enable_bit = 0;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}

	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				 struct platform_device *pdev)
{
	struct xgene_pmu_dev_ctx *ctx;
	struct device_node *np;

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_available(np))
			continue;

		if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
		else
			ctx = NULL;

		if (!ctx)
			continue;

		if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
			/* Can't add the PMU device, skip it */
			devm_kfree(xgene_pmu->dev, ctx);
			continue;
		}

		switch (ctx->inf.type) {
		case PMU_TYPE_L3C:
			list_add(&ctx->next, &xgene_pmu->l3cpmus);
			break;
		case PMU_TYPE_IOB:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_IOB_SLOW:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_MCB:
			list_add(&ctx->next, &xgene_pmu->mcbpmus);
			break;
		case PMU_TYPE_MC:
			list_add(&ctx->next, &xgene_pmu->mcpmus);
			break;
		}
	}

	return 0;
}

static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				   struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev))
		return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
	return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}

static const struct xgene_pmu_data xgene_pmu_data = {
	.id = PCP_PMU_V1,
};

static const struct xgene_pmu_data xgene_pmu_v2_data = {
	.id = PCP_PMU_V2,
};

#ifdef CONFIG_ACPI
static const struct xgene_pmu_data xgene_pmu_v3_data = {
	.id = PCP_PMU_V3,
};
#endif

static const struct xgene_pmu_ops xgene_pmu_ops = {
	.mask_int = xgene_pmu_mask_int,
	.unmask_int = xgene_pmu_unmask_int,
	.read_counter = xgene_pmu_read_counter32,
	.write_counter = xgene_pmu_write_counter32,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_write_agentmsk,
	.write_agent1msk = xgene_pmu_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

static const struct xgene_pmu_ops xgene_pmu_v3_ops = {
	.mask_int = xgene_pmu_v3_mask_int,
	.unmask_int = xgene_pmu_v3_unmask_int,
	.read_counter = xgene_pmu_read_counter64,
	.write_counter = xgene_pmu_write_counter64,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_v3_write_agentmsk,
	.write_agent1msk = xgene_pmu_v3_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

static const struct of_device_id xgene_pmu_of_match[] = {
	{ .compatible = "apm,xgene-pmu",	.data = &xgene_pmu_data },
	{ .compatible = "apm,xgene-pmu-v2",	.data = &xgene_pmu_v2_data },
	{},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
	{"APMC0D5B", (kernel_ulong_t)&xgene_pmu_data},
	{"APMC0D5C", (kernel_ulong_t)&xgene_pmu_v2_data},
	{"APMC0D83", (kernel_ulong_t)&xgene_pmu_v3_data},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif

static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);

	if (cpumask_empty(&xgene_pmu->cpu))
		cpumask_set_cpu(cpu, &xgene_pmu->cpu);

	/* Overflow interrupt also should use the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);
	struct xgene_pmu_dev_ctx *ctx;
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
		return 0;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}

	cpumask_set_cpu(target, &xgene_pmu->cpu);
	/* Overflow interrupt also should use the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

static int xgene_pmu_probe(struct platform_device *pdev)
{
	const struct xgene_pmu_data *dev_data;
	struct xgene_pmu *xgene_pmu;
	int irq, rc;
	int version;

	/* Install a hook to update the reader CPU in case it goes offline */
	rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				     "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
				     xgene_pmu_online_cpu,
				     xgene_pmu_offline_cpu);
	if (rc)
		return rc;

	xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
	if (!xgene_pmu)
		return -ENOMEM;
	xgene_pmu->dev = &pdev->dev;
	platform_set_drvdata(pdev, xgene_pmu);

	dev_data = device_get_match_data(&pdev->dev);
	if (!dev_data)
		return -ENODEV;
	version = dev_data->id;

	if (version == PCP_PMU_V3)
		xgene_pmu->ops = &xgene_pmu_v3_ops;
	else
		xgene_pmu->ops = &xgene_pmu_ops;

	INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
	INIT_LIST_HEAD(&xgene_pmu->iobpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcpmus);

	xgene_pmu->version = version;
	dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);

	xgene_pmu->pcppmu_csr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xgene_pmu->pcppmu_csr)) {
		dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
resource\n"); 1869 return PTR_ERR(xgene_pmu->pcppmu_csr); 1870 } 1871 1872 irq = platform_get_irq(pdev, 0); 1873 if (irq < 0) 1874 return -EINVAL; 1875 1876 rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr, 1877 IRQF_NOBALANCING | IRQF_NO_THREAD, 1878 dev_name(&pdev->dev), xgene_pmu); 1879 if (rc) { 1880 dev_err(&pdev->dev, "Could not request IRQ %d\n", irq); 1881 return rc; 1882 } 1883 1884 xgene_pmu->irq = irq; 1885 1886 raw_spin_lock_init(&xgene_pmu->lock); 1887 1888 /* Check for active MCBs and MCUs */ 1889 rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev); 1890 if (rc) { 1891 dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n"); 1892 xgene_pmu->mcb_active_mask = 0x1; 1893 xgene_pmu->mc_active_mask = 0x1; 1894 } 1895 1896 /* Add this instance to the list used by the hotplug callback */ 1897 rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, 1898 &xgene_pmu->node); 1899 if (rc) { 1900 dev_err(&pdev->dev, "Error %d registering hotplug", rc); 1901 return rc; 1902 } 1903 1904 /* Walk through the tree for all PMU perf devices */ 1905 rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev); 1906 if (rc) { 1907 dev_err(&pdev->dev, "No PMU perf devices found!\n"); 1908 goto out_unregister; 1909 } 1910 1911 /* Enable interrupt */ 1912 xgene_pmu->ops->unmask_int(xgene_pmu); 1913 1914 return 0; 1915 1916 out_unregister: 1917 cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, 1918 &xgene_pmu->node); 1919 return rc; 1920 } 1921 1922 static void 1923 xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus) 1924 { 1925 struct xgene_pmu_dev_ctx *ctx; 1926 1927 list_for_each_entry(ctx, pmus, next) { 1928 perf_pmu_unregister(&ctx->pmu_dev->pmu); 1929 } 1930 } 1931 1932 static void xgene_pmu_remove(struct platform_device *pdev) 1933 { 1934 struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev); 1935 1936 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus); 1937 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus); 1938 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus); 1939 xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus); 1940 cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, 1941 &xgene_pmu->node); 1942 } 1943 1944 static struct platform_driver xgene_pmu_driver = { 1945 .probe = xgene_pmu_probe, 1946 .remove_new = xgene_pmu_remove, 1947 .driver = { 1948 .name = "xgene-pmu", 1949 .of_match_table = xgene_pmu_of_match, 1950 .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), 1951 .suppress_bind_attrs = true, 1952 }, 1953 }; 1954 1955 builtin_platform_driver(xgene_pmu_driver); 1956