// SPDX-License-Identifier: GPL-2.0
/*
 * CPU PMU driver for the Apple M1 and derivatives
 *
 * Copyright (C) 2021 Google LLC
 *
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Most of the information used in this driver was provided by the
 * Asahi Linux project. The rest was experimentally discovered.
 */

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

#include <asm/apple_m1_pmu.h>
#include <asm/irq_regs.h>
#include <asm/perf_event.h>

#define M1_PMU_NR_COUNTERS	10

#define M1_PMU_CFG_EVENT	GENMASK(7, 0)

#define ANY_BUT_0_1		GENMASK(9, 2)
#define ONLY_2_TO_7		GENMASK(7, 2)
#define ONLY_2_4_6		(BIT(2) | BIT(4) | BIT(6))
#define ONLY_5_6_7		(BIT(5) | BIT(6) | BIT(7))

/*
 * Description of the events we actually know about, as well as those with
 * a specific counter affinity. Yes, this is a grand total of two known
 * counters, and the rest is anybody's guess.
 *
 * Not all counters can count all events. Counters #0 and #1 are wired to
 * count cycles and instructions respectively, and some events have
 * bizarre mappings (every other counter, or even *one* counter). These
 * restrictions equally apply to both P and E cores.
 *
 * It is worth noting that the PMUs attached to P and E cores are likely
 * to be different because the underlying uarches are different. At the
 * moment, we don't really need to distinguish between the two because we
 * know next to nothing about the events themselves, and we already have
 * per cpu-type PMU abstractions.
 *
 * If we eventually find out that the events are different across
 * implementations, we'll have to introduce per cpu-type tables.
 */
enum m1_pmu_events {
	M1_PMU_PERFCTR_UNKNOWN_01	= 0x01,
	M1_PMU_PERFCTR_CPU_CYCLES	= 0x02,
	M1_PMU_PERFCTR_INSTRUCTIONS	= 0x8c,
	M1_PMU_PERFCTR_UNKNOWN_8d	= 0x8d,
	M1_PMU_PERFCTR_UNKNOWN_8e	= 0x8e,
	M1_PMU_PERFCTR_UNKNOWN_8f	= 0x8f,
	M1_PMU_PERFCTR_UNKNOWN_90	= 0x90,
	M1_PMU_PERFCTR_UNKNOWN_93	= 0x93,
	M1_PMU_PERFCTR_UNKNOWN_94	= 0x94,
	M1_PMU_PERFCTR_UNKNOWN_95	= 0x95,
	M1_PMU_PERFCTR_UNKNOWN_96	= 0x96,
	M1_PMU_PERFCTR_UNKNOWN_97	= 0x97,
	M1_PMU_PERFCTR_UNKNOWN_98	= 0x98,
	M1_PMU_PERFCTR_UNKNOWN_99	= 0x99,
	M1_PMU_PERFCTR_UNKNOWN_9a	= 0x9a,
	M1_PMU_PERFCTR_UNKNOWN_9b	= 0x9b,
	M1_PMU_PERFCTR_UNKNOWN_9c	= 0x9c,
	M1_PMU_PERFCTR_UNKNOWN_9f	= 0x9f,
	M1_PMU_PERFCTR_UNKNOWN_bf	= 0xbf,
	M1_PMU_PERFCTR_UNKNOWN_c0	= 0xc0,
	M1_PMU_PERFCTR_UNKNOWN_c1	= 0xc1,
	M1_PMU_PERFCTR_UNKNOWN_c4	= 0xc4,
	M1_PMU_PERFCTR_UNKNOWN_c5	= 0xc5,
	M1_PMU_PERFCTR_UNKNOWN_c6	= 0xc6,
	M1_PMU_PERFCTR_UNKNOWN_c8	= 0xc8,
	M1_PMU_PERFCTR_UNKNOWN_ca	= 0xca,
	M1_PMU_PERFCTR_UNKNOWN_cb	= 0xcb,
	M1_PMU_PERFCTR_UNKNOWN_f5	= 0xf5,
	M1_PMU_PERFCTR_UNKNOWN_f6	= 0xf6,
	M1_PMU_PERFCTR_UNKNOWN_f7	= 0xf7,
	M1_PMU_PERFCTR_UNKNOWN_f8	= 0xf8,
	M1_PMU_PERFCTR_UNKNOWN_fd	= 0xfd,
	M1_PMU_PERFCTR_LAST		= M1_PMU_CFG_EVENT,

	/*
	 * From this point onwards, these are not actual HW events,
	 * but attributes that get stored in hw->config_base.
	 */
	M1_PMU_CFG_COUNT_USER		= BIT(8),
	M1_PMU_CFG_COUNT_KERNEL		= BIT(9),
};

/*
 * Per-event affinity table. Most events can be installed on counter
 * 2-9, but there are a number of exceptions. Note that this table
 * has been created experimentally, and I wouldn't be surprised if more
 * counters had strange affinities.
 */
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
	[0 ... M1_PMU_PERFCTR_LAST]	= ANY_BUT_0_1,
	[M1_PMU_PERFCTR_UNKNOWN_01]	= BIT(7),
	[M1_PMU_PERFCTR_CPU_CYCLES]	= ANY_BUT_0_1 | BIT(0),
	[M1_PMU_PERFCTR_INSTRUCTIONS]	= BIT(7) | BIT(1),
	[M1_PMU_PERFCTR_UNKNOWN_8d]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_8e]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_8f]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_90]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_93]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_94]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_95]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_96]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_97]	= BIT(7),
	[M1_PMU_PERFCTR_UNKNOWN_98]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_99]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9a]	= BIT(7),
	[M1_PMU_PERFCTR_UNKNOWN_9b]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9c]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9f]	= BIT(7),
	[M1_PMU_PERFCTR_UNKNOWN_bf]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c0]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c1]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c4]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c5]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c6]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_c8]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_ca]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_cb]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_f5]	= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f6]	= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f7]	= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f8]	= ONLY_2_TO_7,
	[M1_PMU_PERFCTR_UNKNOWN_fd]	= ONLY_2_4_6,
};

static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	= M1_PMU_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	= M1_PMU_PERFCTR_INSTRUCTIONS,
	/* No idea about the rest yet */
};

/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define M1_PMU_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)

static struct attribute *m1_pmu_event_attrs[] = {
	M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
	M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
	NULL,
};

static const struct attribute_group m1_pmu_events_attr_group = {
	.name = "events",
	.attrs = m1_pmu_event_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *m1_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group m1_pmu_format_attr_group = {
	.name = "format",
	.attrs = m1_pmu_format_attrs,
};

/* Low level accessors. No synchronisation. */
#define PMU_READ_COUNTER(_idx)						\
	case _idx:	return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)

#define PMU_WRITE_COUNTER(_val, _idx)					\
	case _idx:							\
		write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1);	\
		return

static u64 m1_pmu_read_hw_counter(unsigned int index)
{
	switch (index) {
		PMU_READ_COUNTER(0);
		PMU_READ_COUNTER(1);
		PMU_READ_COUNTER(2);
		PMU_READ_COUNTER(3);
		PMU_READ_COUNTER(4);
		PMU_READ_COUNTER(5);
		PMU_READ_COUNTER(6);
		PMU_READ_COUNTER(7);
		PMU_READ_COUNTER(8);
		PMU_READ_COUNTER(9);
	}

	BUG();
}

static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
{
	switch (index) {
		PMU_WRITE_COUNTER(val, 0);
		PMU_WRITE_COUNTER(val, 1);
		PMU_WRITE_COUNTER(val, 2);
		PMU_WRITE_COUNTER(val, 3);
		PMU_WRITE_COUNTER(val, 4);
		PMU_WRITE_COUNTER(val, 5);
		PMU_WRITE_COUNTER(val, 6);
		PMU_WRITE_COUNTER(val, 7);
		PMU_WRITE_COUNTER(val, 8);
		PMU_WRITE_COUNTER(val, 9);
	}

	BUG();
}
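
/*
 * Both the counter enable bits and the PMI enable bits live in PMCR0,
 * each split into a field covering counters 0-7 and a field covering
 * counters 8-9. The helper below turns a counter index into its bit
 * position within such a field.
 */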
#define get_bit_offset(index, mask)	(__ffs(mask) + (index))

static void __m1_pmu_enable_counter(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}

static void m1_pmu_enable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, true);
}

static void m1_pmu_disable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, false);
}

static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}

static void m1_pmu_enable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, true);
}

static void m1_pmu_disable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, false);
}
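
/*
 * User/kernel filtering is programmed per counter through PMCR1: one
 * bit enables counting at EL0 and another at EL1, each set again split
 * into a 0-7 field and an 8-9 field.
 */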
static void m1_pmu_configure_counter(unsigned int index, u8 event,
				     bool user, bool kernel)
{
	u64 val, user_bit, kernel_bit;
	int shift;

	switch (index) {
	case 0 ... 7:
		user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
		kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
		break;
	case 8 ... 9:
		user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
		kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);

	if (user)
		val |= user_bit;
	else
		val &= ~user_bit;

	if (kernel)
		val |= kernel_bit;
	else
		val &= ~kernel_bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);

	/*
	 * Counters 0 and 1 have fixed events. For anything else,
	 * place the event at the expected location in the relevant
	 * register (PMESR0 holds the event configuration for counters
	 * 2-5, resp. PMESR1 for counters 6-9).
	 */
	switch (index) {
	case 0 ... 1:
		break;
	case 2 ... 5:
		shift = (index - 2) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
		break;
	case 6 ... 9:
		shift = (index - 6) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
		break;
	}
}

/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
	bool user, kernel;
	u8 evt;

	evt = event->hw.config_base & M1_PMU_CFG_EVENT;
	user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
	kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;

	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();

	m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
	m1_pmu_enable_counter(event->hw.idx);
	m1_pmu_enable_counter_interrupt(event->hw.idx);
	isb();
}

static void m1_pmu_disable_event(struct perf_event *event)
{
	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();
}
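
/*
 * PMSR reads as non-zero when one of the counters has overflowed. If
 * it reads as zero, the interrupt wasn't ours: ack it by clearing the
 * interrupt-active bit (IACT) in PMCR0 and bail out. Otherwise stop
 * the PMU, fold the counts back into the events, reprogram the
 * sampling periods, and restart.
 */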
static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	u64 overflow, state;
	int idx;

	overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
	if (!overflow) {
		/* Spurious interrupt? */
		state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
		state &= ~PMCR0_IACT;
		write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
		isb();
		return IRQ_NONE;
	}

	cpu_pmu->stop(cpu_pmu);

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		struct perf_sample_data data;

		if (!event)
			continue;

		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			m1_pmu_disable_event(event);
	}

	cpu_pmu->start(cpu_pmu);

	return IRQ_HANDLED;
}

static u64 m1_pmu_read_counter(struct perf_event *event)
{
	return m1_pmu_read_hw_counter(event->hw.idx);
}

static void m1_pmu_write_counter(struct perf_event *event, u64 value)
{
	m1_pmu_write_hw_counter(value, event->hw.idx);
	isb();
}

static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
	unsigned long affinity = m1_pmu_event_affinity[evtype];
	int idx;

	/*
	 * Place the event on the first free counter that can count
	 * this event.
	 *
	 * We could do a better job if we had a view of all the events
	 * counting on the PMU at any given time, and by placing the
	 * most constraining events first.
	 */
	for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	return -EAGAIN;
}

static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}
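
/*
 * Interrupt delivery is selected through the PMCR0.IMODE field: the
 * PMU is started by switching to FIQ delivery and stopped by setting
 * the mode to OFF. Updating the mode also clears any pending
 * interrupt-active (IACT) state.
 */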
static void __m1_pmu_set_mode(u8 mode)
{
	u64 val;

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	val &= ~(PMCR0_IMODE | PMCR0_IACT);
	val |= FIELD_PREP(PMCR0_IMODE, mode);
	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
	isb();
}

static void m1_pmu_start(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_FIQ);
}

static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_OFF);
}

static int m1_pmu_map_event(struct perf_event *event)
{
	/*
	 * Although the counters are 48bit wide, bit 47 is what
	 * triggers the overflow interrupt. Advertise the counters
	 * being 47bit wide to mimic the behaviour of the ARM PMU.
	 */
	event->hw.flags |= ARMPMU_EVT_47BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}

static void m1_pmu_reset(void *info)
{
	int i;

	__m1_pmu_set_mode(PMCR0_IMODE_OFF);

	for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
		m1_pmu_disable_counter(i);
		m1_pmu_disable_counter_interrupt(i);
		m1_pmu_write_hw_counter(0, i);
	}

	isb();
}
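
/*
 * Only EL0/EL1 filtering can be expressed in hw->config_base, so
 * events that don't exclude the guest are rejected.
 */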
static int m1_pmu_set_event_filter(struct hw_perf_event *event,
				   struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (!attr->exclude_guest)
		return -EINVAL;
	if (!attr->exclude_kernel)
		config_base |= M1_PMU_CFG_COUNT_KERNEL;
	if (!attr->exclude_user)
		config_base |= M1_PMU_CFG_COUNT_USER;

	event->config_base = config_base;

	return 0;
}

static int m1_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	  = m1_pmu_handle_irq;
	cpu_pmu->enable		  = m1_pmu_enable_event;
	cpu_pmu->disable	  = m1_pmu_disable_event;
	cpu_pmu->read_counter	  = m1_pmu_read_counter;
	cpu_pmu->write_counter	  = m1_pmu_write_counter;
	cpu_pmu->get_event_idx	  = m1_pmu_get_event_idx;
	cpu_pmu->clear_event_idx  = m1_pmu_clear_event_idx;
	cpu_pmu->start		  = m1_pmu_start;
	cpu_pmu->stop		  = m1_pmu_stop;
	cpu_pmu->map_event	  = m1_pmu_map_event;
	cpu_pmu->reset		  = m1_pmu_reset;
	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;

	cpu_pmu->num_events	  = M1_PMU_NR_COUNTERS;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
	return 0;
}

/* Device driver gunk */
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_icestorm_pmu";
	return m1_pmu_init(cpu_pmu);
}

static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_firestorm_pmu";
	return m1_pmu_init(cpu_pmu);
}

static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_avalanche_pmu";
	return m1_pmu_init(cpu_pmu);
}

static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_blizzard_pmu";
	return m1_pmu_init(cpu_pmu);
}

static const struct of_device_id m1_pmu_of_device_ids[] = {
	{ .compatible = "apple,avalanche-pmu",	.data = m2_pmu_avalanche_init, },
	{ .compatible = "apple,blizzard-pmu",	.data = m2_pmu_blizzard_init, },
	{ .compatible = "apple,icestorm-pmu",	.data = m1_pmu_ice_init, },
	{ .compatible = "apple,firestorm-pmu",	.data = m1_pmu_fire_init, },
	{ },
};
MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);

static int m1_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
}

static struct platform_driver m1_pmu_driver = {
	.driver = {
		.name			= "apple-m1-cpu-pmu",
		.of_match_table		= m1_pmu_of_device_ids,
		.suppress_bind_attrs	= true,
	},
	.probe = m1_pmu_device_probe,
};

module_platform_driver(m1_pmu_driver);