// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC MN uncore Hardware event counters support
 *
 * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
 */
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>

#include "hisi_uncore_pmu.h"

/* Dynamic CPU hotplug state used by MN PMU */
static enum cpuhp_state hisi_mn_pmu_online;

/* MN register definition */
#define HISI_MN_DYNAMIC_CTRL_REG	0x400
#define HISI_MN_DYNAMIC_CTRL_EN		BIT(0)
#define HISI_MN_PERF_CTRL_REG		0x408
#define HISI_MN_PERF_CTRL_EN		BIT(6)
#define HISI_MN_INT_MASK_REG		0x800
#define HISI_MN_INT_STATUS_REG		0x808
#define HISI_MN_INT_CLEAR_REG		0x80C
#define HISI_MN_EVENT_CTRL_REG		0x1C00
#define HISI_MN_VERSION_REG		0x1C04
#define HISI_MN_EVTYPE0_REG		0x1D00
#define HISI_MN_EVTYPE_MASK		GENMASK(7, 0)
#define HISI_MN_CNTR0_REG		0x1E00
#define HISI_MN_EVTYPE_REGn(evtype0, n)	((evtype0) + (n) * 4)
#define HISI_MN_CNTR_REGn(cntr0, n)	((cntr0) + (n) * 8)

#define HISI_MN_NR_COUNTERS		4
#define HISI_MN_TIMEOUT_US		500U
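
/*
 * Counter registers sit 8 bytes apart and are accessed with
 * readq()/writeq(); per the v1 dev_info below, only the low 48 bits
 * (counter_bits) hold a valid count.
 */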

struct hisi_mn_pmu_regs {
	u32 version;
	u32 dyn_ctrl;
	u32 perf_ctrl;
	u32 int_mask;
	u32 int_clear;
	u32 int_status;
	u32 event_ctrl;
	u32 event_type0;
	u32 event_cntr0;
};
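
/*
 * Register offsets are carried in dev_info->private (see
 * hisi_mn_v1_pmu_regs below) so the accessors stay layout-agnostic
 * across hardware revisions.
 */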

/*
 * Each event request takes a certain amount of time to complete. If
 * we are counting a latency-related event, we must wait for all
 * outstanding requests to complete; otherwise the counter value is
 * slightly too large.
 */
static void hisi_mn_pmu_counter_flush(struct hisi_pmu *mn_pmu)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	int ret;
	u32 val;

	val = readl(mn_pmu->base + reg_info->dyn_ctrl);
	val |= HISI_MN_DYNAMIC_CTRL_EN;
	writel(val, mn_pmu->base + reg_info->dyn_ctrl);
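
	/*
	 * The enable bit is expected to self-clear once outstanding
	 * requests have drained, so poll until it reads back as zero.
	 */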
	ret = readl_poll_timeout_atomic(mn_pmu->base + reg_info->dyn_ctrl,
					val, !(val & HISI_MN_DYNAMIC_CTRL_EN),
					1, HISI_MN_TIMEOUT_US);
	if (ret)
		dev_warn(mn_pmu->dev, "Counter flush timeout\n");
}

static u64 hisi_mn_pmu_read_counter(struct hisi_pmu *mn_pmu,
				    struct hw_perf_event *hwc)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;

	return readq(mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
}

static void hisi_mn_pmu_write_counter(struct hisi_pmu *mn_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;

	writeq(val, mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
}

static void hisi_mn_pmu_write_evtype(struct hisi_pmu *mn_pmu, int idx, u32 type)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	/*
	 * Select the appropriate event select register. Each 32-bit
	 * register holds four 8-bit event codes, so counter idx maps
	 * to register idx / 4 at byte lane idx % 4.
	 */
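	/*
	 * For example, idx == 2 selects register 0 and, assuming the
	 * shared HISI_PMU_EVTYPE_SHIFT(idx) helper expands to
	 * (idx % 4) * 8, the event code lands in bits [23:16].
	 */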
	val = readl(mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
	val &= ~(HISI_MN_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
	val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
	writel(val, mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
}

static void hisi_mn_pmu_start_counters(struct hisi_pmu *mn_pmu)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	val = readl(mn_pmu->base + reg_info->perf_ctrl);
	val |= HISI_MN_PERF_CTRL_EN;
	writel(val, mn_pmu->base + reg_info->perf_ctrl);
}

static void hisi_mn_pmu_stop_counters(struct hisi_pmu *mn_pmu)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	val = readl(mn_pmu->base + reg_info->perf_ctrl);
	val &= ~HISI_MN_PERF_CTRL_EN;
	writel(val, mn_pmu->base + reg_info->perf_ctrl);

	hisi_mn_pmu_counter_flush(mn_pmu);
}

static void hisi_mn_pmu_enable_counter(struct hisi_pmu *mn_pmu,
				       struct hw_perf_event *hwc)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	val = readl(mn_pmu->base + reg_info->event_ctrl);
	val |= BIT(hwc->idx);
	writel(val, mn_pmu->base + reg_info->event_ctrl);
}

static void hisi_mn_pmu_disable_counter(struct hisi_pmu *mn_pmu,
					struct hw_perf_event *hwc)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	val = readl(mn_pmu->base + reg_info->event_ctrl);
	val &= ~BIT(hwc->idx);
	writel(val, mn_pmu->base + reg_info->event_ctrl);
}

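/*
 * A set bit in the interrupt mask register masks the overflow
 * interrupt of the corresponding counter, so enabling the interrupt
 * clears the bit and disabling it sets the bit.
 */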
static void hisi_mn_pmu_enable_counter_int(struct hisi_pmu *mn_pmu,
					   struct hw_perf_event *hwc)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	val = readl(mn_pmu->base + reg_info->int_mask);
	val &= ~BIT(hwc->idx);
	writel(val, mn_pmu->base + reg_info->int_mask);
}

static void hisi_mn_pmu_disable_counter_int(struct hisi_pmu *mn_pmu,
					    struct hw_perf_event *hwc)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
	u32 val;

	val = readl(mn_pmu->base + reg_info->int_mask);
	val |= BIT(hwc->idx);
	writel(val, mn_pmu->base + reg_info->int_mask);
}

static u32 hisi_mn_pmu_get_int_status(struct hisi_pmu *mn_pmu)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;

	return readl(mn_pmu->base + reg_info->int_status);
}

static void hisi_mn_pmu_clear_int_status(struct hisi_pmu *mn_pmu, int idx)
{
	struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;

	writel(BIT(idx), mn_pmu->base + reg_info->int_clear);
}

static struct attribute *hisi_mn_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL
};

static const struct attribute_group hisi_mn_pmu_format_group = {
	.name = "format",
	.attrs = hisi_mn_pmu_format_attr,
};

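/*
 * Event codes 0x00-0x0E count request/snoop occurrences, while
 * 0x80-0x87 are the latency events that rely on the counter flush
 * performed when counting stops.
 */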
static struct attribute *hisi_mn_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(req_eobarrier_num,		0x00),
	HISI_PMU_EVENT_ATTR(req_ecbarrier_num,		0x01),
	HISI_PMU_EVENT_ATTR(req_dvmop_num,		0x02),
	HISI_PMU_EVENT_ATTR(req_dvmsync_num,		0x03),
	HISI_PMU_EVENT_ATTR(req_retry_num,		0x04),
	HISI_PMU_EVENT_ATTR(req_writenosnp_num,		0x05),
	HISI_PMU_EVENT_ATTR(req_readnosnp_num,		0x06),
	HISI_PMU_EVENT_ATTR(snp_dvm_num,		0x07),
	HISI_PMU_EVENT_ATTR(snp_dvmsync_num,		0x08),
	HISI_PMU_EVENT_ATTR(l3t_req_dvm_num,		0x09),
	HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_num,	0x0A),
	HISI_PMU_EVENT_ATTR(mn_req_dvm_num,		0x0B),
	HISI_PMU_EVENT_ATTR(mn_req_dvmsync_num,		0x0C),
	HISI_PMU_EVENT_ATTR(pa_req_dvm_num,		0x0D),
	HISI_PMU_EVENT_ATTR(pa_req_dvmsync_num,		0x0E),
	HISI_PMU_EVENT_ATTR(snp_dvm_latency,		0x80),
	HISI_PMU_EVENT_ATTR(snp_dvmsync_latency,	0x81),
	HISI_PMU_EVENT_ATTR(l3t_req_dvm_latency,	0x82),
	HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_latency,	0x83),
	HISI_PMU_EVENT_ATTR(mn_req_dvm_latency,		0x84),
	HISI_PMU_EVENT_ATTR(mn_req_dvmsync_latency,	0x85),
	HISI_PMU_EVENT_ATTR(pa_req_dvm_latency,		0x86),
	HISI_PMU_EVENT_ATTR(pa_req_dvmsync_latency,	0x87),
	NULL
};

static const struct attribute_group hisi_mn_pmu_events_group = {
	.name = "events",
	.attrs = hisi_mn_pmu_events_attr,
};

static const struct attribute_group *hisi_mn_pmu_attr_groups[] = {
	&hisi_mn_pmu_format_group,
	&hisi_mn_pmu_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};
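
/*
 * A minimal usage sketch (hypothetical instance name; PMUs register
 * as hisi_scl<scl_id>_mn<index_id>):
 *
 *	perf stat -e hisi_scl3_mn0/req_dvmop_num/ -a sleep 1
 */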

static const struct hisi_uncore_ops hisi_uncore_mn_ops = {
	.write_evtype		= hisi_mn_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_mn_pmu_start_counters,
	.stop_counters		= hisi_mn_pmu_stop_counters,
	.enable_counter		= hisi_mn_pmu_enable_counter,
	.disable_counter	= hisi_mn_pmu_disable_counter,
	.enable_counter_int	= hisi_mn_pmu_enable_counter_int,
	.disable_counter_int	= hisi_mn_pmu_disable_counter_int,
	.write_counter		= hisi_mn_pmu_write_counter,
	.read_counter		= hisi_mn_pmu_read_counter,
	.get_int_status		= hisi_mn_pmu_get_int_status,
	.clear_int_status	= hisi_mn_pmu_clear_int_status,
};
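
/*
 * The remaining perf callbacks (event init/add/start, overflow
 * handling, etc.) come from the shared hisi_uncore_pmu framework,
 * wired up by hisi_pmu_init() in probe.
 */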

static int hisi_mn_pmu_dev_init(struct platform_device *pdev,
				struct hisi_pmu *mn_pmu)
{
	struct hisi_mn_pmu_regs *reg_info;
	int ret;

	hisi_uncore_pmu_init_topology(mn_pmu, &pdev->dev);

	if (mn_pmu->topo.scl_id < 0)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "Failed to read MN scl id\n");

	if (mn_pmu->topo.index_id < 0)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "Failed to read MN index id\n");

	mn_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mn_pmu->base))
		return dev_err_probe(&pdev->dev, PTR_ERR(mn_pmu->base),
				     "Failed to ioremap resource\n");

	ret = hisi_uncore_pmu_init_irq(mn_pmu, pdev);
	if (ret)
		return ret;

	mn_pmu->dev_info = device_get_match_data(&pdev->dev);
	if (!mn_pmu->dev_info)
		return -ENODEV;

	mn_pmu->pmu_events.attr_groups = mn_pmu->dev_info->attr_groups;
	mn_pmu->counter_bits = mn_pmu->dev_info->counter_bits;
	mn_pmu->check_event = mn_pmu->dev_info->check_event;
	mn_pmu->num_counters = HISI_MN_NR_COUNTERS;
	mn_pmu->ops = &hisi_uncore_mn_ops;
	mn_pmu->dev = &pdev->dev;
	mn_pmu->on_cpu = -1;

	reg_info = mn_pmu->dev_info->private;
	mn_pmu->identifier = readl(mn_pmu->base + reg_info->version);

	return 0;
}

static void hisi_mn_pmu_remove_cpuhp(void *hotplug_node)
{
	cpuhp_state_remove_instance_nocalls(hisi_mn_pmu_online, hotplug_node);
}

static void hisi_mn_pmu_unregister(void *pmu)
{
	perf_pmu_unregister(pmu);
}
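
/*
 * Teardown is devm-managed; actions run in reverse order of
 * registration, so the PMU is unregistered before its hotplug
 * instance is removed and before the MMIO mapping is released.
 */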

static int hisi_mn_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *mn_pmu;
	char *name;
	int ret;

	mn_pmu = devm_kzalloc(&pdev->dev, sizeof(*mn_pmu), GFP_KERNEL);
	if (!mn_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, mn_pmu);

	ret = hisi_mn_pmu_dev_init(pdev, mn_pmu);
	if (ret)
		return ret;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_scl%d_mn%d",
			      mn_pmu->topo.scl_id, mn_pmu->topo.index_id);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(hisi_mn_pmu_online, &mn_pmu->node);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Failed to register cpu hotplug\n");

	ret = devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_remove_cpuhp, &mn_pmu->node);
	if (ret)
		return ret;

	hisi_pmu_init(mn_pmu, THIS_MODULE);

	ret = perf_pmu_register(&mn_pmu->pmu, name, -1);
	if (ret)
		return dev_err_probe(mn_pmu->dev, ret, "Failed to register MN PMU\n");

	return devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_unregister, &mn_pmu->pmu);
}

static struct hisi_mn_pmu_regs hisi_mn_v1_pmu_regs = {
	.version	= HISI_MN_VERSION_REG,
	.dyn_ctrl	= HISI_MN_DYNAMIC_CTRL_REG,
	.perf_ctrl	= HISI_MN_PERF_CTRL_REG,
	.int_mask	= HISI_MN_INT_MASK_REG,
	.int_clear	= HISI_MN_INT_CLEAR_REG,
	.int_status	= HISI_MN_INT_STATUS_REG,
	.event_ctrl	= HISI_MN_EVENT_CTRL_REG,
	.event_type0	= HISI_MN_EVTYPE0_REG,
	.event_cntr0	= HISI_MN_CNTR0_REG,
};

static const struct hisi_pmu_dev_info hisi_mn_v1 = {
	.attr_groups	= hisi_mn_pmu_attr_groups,
	.counter_bits	= 48,
	.check_event	= HISI_MN_EVTYPE_MASK,
	.private	= &hisi_mn_v1_pmu_regs,
};
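
/*
 * The ACPI match data carries the per-version dev_info, retrieved in
 * hisi_mn_pmu_dev_init() via device_get_match_data().
 */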

static const struct acpi_device_id hisi_mn_pmu_acpi_match[] = {
	{ "HISI0222", (kernel_ulong_t) &hisi_mn_v1 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, hisi_mn_pmu_acpi_match);

static struct platform_driver hisi_mn_pmu_driver = {
	.driver = {
		.name = "hisi_mn_pmu",
		.acpi_match_table = hisi_mn_pmu_acpi_match,
		/*
		 * A safe bind/unbind procedure has not been worked out
		 * yet; forcefully unbinding during sampling leads to a
		 * kernel panic, so it is not supported for now.
		 */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_mn_pmu_probe,
};

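/*
 * The hotplug state is allocated dynamically: cpuhp_setup_state_multi()
 * with CPUHP_AP_ONLINE_DYN returns the state number, which is kept in
 * hisi_mn_pmu_online for later instance add/remove.
 */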
static int __init hisi_mn_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/mn:online",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret < 0) {
		pr_err("hisi_mn_pmu: Failed to setup MN PMU hotplug: %d\n", ret);
		return ret;
	}
	hisi_mn_pmu_online = ret;

	ret = platform_driver_register(&hisi_mn_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(hisi_mn_pmu_online);

	return ret;
}
module_init(hisi_mn_pmu_module_init);

static void __exit hisi_mn_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_mn_pmu_driver);
	cpuhp_remove_multi_state(hisi_mn_pmu_online);
}
module_exit(hisi_mn_pmu_module_exit);

MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC MN uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");