// SPDX-License-Identifier: GPL-2.0-only
/*
 * StarFive's StarLink PMU driver
 *
 * Copyright (C) 2023 StarFive Technology Co., Ltd.
 *
 * Author: Ji Sheng Teoh <jisheng.teoh@starfivetech.com>
 *
 */

#define STARLINK_PMU_PDEV_NAME	"starfive_starlink_pmu"
#define pr_fmt(fmt)	STARLINK_PMU_PDEV_NAME ": " fmt

#include <linux/bitmap.h>
#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

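/*
 * Counter layout: 16 event counters occupy indices 0..15, while the
 * dedicated cycle counter is tracked at index 63.  This matches the
 * bit layout of the interrupt enable and overflow status registers.
 */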
#define STARLINK_PMU_MAX_COUNTERS			64
#define STARLINK_PMU_NUM_COUNTERS			16
#define STARLINK_PMU_IDX_CYCLE_COUNTER			63

#define STARLINK_PMU_EVENT_SELECT			0x060
#define STARLINK_PMU_EVENT_COUNTER			0x160
#define STARLINK_PMU_COUNTER_MASK			GENMASK_ULL(63, 0)
#define STARLINK_PMU_CYCLE_COUNTER			0x058

#define STARLINK_PMU_CONTROL				0x040
#define STARLINK_PMU_GLOBAL_ENABLE			BIT_ULL(0)

#define STARLINK_PMU_INTERRUPT_ENABLE			0x050
#define STARLINK_PMU_COUNTER_OVERFLOW_STATUS		0x048
#define STARLINK_PMU_CYCLE_OVERFLOW_MASK		BIT_ULL(63)

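/*
 * Raw event encodings exposed through sysfs.  For regular events the
 * value is written into the per-counter event select register.
 * STARLINK_CYCLES doubles as the config value that selects the
 * dedicated cycle counter (it equals the cycle counter register
 * offset) and is treated specially throughout the driver.
 */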
#define STARLINK_CYCLES				0x058
#define CACHE_READ_REQUEST			0x04000701
#define CACHE_WRITE_REQUEST			0x03000001
#define CACHE_RELEASE_REQUEST			0x0003e001
#define CACHE_READ_HIT				0x00901202
#define CACHE_READ_MISS				0x04008002
#define CACHE_WRITE_HIT				0x006c0002
#define CACHE_WRITE_MISS			0x03000002
#define CACHE_WRITEBACK				0x00000403

#define to_starlink_pmu(p) (container_of(p, struct starlink_pmu, pmu))

#define STARLINK_FORMAT_ATTR(_name, _config)				      \
	(&((struct dev_ext_attribute[]) {				      \
		{ .attr = __ATTR(_name, 0444, starlink_pmu_sysfs_format_show, NULL), \
		  .var = (void *)_config, }				      \
	})[0].attr.attr)

#define STARLINK_EVENT_ATTR(_name, _id)					     \
	PMU_EVENT_ATTR_ID(_name, starlink_pmu_sysfs_event_show, _id)

static int starlink_pmu_cpuhp_state;

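/* Per-CPU bookkeeping: installed events and the counter allocation bitmap. */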
struct starlink_hw_events {
	struct perf_event	*events[STARLINK_PMU_MAX_COUNTERS];
	DECLARE_BITMAP(used_mask, STARLINK_PMU_MAX_COUNTERS);
};

struct starlink_pmu {
	struct pmu					pmu;
	struct starlink_hw_events			__percpu *hw_events;
	struct hlist_node				node;
	struct notifier_block				starlink_pmu_pm_nb;
	void __iomem					*pmu_base;
	cpumask_t					cpumask;
	int						irq;
};

static ssize_t
starlink_pmu_sysfs_format_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
						       struct dev_ext_attribute, attr);

	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}

static struct attribute *starlink_pmu_format_attrs[] = {
	STARLINK_FORMAT_ATTR(event, "config:0-31"),
	NULL
};

static const struct attribute_group starlink_pmu_format_attr_group = {
	.name = "format",
	.attrs = starlink_pmu_format_attrs,
};

static ssize_t
starlink_pmu_sysfs_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct perf_pmu_events_attr *eattr = container_of(attr,
							  struct perf_pmu_events_attr, attr);

	return sysfs_emit(buf, "event=0x%02llx\n", eattr->id);
}

static struct attribute *starlink_pmu_event_attrs[] = {
	STARLINK_EVENT_ATTR(cycles, STARLINK_CYCLES),
	STARLINK_EVENT_ATTR(read_request, CACHE_READ_REQUEST),
	STARLINK_EVENT_ATTR(write_request, CACHE_WRITE_REQUEST),
	STARLINK_EVENT_ATTR(release_request, CACHE_RELEASE_REQUEST),
	STARLINK_EVENT_ATTR(read_hit, CACHE_READ_HIT),
	STARLINK_EVENT_ATTR(read_miss, CACHE_READ_MISS),
	STARLINK_EVENT_ATTR(write_hit, CACHE_WRITE_HIT),
	STARLINK_EVENT_ATTR(write_miss, CACHE_WRITE_MISS),
	STARLINK_EVENT_ATTR(writeback, CACHE_WRITEBACK),
	NULL
};

static const struct attribute_group starlink_pmu_events_attr_group = {
	.name = "events",
	.attrs = starlink_pmu_event_attrs,
};

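/*
 * The "cpumask" sysfs attribute advertises the single CPU this uncore
 * PMU counts on, so that perf tooling opens its events there.
 */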
static ssize_t
cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &starlink_pmu->cpumask);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *starlink_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group starlink_pmu_cpumask_attr_group = {
	.attrs = starlink_pmu_cpumask_attrs,
};

static const struct attribute_group *starlink_pmu_attr_groups[] = {
	&starlink_pmu_format_attr_group,
	&starlink_pmu_events_attr_group,
	&starlink_pmu_cpumask_attr_group,
	NULL
};

static void starlink_pmu_set_event_period(struct perf_event *event)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = event->hw.idx;

	/*
	 * Program the counter to half of its maximum count to handle
	 * cases of extreme interrupt latency.
	 */
	u64 val = STARLINK_PMU_COUNTER_MASK >> 1;

	local64_set(&hwc->prev_count, val);
	if (hwc->config == STARLINK_CYCLES)
		writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CYCLE_COUNTER);
	else
		writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_EVENT_COUNTER +
		       idx * sizeof(u64));
}

static void starlink_pmu_counter_start(struct perf_event *event,
				       struct starlink_pmu *starlink_pmu)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = event->hw.idx;
	u64 val;

	/*
	 * Enable the counter overflow interrupts [63:0], which are
	 * mapped as follows:
	 *
	 * event counter 0	- Bit [0]
	 * event counter 1	- Bit [1]
	 * ...
	 * cycle counter	- Bit [63]
	 */
	val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);

	if (hwc->config == STARLINK_CYCLES) {
		/*
		 * Cycle count has its dedicated register, and it starts
		 * counting as soon as STARLINK_PMU_GLOBAL_ENABLE is set.
		 */
		val |= STARLINK_PMU_CYCLE_OVERFLOW_MASK;
	} else {
		writeq(event->hw.config, starlink_pmu->pmu_base +
		       STARLINK_PMU_EVENT_SELECT + idx * sizeof(u64));

		val |= BIT_ULL(idx);
	}

	writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);

	writeq(STARLINK_PMU_GLOBAL_ENABLE, starlink_pmu->pmu_base +
	       STARLINK_PMU_CONTROL);
}

static void starlink_pmu_counter_stop(struct perf_event *event,
				      struct starlink_pmu *starlink_pmu)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = event->hw.idx;
	u64 val;

	val = readq(starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
	val &= ~STARLINK_PMU_GLOBAL_ENABLE;
	writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);

	val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
	if (hwc->config == STARLINK_CYCLES)
		val &= ~STARLINK_PMU_CYCLE_OVERFLOW_MASK;
	else
		val &= ~BIT_ULL(idx);

	writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
}

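/*
 * Fold the current hardware count into event->count.  The cmpxchg loop
 * keeps prev_count consistent with the value just read if the counter
 * is updated concurrently (e.g. by the overflow interrupt handler).
 */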
static void starlink_pmu_update(struct perf_event *event)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 prev_raw_count, new_raw_count;
	u64 oldval;
	u64 delta;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		if (hwc->config == STARLINK_CYCLES)
			new_raw_count = readq(starlink_pmu->pmu_base +
					      STARLINK_PMU_CYCLE_COUNTER);
		else
			new_raw_count = readq(starlink_pmu->pmu_base +
					      STARLINK_PMU_EVENT_COUNTER +
					      idx * sizeof(u64));
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & STARLINK_PMU_COUNTER_MASK;
	local64_add(delta, &event->count);
}

static void starlink_pmu_start(struct perf_event *event, int flags)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;

	starlink_pmu_set_event_period(event);
	starlink_pmu_counter_start(event, starlink_pmu);

	perf_event_update_userpage(event);
}

static void starlink_pmu_stop(struct perf_event *event, int flags)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	starlink_pmu_counter_stop(event, starlink_pmu);
	starlink_pmu_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

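/*
 * Install an event on a counter: the cycle event always takes the
 * dedicated slot, any other event claims the first free entry in the
 * 16-counter bitmap.
 */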
static int starlink_pmu_add(struct perf_event *event, int flags)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct starlink_hw_events *hw_events =
					this_cpu_ptr(starlink_pmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long *used_mask = hw_events->used_mask;
	u32 n_events = STARLINK_PMU_NUM_COUNTERS;
	int idx;

	/*
	 * The cycle counter has a dedicated register to hold its count.
	 * Any other event has to be enabled through the event select
	 * register and is assigned an independent counter as it is
	 * added.
	 */

	if (hwc->config == STARLINK_CYCLES) {
		idx = STARLINK_PMU_IDX_CYCLE_COUNTER;
	} else {
		idx = find_first_zero_bit(used_mask, n_events);
		/* All counters are in use */
		if (idx == n_events)
			return -EAGAIN;

		set_bit(idx, used_mask);
	}

	hwc->idx = idx;
	hw_events->events[idx] = event;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		starlink_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static void starlink_pmu_del(struct perf_event *event, int flags)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct starlink_hw_events *hw_events =
					this_cpu_ptr(starlink_pmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	starlink_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[hwc->idx] = NULL;
	clear_bit(hwc->idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static bool starlink_pmu_validate_event_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int counter = 1;

	/*
	 * Ensure hardware events in the group are on the same PMU;
	 * software events are acceptable.
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (sibling->pmu != event->pmu && !is_software_event(sibling))
			return false;

		counter++;
	}

	return counter <= STARLINK_PMU_NUM_COUNTERS;
}

static int starlink_pmu_event_init(struct perf_event *event)
{
	struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Sampling is not supported, as counters are shared
	 * by all CPUs.
	 */
	if (hwc->sample_period)
		return -EOPNOTSUPP;

	/*
	 * Per-task counting and attaching to a task are not supported,
	 * as uncore events are not specific to any CPU.
	 */
	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (!starlink_pmu_validate_event_group(event))
		return -EINVAL;

	hwc->idx = -1;
	hwc->config = event->attr.config;
	event->cpu = cpumask_first(&starlink_pmu->cpumask);

	return 0;
}

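/*
 * Overflow interrupt handler: for every active counter whose overflow
 * status bit is set, acknowledge the overflow by writing the bit back,
 * fold the counter value into the event and re-arm the counter at half
 * of its range.
 */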
static irqreturn_t starlink_pmu_handle_irq(int irq_num, void *data)
{
	struct starlink_pmu *starlink_pmu = data;
	struct starlink_hw_events *hw_events =
			this_cpu_ptr(starlink_pmu->hw_events);
	bool handled = false;
	int idx;
	u64 overflow_status;

	for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
		struct perf_event *event = hw_events->events[idx];

		if (!event)
			continue;

		overflow_status = readq(starlink_pmu->pmu_base +
					STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
		if (!(overflow_status & BIT_ULL(idx)))
			continue;

		writeq(BIT_ULL(idx), starlink_pmu->pmu_base +
		       STARLINK_PMU_COUNTER_OVERFLOW_STATUS);

		starlink_pmu_update(event);
		starlink_pmu_set_event_period(event);
		handled = true;
	}
	return IRQ_RETVAL(handled);
}

static int starlink_setup_irqs(struct starlink_pmu *starlink_pmu,
			       struct platform_device *pdev)
{
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, starlink_pmu_handle_irq,
			       0, STARLINK_PMU_PDEV_NAME, starlink_pmu);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Failed to request IRQ\n");

	starlink_pmu->irq = irq;

	return 0;
}

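/*
 * CPU PM notifier: stop and save the active counters before the CPU
 * enters a low-power state, and restart them when it comes back, so
 * that accumulated counts are preserved across the transition.
 */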
static int starlink_pmu_pm_notify(struct notifier_block *b,
				  unsigned long cmd, void *v)
{
	struct starlink_pmu *starlink_pmu = container_of(b, struct starlink_pmu,
							 starlink_pmu_pm_nb);
	struct starlink_hw_events *hw_events =
					this_cpu_ptr(starlink_pmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask,
				    STARLINK_PMU_MAX_COUNTERS);
	struct perf_event *event;
	int idx;

	if (!enabled)
		return NOTIFY_OK;

	for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/* Stop and update the counter */
			starlink_pmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/* Restore and enable the counter */
			starlink_pmu_start(event, PERF_EF_RELOAD);
			break;
		default:
			break;
		}
	}

	return NOTIFY_OK;
}

static int starlink_pmu_pm_register(struct starlink_pmu *starlink_pmu)
{
	if (!IS_ENABLED(CONFIG_CPU_PM))
		return 0;

	starlink_pmu->starlink_pmu_pm_nb.notifier_call = starlink_pmu_pm_notify;
	return cpu_pm_register_notifier(&starlink_pmu->starlink_pmu_pm_nb);
}

static void starlink_pmu_pm_unregister(struct starlink_pmu *starlink_pmu)
{
	if (!IS_ENABLED(CONFIG_CPU_PM))
		return;

	cpu_pm_unregister_notifier(&starlink_pmu->starlink_pmu_pm_nb);
}

static void starlink_pmu_destroy(struct starlink_pmu *starlink_pmu)
{
	starlink_pmu_pm_unregister(starlink_pmu);
	cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
				    &starlink_pmu->node);
}

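/*
 * Probe: map the PMU MMIO region, allocate per-CPU event bookkeeping,
 * request the overflow interrupt, hook into CPU hotplug and CPU PM,
 * and finally register the PMU with the perf core.
 */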
static int starlink_pmu_probe(struct platform_device *pdev)
{
	struct starlink_pmu *starlink_pmu;
	struct starlink_hw_events *hw_events;
	struct resource *res;
	int cpuid, i, ret;

	starlink_pmu = devm_kzalloc(&pdev->dev, sizeof(*starlink_pmu), GFP_KERNEL);
	if (!starlink_pmu)
		return -ENOMEM;

	starlink_pmu->pmu_base =
			devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(starlink_pmu->pmu_base))
		return PTR_ERR(starlink_pmu->pmu_base);

	starlink_pmu->hw_events = alloc_percpu_gfp(struct starlink_hw_events,
						   GFP_KERNEL);
	if (!starlink_pmu->hw_events) {
		dev_err(&pdev->dev, "Failed to allocate per-cpu PMU data\n");
		return -ENOMEM;
	}

	for_each_possible_cpu(cpuid) {
		hw_events = per_cpu_ptr(starlink_pmu->hw_events, cpuid);
		for (i = 0; i < STARLINK_PMU_MAX_COUNTERS; i++)
			hw_events->events[i] = NULL;
	}

	ret = starlink_setup_irqs(starlink_pmu, pdev);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(starlink_pmu_cpuhp_state,
				       &starlink_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register hotplug\n");
		return ret;
	}

	ret = starlink_pmu_pm_register(starlink_pmu);
	if (ret) {
		cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
					    &starlink_pmu->node);
		return ret;
	}

	starlink_pmu->pmu = (struct pmu) {
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= starlink_pmu_event_init,
		.add		= starlink_pmu_add,
		.del		= starlink_pmu_del,
		.start		= starlink_pmu_start,
		.stop		= starlink_pmu_stop,
		.read		= starlink_pmu_update,
		.attr_groups	= starlink_pmu_attr_groups,
	};

	ret = perf_pmu_register(&starlink_pmu->pmu, STARLINK_PMU_PDEV_NAME, -1);
	if (ret)
		starlink_pmu_destroy(starlink_pmu);

	return ret;
}

static const struct of_device_id starlink_pmu_of_match[] = {
	{ .compatible = "starfive,jh8100-starlink-pmu" },
	{}
};
MODULE_DEVICE_TABLE(of, starlink_pmu_of_match);

static struct platform_driver starlink_pmu_driver = {
	.driver = {
		.name	= STARLINK_PMU_PDEV_NAME,
		.of_match_table = starlink_pmu_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = starlink_pmu_probe,
};

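/*
 * CPU hotplug callbacks keep the uncore PMU bound to a single online
 * CPU: the first CPU to come online claims it, and when that CPU goes
 * offline the perf context and the overflow IRQ affinity migrate to
 * another online CPU.
 */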
static int
starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
							     struct starlink_pmu,
							     node);

	if (cpumask_empty(&starlink_pmu->cpumask))
		cpumask_set_cpu(cpu, &starlink_pmu->cpumask);

	WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(cpu)));

	return 0;
}

static int
starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
							     struct starlink_pmu,
							     node);
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &starlink_pmu->cpumask))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&starlink_pmu->pmu, cpu, target);

	cpumask_set_cpu(target, &starlink_pmu->cpumask);
	WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(target)));

	return 0;
}

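/*
 * Register a dynamic multi-instance hotplug state shared by all
 * StarLink PMU instances, then register the platform driver.
 */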
static int __init starlink_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "soc/starfive/starlink_pmu:online",
				      starlink_pmu_online_cpu,
				      starlink_pmu_offline_cpu);
	if (ret < 0)
		return ret;

	starlink_pmu_cpuhp_state = ret;

	return platform_driver_register(&starlink_pmu_driver);
}

device_initcall(starlink_pmu_init);