// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Uncore PMUs in Fujitsu chips.
 *
 * See Documentation/admin-guide/perf/fujitsu_uncore_pmu.rst for more details.
 *
 * Copyright (c) 2025 Fujitsu. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

/* Number of counters on each PMU */
#define MAC_NUM_COUNTERS  8
#define PCI_NUM_COUNTERS  8
/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
#define UNCORE_EVTYPE_MASK   0xFF

/* Perfmon registers */
#define PM_EVCNTR(__cntr)           (0x000 + (__cntr) * 8)
#define PM_CNTCTL(__cntr)           (0x100 + (__cntr) * 8)
#define PM_CNTCTL_RESET             0
#define PM_EVTYPE(__cntr)           (0x200 + (__cntr) * 8)
#define PM_EVTYPE_EVSEL(__val)      FIELD_GET(UNCORE_EVTYPE_MASK, __val)
#define PM_CR                       0x400
#define PM_CR_RESET                 BIT(1)
#define PM_CR_ENABLE                BIT(0)
#define PM_CNTENSET                 0x410
#define PM_CNTENSET_IDX(__cntr)     BIT(__cntr)
#define PM_CNTENCLR                 0x418
#define PM_CNTENCLR_IDX(__cntr)     BIT(__cntr)
#define PM_CNTENCLR_RESET           0xFF
#define PM_INTENSET                 0x420
#define PM_INTENSET_IDX(__cntr)     BIT(__cntr)
#define PM_INTENCLR                 0x428
#define PM_INTENCLR_IDX(__cntr)     BIT(__cntr)
#define PM_INTENCLR_RESET           0xFF
#define PM_OVSR                     0x440
#define PM_OVSR_OVSRCLR_RESET       0xFF
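
/*
 * Worked example of the layout above: counter 3's event counter lives at
 * PM_EVCNTR(3) = 0x018, its control register at PM_CNTCTL(3) = 0x118 and its
 * event-type register at PM_EVTYPE(3) = 0x218; in the set/clear/status
 * registers it is addressed by BIT(3) = 0x08.
 */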

enum fujitsu_uncore_pmu {
	FUJITSU_UNCORE_PMU_MAC = 1,
	FUJITSU_UNCORE_PMU_PCI = 2,
};

struct uncore_pmu {
	int			num_counters;
	struct pmu		pmu;
	struct hlist_node	node;
	void __iomem		*regs;
	struct perf_event	**events;
	unsigned long		*used_mask;
	int			cpu;
	int			irq;
	struct device		*dev;
};

#define to_uncore_pmu(p) (container_of(p, struct uncore_pmu, pmu))

static int uncore_pmu_cpuhp_state;

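/*
 * The sequence below zeroes the counter, selects the event, unmasks the
 * counter's interrupt and only then sets the enable bit, so nothing is
 * counted against a stale configuration.
 */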
static void fujitsu_uncore_counter_start(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	int idx = event->hw.idx;

	/* Initialize the hardware counter and reset prev_count */
	local64_set(&event->hw.prev_count, 0);
	writeq_relaxed(0, uncorepmu->regs + PM_EVCNTR(idx));

	/* Set the event type */
	writeq_relaxed(PM_EVTYPE_EVSEL(event->attr.config), uncorepmu->regs + PM_EVTYPE(idx));

	/* Enable interrupt generation by this counter */
	writeq_relaxed(PM_INTENSET_IDX(idx), uncorepmu->regs + PM_INTENSET);

	/* Finally, enable the counter */
	writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(idx));
	writeq_relaxed(PM_CNTENSET_IDX(idx), uncorepmu->regs + PM_CNTENSET);
}

static void fujitsu_uncore_counter_stop(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	int idx = event->hw.idx;

	/* Disable the counter */
	writeq_relaxed(PM_CNTENCLR_IDX(idx), uncorepmu->regs + PM_CNTENCLR);

	/* Disable interrupt generation by this counter */
	writeq_relaxed(PM_INTENCLR_IDX(idx), uncorepmu->regs + PM_INTENCLR);
}

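/*
 * Lock-free update: prev_count is advanced with cmpxchg so that concurrent
 * updaters (the read path and the overflow interrupt) each account a
 * disjoint delta; the unsigned subtraction also absorbs a counter wrap.
 */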
static void fujitsu_uncore_counter_update(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	int idx = event->hw.idx;
	u64 prev, new;

	do {
		prev = local64_read(&event->hw.prev_count);
		new = readq_relaxed(uncorepmu->regs + PM_EVCNTR(idx));
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	local64_add(new - prev, &event->count);
}

static inline void fujitsu_uncore_init(struct uncore_pmu *uncorepmu)
{
	int i;

	writeq_relaxed(PM_CR_RESET, uncorepmu->regs + PM_CR);

	writeq_relaxed(PM_CNTENCLR_RESET, uncorepmu->regs + PM_CNTENCLR);
	writeq_relaxed(PM_INTENCLR_RESET, uncorepmu->regs + PM_INTENCLR);
	writeq_relaxed(PM_OVSR_OVSRCLR_RESET, uncorepmu->regs + PM_OVSR);

	for (i = 0; i < uncorepmu->num_counters; ++i) {
		writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(i));
		writeq_relaxed(PM_EVTYPE_EVSEL(0), uncorepmu->regs + PM_EVTYPE(i));
	}
	writeq_relaxed(PM_CR_ENABLE, uncorepmu->regs + PM_CR);
}

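/*
 * Overflow interrupt: fold the current raw value of each overflowed counter
 * into its event count. No reprogramming is needed; counting continues and
 * the next delta is taken from the updated prev_count.
 */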
static irqreturn_t fujitsu_uncore_handle_irq(int irq_num, void *data)
{
	struct uncore_pmu *uncorepmu = data;
	/* Read the overflow status register */
	long status = readq_relaxed(uncorepmu->regs + PM_OVSR);
	int idx;

	if (status == 0)
		return IRQ_NONE;

	/* Clear the bits we read on the overflow status register */
	writeq_relaxed(status, uncorepmu->regs + PM_OVSR);

	for_each_set_bit(idx, &status, uncorepmu->num_counters) {
		struct perf_event *event;

		event = uncorepmu->events[idx];
		if (!event)
			continue;

		fujitsu_uncore_counter_update(event);
	}

	return IRQ_HANDLED;
}

static void fujitsu_uncore_pmu_enable(struct pmu *pmu)
{
	writeq_relaxed(PM_CR_ENABLE, to_uncore_pmu(pmu)->regs + PM_CR);
}

static void fujitsu_uncore_pmu_disable(struct pmu *pmu)
{
	writeq_relaxed(0, to_uncore_pmu(pmu)->regs + PM_CR);
}

static bool fujitsu_uncore_validate_event_group(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int counters = 1;

	if (leader == event)
		return true;

	if (leader->pmu == event->pmu)
		counters++;

	for_each_sibling_event(sibling, leader) {
		if (sibling->pmu == event->pmu)
			counters++;
	}

	/*
	 * If the group requires more counters than the HW has, it
	 * cannot ever be scheduled.
	 */
	return counters <= uncorepmu->num_counters;
}

static int fujitsu_uncore_event_init(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Is the event for this PMU? */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * Sampling not supported since these events are not
	 * core-attributable.
	 */
	if (is_sampling_event(event))
		return -EINVAL;

	/*
	 * Task mode is not available; the counters run as socket-wide
	 * counters, not attributable to any CPU, so per-task attribution
	 * is impossible.
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* Validate the group */
	if (!fujitsu_uncore_validate_event_group(event))
		return -EINVAL;

	hwc->idx = -1;

	event->cpu = uncorepmu->cpu;

	return 0;
}

static void fujitsu_uncore_event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->state = 0;
	fujitsu_uncore_counter_start(event);
}

static void fujitsu_uncore_event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	fujitsu_uncore_counter_stop(event);
	if (flags & PERF_EF_UPDATE)
		fujitsu_uncore_counter_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int fujitsu_uncore_event_add(struct perf_event *event, int flags)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* Try to allocate a counter. */
	idx = bitmap_find_free_region(uncorepmu->used_mask, uncorepmu->num_counters, 0);
	if (idx < 0)
		/* The counters are all in use. */
		return -EAGAIN;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	uncorepmu->events[idx] = event;

	if (flags & PERF_EF_START)
		fujitsu_uncore_event_start(event, 0);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void fujitsu_uncore_event_del(struct perf_event *event, int flags)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Stop and clean up */
	fujitsu_uncore_event_stop(event, flags | PERF_EF_UPDATE);
	uncorepmu->events[hwc->idx] = NULL;
	bitmap_release_region(uncorepmu->used_mask, hwc->idx, 0);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);
}

static void fujitsu_uncore_event_read(struct perf_event *event)
{
	fujitsu_uncore_counter_update(event);
}

#define UNCORE_PMU_FORMAT_ATTR(_name, _config)				      \
	(&((struct dev_ext_attribute[]) {				      \
		{ .attr = __ATTR(_name, 0444, device_show_string, NULL),     \
		  .var = (void *)_config, }				      \
	})[0].attr.attr)

static struct attribute *fujitsu_uncore_pmu_formats[] = {
	UNCORE_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL
};
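
/*
 * The format string is surfaced verbatim through sysfs, e.g. (instance name
 * hypothetical):
 *   $ cat /sys/bus/event_source/devices/mac_iod0_mac0_ch0/format/event
 *   config:0-7
 */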

static const struct attribute_group fujitsu_uncore_pmu_format_group = {
	.name = "format",
	.attrs = fujitsu_uncore_pmu_formats,
};

static ssize_t fujitsu_uncore_pmu_event_show(struct device *dev,
					     struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define MAC_EVENT_ATTR(_name, _id)					     \
	PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)

static struct attribute *fujitsu_uncore_mac_pmu_events[] = {
	MAC_EVENT_ATTR(cycles,				0x00),
	MAC_EVENT_ATTR(read-count,			0x10),
	MAC_EVENT_ATTR(read-count-request,		0x11),
	MAC_EVENT_ATTR(read-count-return,		0x12),
	MAC_EVENT_ATTR(read-count-request-pftgt,	0x13),
	MAC_EVENT_ATTR(read-count-request-normal,	0x14),
	MAC_EVENT_ATTR(read-count-return-pftgt-hit,	0x15),
	MAC_EVENT_ATTR(read-count-return-pftgt-miss,	0x16),
	MAC_EVENT_ATTR(read-wait,			0x17),
	MAC_EVENT_ATTR(write-count,			0x20),
	MAC_EVENT_ATTR(write-count-write,		0x21),
	MAC_EVENT_ATTR(write-count-pwrite,		0x22),
	MAC_EVENT_ATTR(memory-read-count,		0x40),
	MAC_EVENT_ATTR(memory-write-count,		0x50),
	MAC_EVENT_ATTR(memory-pwrite-count,		0x60),
	MAC_EVENT_ATTR(ea-mac,				0x80),
	MAC_EVENT_ATTR(ea-memory,			0x90),
	MAC_EVENT_ATTR(ea-memory-mac-write,		0x92),
	MAC_EVENT_ATTR(ea-ha,				0xa0),
	NULL
};
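
/*
 * Example usage (instance name hypothetical; it is derived from the ACPI
 * _UID in probe below):
 *   $ perf stat -e mac_iod0_mac0_ch0/read-count/ -a sleep 1
 * which is equivalent to the raw form mac_iod0_mac0_ch0/event=0x10/.
 */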

#define PCI_EVENT_ATTR(_name, _id)					     \
	PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)

static struct attribute *fujitsu_uncore_pci_pmu_events[] = {
	PCI_EVENT_ATTR(pci-port0-cycles,		0x00),
	PCI_EVENT_ATTR(pci-port0-read-count,		0x10),
	PCI_EVENT_ATTR(pci-port0-read-count-bus,	0x14),
	PCI_EVENT_ATTR(pci-port0-write-count,		0x20),
	PCI_EVENT_ATTR(pci-port0-write-count-bus,	0x24),
	PCI_EVENT_ATTR(pci-port1-cycles,		0x40),
	PCI_EVENT_ATTR(pci-port1-read-count,		0x50),
	PCI_EVENT_ATTR(pci-port1-read-count-bus,	0x54),
	PCI_EVENT_ATTR(pci-port1-write-count,		0x60),
	PCI_EVENT_ATTR(pci-port1-write-count-bus,	0x64),
	PCI_EVENT_ATTR(ea-pci,				0x80),
	NULL
};
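
/* Likewise for a PCI instance, e.g. pci_iod0_pci0/pci-port0-read-count/. */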

static const struct attribute_group fujitsu_uncore_mac_pmu_events_group = {
	.name = "events",
	.attrs = fujitsu_uncore_mac_pmu_events,
};

static const struct attribute_group fujitsu_uncore_pci_pmu_events_group = {
	.name = "events",
	.attrs = fujitsu_uncore_pci_pmu_events,
};

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(uncorepmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *fujitsu_uncore_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group fujitsu_uncore_pmu_cpumask_attr_group = {
	.attrs = fujitsu_uncore_pmu_cpumask_attrs,
};

static const struct attribute_group *fujitsu_uncore_mac_pmu_attr_grps[] = {
	&fujitsu_uncore_pmu_format_group,
	&fujitsu_uncore_mac_pmu_events_group,
	&fujitsu_uncore_pmu_cpumask_attr_group,
	NULL
};

static const struct attribute_group *fujitsu_uncore_pci_pmu_attr_grps[] = {
	&fujitsu_uncore_pmu_format_group,
	&fujitsu_uncore_pci_pmu_events_group,
	&fujitsu_uncore_pmu_cpumask_attr_group,
	NULL
};

static void fujitsu_uncore_pmu_migrate(struct uncore_pmu *uncorepmu, unsigned int cpu)
{
	perf_pmu_migrate_context(&uncorepmu->pmu, uncorepmu->cpu, cpu);
	irq_set_affinity(uncorepmu->irq, cpumask_of(cpu));
	uncorepmu->cpu = cpu;
}

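/*
 * Hotplug policy: events are read on a single CPU, preferably one on the
 * PMU's NUMA node. On CPU online, pull the context back to the home node;
 * on offline, pick a same-node survivor before falling back to any other
 * online CPU.
 */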
static int fujitsu_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct uncore_pmu *uncorepmu;
	int node;

	uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
	node = dev_to_node(uncorepmu->dev);
	if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
		fujitsu_uncore_pmu_migrate(uncorepmu, cpu);

	return 0;
}

static int fujitsu_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct uncore_pmu *uncorepmu;
	unsigned int target;
	int node;

	uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
	if (cpu != uncorepmu->cpu)
		return 0;

	node = dev_to_node(uncorepmu->dev);
	target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		target = cpumask_any_but(cpu_online_mask, cpu);

	if (target < nr_cpu_ids)
		fujitsu_uncore_pmu_migrate(uncorepmu, target);

	return 0;
}

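/*
 * The ACPI _UID encodes the instance topology in hex nibbles (MAC: iod, mac
 * and channel; PCI: iod and pci); the nibbles become part of the PMU name
 * constructed below.
 */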
static int fujitsu_uncore_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned long device_type = (unsigned long)device_get_match_data(dev);
	const struct attribute_group **attr_groups;
	struct uncore_pmu *uncorepmu;
	struct resource *memrc;
	size_t alloc_size;
	char *name;
	int ret;
	int irq;
	u64 uid;

	ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
	if (ret)
		return dev_err_probe(dev, ret, "unable to read ACPI uid\n");

	uncorepmu = devm_kzalloc(dev, sizeof(*uncorepmu), GFP_KERNEL);
	if (!uncorepmu)
		return -ENOMEM;
	uncorepmu->dev = dev;
	uncorepmu->cpu = cpumask_local_spread(0, dev_to_node(dev));
	platform_set_drvdata(pdev, uncorepmu);

	switch (device_type) {
	case FUJITSU_UNCORE_PMU_MAC:
		uncorepmu->num_counters = MAC_NUM_COUNTERS;
		attr_groups = fujitsu_uncore_mac_pmu_attr_grps;
		name = devm_kasprintf(dev, GFP_KERNEL, "mac_iod%llu_mac%llu_ch%llu",
				      (uid >> 8) & 0xF, (uid >> 4) & 0xF, uid & 0xF);
		break;
	case FUJITSU_UNCORE_PMU_PCI:
		uncorepmu->num_counters = PCI_NUM_COUNTERS;
		attr_groups = fujitsu_uncore_pci_pmu_attr_grps;
		name = devm_kasprintf(dev, GFP_KERNEL, "pci_iod%llu_pci%llu",
				      (uid >> 4) & 0xF, uid & 0xF);
		break;
	default:
		return dev_err_probe(dev, -EINVAL, "illegal device type: %lu\n", device_type);
	}
	if (!name)
		return -ENOMEM;

	uncorepmu->pmu = (struct pmu) {
		.parent		= dev,
		.task_ctx_nr	= perf_invalid_context,

		.attr_groups	= attr_groups,

		.pmu_enable	= fujitsu_uncore_pmu_enable,
		.pmu_disable	= fujitsu_uncore_pmu_disable,
		.event_init	= fujitsu_uncore_event_init,
		.add		= fujitsu_uncore_event_add,
		.del		= fujitsu_uncore_event_del,
		.start		= fujitsu_uncore_event_start,
		.stop		= fujitsu_uncore_event_stop,
		.read		= fujitsu_uncore_event_read,

		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
	};

	alloc_size = sizeof(uncorepmu->events[0]) * uncorepmu->num_counters;
	uncorepmu->events = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!uncorepmu->events)
		return -ENOMEM;

	alloc_size = sizeof(uncorepmu->used_mask[0]) * BITS_TO_LONGS(uncorepmu->num_counters);
	uncorepmu->used_mask = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!uncorepmu->used_mask)
		return -ENOMEM;

	uncorepmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
	if (IS_ERR(uncorepmu->regs))
		return PTR_ERR(uncorepmu->regs);

	fujitsu_uncore_init(uncorepmu);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, fujitsu_uncore_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       name, uncorepmu);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to request IRQ:%d\n", irq);

	ret = irq_set_affinity(irq, cpumask_of(uncorepmu->cpu));
	if (ret)
		return dev_err_probe(dev, ret, "Failed to set irq affinity:%d\n", irq);

	uncorepmu->irq = irq;

	/* Add this instance to the list used by the offline callback */
	ret = cpuhp_state_add_instance(uncore_pmu_cpuhp_state, &uncorepmu->node);
	if (ret)
		return dev_err_probe(dev, ret, "Error registering hotplug\n");

	ret = perf_pmu_register(&uncorepmu->pmu, name, -1);
	if (ret < 0) {
		cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
		return dev_err_probe(dev, ret, "Failed to register %s PMU\n", name);
	}

	dev_dbg(dev, "Registered %s, type: %d\n", name, uncorepmu->pmu.type);

	return 0;
}

static void fujitsu_uncore_pmu_remove(struct platform_device *pdev)
{
	struct uncore_pmu *uncorepmu = platform_get_drvdata(pdev);

	writeq_relaxed(0, uncorepmu->regs + PM_CR);

	perf_pmu_unregister(&uncorepmu->pmu);
	cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
}

static const struct acpi_device_id fujitsu_uncore_pmu_acpi_match[] = {
	{ "FUJI200C", FUJITSU_UNCORE_PMU_MAC },
	{ "FUJI200D", FUJITSU_UNCORE_PMU_PCI },
	{ }
};
MODULE_DEVICE_TABLE(acpi, fujitsu_uncore_pmu_acpi_match);

static struct platform_driver fujitsu_uncore_pmu_driver = {
	.driver = {
		.name = "fujitsu-uncore-pmu",
		.acpi_match_table = fujitsu_uncore_pmu_acpi_match,
		.suppress_bind_attrs = true,
	},
	.probe = fujitsu_uncore_pmu_probe,
	.remove = fujitsu_uncore_pmu_remove,
};

static int __init fujitsu_uncore_pmu_init(void)
{
	int ret;

	/* Install a hook to update the reader CPU in case it goes offline */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/fujitsu/uncore:online",
				      fujitsu_uncore_pmu_online_cpu,
				      fujitsu_uncore_pmu_offline_cpu);
	if (ret < 0)
		return ret;

	uncore_pmu_cpuhp_state = ret;

	ret = platform_driver_register(&fujitsu_uncore_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);

	return ret;
}

static void __exit fujitsu_uncore_pmu_exit(void)
{
	platform_driver_unregister(&fujitsu_uncore_pmu_driver);
	cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
}

module_init(fujitsu_uncore_pmu_init);
module_exit(fujitsu_uncore_pmu_exit);

MODULE_AUTHOR("Koichi Okuno <fj2767dz@fujitsu.com>");
MODULE_DESCRIPTION("Fujitsu Uncore PMU driver");
MODULE_LICENSE("GPL");