xref: /linux/drivers/perf/fujitsu_uncore_pmu.c (revision feafee284579d29537a5a56ba8f23894f0463f3d)
1*bad11557SKoichi Okuno // SPDX-License-Identifier: GPL-2.0-only
2*bad11557SKoichi Okuno /*
3*bad11557SKoichi Okuno  * Driver for the Uncore PMUs in Fujitsu chips.
4*bad11557SKoichi Okuno  *
5*bad11557SKoichi Okuno  * See Documentation/admin-guide/perf/fujitsu_uncore_pmu.rst for more details.
6*bad11557SKoichi Okuno  *
7*bad11557SKoichi Okuno  * Copyright (c) 2025 Fujitsu. All rights reserved.
8*bad11557SKoichi Okuno  */
9*bad11557SKoichi Okuno 
10*bad11557SKoichi Okuno #include <linux/acpi.h>
11*bad11557SKoichi Okuno #include <linux/bitfield.h>
12*bad11557SKoichi Okuno #include <linux/bitops.h>
13*bad11557SKoichi Okuno #include <linux/interrupt.h>
14*bad11557SKoichi Okuno #include <linux/io.h>
15*bad11557SKoichi Okuno #include <linux/list.h>
16*bad11557SKoichi Okuno #include <linux/mod_devicetable.h>
17*bad11557SKoichi Okuno #include <linux/module.h>
18*bad11557SKoichi Okuno #include <linux/perf_event.h>
19*bad11557SKoichi Okuno #include <linux/platform_device.h>
20*bad11557SKoichi Okuno 
/* Number of counters on each PMU */
#define MAC_NUM_COUNTERS  8
#define PCI_NUM_COUNTERS  8
/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
#define UNCORE_EVTYPE_MASK   0xFF

/* Perfmon registers (64-bit; accessed with readq/writeq_relaxed) */
#define PM_EVCNTR(__cntr)           (0x000 + (__cntr) * 8)	/* event counter value */
#define PM_CNTCTL(__cntr)           (0x100 + (__cntr) * 8)	/* per-counter control */
#define PM_CNTCTL_RESET             0
#define PM_EVTYPE(__cntr)           (0x200 + (__cntr) * 8)	/* per-counter event select */
#define PM_EVTYPE_EVSEL(__val)      FIELD_GET(UNCORE_EVTYPE_MASK, __val)
#define PM_CR                       0x400	/* global control */
#define PM_CR_RESET                 BIT(1)
#define PM_CR_ENABLE                BIT(0)
#define PM_CNTENSET                 0x410	/* counter enable, write 1 to set */
#define PM_CNTENSET_IDX(__cntr)     BIT(__cntr)
#define PM_CNTENCLR                 0x418	/* counter enable, write 1 to clear */
#define PM_CNTENCLR_IDX(__cntr)     BIT(__cntr)
#define PM_CNTENCLR_RESET           0xFF
#define PM_INTENSET                 0x420	/* overflow IRQ enable, write 1 to set */
#define PM_INTENSET_IDX(__cntr)     BIT(__cntr)
#define PM_INTENCLR                 0x428	/* overflow IRQ enable, write 1 to clear */
#define PM_INTENCLR_IDX(__cntr)     BIT(__cntr)
#define PM_INTENCLR_RESET           0xFF
#define PM_OVSR                     0x440	/* overflow status, write 1 to clear */
#define PM_OVSR_OVSRCLR_RESET       0xFF
48*bad11557SKoichi Okuno 
/* PMU flavours; used as ACPI match data (see fujitsu_uncore_pmu_acpi_match). */
enum fujitsu_uncore_pmu {
	FUJITSU_UNCORE_PMU_MAC = 1,
	FUJITSU_UNCORE_PMU_PCI = 2,
};
53*bad11557SKoichi Okuno 
/* Per-instance driver state for one uncore PMU. */
struct uncore_pmu {
	int			num_counters;	/* counters implemented by this PMU */
	struct pmu		pmu;		/* core perf PMU object */
	struct hlist_node	node;		/* entry in the cpuhp instance list */
	void __iomem		*regs;		/* mapped perfmon register block */
	struct perf_event	**events;	/* active event per counter index */
	unsigned long		*used_mask;	/* bitmap of allocated counters */
	int			cpu;		/* designated reader/IRQ CPU */
	int			irq;		/* overflow interrupt line */
	struct device		*dev;		/* backing platform device */
};
65*bad11557SKoichi Okuno 
/* Convert a generic struct pmu pointer back to our driver state. */
#define to_uncore_pmu(p) (container_of(p, struct uncore_pmu, pmu))

/* Dynamic CPU hotplug state id, allocated in fujitsu_uncore_pmu_init(). */
static int uncore_pmu_cpuhp_state;
69*bad11557SKoichi Okuno 
/*
 * Program and start the hardware counter backing @event: zero the
 * counter (and prev_count to match), select the event, enable the
 * overflow interrupt, then enable counting.
 */
static void fujitsu_uncore_counter_start(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	int idx = event->hw.idx;

	/* Initialize the hardware counter and reset prev_count */
	local64_set(&event->hw.prev_count, 0);
	writeq_relaxed(0, uncorepmu->regs + PM_EVCNTR(idx));

	/* Set the event type */
	writeq_relaxed(PM_EVTYPE_EVSEL(event->attr.config), uncorepmu->regs + PM_EVTYPE(idx));

	/* Enable interrupt generation by this counter */
	writeq_relaxed(PM_INTENSET_IDX(idx), uncorepmu->regs + PM_INTENSET);

	/* Finally, enable the counter */
	writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(idx));
	writeq_relaxed(PM_CNTENSET_IDX(idx), uncorepmu->regs + PM_CNTENSET);
}
89*bad11557SKoichi Okuno 
/*
 * Stop the hardware counter backing @event and silence its overflow
 * interrupt.  The counter value itself is left intact for a final read.
 */
static void fujitsu_uncore_counter_stop(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	int idx = event->hw.idx;

	/* Disable the counter */
	writeq_relaxed(PM_CNTENCLR_IDX(idx), uncorepmu->regs + PM_CNTENCLR);

	/* Disable interrupt generation by this counter */
	writeq_relaxed(PM_INTENCLR_IDX(idx), uncorepmu->regs + PM_INTENCLR);
}
101*bad11557SKoichi Okuno 
/*
 * Fold the current hardware count into event->count.
 *
 * prev_count is advanced with a cmpxchg loop so that concurrent
 * updaters (pmu::read vs. the overflow IRQ) each account only their
 * own delta exactly once.
 */
static void fujitsu_uncore_counter_update(struct perf_event *event)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
	int idx = event->hw.idx;
	u64 prev, new;

	do {
		prev = local64_read(&event->hw.prev_count);
		new = readq_relaxed(uncorepmu->regs + PM_EVCNTR(idx));
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	/* 64-bit counter: unsigned subtraction handles wraparound. */
	local64_add(new - prev, &event->count);
}
115*bad11557SKoichi Okuno 
/*
 * Bring the perfmon block to a known state: reset it, clear all
 * counter/interrupt enables and pending overflow bits, reset every
 * counter's control and event-select registers, then re-enable the unit.
 */
static inline void fujitsu_uncore_init(struct uncore_pmu *uncorepmu)
{
	int i;

	writeq_relaxed(PM_CR_RESET, uncorepmu->regs + PM_CR);

	writeq_relaxed(PM_CNTENCLR_RESET, uncorepmu->regs + PM_CNTENCLR);
	writeq_relaxed(PM_INTENCLR_RESET, uncorepmu->regs + PM_INTENCLR);
	writeq_relaxed(PM_OVSR_OVSRCLR_RESET, uncorepmu->regs + PM_OVSR);

	for (i = 0; i < uncorepmu->num_counters; ++i) {
		writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(i));
		writeq_relaxed(PM_EVTYPE_EVSEL(0), uncorepmu->regs + PM_EVTYPE(i));
	}
	writeq_relaxed(PM_CR_ENABLE, uncorepmu->regs + PM_CR);
}
132*bad11557SKoichi Okuno 
fujitsu_uncore_handle_irq(int irq_num,void * data)133*bad11557SKoichi Okuno static irqreturn_t fujitsu_uncore_handle_irq(int irq_num, void *data)
134*bad11557SKoichi Okuno {
135*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu = data;
136*bad11557SKoichi Okuno 	/* Read the overflow status register */
137*bad11557SKoichi Okuno 	long status = readq_relaxed(uncorepmu->regs + PM_OVSR);
138*bad11557SKoichi Okuno 	int idx;
139*bad11557SKoichi Okuno 
140*bad11557SKoichi Okuno 	if (status == 0)
141*bad11557SKoichi Okuno 		return IRQ_NONE;
142*bad11557SKoichi Okuno 
143*bad11557SKoichi Okuno 	/* Clear the bits we read on the overflow status register */
144*bad11557SKoichi Okuno 	writeq_relaxed(status, uncorepmu->regs + PM_OVSR);
145*bad11557SKoichi Okuno 
146*bad11557SKoichi Okuno 	for_each_set_bit(idx, &status, uncorepmu->num_counters) {
147*bad11557SKoichi Okuno 		struct perf_event *event;
148*bad11557SKoichi Okuno 
149*bad11557SKoichi Okuno 		event = uncorepmu->events[idx];
150*bad11557SKoichi Okuno 		if (!event)
151*bad11557SKoichi Okuno 			continue;
152*bad11557SKoichi Okuno 
153*bad11557SKoichi Okuno 		fujitsu_uncore_counter_update(event);
154*bad11557SKoichi Okuno 	}
155*bad11557SKoichi Okuno 
156*bad11557SKoichi Okuno 	return IRQ_HANDLED;
157*bad11557SKoichi Okuno }
158*bad11557SKoichi Okuno 
/* pmu::pmu_enable callback: globally (re)enable the perfmon unit. */
static void fujitsu_uncore_pmu_enable(struct pmu *pmu)
{
	writeq_relaxed(PM_CR_ENABLE, to_uncore_pmu(pmu)->regs + PM_CR);
}
163*bad11557SKoichi Okuno 
/* pmu::pmu_disable callback: globally disable the perfmon unit. */
static void fujitsu_uncore_pmu_disable(struct pmu *pmu)
{
	writeq_relaxed(0, to_uncore_pmu(pmu)->regs + PM_CR);
}
168*bad11557SKoichi Okuno 
fujitsu_uncore_validate_event_group(struct perf_event * event)169*bad11557SKoichi Okuno static bool fujitsu_uncore_validate_event_group(struct perf_event *event)
170*bad11557SKoichi Okuno {
171*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
172*bad11557SKoichi Okuno 	struct perf_event *leader = event->group_leader;
173*bad11557SKoichi Okuno 	struct perf_event *sibling;
174*bad11557SKoichi Okuno 	int counters = 1;
175*bad11557SKoichi Okuno 
176*bad11557SKoichi Okuno 	if (leader == event)
177*bad11557SKoichi Okuno 		return true;
178*bad11557SKoichi Okuno 
179*bad11557SKoichi Okuno 	if (leader->pmu == event->pmu)
180*bad11557SKoichi Okuno 		counters++;
181*bad11557SKoichi Okuno 
182*bad11557SKoichi Okuno 	for_each_sibling_event(sibling, leader) {
183*bad11557SKoichi Okuno 		if (sibling->pmu == event->pmu)
184*bad11557SKoichi Okuno 			counters++;
185*bad11557SKoichi Okuno 	}
186*bad11557SKoichi Okuno 
187*bad11557SKoichi Okuno 	/*
188*bad11557SKoichi Okuno 	 * If the group requires more counters than the HW has, it
189*bad11557SKoichi Okuno 	 * cannot ever be scheduled.
190*bad11557SKoichi Okuno 	 */
191*bad11557SKoichi Okuno 	return counters <= uncorepmu->num_counters;
192*bad11557SKoichi Okuno }
193*bad11557SKoichi Okuno 
fujitsu_uncore_event_init(struct perf_event * event)194*bad11557SKoichi Okuno static int fujitsu_uncore_event_init(struct perf_event *event)
195*bad11557SKoichi Okuno {
196*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
197*bad11557SKoichi Okuno 	struct hw_perf_event *hwc = &event->hw;
198*bad11557SKoichi Okuno 
199*bad11557SKoichi Okuno 	/* Is the event for this PMU? */
200*bad11557SKoichi Okuno 	if (event->attr.type != event->pmu->type)
201*bad11557SKoichi Okuno 		return -ENOENT;
202*bad11557SKoichi Okuno 
203*bad11557SKoichi Okuno 	/*
204*bad11557SKoichi Okuno 	 * Sampling not supported since these events are not
205*bad11557SKoichi Okuno 	 * core-attributable.
206*bad11557SKoichi Okuno 	 */
207*bad11557SKoichi Okuno 	if (is_sampling_event(event))
208*bad11557SKoichi Okuno 		return -EINVAL;
209*bad11557SKoichi Okuno 
210*bad11557SKoichi Okuno 	/*
211*bad11557SKoichi Okuno 	 * Task mode not available, we run the counters as socket counters,
212*bad11557SKoichi Okuno 	 * not attributable to any CPU and therefore cannot attribute per-task.
213*bad11557SKoichi Okuno 	 */
214*bad11557SKoichi Okuno 	if (event->cpu < 0)
215*bad11557SKoichi Okuno 		return -EINVAL;
216*bad11557SKoichi Okuno 
217*bad11557SKoichi Okuno 	/* Validate the group */
218*bad11557SKoichi Okuno 	if (!fujitsu_uncore_validate_event_group(event))
219*bad11557SKoichi Okuno 		return -EINVAL;
220*bad11557SKoichi Okuno 
221*bad11557SKoichi Okuno 	hwc->idx = -1;
222*bad11557SKoichi Okuno 
223*bad11557SKoichi Okuno 	event->cpu = uncorepmu->cpu;
224*bad11557SKoichi Okuno 
225*bad11557SKoichi Okuno 	return 0;
226*bad11557SKoichi Okuno }
227*bad11557SKoichi Okuno 
fujitsu_uncore_event_start(struct perf_event * event,int flags)228*bad11557SKoichi Okuno static void fujitsu_uncore_event_start(struct perf_event *event, int flags)
229*bad11557SKoichi Okuno {
230*bad11557SKoichi Okuno 	struct hw_perf_event *hwc = &event->hw;
231*bad11557SKoichi Okuno 
232*bad11557SKoichi Okuno 	hwc->state = 0;
233*bad11557SKoichi Okuno 	fujitsu_uncore_counter_start(event);
234*bad11557SKoichi Okuno }
235*bad11557SKoichi Okuno 
fujitsu_uncore_event_stop(struct perf_event * event,int flags)236*bad11557SKoichi Okuno static void fujitsu_uncore_event_stop(struct perf_event *event, int flags)
237*bad11557SKoichi Okuno {
238*bad11557SKoichi Okuno 	struct hw_perf_event *hwc = &event->hw;
239*bad11557SKoichi Okuno 
240*bad11557SKoichi Okuno 	if (hwc->state & PERF_HES_STOPPED)
241*bad11557SKoichi Okuno 		return;
242*bad11557SKoichi Okuno 
243*bad11557SKoichi Okuno 	fujitsu_uncore_counter_stop(event);
244*bad11557SKoichi Okuno 	if (flags & PERF_EF_UPDATE)
245*bad11557SKoichi Okuno 		fujitsu_uncore_counter_update(event);
246*bad11557SKoichi Okuno 	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
247*bad11557SKoichi Okuno }
248*bad11557SKoichi Okuno 
fujitsu_uncore_event_add(struct perf_event * event,int flags)249*bad11557SKoichi Okuno static int fujitsu_uncore_event_add(struct perf_event *event, int flags)
250*bad11557SKoichi Okuno {
251*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
252*bad11557SKoichi Okuno 	struct hw_perf_event *hwc = &event->hw;
253*bad11557SKoichi Okuno 	int idx;
254*bad11557SKoichi Okuno 
255*bad11557SKoichi Okuno 	/* Try to allocate a counter. */
256*bad11557SKoichi Okuno 	idx = bitmap_find_free_region(uncorepmu->used_mask, uncorepmu->num_counters, 0);
257*bad11557SKoichi Okuno 	if (idx < 0)
258*bad11557SKoichi Okuno 		/* The counters are all in use. */
259*bad11557SKoichi Okuno 		return -EAGAIN;
260*bad11557SKoichi Okuno 
261*bad11557SKoichi Okuno 	hwc->idx = idx;
262*bad11557SKoichi Okuno 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
263*bad11557SKoichi Okuno 	uncorepmu->events[idx] = event;
264*bad11557SKoichi Okuno 
265*bad11557SKoichi Okuno 	if (flags & PERF_EF_START)
266*bad11557SKoichi Okuno 		fujitsu_uncore_event_start(event, 0);
267*bad11557SKoichi Okuno 
268*bad11557SKoichi Okuno 	/* Propagate changes to the userspace mapping. */
269*bad11557SKoichi Okuno 	perf_event_update_userpage(event);
270*bad11557SKoichi Okuno 
271*bad11557SKoichi Okuno 	return 0;
272*bad11557SKoichi Okuno }
273*bad11557SKoichi Okuno 
fujitsu_uncore_event_del(struct perf_event * event,int flags)274*bad11557SKoichi Okuno static void fujitsu_uncore_event_del(struct perf_event *event, int flags)
275*bad11557SKoichi Okuno {
276*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
277*bad11557SKoichi Okuno 	struct hw_perf_event *hwc = &event->hw;
278*bad11557SKoichi Okuno 
279*bad11557SKoichi Okuno 	/* Stop and clean up */
280*bad11557SKoichi Okuno 	fujitsu_uncore_event_stop(event, flags | PERF_EF_UPDATE);
281*bad11557SKoichi Okuno 	uncorepmu->events[hwc->idx] = NULL;
282*bad11557SKoichi Okuno 	bitmap_release_region(uncorepmu->used_mask, hwc->idx, 0);
283*bad11557SKoichi Okuno 
284*bad11557SKoichi Okuno 	/* Propagate changes to the userspace mapping. */
285*bad11557SKoichi Okuno 	perf_event_update_userpage(event);
286*bad11557SKoichi Okuno }
287*bad11557SKoichi Okuno 
/* pmu::read callback: refresh event->count from the hardware counter. */
static void fujitsu_uncore_event_read(struct perf_event *event)
{
	fujitsu_uncore_counter_update(event);
}
292*bad11557SKoichi Okuno 
/* Build one read-only sysfs "format" attribute from a string literal. */
#define UNCORE_PMU_FORMAT_ATTR(_name, _config)				      \
	(&((struct dev_ext_attribute[]) {				      \
		{ .attr = __ATTR(_name, 0444, device_show_string, NULL),      \
		  .var = (void *)_config, }				      \
	})[0].attr.attr)

/* Event encoding lives in config bits 0-7 (see UNCORE_EVTYPE_MASK). */
static struct attribute *fujitsu_uncore_pmu_formats[] = {
	UNCORE_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL
};

static const struct attribute_group fujitsu_uncore_pmu_format_group = {
	.name = "format",
	.attrs = fujitsu_uncore_pmu_formats,
};
308*bad11557SKoichi Okuno 
fujitsu_uncore_pmu_event_show(struct device * dev,struct device_attribute * attr,char * page)309*bad11557SKoichi Okuno static ssize_t fujitsu_uncore_pmu_event_show(struct device *dev,
310*bad11557SKoichi Okuno 					     struct device_attribute *attr, char *page)
311*bad11557SKoichi Okuno {
312*bad11557SKoichi Okuno 	struct perf_pmu_events_attr *pmu_attr;
313*bad11557SKoichi Okuno 
314*bad11557SKoichi Okuno 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
315*bad11557SKoichi Okuno 	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
316*bad11557SKoichi Okuno }
317*bad11557SKoichi Okuno 
#define MAC_EVENT_ATTR(_name, _id)					     \
	PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)

/* Named events exposed under sysfs for the MAC (memory controller) PMU. */
static struct attribute *fujitsu_uncore_mac_pmu_events[] = {
	MAC_EVENT_ATTR(cycles,				0x00),
	MAC_EVENT_ATTR(read-count,			0x10),
	MAC_EVENT_ATTR(read-count-request,		0x11),
	MAC_EVENT_ATTR(read-count-return,		0x12),
	MAC_EVENT_ATTR(read-count-request-pftgt,	0x13),
	MAC_EVENT_ATTR(read-count-request-normal,	0x14),
	MAC_EVENT_ATTR(read-count-return-pftgt-hit,	0x15),
	MAC_EVENT_ATTR(read-count-return-pftgt-miss,	0x16),
	MAC_EVENT_ATTR(read-wait,			0x17),
	MAC_EVENT_ATTR(write-count,			0x20),
	MAC_EVENT_ATTR(write-count-write,		0x21),
	MAC_EVENT_ATTR(write-count-pwrite,		0x22),
	MAC_EVENT_ATTR(memory-read-count,		0x40),
	MAC_EVENT_ATTR(memory-write-count,		0x50),
	MAC_EVENT_ATTR(memory-pwrite-count,		0x60),
	MAC_EVENT_ATTR(ea-mac,				0x80),
	MAC_EVENT_ATTR(ea-memory,			0x90),
	MAC_EVENT_ATTR(ea-memory-mac-write,		0x92),
	MAC_EVENT_ATTR(ea-ha,				0xa0),
	NULL
};
343*bad11557SKoichi Okuno 
#define PCI_EVENT_ATTR(_name, _id)					     \
	PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)

/* Named events exposed under sysfs for the PCI PMU (two ports). */
static struct attribute *fujitsu_uncore_pci_pmu_events[] = {
	PCI_EVENT_ATTR(pci-port0-cycles,		0x00),
	PCI_EVENT_ATTR(pci-port0-read-count,		0x10),
	PCI_EVENT_ATTR(pci-port0-read-count-bus,	0x14),
	PCI_EVENT_ATTR(pci-port0-write-count,		0x20),
	PCI_EVENT_ATTR(pci-port0-write-count-bus,	0x24),
	PCI_EVENT_ATTR(pci-port1-cycles,		0x40),
	PCI_EVENT_ATTR(pci-port1-read-count,		0x50),
	PCI_EVENT_ATTR(pci-port1-read-count-bus,	0x54),
	PCI_EVENT_ATTR(pci-port1-write-count,		0x60),
	PCI_EVENT_ATTR(pci-port1-write-count-bus,	0x64),
	PCI_EVENT_ATTR(ea-pci,				0x80),
	NULL
};
361*bad11557SKoichi Okuno 
/* sysfs "events" group for each PMU flavour. */
static const struct attribute_group fujitsu_uncore_mac_pmu_events_group = {
	.name = "events",
	.attrs = fujitsu_uncore_mac_pmu_events,
};

static const struct attribute_group fujitsu_uncore_pci_pmu_events_group = {
	.name = "events",
	.attrs = fujitsu_uncore_pci_pmu_events,
};
371*bad11557SKoichi Okuno 
/* sysfs "cpumask" attribute: report the PMU's designated reader CPU. */
static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uncore_pmu *uncorepmu = to_uncore_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(uncorepmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *fujitsu_uncore_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group fujitsu_uncore_pmu_cpumask_attr_group = {
	.attrs = fujitsu_uncore_pmu_cpumask_attrs,
};
389*bad11557SKoichi Okuno 
/* Complete sysfs attribute group lists, one per PMU flavour. */
static const struct attribute_group *fujitsu_uncore_mac_pmu_attr_grps[] = {
	&fujitsu_uncore_pmu_format_group,
	&fujitsu_uncore_mac_pmu_events_group,
	&fujitsu_uncore_pmu_cpumask_attr_group,
	NULL
};

static const struct attribute_group *fujitsu_uncore_pci_pmu_attr_grps[] = {
	&fujitsu_uncore_pmu_format_group,
	&fujitsu_uncore_pci_pmu_events_group,
	&fujitsu_uncore_pmu_cpumask_attr_group,
	NULL
};
403*bad11557SKoichi Okuno 
/*
 * Hand the PMU over to a new reader CPU: migrate the perf context,
 * retarget the overflow IRQ, then record the new designation.
 */
static void fujitsu_uncore_pmu_migrate(struct uncore_pmu *uncorepmu, unsigned int cpu)
{
	perf_pmu_migrate_context(&uncorepmu->pmu, uncorepmu->cpu, cpu);
	irq_set_affinity(uncorepmu->irq, cpumask_of(cpu));
	uncorepmu->cpu = cpu;
}
410*bad11557SKoichi Okuno 
fujitsu_uncore_pmu_online_cpu(unsigned int cpu,struct hlist_node * cpuhp_node)411*bad11557SKoichi Okuno static int fujitsu_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
412*bad11557SKoichi Okuno {
413*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu;
414*bad11557SKoichi Okuno 	int node;
415*bad11557SKoichi Okuno 
416*bad11557SKoichi Okuno 	uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
417*bad11557SKoichi Okuno 	node = dev_to_node(uncorepmu->dev);
418*bad11557SKoichi Okuno 	if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
419*bad11557SKoichi Okuno 		fujitsu_uncore_pmu_migrate(uncorepmu, cpu);
420*bad11557SKoichi Okuno 
421*bad11557SKoichi Okuno 	return 0;
422*bad11557SKoichi Okuno }
423*bad11557SKoichi Okuno 
fujitsu_uncore_pmu_offline_cpu(unsigned int cpu,struct hlist_node * cpuhp_node)424*bad11557SKoichi Okuno static int fujitsu_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
425*bad11557SKoichi Okuno {
426*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu;
427*bad11557SKoichi Okuno 	unsigned int target;
428*bad11557SKoichi Okuno 	int node;
429*bad11557SKoichi Okuno 
430*bad11557SKoichi Okuno 	uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
431*bad11557SKoichi Okuno 	if (cpu != uncorepmu->cpu)
432*bad11557SKoichi Okuno 		return 0;
433*bad11557SKoichi Okuno 
434*bad11557SKoichi Okuno 	node = dev_to_node(uncorepmu->dev);
435*bad11557SKoichi Okuno 	target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
436*bad11557SKoichi Okuno 	if (target >= nr_cpu_ids)
437*bad11557SKoichi Okuno 		target = cpumask_any_but(cpu_online_mask, cpu);
438*bad11557SKoichi Okuno 
439*bad11557SKoichi Okuno 	if (target < nr_cpu_ids)
440*bad11557SKoichi Okuno 		fujitsu_uncore_pmu_migrate(uncorepmu, target);
441*bad11557SKoichi Okuno 
442*bad11557SKoichi Okuno 	return 0;
443*bad11557SKoichi Okuno }
444*bad11557SKoichi Okuno 
fujitsu_uncore_pmu_probe(struct platform_device * pdev)445*bad11557SKoichi Okuno static int fujitsu_uncore_pmu_probe(struct platform_device *pdev)
446*bad11557SKoichi Okuno {
447*bad11557SKoichi Okuno 	struct device *dev = &pdev->dev;
448*bad11557SKoichi Okuno 	unsigned long device_type = (unsigned long)device_get_match_data(dev);
449*bad11557SKoichi Okuno 	const struct attribute_group **attr_groups;
450*bad11557SKoichi Okuno 	struct uncore_pmu *uncorepmu;
451*bad11557SKoichi Okuno 	struct resource *memrc;
452*bad11557SKoichi Okuno 	size_t alloc_size;
453*bad11557SKoichi Okuno 	char *name;
454*bad11557SKoichi Okuno 	int ret;
455*bad11557SKoichi Okuno 	int irq;
456*bad11557SKoichi Okuno 	u64 uid;
457*bad11557SKoichi Okuno 
458*bad11557SKoichi Okuno 	ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
459*bad11557SKoichi Okuno 	if (ret)
460*bad11557SKoichi Okuno 		return dev_err_probe(dev, ret, "unable to read ACPI uid\n");
461*bad11557SKoichi Okuno 
462*bad11557SKoichi Okuno 	uncorepmu = devm_kzalloc(dev, sizeof(*uncorepmu), GFP_KERNEL);
463*bad11557SKoichi Okuno 	if (!uncorepmu)
464*bad11557SKoichi Okuno 		return -ENOMEM;
465*bad11557SKoichi Okuno 	uncorepmu->dev = dev;
466*bad11557SKoichi Okuno 	uncorepmu->cpu = cpumask_local_spread(0, dev_to_node(dev));
467*bad11557SKoichi Okuno 	platform_set_drvdata(pdev, uncorepmu);
468*bad11557SKoichi Okuno 
469*bad11557SKoichi Okuno 	switch (device_type) {
470*bad11557SKoichi Okuno 	case FUJITSU_UNCORE_PMU_MAC:
471*bad11557SKoichi Okuno 		uncorepmu->num_counters = MAC_NUM_COUNTERS;
472*bad11557SKoichi Okuno 		attr_groups = fujitsu_uncore_mac_pmu_attr_grps;
473*bad11557SKoichi Okuno 		name = devm_kasprintf(dev, GFP_KERNEL, "mac_iod%llu_mac%llu_ch%llu",
474*bad11557SKoichi Okuno 				      (uid >> 8) & 0xF, (uid >> 4) & 0xF, uid & 0xF);
475*bad11557SKoichi Okuno 		break;
476*bad11557SKoichi Okuno 	case FUJITSU_UNCORE_PMU_PCI:
477*bad11557SKoichi Okuno 		uncorepmu->num_counters = PCI_NUM_COUNTERS;
478*bad11557SKoichi Okuno 		attr_groups = fujitsu_uncore_pci_pmu_attr_grps;
479*bad11557SKoichi Okuno 		name = devm_kasprintf(dev, GFP_KERNEL, "pci_iod%llu_pci%llu",
480*bad11557SKoichi Okuno 				      (uid >> 4) & 0xF, uid & 0xF);
481*bad11557SKoichi Okuno 		break;
482*bad11557SKoichi Okuno 	default:
483*bad11557SKoichi Okuno 		return dev_err_probe(dev, -EINVAL, "illegal device type: %lu\n", device_type);
484*bad11557SKoichi Okuno 	}
485*bad11557SKoichi Okuno 	if (!name)
486*bad11557SKoichi Okuno 		return -ENOMEM;
487*bad11557SKoichi Okuno 
488*bad11557SKoichi Okuno 	uncorepmu->pmu = (struct pmu) {
489*bad11557SKoichi Okuno 		.parent		= dev,
490*bad11557SKoichi Okuno 		.task_ctx_nr	= perf_invalid_context,
491*bad11557SKoichi Okuno 
492*bad11557SKoichi Okuno 		.attr_groups	= attr_groups,
493*bad11557SKoichi Okuno 
494*bad11557SKoichi Okuno 		.pmu_enable	= fujitsu_uncore_pmu_enable,
495*bad11557SKoichi Okuno 		.pmu_disable	= fujitsu_uncore_pmu_disable,
496*bad11557SKoichi Okuno 		.event_init	= fujitsu_uncore_event_init,
497*bad11557SKoichi Okuno 		.add		= fujitsu_uncore_event_add,
498*bad11557SKoichi Okuno 		.del		= fujitsu_uncore_event_del,
499*bad11557SKoichi Okuno 		.start		= fujitsu_uncore_event_start,
500*bad11557SKoichi Okuno 		.stop		= fujitsu_uncore_event_stop,
501*bad11557SKoichi Okuno 		.read		= fujitsu_uncore_event_read,
502*bad11557SKoichi Okuno 
503*bad11557SKoichi Okuno 		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
504*bad11557SKoichi Okuno 	};
505*bad11557SKoichi Okuno 
506*bad11557SKoichi Okuno 	alloc_size = sizeof(uncorepmu->events[0]) * uncorepmu->num_counters;
507*bad11557SKoichi Okuno 	uncorepmu->events = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
508*bad11557SKoichi Okuno 	if (!uncorepmu->events)
509*bad11557SKoichi Okuno 		return -ENOMEM;
510*bad11557SKoichi Okuno 
511*bad11557SKoichi Okuno 	alloc_size = sizeof(uncorepmu->used_mask[0]) * BITS_TO_LONGS(uncorepmu->num_counters);
512*bad11557SKoichi Okuno 	uncorepmu->used_mask = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
513*bad11557SKoichi Okuno 	if (!uncorepmu->used_mask)
514*bad11557SKoichi Okuno 		return -ENOMEM;
515*bad11557SKoichi Okuno 
516*bad11557SKoichi Okuno 	uncorepmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
517*bad11557SKoichi Okuno 	if (IS_ERR(uncorepmu->regs))
518*bad11557SKoichi Okuno 		return PTR_ERR(uncorepmu->regs);
519*bad11557SKoichi Okuno 
520*bad11557SKoichi Okuno 	fujitsu_uncore_init(uncorepmu);
521*bad11557SKoichi Okuno 
522*bad11557SKoichi Okuno 	irq = platform_get_irq(pdev, 0);
523*bad11557SKoichi Okuno 	if (irq < 0)
524*bad11557SKoichi Okuno 		return irq;
525*bad11557SKoichi Okuno 
526*bad11557SKoichi Okuno 	ret = devm_request_irq(dev, irq, fujitsu_uncore_handle_irq,
527*bad11557SKoichi Okuno 			       IRQF_NOBALANCING | IRQF_NO_THREAD,
528*bad11557SKoichi Okuno 			       name, uncorepmu);
529*bad11557SKoichi Okuno 	if (ret)
530*bad11557SKoichi Okuno 		return dev_err_probe(dev, ret, "Failed to request IRQ:%d\n", irq);
531*bad11557SKoichi Okuno 
532*bad11557SKoichi Okuno 	ret = irq_set_affinity(irq, cpumask_of(uncorepmu->cpu));
533*bad11557SKoichi Okuno 	if (ret)
534*bad11557SKoichi Okuno 		return dev_err_probe(dev, ret, "Failed to set irq affinity:%d\n", irq);
535*bad11557SKoichi Okuno 
536*bad11557SKoichi Okuno 	uncorepmu->irq = irq;
537*bad11557SKoichi Okuno 
538*bad11557SKoichi Okuno 	/* Add this instance to the list used by the offline callback */
539*bad11557SKoichi Okuno 	ret = cpuhp_state_add_instance(uncore_pmu_cpuhp_state, &uncorepmu->node);
540*bad11557SKoichi Okuno 	if (ret)
541*bad11557SKoichi Okuno 		return dev_err_probe(dev, ret, "Error registering hotplug");
542*bad11557SKoichi Okuno 
543*bad11557SKoichi Okuno 	ret = perf_pmu_register(&uncorepmu->pmu, name, -1);
544*bad11557SKoichi Okuno 	if (ret < 0) {
545*bad11557SKoichi Okuno 		cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
546*bad11557SKoichi Okuno 		return dev_err_probe(dev, ret, "Failed to register %s PMU\n", name);
547*bad11557SKoichi Okuno 	}
548*bad11557SKoichi Okuno 
549*bad11557SKoichi Okuno 	dev_dbg(dev, "Registered %s, type: %d\n", name, uncorepmu->pmu.type);
550*bad11557SKoichi Okuno 
551*bad11557SKoichi Okuno 	return 0;
552*bad11557SKoichi Okuno }
553*bad11557SKoichi Okuno 
/* Driver detach: quiesce the hardware, then tear down perf/hotplug state. */
static void fujitsu_uncore_pmu_remove(struct platform_device *pdev)
{
	struct uncore_pmu *uncorepmu = platform_get_drvdata(pdev);

	/* Globally disable the perfmon unit before unregistering. */
	writeq_relaxed(0, uncorepmu->regs + PM_CR);

	perf_pmu_unregister(&uncorepmu->pmu);
	cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
}
563*bad11557SKoichi Okuno 
/* ACPI IDs; match data selects the PMU flavour (enum fujitsu_uncore_pmu). */
static const struct acpi_device_id fujitsu_uncore_pmu_acpi_match[] = {
	{ "FUJI200C", FUJITSU_UNCORE_PMU_MAC },
	{ "FUJI200D", FUJITSU_UNCORE_PMU_PCI },
	{ }
};
MODULE_DEVICE_TABLE(acpi, fujitsu_uncore_pmu_acpi_match);

static struct platform_driver fujitsu_uncore_pmu_driver = {
	.driver = {
		.name = "fujitsu-uncore-pmu",
		.acpi_match_table = fujitsu_uncore_pmu_acpi_match,
		/* No manual bind/unbind via sysfs. */
		.suppress_bind_attrs = true,
	},
	.probe = fujitsu_uncore_pmu_probe,
	.remove = fujitsu_uncore_pmu_remove,
};
580*bad11557SKoichi Okuno 
/*
 * Module init: register the hotplug callbacks first (probe depends on
 * uncore_pmu_cpuhp_state), then the platform driver; unwind the cpuhp
 * state if driver registration fails.
 */
static int __init fujitsu_uncore_pmu_init(void)
{
	int ret;

	/* Install a hook to update the reader CPU in case it goes offline */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/fujitsu/uncore:online",
				      fujitsu_uncore_pmu_online_cpu,
				      fujitsu_uncore_pmu_offline_cpu);
	if (ret < 0)
		return ret;

	uncore_pmu_cpuhp_state = ret;

	ret = platform_driver_register(&fujitsu_uncore_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);

	return ret;
}
601*bad11557SKoichi Okuno 
/* Module exit: unregister the driver, then release the cpuhp state. */
static void __exit fujitsu_uncore_pmu_exit(void)
{
	platform_driver_unregister(&fujitsu_uncore_pmu_driver);
	cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
}
607*bad11557SKoichi Okuno 
/* Module entry points and metadata. */
module_init(fujitsu_uncore_pmu_init);
module_exit(fujitsu_uncore_pmu_exit);

MODULE_AUTHOR("Koichi Okuno <fj2767dz@fujitsu.com>");
MODULE_DESCRIPTION("Fujitsu Uncore PMU driver");
MODULE_LICENSE("GPL");
614