// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Amlogic, Inc. All rights reserved.
 */

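/*
 * Core driver for the Amlogic DDR bandwidth PMU ("meson_ddr_bw"). The
 * SoC-specific front end (e.g. meson_g12_ddr_pmu.c) supplies info.hw_info:
 * register accessors, the number of monitor channels and the AXI filter
 * capabilities that this core builds the perf interface on top of.
 *
 * Illustrative usage from userspace (a sketch, not verified output; the
 * available format fields under .../format are SoC-specific):
 *
 *	# total read/write traffic across all AXI ports
 *	perf stat -a -e meson_ddr_bw/total_rw_bytes/ -- sleep 1
 *
 *	# channel 1 traffic, filtered to the AXI IDs selected in config1
 *	perf stat -a -e meson_ddr_bw/chan_1_rw_bytes,config1=0x1/ -- sleep 1
 */
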
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <soc/amlogic/meson_ddr_pmu.h>

struct ddr_pmu {
	struct pmu pmu;
	struct dmc_info info;
	struct dmc_counter counters;	/* save counters from hw */
	bool pmu_enabled;
	struct device *dev;
	char *name;
	struct hlist_node node;
	enum cpuhp_state cpuhp_state;
	int cpu;			/* for cpu hotplug */
};

#define DDR_PERF_DEV_NAME "meson_ddr_bw"
#define MAX_AXI_PORTS_OF_CHANNEL	4	/* a DMC channel can monitor at most 4 AXI ports */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
#define dmc_info_to_pmu(p)	container_of(p, struct ddr_pmu, info)

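/*
 * dmc_pmu_enable()/dmc_pmu_disable() gate the hardware counters;
 * pmu_enabled mirrors the hardware state so that dmc_irq_handler()
 * knows whether to re-arm the one-shot counter timer.
 */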
static void dmc_pmu_enable(struct ddr_pmu *pmu)
{
	if (!pmu->pmu_enabled)
		pmu->info.hw_info->enable(&pmu->info);

	pmu->pmu_enabled = true;
}

static void dmc_pmu_disable(struct ddr_pmu *pmu)
{
	if (pmu->pmu_enabled)
		pmu->info.hw_info->disable(&pmu->info);

	pmu->pmu_enabled = false;
}

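/*
 * AXI port filters only apply to the per-channel counters
 * (CHAN1..CHAN8); the channel index is derived from the event id,
 * so ALL_CHAN_COUNTER_ID events are left unfiltered.
 */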
static void meson_ddr_set_axi_filter(struct perf_event *event, u8 axi_id)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	int chann;

	if (event->attr.config > ALL_CHAN_COUNTER_ID &&
	    event->attr.config < COUNTER_MAX_ID) {
		chann = event->attr.config - CHAN1_COUNTER_ID;

		pmu->info.hw_info->set_axi_filter(&pmu->info, axi_id, chann);
	}
}

static void ddr_cnt_addition(struct dmc_counter *sum,
			     struct dmc_counter *add1,
			     struct dmc_counter *add2,
			     int chann_nr)
{
	int i;
	u64 cnt1, cnt2;

	sum->all_cnt = add1->all_cnt + add2->all_cnt;
	sum->all_req = add1->all_req + add2->all_req;
	for (i = 0; i < chann_nr; i++) {
		cnt1 = add1->channel_cnt[i];
		cnt2 = add2->channel_cnt[i];

		sum->channel_cnt[i] = cnt1 + cnt2;
	}
}

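/*
 * An event's value is the sum of what the IRQ handler has already
 * accumulated in pmu->counters and whatever is still latched in the
 * hardware registers. The counters are zeroed in ->start(), so the
 * sum is stored as an absolute value rather than a delta.
 */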
static void meson_ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	u64 new_raw_count = 0;
	struct dmc_counter dc = {0}, sum_dc = {0};
	int idx;
	int chann_nr = pmu->info.hw_info->chann_nr;

	/* get the counts remaining in the hardware registers. */
	pmu->info.hw_info->get_counters(&pmu->info, &dc);

	ddr_cnt_addition(&sum_dc, &pmu->counters, &dc, chann_nr);

	switch (event->attr.config) {
	case ALL_CHAN_COUNTER_ID:
		new_raw_count = sum_dc.all_cnt;
		break;
	case CHAN1_COUNTER_ID:
	case CHAN2_COUNTER_ID:
	case CHAN3_COUNTER_ID:
	case CHAN4_COUNTER_ID:
	case CHAN5_COUNTER_ID:
	case CHAN6_COUNTER_ID:
	case CHAN7_COUNTER_ID:
	case CHAN8_COUNTER_ID:
		idx = event->attr.config - CHAN1_COUNTER_ID;
		new_raw_count = sum_dc.channel_cnt[idx];
		break;
	}

	local64_set(&event->count, new_raw_count);
}

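/*
 * This is an uncore PMU: events are rejected unless they are CPU-bound,
 * non-sampling and not per-task, and a per-channel event may name at
 * most MAX_AXI_PORTS_OF_CHANNEL AXI ports across config1/config2.
 */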
static int meson_ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	u64 config1 = event->attr.config1;
	u64 config2 = event->attr.config2;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EOPNOTSUPP;

	/* check that the requested AXI ports do not exceed the filter capacity */
	if (event->attr.config != ALL_CHAN_COUNTER_ID &&
	    hweight64(config1) + hweight64(config2) > MAX_AXI_PORTS_OF_CHANNEL)
		return -EOPNOTSUPP;

	event->cpu = pmu->cpu;

	return 0;
}

static void meson_ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	memset(&pmu->counters, 0, sizeof(pmu->counters));
	dmc_pmu_enable(pmu);
}

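/*
 * config1/config2 form a 128-bit mask of AXI port IDs: bit i of config1
 * selects AXI ID i, bit i of config2 selects AXI ID i + 64.
 */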
static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
{
	u64 config1 = event->attr.config1;
	u64 config2 = event->attr.config2;
	int i;

	for_each_set_bit(i,
			 (const unsigned long *)&config1,
			 BITS_PER_TYPE(config1))
		meson_ddr_set_axi_filter(event, i);

	for_each_set_bit(i,
			 (const unsigned long *)&config2,
			 BITS_PER_TYPE(config2))
		meson_ddr_set_axi_filter(event, i + 64);

	if (flags & PERF_EF_START)
		meson_ddr_perf_event_start(event, flags);

	return 0;
}

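/*
 * ->stop() folds the hardware counts into the event before turning the
 * counters off; ->del() is just a stop that always updates.
 */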
static void meson_ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	if (flags & PERF_EF_UPDATE)
		meson_ddr_perf_event_update(event);

	dmc_pmu_disable(pmu);
}

static void meson_ddr_perf_event_del(struct perf_event *event, int flags)
{
	meson_ddr_perf_event_stop(event, PERF_EF_UPDATE);
}

static ssize_t meson_ddr_perf_cpumask_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute meson_ddr_perf_cpumask_attr =
__ATTR(cpumask, 0444, meson_ddr_perf_cpumask_show, NULL);

static struct attribute *meson_ddr_perf_cpumask_attrs[] = {
	&meson_ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = meson_ddr_perf_cpumask_attrs,
};

static ssize_t
pmu_event_show(struct device *dev, struct device_attribute *attr,
	       char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

static ssize_t
event_show_unit(struct device *dev, struct device_attribute *attr,
		char *page)
{
	return sysfs_emit(page, "MB\n");
}

static ssize_t
event_show_scale(struct device *dev, struct device_attribute *attr,
		 char *page)
{
	/* one count = 16 bytes = 1.52587890625e-05 MB */
	return sysfs_emit(page, "1.52587890625e-05\n");
}

#define AML_DDR_PMU_EVENT_ATTR(_name, _id)				\
{									\
	.attr = __ATTR(_name, 0444, pmu_event_show, NULL),		\
	.id = _id,							\
}

#define AML_DDR_PMU_EVENT_UNIT_ATTR(_name)				\
	__ATTR(_name.unit, 0444, event_show_unit, NULL)

#define AML_DDR_PMU_EVENT_SCALE_ATTR(_name)				\
	__ATTR(_name.scale, 0444, event_show_scale, NULL)

static struct device_attribute event_unit_attrs[] = {
	AML_DDR_PMU_EVENT_UNIT_ATTR(total_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_1_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_2_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_3_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_4_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_5_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_6_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_7_rw_bytes),
	AML_DDR_PMU_EVENT_UNIT_ATTR(chan_8_rw_bytes),
};

static struct device_attribute event_scale_attrs[] = {
	AML_DDR_PMU_EVENT_SCALE_ATTR(total_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_1_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_2_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_3_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_4_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_5_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_6_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_7_rw_bytes),
	AML_DDR_PMU_EVENT_SCALE_ATTR(chan_8_rw_bytes),
};

static struct perf_pmu_events_attr event_attrs[] = {
	AML_DDR_PMU_EVENT_ATTR(total_rw_bytes, ALL_CHAN_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_1_rw_bytes, CHAN1_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_2_rw_bytes, CHAN2_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_3_rw_bytes, CHAN3_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_4_rw_bytes, CHAN4_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_5_rw_bytes, CHAN5_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_6_rw_bytes, CHAN6_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_7_rw_bytes, CHAN7_COUNTER_ID),
	AML_DDR_PMU_EVENT_ATTR(chan_8_rw_bytes, CHAN8_COUNTER_ID),
};

/* three attrs combine into one event; + 1 for the terminating NULL */
static struct attribute *ddr_perf_events_attrs[COUNTER_MAX_ID * 3 + 1];

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

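/*
 * Format attributes come from the SoC-specific hw_info. Visibility is
 * decided by printing the attribute via its own ->show() ("configN:bit")
 * and checking that bit against the hardware capability mask.
 */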
static umode_t meson_ddr_perf_format_attr_visible(struct kobject *kobj,
						  struct attribute *attr,
						  int n)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
	const u64 *capability = ddr_pmu->info.hw_info->capability;
	struct device_attribute *dev_attr;
	int id;
	char value[20]; // config1:xxx, 20 is enough

	dev_attr = container_of(attr, struct device_attribute, attr);
	dev_attr->show(NULL, NULL, value);

	if (sscanf(value, "config1:%d", &id) == 1)
		return capability[0] & (1ULL << id) ? attr->mode : 0;

	if (sscanf(value, "config2:%d", &id) == 1)
		return capability[1] & (1ULL << id) ? attr->mode : 0;

	return attr->mode;
}

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.is_visible = meson_ddr_perf_format_attr_visible,
};

static ssize_t meson_ddr_perf_identifier_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->name);
}

static struct device_attribute meson_ddr_perf_identifier_attr =
__ATTR(identifier, 0444, meson_ddr_perf_identifier_show, NULL);

static struct attribute *meson_ddr_perf_identifier_attrs[] = {
	&meson_ddr_perf_identifier_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = meson_ddr_perf_identifier_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_identifier_attr_group,
	NULL,
};

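/*
 * The hardware latches a snapshot of the counters once per timer period.
 * The IRQ handler folds each snapshot into pmu->counters and, while the
 * PMU is enabled, re-arms the one-shot timer for the next period.
 */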
static irqreturn_t dmc_irq_handler(int irq, void *dev_id)
{
	struct dmc_info *info = dev_id;
	struct ddr_pmu *pmu;
	struct dmc_counter counters, *sum_cnter;
	int i;

	pmu = dmc_info_to_pmu(info);

	if (info->hw_info->irq_handler(info, &counters) != 0)
		goto out;

	sum_cnter = &pmu->counters;
	sum_cnter->all_cnt += counters.all_cnt;
	sum_cnter->all_req += counters.all_req;

	for (i = 0; i < pmu->info.hw_info->chann_nr; i++)
		sum_cnter->channel_cnt[i] += counters.channel_cnt[i];

	if (pmu->pmu_enabled)
		/*
		 * The timer interrupt only supports one-shot mode,
		 * so we have to re-enable it in the ISR to support
		 * continuous mode.
		 */
		info->hw_info->enable(info);

	dev_dbg(pmu->dev, "counts: %llu %llu %llu, %llu, %llu, %llu\t\t"
			"sum: %llu %llu %llu, %llu, %llu, %llu\n",
			counters.all_req,
			counters.all_cnt,
			counters.channel_cnt[0],
			counters.channel_cnt[1],
			counters.channel_cnt[2],
			counters.channel_cnt[3],

			pmu->counters.all_req,
			pmu->counters.all_cnt,
			pmu->counters.channel_cnt[0],
			pmu->counters.channel_cnt[1],
			pmu->counters.channel_cnt[2],
			pmu->counters.channel_cnt[3]);
out:
	return IRQ_HANDLED;
}

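/*
 * cpuhp callback: if the CPU the PMU is bound to goes offline, migrate
 * the perf context and the counter IRQ to any other online CPU.
 */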
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity(pmu->info.irq_num, cpumask_of(pmu->cpu)));

	return 0;
}

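/*
 * Build the "events" sysfs group: three attributes per counter
 * (<event>, <event>.unit, <event>.scale), first for the all-channel
 * counter and then one set per hardware channel, NULL-terminated.
 */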
static void fill_event_attr(struct ddr_pmu *pmu)
{
	int i, j, k;
	struct attribute **dst = ddr_perf_events_attrs;

	j = 0;
	k = 0;

	/* fill ALL_CHAN_COUNTER_ID event */
	dst[j++] = &event_attrs[k].attr.attr;
	dst[j++] = &event_unit_attrs[k].attr;
	dst[j++] = &event_scale_attrs[k].attr;

	k++;

	/* fill each channel event */
	for (i = 0; i < pmu->info.hw_info->chann_nr; i++, k++) {
		dst[j++] = &event_attrs[k].attr.attr;
		dst[j++] = &event_unit_attrs[k].attr;
		dst[j++] = &event_scale_attrs[k].attr;
	}

	dst[j] = NULL; /* mark end */
}

static void fmt_attr_fill(struct attribute **fmt_attr)
{
	ddr_perf_format_attr_group.attrs = fmt_attr;
}

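/*
 * Expected platform resources: dmc_nr MMIO regions for the DMC register
 * bases, one more for the PLL registers, plus the counter timer IRQ.
 */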
static int ddr_pmu_parse_dt(struct platform_device *pdev,
			    struct dmc_info *info)
{
	void __iomem *base;
	int i, ret;

	info->hw_info = of_device_get_match_data(&pdev->dev);

	for (i = 0; i < info->hw_info->dmc_nr; i++) {
		/* resources 0..dmc_nr-1 are the DDR register bases */
		base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(base))
			return PTR_ERR(base);

		info->ddr_reg[i] = base;
	}

	/* resource dmc_nr is the PLL register base */
	base = devm_platform_ioremap_resource(pdev, i);
	if (IS_ERR(base))
		return PTR_ERR(base);

	info->pll_reg = base;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;

	info->irq_num = ret;

	ret = devm_request_irq(&pdev->dev, info->irq_num, dmc_irq_handler,
			       IRQF_NOBALANCING, dev_name(&pdev->dev),
			       (void *)info);
	if (ret < 0)
		return ret;

	return 0;
}

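/*
 * Entry point for the SoC-specific platform drivers, which call this from
 * their probe(). A minimal sketch of such a glue driver (hypothetical
 * function names; meson_g12_ddr_pmu.c is the real example):
 *
 *	static int g12_ddr_pmu_probe(struct platform_device *pdev)
 *	{
 *		return meson_ddr_pmu_create(pdev);
 *	}
 *
 *	static int g12_ddr_pmu_remove(struct platform_device *pdev)
 *	{
 *		return meson_ddr_pmu_remove(pdev);
 *	}
 */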
int meson_ddr_pmu_create(struct platform_device *pdev)
{
	int ret;
	char *name;
	struct ddr_pmu *pmu;

	pmu = devm_kzalloc(&pdev->dev, sizeof(struct ddr_pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	*pmu = (struct ddr_pmu) {
		.pmu = {
			.module		= THIS_MODULE,
			.parent		= &pdev->dev,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr	= perf_invalid_context,
			.attr_groups	= attr_groups,
			.event_init	= meson_ddr_perf_event_init,
			.add		= meson_ddr_perf_event_add,
			.del		= meson_ddr_perf_event_del,
			.start		= meson_ddr_perf_event_start,
			.stop		= meson_ddr_perf_event_stop,
			.read		= meson_ddr_perf_event_update,
		},
	};

	ret = ddr_pmu_parse_dt(pdev, &pmu->info);
	if (ret < 0)
		return ret;

	fmt_attr_fill(pmu->info.hw_info->fmt_attr);

	pmu->cpu = smp_processor_id();

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, name, NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0)
		return ret;

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret)
		goto cpuhp_instance_err;

	fill_event_attr(pmu);

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto pmu_register_err;

	pmu->name = name;
	pmu->dev = &pdev->dev;
	pmu->pmu_enabled = false;

	platform_set_drvdata(pdev, pmu);

	return 0;

pmu_register_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);

cpuhp_instance_err:
	cpuhp_remove_state(pmu->cpuhp_state);

	return ret;
}

int meson_ddr_pmu_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&pmu->pmu);
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_state(pmu->cpuhp_state);

	return 0;
}