/*
 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
 *
 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>

#include "exynos-ppmu.h"

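/* Hardware resources of one PPMU instance: mapped registers and bus clock. */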
struct exynos_ppmu_data {
	void __iomem *base;
	struct clk *clk;
};

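/*
 * Driver state: the registered devfreq-event devices and their descriptors
 * (one per event parsed from the device tree), the parent device and the
 * PPMU hardware resources.
 */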
struct exynos_ppmu {
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	unsigned int num_events;

	struct device *dev;

	struct exynos_ppmu_data ppmu;
};

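/*
 * PPMU_EVENT(name) expands to four table entries, one per hardware
 * performance counter (PMNCNT0..PMNCNT3), for the given IP block name.
 */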
#define PPMU_EVENT(name)			\
	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }

struct __exynos_ppmu_events {
	char *name;
	int id;
} ppmu_events[] = {
	/* For Exynos3250, Exynos4 and Exynos5260 */
	PPMU_EVENT(g3d),
	PPMU_EVENT(fsys),

	/* For Exynos4 SoCs and Exynos3250 */
	PPMU_EVENT(dmc0),
	PPMU_EVENT(dmc1),
	PPMU_EVENT(cpu),
	PPMU_EVENT(rightbus),
	PPMU_EVENT(leftbus),
	PPMU_EVENT(lcd0),
	PPMU_EVENT(camif),

	/* Only for Exynos3250 and Exynos5260 */
	PPMU_EVENT(mfc),

	/* Only for Exynos4 SoCs */
	PPMU_EVENT(mfc-left),
	PPMU_EVENT(mfc-right),

	/* Only for Exynos5260 SoCs */
	PPMU_EVENT(drex0-s0),
	PPMU_EVENT(drex0-s1),
	PPMU_EVENT(drex1-s0),
	PPMU_EVENT(drex1-s1),
	PPMU_EVENT(eagle),
	PPMU_EVENT(kfc),
	PPMU_EVENT(isp),
	PPMU_EVENT(fimc),
	PPMU_EVENT(gscl),
	PPMU_EVENT(mscl),
	PPMU_EVENT(fimd0x),
	PPMU_EVENT(fimd1x),

	/* Only for Exynos5433 SoCs */
	PPMU_EVENT(d0-cpu),
	PPMU_EVENT(d0-general),
	PPMU_EVENT(d0-rt),
	PPMU_EVENT(d1-cpu),
	PPMU_EVENT(d1-general),
	PPMU_EVENT(d1-rt),
};

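/*
 * Map a devfreq-event device to its hardware counter index by matching the
 * event name against the ppmu_events[] table; returns -EINVAL for an
 * unknown name.
 */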
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
		if (!strcmp(edev->desc->name, ppmu_events[i].name))
			return ppmu_events[i].id;

	return -EINVAL;
}

/*
 * The devfreq-event ops structure for PPMU v1.1
 */
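/* Disable the cycle counter and all four event counters, then stop the PPMU. */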
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc;

	/* Disable all counters */
	__raw_writel(PPMU_CCNT_MASK |
		     PPMU_PMCNT0_MASK |
		     PPMU_PMCNT1_MASK |
		     PPMU_PMCNT2_MASK |
		     PPMU_PMCNT3_MASK,
		     info->ppmu.base + PPMU_CNTENC);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}

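/*
 * Start counting: enable the cycle counter and the counter selected by the
 * event name, program it to count Read/Write data, then reset both counters
 * and enable the PPMU.
 */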
static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);

	/* Select the Read/Write data count event */
	__raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
			info->ppmu.base + PPMU_BEVTxSEL(id));

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}

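/*
 * Stop the PPMU and report the cycle count as total_count and the selected
 * event count as load_count; counter 3 is read from a HIGH/LOW register pair.
 */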
static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
				struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	/* Read cycle count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);

	/* Read performance count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		edata->load_count
			= __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		edata->load_count =
			(((u64)__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 32)
			| __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
		break;
	default:
		return -EINVAL;
	}

	/* Disable specific counter */
	cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);

	dev_dbg(&edev->dev, "%s (event: %lu/%lu)\n", edev->desc->name,
					edata->load_count, edata->total_count);

	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_ops = {
	.disable = exynos_ppmu_disable,
	.set_event = exynos_ppmu_set_event,
	.get_event = exynos_ppmu_get_event,
};

/*
 * The devfreq-event ops structure for PPMU v2.0
 */
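/*
 * Disable every counter, clear the v2-only configuration registers
 * (interrupt, CIG, event-type and SM filter registers) and stop the PPMU.
 */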
static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc, clear;

	/* Disable all counters */
	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);

	__raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);

	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	return 0;
}

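/*
 * Program one v2 counter: enable it together with the cycle counter, select
 * the Read/Write data count event type, then reset the counters and start
 * the PPMU in manual mode.
 */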
static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);

	/* Select the Read/Write data count event */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		__raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
		break;
	case PPMU_PMNCNT3:
		__raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
		break;
	}

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK
			| PPMU_PMNC_CC_DIVIDER_MASK
			| PPMU_V2_PMNC_START_MODE_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	return 0;
}

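/*
 * Stop the v2 PPMU and report the cycle count as total_count and the
 * selected event count as load_count; counter 3 is 40 bits wide and is
 * read from a HIGH/LOW register pair.
 */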
static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
				    struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;
	u32 pmcnt_high, pmcnt_low;
	u64 load_count = 0;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	/* Read cycle count and performance count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);

	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
		pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
		load_count = ((u64)((pmcnt_high & 0xff)) << 32)
			   + (u64)pmcnt_low;
		break;
	}
	edata->load_count = load_count;

	/* Disable specific counter */
	cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);

	dev_dbg(&edev->dev, "%25s (load: %lu / %lu)\n", edev->desc->name,
					edata->load_count, edata->total_count);
	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
	.disable = exynos_ppmu_v2_disable,
	.set_event = exynos_ppmu_v2_set_event,
	.get_event = exynos_ppmu_v2_get_event,
};

static const struct of_device_id exynos_ppmu_id_match[] = {
	{
		.compatible = "samsung,exynos-ppmu",
		.data = (void *)&exynos_ppmu_ops,
	}, {
		.compatible = "samsung,exynos-ppmu-v2",
		.data = (void *)&exynos_ppmu_v2_ops,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);

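/* Return the event ops (PPMU v1.1 or v2.0) matched by the node's compatible string. */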
static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
{
	const struct of_device_id *match;

	match = of_match_node(exynos_ppmu_id_match, np);
	return (struct devfreq_event_ops *)match->data;
}

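/*
 * Walk the "events" child node, allocate a devfreq_event_desc for each child
 * whose node name is listed in ppmu_events[] and take the event name from
 * its "event-name" property.
 */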
static int of_get_devfreq_events(struct device_node *np,
				 struct exynos_ppmu *info)
{
	struct devfreq_event_desc *desc;
	struct devfreq_event_ops *event_ops;
	struct device *dev = info->dev;
	struct device_node *events_np, *node;
	int i, j, count;

	events_np = of_get_child_by_name(np, "events");
	if (!events_np) {
		dev_err(dev,
			"failed to get child node of devfreq-event devices\n");
		return -EINVAL;
	}
	event_ops = exynos_bus_get_ops(np);

	count = of_get_child_count(events_np);
	desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	info->num_events = count;

	j = 0;
	for_each_child_of_node(events_np, node) {
		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
			if (!ppmu_events[i].name)
				continue;

			if (!of_node_cmp(node->name, ppmu_events[i].name))
				break;
		}

		if (i == ARRAY_SIZE(ppmu_events)) {
			dev_warn(dev,
				"don't know how to configure events: %s\n",
				node->name);
			continue;
		}

		desc[j].ops = event_ops;
		desc[j].driver_data = info;

		of_property_read_string(node, "event-name", &desc[j].name);

		j++;
	}
	info->desc = desc;

	of_node_put(events_np);

	return 0;
}

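/*
 * Map the PPMU register block, get the optional "ppmu" clock and parse the
 * event descriptions from the device-tree node.
 */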
static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
{
	struct device *dev = info->dev;
	struct device_node *np = dev->of_node;
	int ret = 0;

	if (!np) {
		dev_err(dev, "failed to find devicetree node\n");
		return -EINVAL;
	}

	/* Map the memory-mapped I/O region of the PPMU registers */
	info->ppmu.base = of_iomap(np, 0);
	if (IS_ERR_OR_NULL(info->ppmu.base)) {
		dev_err(dev, "failed to map memory region\n");
		return -ENOMEM;
	}

	info->ppmu.clk = devm_clk_get(dev, "ppmu");
	if (IS_ERR(info->ppmu.clk)) {
		info->ppmu.clk = NULL;
		dev_warn(dev, "cannot get PPMU clock\n");
	}

	ret = of_get_devfreq_events(np, info);
	if (ret < 0) {
		dev_err(dev, "failed to parse exynos ppmu dt node\n");
		goto err;
	}

	return 0;

err:
	iounmap(info->ppmu.base);

	return ret;
}

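/*
 * Probe: parse the device tree, register one devfreq-event device per parsed
 * descriptor and enable the PPMU clock.
 */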
static int exynos_ppmu_probe(struct platform_device *pdev)
{
	struct exynos_ppmu *info;
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	int i, ret = 0, size;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = &pdev->dev;

	/* Parse dt data to get resource */
	ret = exynos_ppmu_parse_dt(info);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to parse devicetree for resource\n");
		return ret;
	}
	desc = info->desc;

	size = sizeof(struct devfreq_event_dev *) * info->num_events;
	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!info->edev) {
		dev_err(&pdev->dev,
			"failed to allocate memory for devfreq-event devices\n");
		ret = -ENOMEM;
		goto err;
	}
	edev = info->edev;
	platform_set_drvdata(pdev, info);

	for (i = 0; i < info->num_events; i++) {
		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
		if (IS_ERR(edev[i])) {
			ret = PTR_ERR(edev[i]);
			dev_err(&pdev->dev,
				"failed to add devfreq-event device\n");
			goto err;
		}
	}

	clk_prepare_enable(info->ppmu.clk);

	return 0;
err:
	iounmap(info->ppmu.base);

	return ret;
}

static int exynos_ppmu_remove(struct platform_device *pdev)
{
	struct exynos_ppmu *info = platform_get_drvdata(pdev);

	clk_disable_unprepare(info->ppmu.clk);
	iounmap(info->ppmu.base);

	return 0;
}

static struct platform_driver exynos_ppmu_driver = {
	.probe	= exynos_ppmu_probe,
	.remove	= exynos_ppmu_remove,
	.driver = {
		.name	= "exynos-ppmu",
		.of_match_table = exynos_ppmu_id_match,
	},
};
module_platform_driver(exynos_ppmu_driver);

MODULE_DESCRIPTION("Exynos PPMU (Platform Performance Monitoring Unit) driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");