xref: /linux/drivers/devfreq/event/exynos-ppmu.c (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
/*
 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
 *
 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>

#include "exynos-ppmu.h"

struct exynos_ppmu_data {
	void __iomem *base;
	struct clk *clk;
};

struct exynos_ppmu {
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	unsigned int num_events;

	struct device *dev;
	struct mutex lock;

	struct exynos_ppmu_data ppmu;
};

#define PPMU_EVENT(name)			\
	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }

struct __exynos_ppmu_events {
	char *name;
	int id;
} ppmu_events[] = {
	/* For Exynos3250, Exynos4 and Exynos5260 */
	PPMU_EVENT(g3d),
	PPMU_EVENT(fsys),

	/* For Exynos4 SoCs and Exynos3250 */
	PPMU_EVENT(dmc0),
	PPMU_EVENT(dmc1),
	PPMU_EVENT(cpu),
	PPMU_EVENT(rightbus),
	PPMU_EVENT(leftbus),
	PPMU_EVENT(lcd0),
	PPMU_EVENT(camif),

	/* Only for Exynos3250 and Exynos5260 */
	PPMU_EVENT(mfc),

	/* Only for Exynos4 SoCs */
	PPMU_EVENT(mfc-left),
	PPMU_EVENT(mfc-right),

	/* Only for Exynos5260 SoCs */
	PPMU_EVENT(drex0-s0),
	PPMU_EVENT(drex0-s1),
	PPMU_EVENT(drex1-s0),
	PPMU_EVENT(drex1-s1),
	PPMU_EVENT(eagle),
	PPMU_EVENT(kfc),
	PPMU_EVENT(isp),
	PPMU_EVENT(fimc),
	PPMU_EVENT(gscl),
	PPMU_EVENT(mscl),
	PPMU_EVENT(fimd0x),
	PPMU_EVENT(fimd1x),

	/* Only for Exynos5433 SoCs */
	PPMU_EVENT(d0-cpu),
	PPMU_EVENT(d0-general),
	PPMU_EVENT(d0-rt),
	PPMU_EVENT(d1-cpu),
	PPMU_EVENT(d1-general),
	PPMU_EVENT(d1-rt),

	{ /* sentinel */ },
};

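/*
 * Look up the hardware counter (PPMU_PMNCNT0..3) that corresponds to the
 * devfreq-event device name, as listed in ppmu_events[] above.
 */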
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
		/* Skip the sentinel entry, which has no name */
		if (!ppmu_events[i].name)
			continue;

		if (!strcmp(edev->desc->name, ppmu_events[i].name))
			return ppmu_events[i].id;
	}

	return -EINVAL;
}

/*
 * The devfreq-event ops structure for PPMU v1.1
 */
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc;

	/* Disable all counters */
	__raw_writel(PPMU_CCNT_MASK |
		     PPMU_PMCNT0_MASK |
		     PPMU_PMCNT1_MASK |
		     PPMU_PMCNT2_MASK |
		     PPMU_PMCNT3_MASK,
		     info->ppmu.base + PPMU_CNTENC);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}

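/*
 * Program the counter selected by the event name to count read/write data
 * transfers, reset the cycle/performance counters and start the PPMU.
 */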
static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);

	/* Set the event of Read/Write data count */
	__raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
			info->ppmu.base + PPMU_BEVTxSEL(id));

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}

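/*
 * Stop the PPMU, report the cycle count (total_count) and the event count
 * (load_count) to the devfreq-event core, then disable the used counters.
 */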
static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
				struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	/* Read cycle count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);

	/* Read performance count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		edata->load_count
			= __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		edata->load_count =
			((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
			| __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
		break;
	default:
		return -EINVAL;
	}

	/* Disable specific counter */
	cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);

	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);

	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_ops = {
	.disable = exynos_ppmu_disable,
	.set_event = exynos_ppmu_set_event,
	.get_event = exynos_ppmu_get_event,
};

/*
 * The devfreq-event ops structure for PPMU v2.0
 */
static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc, clear;

	/* Disable all counters */
	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);

	__raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);

	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
	__raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	return 0;
}

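/*
 * Select the read/write data-count event for the counter that matches the
 * event name, reset the counters and start the PPMU in manual mode.
 */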
static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable all counters */
	cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);

	/* Set the event of Read/Write data count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		__raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
		break;
	case PPMU_PMNCNT3:
		__raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
		break;
	}

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK
			| PPMU_PMNC_CC_DIVIDER_MASK
			| PPMU_V2_PMNC_START_MODE_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	return 0;
}

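/*
 * Stop the PPMU and report the counter values. PMNCNT3 is a wider counter
 * whose upper bits live in a separate HIGH register, so both halves are
 * combined into a 64-bit load_count.
 */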
static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
				    struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;
	u32 pmcnt_high, pmcnt_low;
	u64 load_count = 0;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);

	/* Read cycle count and performance count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);

	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
		pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
		load_count = ((u64)(pmcnt_high & 0xff) << 32)
			   + (u64)pmcnt_low;
		break;
	}
	edata->load_count = load_count;

	/* Disable all counters */
	cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);

	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);
	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
	.disable = exynos_ppmu_v2_disable,
	.set_event = exynos_ppmu_v2_set_event,
	.get_event = exynos_ppmu_v2_get_event,
};

static const struct of_device_id exynos_ppmu_id_match[] = {
	{
		.compatible = "samsung,exynos-ppmu",
		.data = (void *)&exynos_ppmu_ops,
	}, {
		.compatible = "samsung,exynos-ppmu-v2",
		.data = (void *)&exynos_ppmu_v2_ops,
	},
	{ /* sentinel */ },
};

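/*
 * Pick the v1.1 or v2.0 ops according to which compatible string in
 * exynos_ppmu_id_match[] matched the device node.
 */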
static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
{
	const struct of_device_id *match;

	match = of_match_node(exynos_ppmu_id_match, np);
	return (struct devfreq_event_ops *)match->data;
}

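/*
 * Parse the "events" child node and build one devfreq_event_desc for each
 * sub-node whose name is listed in ppmu_events[]. The descriptor name is
 * taken from the "event-name" property. An illustrative fragment of the
 * expected devicetree layout (the labels and the unit address below are
 * examples only):
 *
 *	ppmu_dmc0: ppmu@106a0000 {
 *		compatible = "samsung,exynos-ppmu";
 *		reg = <0x106a0000 0x2000>;
 *
 *		events {
 *			ppmu_dmc0_3: ppmu-event3-dmc0 {
 *				event-name = "ppmu-event3-dmc0";
 *			};
 *		};
 *	};
 */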
static int of_get_devfreq_events(struct device_node *np,
				 struct exynos_ppmu *info)
{
	struct devfreq_event_desc *desc;
	struct devfreq_event_ops *event_ops;
	struct device *dev = info->dev;
	struct device_node *events_np, *node;
	int i, j, count;

	events_np = of_get_child_by_name(np, "events");
	if (!events_np) {
		dev_err(dev,
			"failed to get child node of devfreq-event devices\n");
		return -EINVAL;
	}
	event_ops = exynos_bus_get_ops(np);

	count = of_get_child_count(events_np);
	desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
	if (!desc) {
		of_node_put(events_np);
		return -ENOMEM;
	}
	info->num_events = count;

	j = 0;
	for_each_child_of_node(events_np, node) {
		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
			if (!ppmu_events[i].name)
				continue;

			if (!of_node_cmp(node->name, ppmu_events[i].name))
				break;
		}

		if (i == ARRAY_SIZE(ppmu_events)) {
			dev_warn(dev,
				"don't know how to configure event: %s\n",
				node->name);
			continue;
		}

		desc[j].ops = event_ops;
		desc[j].driver_data = info;

		of_property_read_string(node, "event-name", &desc[j].name);

		j++;
	}
	info->desc = desc;

	of_node_put(events_np);

	return 0;
}

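/*
 * Gather the resources described in the devicetree node: the PPMU register
 * region, the (optional) "ppmu" clock and the per-counter event nodes.
 */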
static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
{
	struct device *dev = info->dev;
	struct device_node *np = dev->of_node;
	int ret = 0;

	if (!np) {
		dev_err(dev, "failed to find devicetree node\n");
		return -EINVAL;
	}

	/* Map the memory-mapped I/O region holding the PPMU registers */
	info->ppmu.base = of_iomap(np, 0);
	if (!info->ppmu.base) {
		dev_err(dev, "failed to map memory region\n");
		return -ENOMEM;
	}

	info->ppmu.clk = devm_clk_get(dev, "ppmu");
	if (IS_ERR(info->ppmu.clk)) {
		info->ppmu.clk = NULL;
		dev_warn(dev, "cannot get PPMU clock\n");
	}

	ret = of_get_devfreq_events(np, info);
	if (ret < 0) {
		dev_err(dev, "failed to parse exynos ppmu dt node\n");
		goto err;
	}

	return 0;

err:
	iounmap(info->ppmu.base);

	return ret;
}

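/*
 * Probe: parse the devicetree node, register one devfreq-event device per
 * described event and ungate the PPMU clock.
 */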
static int exynos_ppmu_probe(struct platform_device *pdev)
{
	struct exynos_ppmu *info;
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	int i, ret = 0, size;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	mutex_init(&info->lock);
	info->dev = &pdev->dev;

	/* Parse dt data to get resource */
	ret = exynos_ppmu_parse_dt(info);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to parse devicetree for resource\n");
		return ret;
	}
	desc = info->desc;

	size = sizeof(struct devfreq_event_dev *) * info->num_events;
	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!info->edev) {
		dev_err(&pdev->dev,
			"failed to allocate memory for devfreq-event devices\n");
		ret = -ENOMEM;
		goto err;
	}
	edev = info->edev;
	platform_set_drvdata(pdev, info);

	for (i = 0; i < info->num_events; i++) {
		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
		if (IS_ERR(edev[i])) {
			ret = PTR_ERR(edev[i]);
			dev_err(&pdev->dev,
				"failed to add devfreq-event device\n");
			goto err;
		}
	}

	clk_prepare_enable(info->ppmu.clk);

	return 0;
err:
	iounmap(info->ppmu.base);

	return ret;
}

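/*
 * Tear down what probe set up: gate the clock and unmap the PPMU registers.
 * The devfreq-event devices themselves are devm-managed.
 */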
static int exynos_ppmu_remove(struct platform_device *pdev)
{
	struct exynos_ppmu *info = platform_get_drvdata(pdev);

	clk_disable_unprepare(info->ppmu.clk);
	iounmap(info->ppmu.base);

	return 0;
}

static struct platform_driver exynos_ppmu_driver = {
	.probe	= exynos_ppmu_probe,
	.remove	= exynos_ppmu_remove,
	.driver = {
		.name	= "exynos-ppmu",
		.of_match_table = exynos_ppmu_id_match,
	},
};
module_platform_driver(exynos_ppmu_driver);

MODULE_DESCRIPTION("Exynos PPMU (Platform Performance Monitoring Unit) driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");