// SPDX-License-Identifier: GPL-2.0-only
/*
 * A devfreq driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
 * Copyright (C) 2014 Google, Inc
 */

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
#include <linux/workqueue.h>

#include <soc/tegra/fuse.h>

#include "governor.h"

#define ACTMON_GLB_STATUS					0x0
#define ACTMON_GLB_PERIOD_CTRL					0x4

#define ACTMON_DEV_CTRL						0x0
#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
#define ACTMON_DEV_CTRL_ENB					BIT(31)

#define ACTMON_DEV_CTRL_STOP					0x00000000

#define ACTMON_DEV_UPPER_WMARK					0x4
#define ACTMON_DEV_LOWER_WMARK					0x8
#define ACTMON_DEV_INIT_AVG					0xc
#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
#define ACTMON_DEV_COUNT_WEIGHT					0x18
#define ACTMON_DEV_AVG_COUNT					0x20
#define ACTMON_DEV_INTR_STATUS					0x24

#define ACTMON_INTR_STATUS_CLEAR				0xffffffff

#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)

#define ACTMON_ABOVE_WMARK_WINDOW				1
#define ACTMON_BELOW_WMARK_WINDOW				3
#define ACTMON_BOOST_FREQ_STEP					16000

/*
 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 */
#define ACTMON_AVERAGE_WINDOW_LOG2			6
#define ACTMON_SAMPLING_PERIOD				12 /* ms */
#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */

#define KHZ							1000

#define KHZ_MAX						(ULONG_MAX / KHZ)

/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO					25

/**
 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 * device
 *
 * Coefficients and thresholds are percentages unless otherwise noted
 */
struct tegra_devfreq_device_config {
	u32		offset;
	u32		irq_mask;

	/* Factors applied to boost_freq every consecutive watermark breach */
	unsigned int	boost_up_coeff;
	unsigned int	boost_down_coeff;

	/* Define the watermark bounds when applied to the current avg */
	unsigned int	boost_up_threshold;
	unsigned int	boost_down_threshold;

	/*
	 * Threshold of activity (cycles translated to kHz) below which the
	 * CPU frequency isn't to be taken into account. This is to avoid
	 * increasing the EMC frequency when the CPU is very busy but not
	 * accessing the bus often.
	 */
	u32		avg_dependency_threshold;
};

enum tegra_actmon_device {
	MCALL = 0,
	MCCPU,
};

static const struct tegra_devfreq_device_config tegra124_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 60,
		.boost_down_threshold = 40,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};

static const struct tegra_devfreq_device_config tegra30_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 20,
		.boost_down_threshold = 10,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};

/**
 * struct tegra_devfreq_device - state specific to an ACTMON device
 *
 * Frequencies are in kHz.
 */
struct tegra_devfreq_device {
	const struct tegra_devfreq_device_config *config;
	void __iomem *regs;

	/* Average event count sampled in the last interrupt */
	u32 avg_count;

	/*
	 * Extra frequency to increase the target by due to consecutive
	 * watermark breaches.
	 */
	unsigned long boost_freq;

	/* Optimal frequency calculated from the stats for this device */
	unsigned long target_freq;
};

struct tegra_devfreq_soc_data {
	const struct tegra_devfreq_device_config *configs;
	/* Weight value for count measurements */
	unsigned int count_weight;
};

struct tegra_devfreq {
	struct devfreq		*devfreq;

	struct reset_control	*reset;
	struct clk		*clock;
	void __iomem		*regs;

	struct clk		*emc_clock;
	unsigned long		max_freq;
	unsigned long		cur_freq;
	struct notifier_block	clk_rate_change_nb;

	struct delayed_work	cpufreq_update_work;
	struct notifier_block	cpu_rate_change_nb;

	struct tegra_devfreq_device devices[2];

	unsigned int		irq;

	bool			started;

	const struct tegra_devfreq_soc_data *soc;
};

struct tegra_actmon_emc_ratio {
	unsigned long cpu_freq;
	unsigned long emc_freq;
};

static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
	{ 1400000,    KHZ_MAX },
	{ 1200000,    750000 },
	{ 1100000,    600000 },
	{ 1000000,    500000 },
	{  800000,    375000 },
	{  500000,    200000 },
	{  250000,    100000 },
};

static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
{
	return readl_relaxed(tegra->regs + offset);
}

static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
{
	writel_relaxed(val, tegra->regs + offset);
}

static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
{
	return readl_relaxed(dev->regs + offset);
}

static void device_writel(struct tegra_devfreq_device *dev, u32 val,
			  u32 offset)
{
	writel_relaxed(val, dev->regs + offset);
}

static unsigned long do_percent(unsigned long long val, unsigned int pct)
{
	val = val * pct;
	do_div(val, 100);

	/*
	 * A high frequency, a high boosting percentage and a large polling
	 * interval can result in an integer overflow when the watermarks
	 * are calculated.
	 */
	return min_t(u64, val, U32_MAX);
}

static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
					   struct tegra_devfreq_device *dev)
{
	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
	u32 avg;

	avg = min(dev->avg_count, U32_MAX - band);
	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);

	avg = max(dev->avg_count, band);
	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
}

static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
				       struct tegra_devfreq_device *dev)
{
	u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;

	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
		      ACTMON_DEV_UPPER_WMARK);

	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
		      ACTMON_DEV_LOWER_WMARK);
}

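/*
 * Handle the consecutive-watermark interrupts of a single ACTMON device:
 * sample the hardware's average count, refresh the average watermarks and
 * grow or shrink the device's boost frequency accordingly.
 */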
static void actmon_isr_device(struct tegra_devfreq *tegra,
			      struct tegra_devfreq_device *dev)
{
	u32 intr_status, dev_ctrl;

	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(tegra, dev);

	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);

	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;

		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		if (dev->boost_freq >= tegra->max_freq) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
			dev->boost_freq = tegra->max_freq;
		}
	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);

		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
			dev->boost_freq = 0;
		}
	}

	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);

	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
}

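/*
 * The ratios table is sorted by descending CPU frequency: return the EMC
 * floor of the first entry reached by the given CPU frequency, clamped to
 * the maximum EMC rate, or 0 if the CPU runs below the lowest entry.
 */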
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
					    unsigned long cpu_freq)
{
	unsigned int i;
	const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;

	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
		if (cpu_freq >= ratio->cpu_freq) {
			if (ratio->emc_freq >= tegra->max_freq)
				return tegra->max_freq;
			else
				return ratio->emc_freq;
		}
	}

	return 0;
}

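/*
 * dev->avg_count / polling_ms approximates the average activity in kHz.
 * Scaling it by 100 / boost_up_threshold (in percent) selects a frequency
 * at which the measured activity sits at the boost-up threshold instead
 * of at 100% utilization.
 */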
static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
					       struct tegra_devfreq_device *dev)
{
	unsigned int avg_sustain_coef;
	unsigned long target_freq;

	target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
	target_freq = do_percent(target_freq, avg_sustain_coef);

	return target_freq;
}

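/*
 * The CPU-derived EMC floor is only taken into account (for the MCCPU
 * device) when the measured activity exceeds the dependency threshold, so
 * that a busy but not memory-bound CPU doesn't raise the EMC rate.
 */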
static void actmon_update_target(struct tegra_devfreq *tegra,
				 struct tegra_devfreq_device *dev)
{
	unsigned long cpu_freq = 0;
	unsigned long static_cpu_emc_freq = 0;

	dev->target_freq = actmon_device_target_freq(tegra, dev);

	if (dev->config->avg_dependency_threshold &&
	    dev->config->avg_dependency_threshold <= dev->target_freq) {
		cpu_freq = cpufreq_quick_get(0);
		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

		dev->target_freq += dev->boost_freq;
		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
	} else {
		dev->target_freq += dev->boost_freq;
	}
}

static irqreturn_t actmon_thread_isr(int irq, void *data)
{
	struct tegra_devfreq *tegra = data;
	bool handled = false;
	unsigned int i;
	u32 val;

	mutex_lock(&tegra->devfreq->lock);

	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		if (val & tegra->devices[i].config->irq_mask) {
			actmon_isr_device(tegra, tegra->devices + i);
			handled = true;
		}
	}

	if (handled)
		update_devfreq(tegra->devfreq);

	mutex_unlock(&tegra->devfreq->lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

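/*
 * The boost watermarks are expressed in counts per sampling period at the
 * current EMC rate, hence they are re-programmed whenever the EMC clock
 * rate changes.
 */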
static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct clk_notifier_data *data = ptr;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned int i;

	if (action != POST_RATE_CHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);

	tegra->cur_freq = data->new_rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		tegra_devfreq_update_wmark(tegra, dev);
	}

	return NOTIFY_OK;
}

static void tegra_actmon_delayed_update(struct work_struct *work)
{
	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
						   cpufreq_update_work.work);

	mutex_lock(&tegra->devfreq->lock);
	update_devfreq(tegra->devfreq);
	mutex_unlock(&tegra->devfreq->lock);
}

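/*
 * Returns the CPU-derived EMC floor if the MCCPU activity is above the
 * dependency threshold and the floor isn't already covered by the
 * device's target + boost frequencies, otherwise returns 0.
 */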
static unsigned long
tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
				  unsigned int cpu_freq)
{
	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
	unsigned long static_cpu_emc_freq, dev_freq;

	dev_freq = actmon_device_target_freq(tegra, actmon_dev);

	/* check whether CPU's freq is taken into account at all */
	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
		return 0;

	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

	if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
		return 0;

	return static_cpu_emc_freq;
}

static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct cpufreq_freqs *freqs = ptr;
	struct tegra_devfreq *tegra;
	unsigned long old, new, delay;

	if (action != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);

	/*
	 * Quickly check whether CPU frequency should be taken into account
	 * at all, without blocking CPUFreq's core.
	 */
	if (mutex_trylock(&tegra->devfreq->lock)) {
		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
		mutex_unlock(&tegra->devfreq->lock);

		/*
		 * If CPU's frequency shouldn't be taken into account at
		 * the moment, then there is no need to update the devfreq's
		 * state because ISR will re-check CPU's frequency on the
		 * next interrupt.
		 */
		if (old == new)
			return NOTIFY_OK;
	}

	/*
	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
	 * to allow asynchronous notifications. This means we can't block
	 * here for too long, otherwise CPUFreq's core will complain with a
	 * warning splat.
	 */
	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
	schedule_delayed_work(&tegra->cpufreq_update_work, delay);

	return NOTIFY_OK;
}

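/*
 * Program a single ACTMON device: seed the initial average with the count
 * expected at the current EMC rate, set up the watermarks and enable
 * periodic monitoring.
 */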
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
{
	u32 val = 0;

	/* reset boosting on governor's restart */
	dev->boost_freq = 0;

	dev->target_freq = tegra->cur_freq;

	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(tegra, dev);
	tegra_devfreq_update_wmark(tegra, dev);

	device_writel(dev, tegra->soc->count_weight, ACTMON_DEV_COUNT_WEIGHT);
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);

	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_ENB;

	device_writel(dev, val, ACTMON_DEV_CTRL);
}

static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
{
	struct tegra_devfreq_device *dev = tegra->devices;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
		device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
		device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
			      ACTMON_DEV_INTR_STATUS);
	}
}

static int tegra_actmon_resume(struct tegra_devfreq *tegra)
{
	unsigned int i;
	int err;

	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return 0;

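	/* the period register holds the sampling period in ms, minus one */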
	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
		      ACTMON_GLB_PERIOD_CTRL);

	/*
	 * CLK notifications are needed in order to reconfigure the upper
	 * consecutive watermark in accordance with the actual clock rate,
	 * to avoid unnecessary upper interrupts.
	 */
	err = clk_notifier_register(tegra->emc_clock,
				    &tegra->clk_rate_change_nb);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier\n");
		return err;
	}

	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
		tegra_actmon_configure_device(tegra, &tegra->devices[i]);

	/*
	 * We estimate the CPU's memory bandwidth requirement based on the
	 * number of memory accesses and the system's load, judging by the
	 * CPU's frequency. We also don't want to receive events about CPU
	 * frequency transitions while the governor is stopped, hence the
	 * notifier is registered dynamically.
	 */
	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier: %d\n", err);
		goto err_stop;
	}

	enable_irq(tegra->irq);

	return 0;

err_stop:
	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);

	return err;
}

static int tegra_actmon_start(struct tegra_devfreq *tegra)
{
	int ret = 0;

	if (!tegra->started) {
		tegra->started = true;

		ret = tegra_actmon_resume(tegra);
		if (ret)
			tegra->started = false;
	}

	return ret;
}

static void tegra_actmon_pause(struct tegra_devfreq *tegra)
{
	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return;

	disable_irq(tegra->irq);

	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);

	cancel_delayed_work_sync(&tegra->cpufreq_update_work);

	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
}

static void tegra_actmon_stop(struct tegra_devfreq *tegra)
{
	tegra_actmon_pause(tegra);
	tegra->started = false;
}

static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
				u32 flags)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}

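/*
 * Report the raw MCALL activity as busy_time, scaled so that
 * BUS_SATURATION_RATIO percent of utilization reads as fully busy, for
 * the benefit of the generic governors.
 */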
static int tegra_devfreq_get_dev_status(struct device *dev,
					struct devfreq_dev_status *stat)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
	struct tegra_devfreq_device *actmon_dev;
	unsigned long cur_freq;

	cur_freq = READ_ONCE(tegra->cur_freq);

	/* To be used by the tegra governor */
	stat->private_data = tegra;

	/* The below are to be used by the other governors */
	stat->current_frequency = cur_freq * KHZ;

	actmon_dev = &tegra->devices[MCALL];

	/* Number of cycles spent on memory access */
	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);

	/* The bus can be considered to be saturated way before 100% */
	stat->busy_time *= 100 / BUS_SATURATION_RATIO;

	/* Number of cycles in a sampling period */
	stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;

	stat->busy_time = min(stat->busy_time, stat->total_time);

	return 0;
}

static struct devfreq_dev_profile tegra_devfreq_profile = {
	.polling_ms	= ACTMON_SAMPLING_PERIOD,
	.target		= tegra_devfreq_target,
	.get_dev_status	= tegra_devfreq_get_dev_status,
	.is_cooling_device = true,
};

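/*
 * The governor's target is the maximum of the per-device (MCALL and
 * MCCPU) target frequencies.
 */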
static int tegra_governor_get_target(struct devfreq *devfreq,
				     unsigned long *freq)
{
	struct devfreq_dev_status *stat;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned long target_freq = 0;
	unsigned int i;
	int err;

	err = devfreq_update_stats(devfreq);
	if (err)
		return err;

	stat = &devfreq->last_status;

	tegra = stat->private_data;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		actmon_update_target(tegra, dev);

		target_freq = max(target_freq, dev->target_freq);
	}

	/*
	 * tegra-devfreq driver operates with KHz units, while OPP table
	 * entries use Hz units. Hence we need to convert the units for the
	 * devfreq core.
	 */
	*freq = target_freq * KHZ;

	return 0;
}

static int tegra_governor_event_handler(struct devfreq *devfreq,
					unsigned int event, void *data)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
	unsigned int *new_delay = data;
	int ret = 0;

	/*
	 * Couple the devfreq device with the governor early because it is
	 * needed at the moment the governor starts (it is used by the ISR).
	 */
	tegra->devfreq = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		ret = tegra_actmon_start(tegra);
		break;

	case DEVFREQ_GOV_STOP:
		tegra_actmon_stop(tegra);
		devfreq_monitor_stop(devfreq);
		break;

	case DEVFREQ_GOV_UPDATE_INTERVAL:
		/*
		 * ACTMON hardware supports up to 256 milliseconds for the
		 * sampling period.
		 */
		if (*new_delay > 256) {
			ret = -EINVAL;
			break;
		}

		tegra_actmon_pause(tegra);
		devfreq_update_interval(devfreq, new_delay);
		ret = tegra_actmon_resume(tegra);
		break;

	case DEVFREQ_GOV_SUSPEND:
		tegra_actmon_stop(tegra);
		devfreq_monitor_suspend(devfreq);
		break;

	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		ret = tegra_actmon_start(tegra);
		break;
	}

	return ret;
}

static struct devfreq_governor tegra_devfreq_governor = {
	.name = "tegra_actmon",
	.attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE
		| DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
	.get_target_freq = tegra_governor_get_target,
	.event_handler = tegra_governor_event_handler,
};

static void devm_tegra_devfreq_deinit_hw(void *data)
{
	struct tegra_devfreq *tegra = data;

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);
}

static int devm_tegra_devfreq_init_hw(struct device *dev,
				      struct tegra_devfreq *tegra)
{
	int err;

	err = clk_prepare_enable(tegra->clock);
	if (err) {
		dev_err(dev, "Failed to prepare and enable ACTMON clock\n");
		return err;
	}

	err = devm_add_action_or_reset(dev, devm_tegra_devfreq_deinit_hw,
				       tegra);
	if (err)
		return err;

	err = reset_control_reset(tegra->reset);
	if (err) {
		dev_err(dev, "Failed to reset hardware: %d\n", err);
		return err;
	}

	return err;
}

static int tegra_devfreq_config_clks_nop(struct device *dev,
					 struct opp_table *opp_table,
					 struct dev_pm_opp *opp, void *data,
					 bool scaling_down)
{
	/* We want to skip clk configuration via dev_pm_opp_set_opp() */
	return 0;
}

static int tegra_devfreq_probe(struct platform_device *pdev)
{
	u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
	struct tegra_devfreq_device *dev;
	struct tegra_devfreq *tegra;
	struct devfreq *devfreq;
	unsigned int i;
	long rate;
	int err;
	const char *clk_names[] = { "actmon", NULL };
	struct dev_pm_opp_config config = {
		.supported_hw = &hw_version,
		.supported_hw_count = 1,
		.clk_names = clk_names,
		.config_clks = tegra_devfreq_config_clks_nop,
	};

	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	tegra->soc = of_device_get_match_data(&pdev->dev);

	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tegra->regs))
		return PTR_ERR(tegra->regs);

	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->reset)) {
		dev_err(&pdev->dev, "Failed to get reset\n");
		return PTR_ERR(tegra->reset);
	}

	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->clock)) {
		dev_err(&pdev->dev, "Failed to get actmon clock\n");
		return PTR_ERR(tegra->clock);
	}

	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(tegra->emc_clock))
		return dev_err_probe(&pdev->dev, PTR_ERR(tegra->emc_clock),
				     "Failed to get emc clock\n");

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;

	tegra->irq = err;

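	/*
	 * Keep the interrupt disabled when it is requested: it is enabled
	 * only while the tegra_actmon governor is running.
	 */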
	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);

	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
					actmon_thread_isr, IRQF_ONESHOT,
					"tegra-devfreq", tegra);
	if (err) {
		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
		return err;
	}

	err = devm_pm_opp_set_config(&pdev->dev, &config);
	if (err) {
		dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
		return err;
	}

	err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
	if (err) {
		dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
		return err;
	}

	err = devm_tegra_devfreq_init_hw(&pdev->dev, tegra);
	if (err)
		return err;

	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
	if (rate <= 0) {
		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
		return rate ?: -EINVAL;
	}

	tegra->max_freq = rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = tegra->devices + i;
		dev->config = tegra->soc->configs + i;
		dev->regs = tegra->regs + dev->config->offset;
	}

	platform_set_drvdata(pdev, tegra);

	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;

	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
			  tegra_actmon_delayed_update);

	err = devm_devfreq_add_governor(&pdev->dev, &tegra_devfreq_governor);
	if (err) {
		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
		return err;
	}

	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);

	devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
					  "tegra_actmon", NULL);
	if (IS_ERR(devfreq)) {
		dev_err(&pdev->dev, "Failed to add device: %pe\n", devfreq);
		return PTR_ERR(devfreq);
	}

	return 0;
}

static const struct tegra_devfreq_soc_data tegra124_soc = {
	.configs = tegra124_device_configs,

	/*
	 * Activity counter is incremented every 256 memory transactions,
	 * and each transaction takes 4 EMC clocks.
	 */
	.count_weight = 4 * 256,
};

static const struct tegra_devfreq_soc_data tegra30_soc = {
	.configs = tegra30_device_configs,
	.count_weight = 2 * 256,
};

static const struct of_device_id tegra_devfreq_of_match[] = {
	{ .compatible = "nvidia,tegra30-actmon",  .data = &tegra30_soc, },
	{ .compatible = "nvidia,tegra124-actmon", .data = &tegra124_soc, },
	{ },
};

MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);

static struct platform_driver tegra_devfreq_driver = {
	.probe	= tegra_devfreq_probe,
	.driver = {
		.name = "tegra-devfreq",
		.of_match_table = tegra_devfreq_of_match,
	},
};
module_platform_driver(tegra_devfreq_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Tegra devfreq driver");
MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");