// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Lin Huang <hl@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/devfreq-event.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/seqlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/perf_event.h>

#include <soc/rockchip/rockchip_grf.h>
#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rk3568_grf.h>
#include <soc/rockchip/rk3588_grf.h>

#define DMC_MAX_CHANNELS	4

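/*
 * The DDRMON registers are hiword-mask protected: the upper 16 bits of a
 * write act as a write-enable mask for the lower 16 bits, so individual
 * fields can be updated without a read-modify-write cycle.
 */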
#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

/* DDRMON_CTRL */
#define DDRMON_CTRL	0x04
#define DDRMON_CTRL_LPDDR5		BIT(6)
#define DDRMON_CTRL_DDR4		BIT(5)
#define DDRMON_CTRL_LPDDR4		BIT(4)
#define DDRMON_CTRL_HARDWARE_EN		BIT(3)
#define DDRMON_CTRL_LPDDR23		BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN		BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN	BIT(0)
#define DDRMON_CTRL_DDR_TYPE_MASK	(DDRMON_CTRL_LPDDR5 | \
					 DDRMON_CTRL_DDR4 | \
					 DDRMON_CTRL_LPDDR4 | \
					 DDRMON_CTRL_LPDDR23)
#define DDRMON_CTRL_LP5_BANK_MODE_MASK	GENMASK(8, 7)

#define DDRMON_CH0_WR_NUM		0x20
#define DDRMON_CH0_RD_NUM		0x24
#define DDRMON_CH0_COUNT_NUM		0x28
#define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
#define DDRMON_CH1_COUNT_NUM		0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM	0x40

#define PERF_EVENT_CYCLES		0x0
#define PERF_EVENT_READ_BYTES		0x1
#define PERF_EVENT_WRITE_BYTES		0x2
#define PERF_EVENT_READ_BYTES0		0x3
#define PERF_EVENT_WRITE_BYTES0		0x4
#define PERF_EVENT_READ_BYTES1		0x5
#define PERF_EVENT_WRITE_BYTES1		0x6
#define PERF_EVENT_READ_BYTES2		0x7
#define PERF_EVENT_WRITE_BYTES2		0x8
#define PERF_EVENT_READ_BYTES3		0x9
#define PERF_EVENT_WRITE_BYTES3		0xa
#define PERF_EVENT_BYTES		0xb
#define PERF_ACCESS_TYPE_MAX		0xc

/**
 * struct dmc_count_channel - structure to hold counter values from the DDR controller
 * @access:       number of read and write accesses
 * @clock_cycles: DDR clock cycles
 * @read_access:  number of read accesses
 * @write_access: number of write accesses
 */
struct dmc_count_channel {
	u64 access;
	u64 clock_cycles;
	u64 read_access;
	u64 write_access;
};

struct dmc_count {
	struct dmc_count_channel c[DMC_MAX_CHANNELS];
};

/*
 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * for the operating points. Whenever the usage leaves these bounds an event is
 * generated to indicate the DDR frequency should be changed.
 */
struct rockchip_dfi {
	struct devfreq_event_dev *edev;
	struct devfreq_event_desc desc;
	struct dmc_count last_event_count;

	struct dmc_count last_perf_count;
	struct dmc_count total_count;
	seqlock_t count_seqlock; /* protects last_perf_count and total_count */

	struct device *dev;
	void __iomem *regs;
	struct regmap *regmap_pmu;
	struct clk *clk;
	int usecount;			/* enable/disable reference count */
	struct mutex mutex;		/* protects usecount */
	u32 ddr_type;
	unsigned int channel_mask;	/* bitmask of populated DDR channels */
	unsigned int max_channels;
	enum cpuhp_state cpuhp_state;
	struct hlist_node node;
	struct pmu pmu;
	struct hrtimer timer;
	unsigned int cpu;
	int active_events;
	int burst_len;
	int buswidth[DMC_MAX_CHANNELS];
	int ddrmon_stride;		/* register stride between per-channel DDRMON blocks */
	bool ddrmon_ctrl_single;	/* true if one DDRMON_CTRL instance serves all channels */
	u32 lp5_bank_mode;
	bool lp5_ckr;	/* true if in 4:1 command-to-data clock ratio mode */
	unsigned int count_multiplier;	/* number of data clocks per count */
};

static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl,
					u32 *mask)
{
	u32 ddrmon_ver;

	*mask = DDRMON_CTRL_DDR_TYPE_MASK;

	switch (dfi->ddr_type) {
	case ROCKCHIP_DDRTYPE_LPDDR2:
	case ROCKCHIP_DDRTYPE_LPDDR3:
		*ctrl = DDRMON_CTRL_LPDDR23;
		break;
	case ROCKCHIP_DDRTYPE_LPDDR4:
	case ROCKCHIP_DDRTYPE_LPDDR4X:
		*ctrl = DDRMON_CTRL_LPDDR4;
		break;
	case ROCKCHIP_DDRTYPE_LPDDR5:
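		/* The register at offset 0x0 reports the DDRMON version. */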
		ddrmon_ver = readl_relaxed(dfi->regs);
		if (ddrmon_ver < 0x40) {
			*ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode;
			*mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK;
			break;
		}

		/*
		 * As it is unknown whether the unpleasant special case
		 * behaviour used by the vendor kernel is needed for any
		 * shipping hardware, ask users to report if they have
		 * some of that hardware.
		 */
		dev_err(&dfi->edev->dev,
			"unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
			ddrmon_ver);
		return -EOPNOTSUPP;
	default:
		dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
			dfi->ddr_type);
		return -EOPNOTSUPP;
	}

	return 0;
}

static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
	void __iomem *dfi_regs = dfi->regs;
	int i, ret = 0;
	u32 ctrl;
	u32 ctrl_mask;

	mutex_lock(&dfi->mutex);

	dfi->usecount++;
	if (dfi->usecount > 1)
		goto out;

	ret = clk_prepare_enable(dfi->clk);
	if (ret) {
		dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);
		goto out;
	}

	ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask);
	if (ret)
		goto out;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;

		/* clear DDRMON_CTRL setting */
		writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN |
			       DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		/* enable count, use software mode */
		writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		if (dfi->ddrmon_ctrl_single)
			break;
	}
out:
	mutex_unlock(&dfi->mutex);

	return ret;
}

static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
{
	void __iomem *dfi_regs = dfi->regs;
	int i;

	mutex_lock(&dfi->mutex);

	dfi->usecount--;

	WARN_ON_ONCE(dfi->usecount < 0);

	if (dfi->usecount > 0)
		goto out;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;

		writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		if (dfi->ddrmon_ctrl_single)
			break;
	}

	clk_disable_unprepare(dfi->clk);
out:
	mutex_unlock(&dfi->mutex);
}

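/* Snapshot the raw 32 bit counters of all populated channels. */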
static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res)
{
	u32 i;
	void __iomem *dfi_regs = dfi->regs;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;
		res->c[i].read_access = readl_relaxed(dfi_regs +
				DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
		res->c[i].write_access = readl_relaxed(dfi_regs +
				DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
		res->c[i].access = readl_relaxed(dfi_regs +
				DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
		res->c[i].clock_cycles = readl_relaxed(dfi_regs +
				DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
	}
}

static int rockchip_dfi_event_disable(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);

	rockchip_dfi_disable(dfi);

	return 0;
}

static int rockchip_dfi_event_enable(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);

	return rockchip_dfi_enable(dfi);
}

static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
{
	return 0;
}

static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
				  struct devfreq_event_data *edata)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
	struct dmc_count count;
	struct dmc_count *last = &dfi->last_event_count;
	u32 access = 0, clock_cycles = 0;
	int i;

	rockchip_dfi_read_counters(dfi, &count);

	/* We can only report one channel, so find the busiest one */
	for (i = 0; i < dfi->max_channels; i++) {
		u32 a, c;

		if (!(dfi->channel_mask & BIT(i)))
			continue;

		a = count.c[i].access - last->c[i].access;
		c = count.c[i].clock_cycles - last->c[i].clock_cycles;

		if (a > access) {
			access = a;
			clock_cycles = c;
		}
	}

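	/*
	 * Weight each access by four clock cycles so load_count can be
	 * related to total_count; one DFI access presumably spans four
	 * DDR clock cycles.
	 */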
	edata->load_count = access * 4;
	edata->total_count = clock_cycles;

	dfi->last_event_count = count;

	return 0;
}

static const struct devfreq_event_ops rockchip_dfi_ops = {
	.disable = rockchip_dfi_event_disable,
	.enable = rockchip_dfi_event_enable,
	.get_event = rockchip_dfi_get_event,
	.set_event = rockchip_dfi_set_event,
};

#ifdef CONFIG_PERF_EVENTS

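/*
 * The hardware counters are 32 bit wide. Accumulate 64 bit totals by adding
 * only the wrap-safe 32 bit delta since the last snapshot to the running
 * total; the 1 s hrtimer below snapshots often enough that a counter cannot
 * wrap more than once in between.
 */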
static void rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi,
					   const struct dmc_count *now,
					   struct dmc_count *res)
{
	const struct dmc_count *last = &dfi->last_perf_count;
	int i;

	for (i = 0; i < dfi->max_channels; i++) {
		res->c[i].read_access = dfi->total_count.c[i].read_access +
			(u32)(now->c[i].read_access - last->c[i].read_access);
		res->c[i].write_access = dfi->total_count.c[i].write_access +
			(u32)(now->c[i].write_access - last->c[i].write_access);
		res->c[i].access = dfi->total_count.c[i].access +
			(u32)(now->c[i].access - last->c[i].access);
		res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
			(u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
	}
}

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

PMU_EVENT_ATTR_STRING(cycles, ddr_pmu_cycles, "event="__stringify(PERF_EVENT_CYCLES))

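/*
 * Each byte counter is exposed with unit "MB" and scale 1/2^20
 * (9.536743164e-07), so perf reports the raw byte counts as MiB.
 */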
#define DFI_PMU_EVENT_ATTR(_name, _var, _str) \
	PMU_EVENT_ATTR_STRING(_name, _var, _str); \
	PMU_EVENT_ATTR_STRING(_name.unit, _var##_unit, "MB"); \
	PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")

DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));

DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));

DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));

DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));

DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));

DFI_PMU_EVENT_ATTR(bytes, ddr_pmu_bytes, "event="__stringify(PERF_EVENT_BYTES));

#define DFI_ATTR_MB(_name)		\
	&_name.attr.attr,		\
	&_name##_unit.attr.attr,	\
	&_name##_scale.attr.attr

static struct attribute *ddr_perf_events_attrs[] = {
	&ddr_pmu_cycles.attr.attr,
	DFI_ATTR_MB(ddr_pmu_read_bytes),
	DFI_ATTR_MB(ddr_pmu_write_bytes),
	DFI_ATTR_MB(ddr_pmu_read_bytes0),
	DFI_ATTR_MB(ddr_pmu_write_bytes0),
	DFI_ATTR_MB(ddr_pmu_read_bytes1),
	DFI_ATTR_MB(ddr_pmu_write_bytes1),
	DFI_ATTR_MB(ddr_pmu_read_bytes2),
	DFI_ATTR_MB(ddr_pmu_write_bytes2),
	DFI_ATTR_MB(ddr_pmu_read_bytes3),
	DFI_ATTR_MB(ddr_pmu_write_bytes3),
	DFI_ATTR_MB(ddr_pmu_bytes),
	NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_format_attr_group,
	NULL,
};

static int rockchip_ddr_perf_event_init(struct perf_event *event)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0) {
		dev_warn(dfi->dev, "Can't provide per-task data!\n");
		return -EINVAL;
	}

	return 0;
}

static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
	int blen = dfi->burst_len;
	struct dmc_count total, now;
	unsigned int seq;
	u64 count = 0;
	int i;

	rockchip_dfi_read_counters(dfi, &now);

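	/*
	 * Fold the fresh hardware snapshot into the accumulated totals,
	 * retrying if the hrtimer callback updated them concurrently.
	 */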
	do {
		seq = read_seqbegin(&dfi->count_seqlock);
		rockchip_ddr_perf_counters_add(dfi, &now, &total);
	} while (read_seqretry(&dfi->count_seqlock, seq));

	switch (event->attr.config) {
	case PERF_EVENT_CYCLES:
		count = total.c[0].clock_cycles * dfi->count_multiplier;
		break;
	case PERF_EVENT_READ_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].read_access * blen * dfi->buswidth[i];
		break;
	case PERF_EVENT_WRITE_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].write_access * blen * dfi->buswidth[i];
		break;
	case PERF_EVENT_READ_BYTES0:
		count = total.c[0].read_access * blen * dfi->buswidth[0];
		break;
	case PERF_EVENT_WRITE_BYTES0:
		count = total.c[0].write_access * blen * dfi->buswidth[0];
		break;
	case PERF_EVENT_READ_BYTES1:
		count = total.c[1].read_access * blen * dfi->buswidth[1];
		break;
	case PERF_EVENT_WRITE_BYTES1:
		count = total.c[1].write_access * blen * dfi->buswidth[1];
		break;
	case PERF_EVENT_READ_BYTES2:
		count = total.c[2].read_access * blen * dfi->buswidth[2];
		break;
	case PERF_EVENT_WRITE_BYTES2:
		count = total.c[2].write_access * blen * dfi->buswidth[2];
		break;
	case PERF_EVENT_READ_BYTES3:
		count = total.c[3].read_access * blen * dfi->buswidth[3];
		break;
	case PERF_EVENT_WRITE_BYTES3:
		count = total.c[3].write_access * blen * dfi->buswidth[3];
		break;
	case PERF_EVENT_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].access * blen * dfi->buswidth[i];
		break;
	}

	return count;
}

static void rockchip_ddr_perf_event_update(struct perf_event *event)
{
	u64 now;
	s64 prev;

	if (event->attr.config >= PERF_ACCESS_TYPE_MAX)
		return;

	now = rockchip_ddr_perf_event_get_count(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void rockchip_ddr_perf_event_start(struct perf_event *event, int flags)
{
	u64 now = rockchip_ddr_perf_event_get_count(event);

	local64_set(&event->hw.prev_count, now);
}

static int rockchip_ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	dfi->active_events++;

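	/* First active event: reset the totals, resnapshot and start the wrap timer. */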
	if (dfi->active_events == 1) {
		dfi->total_count = (struct dmc_count){};
		rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
		hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);
	}

	if (flags & PERF_EF_START)
		rockchip_ddr_perf_event_start(event, flags);

	return 0;
}

static void rockchip_ddr_perf_event_stop(struct perf_event *event, int flags)
{
	rockchip_ddr_perf_event_update(event);
}

static void rockchip_ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	rockchip_ddr_perf_event_stop(event, PERF_EF_UPDATE);

	dfi->active_events--;

	if (dfi->active_events == 0)
		hrtimer_cancel(&dfi->timer);
}

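/*
 * Runs once per second while events are active so the 32 bit hardware
 * counters cannot wrap unnoticed between reads.
 */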
static enum hrtimer_restart rockchip_dfi_timer(struct hrtimer *timer)
{
	struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer);
	struct dmc_count now, total;

	rockchip_dfi_read_counters(dfi, &now);

	write_seqlock(&dfi->count_seqlock);

	rockchip_ddr_perf_counters_add(dfi, &now, &total);
	dfi->total_count = total;
	dfi->last_perf_count = now;

	write_sequnlock(&dfi->count_seqlock);

	hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));

	return HRTIMER_RESTART;
}

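/* Migrate the PMU context to another online CPU when its home CPU goes down. */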
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
	int target;

	if (cpu != dfi->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&dfi->pmu, cpu, target);
	dfi->cpu = target;

	return 0;
}

static void rockchip_ddr_cpuhp_remove_state(void *data)
{
	struct rockchip_dfi *dfi = data;

	cpuhp_remove_multi_state(dfi->cpuhp_state);

	rockchip_dfi_disable(dfi);
}

static void rockchip_ddr_cpuhp_remove_instance(void *data)
{
	struct rockchip_dfi *dfi = data;

	cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);
}

static void rockchip_ddr_perf_remove(void *data)
{
	struct rockchip_dfi *dfi = data;

	perf_pmu_unregister(&dfi->pmu);
}

static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
	struct pmu *pmu = &dfi->pmu;
	int ret;

	seqlock_init(&dfi->count_seqlock);

	pmu->module = THIS_MODULE;
	pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->task_ctx_nr = perf_invalid_context;
	pmu->attr_groups = attr_groups;
	pmu->event_init  = rockchip_ddr_perf_event_init;
	pmu->add = rockchip_ddr_perf_event_add;
	pmu->del = rockchip_ddr_perf_event_del;
	pmu->start = rockchip_ddr_perf_event_start;
	pmu->stop = rockchip_ddr_perf_event_stop;
	pmu->read = rockchip_ddr_perf_event_update;

	dfi->cpu = raw_smp_processor_id();

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "rockchip_ddr_perf_pmu",
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);
		return ret;
	}

	dfi->cpuhp_state = ret;

	rockchip_dfi_enable(dfi);

	ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
	if (ret) {
		dev_err(dfi->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);
	if (ret)
		return ret;

	hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

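	/* Burst length per DRAM type: BL8 for LPDDR2/3, BL16 for LPDDR4/4X/5. */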
	switch (dfi->ddr_type) {
	case ROCKCHIP_DDRTYPE_LPDDR2:
	case ROCKCHIP_DDRTYPE_LPDDR3:
		dfi->burst_len = 8;
		break;
	case ROCKCHIP_DDRTYPE_LPDDR4:
	case ROCKCHIP_DDRTYPE_LPDDR4X:
	case ROCKCHIP_DDRTYPE_LPDDR5:
		dfi->burst_len = 16;
		break;
	}

	if (!dfi->count_multiplier)
		dfi->count_multiplier = 1;

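	/* Usable from userspace as e.g. "perf stat -a -e rockchip_ddr/read-bytes/". */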
	ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
}
#else
static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
	return 0;
}
#endif

static int rk3399_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 val;

	dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
	if (IS_ERR(dfi->clk))
		return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),
				     "Cannot get the clk pclk_ddr_mon\n");

	/* get ddr type */
	regmap_read(regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
	dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);

	dfi->channel_mask = GENMASK(1, 0);
	dfi->max_channels = 2;

	dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
	dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;

	dfi->ddrmon_stride = 0x14;
	dfi->ddrmon_ctrl_single = true;

	return 0;
}

static int rk3568_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 reg2, reg3;

	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG2, &reg2);
	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG3, &reg3);

	/* lower 3 bits of the DDR type */
	dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

	/*
	 * For version three and higher the upper two bits of the DDR type are
	 * in RK3568_PMUGRF_OS_REG3
	 */
	if (FIELD_GET(RK3568_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
		dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

	dfi->channel_mask = BIT(0);
	dfi->max_channels = 1;

	dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;

	dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */
	dfi->ddrmon_ctrl_single = true;

	return 0;
}

static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 reg2, reg3, reg4, reg6;

	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG4, &reg4);

	/* lower 3 bits of the DDR type */
	dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

	/*
	 * For version three and higher the upper two bits of the DDR type are
	 * in RK3588_PMUGRF_OS_REG3
	 */
	if (FIELD_GET(RK3588_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
		dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

	dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
	dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
	dfi->buswidth[2] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
	dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
	dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
			    FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg4) << 2;
	dfi->max_channels = 4;

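	/*
	 * One DDRMON count covers two data clocks on RK3588, four in LPDDR5
	 * 4:1 command-to-data clock ratio mode (handled below).
	 */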
	dfi->ddrmon_stride = 0x4000;
	dfi->count_multiplier = 2;

	if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
		regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
		dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
		dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
		if (dfi->lp5_ckr)
			dfi->count_multiplier *= 2;
	}

	return 0;
}

static const struct of_device_id rockchip_dfi_id_match[] = {
	{ .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
	{ .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
	{ .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
	{ },
};

MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);

static int rockchip_dfi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_dfi *dfi;
	struct devfreq_event_desc *desc;
	struct device_node *np = pdev->dev.of_node, *node;
	int (*soc_init)(struct rockchip_dfi *dfi);
	int ret;

	soc_init = of_device_get_match_data(&pdev->dev);
	if (!soc_init)
		return -EINVAL;

	dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL);
	if (!dfi)
		return -ENOMEM;

	dfi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dfi->regs))
		return PTR_ERR(dfi->regs);

	node = of_parse_phandle(np, "rockchip,pmu", 0);
	if (!node)
		return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");

	dfi->regmap_pmu = syscon_node_to_regmap(node);
	of_node_put(node);
	if (IS_ERR(dfi->regmap_pmu))
		return PTR_ERR(dfi->regmap_pmu);

	dfi->dev = dev;
	mutex_init(&dfi->mutex);

	desc = &dfi->desc;
	desc->ops = &rockchip_dfi_ops;
	desc->driver_data = dfi;
	desc->name = np->name;

	ret = soc_init(dfi);
	if (ret)
		return ret;

	dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
	if (IS_ERR(dfi->edev)) {
		dev_err(&pdev->dev,
			"failed to add devfreq-event device\n");
		return PTR_ERR(dfi->edev);
	}

	ret = rockchip_ddr_perf_init(dfi);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, dfi);

	return 0;
}

static struct platform_driver rockchip_dfi_driver = {
	.probe	= rockchip_dfi_probe,
	.driver = {
		.name	= "rockchip-dfi",
		.of_match_table = rockchip_dfi_id_match,
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(rockchip_dfi_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip DFI driver");