xref: /linux/drivers/devfreq/event/rockchip-dfi.c (revision 8e1bb4a41aa78d6105e59186af3dcd545fc66e70)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
4  * Author: Lin Huang <hl@rock-chips.com>
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/devfreq-event.h>
9 #include <linux/kernel.h>
10 #include <linux/err.h>
11 #include <linux/init.h>
12 #include <linux/io.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/module.h>
15 #include <linux/platform_device.h>
16 #include <linux/regmap.h>
17 #include <linux/slab.h>
18 #include <linux/list.h>
19 #include <linux/seqlock.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/bitfield.h>
23 #include <linux/bits.h>
24 #include <linux/perf_event.h>
25 
26 #include <soc/rockchip/rockchip_grf.h>
27 #include <soc/rockchip/rk3399_grf.h>
28 #include <soc/rockchip/rk3568_grf.h>
29 #include <soc/rockchip/rk3588_grf.h>
30 
#define DMC_MAX_CHANNELS	4

/*
 * Rockchip GRF-style "hiword mask" write: the upper 16 bits select which of
 * the lower 16 bits the hardware actually updates.
 */
#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

/* DDRMON_CTRL */
#define DDRMON_CTRL	0x04
#define DDRMON_CTRL_DDR4		BIT(5)
#define DDRMON_CTRL_LPDDR4		BIT(4)
#define DDRMON_CTRL_HARDWARE_EN		BIT(3)
#define DDRMON_CTRL_LPDDR23		BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN		BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN	BIT(0)
#define DDRMON_CTRL_DDR_TYPE_MASK	(DDRMON_CTRL_DDR4 | \
					 DDRMON_CTRL_LPDDR4 | \
					 DDRMON_CTRL_LPDDR23)

/* Per-channel counter registers; channel i is at offset i * ddrmon_stride. */
#define DDRMON_CH0_WR_NUM		0x20
#define DDRMON_CH0_RD_NUM		0x24
#define DDRMON_CH0_COUNT_NUM		0x28
#define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
#define DDRMON_CH1_COUNT_NUM		0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM	0x40

/*
 * perf event encodings, selected by userspace through the "event" format
 * attribute (attr.config bits 0-7).
 */
#define PERF_EVENT_CYCLES		0x0
#define PERF_EVENT_READ_BYTES		0x1
#define PERF_EVENT_WRITE_BYTES		0x2
#define PERF_EVENT_READ_BYTES0		0x3
#define PERF_EVENT_WRITE_BYTES0		0x4
#define PERF_EVENT_READ_BYTES1		0x5
#define PERF_EVENT_WRITE_BYTES1		0x6
#define PERF_EVENT_READ_BYTES2		0x7
#define PERF_EVENT_WRITE_BYTES2		0x8
#define PERF_EVENT_READ_BYTES3		0x9
#define PERF_EVENT_WRITE_BYTES3		0xa
#define PERF_EVENT_BYTES		0xb
#define PERF_ACCESS_TYPE_MAX		0xc
67 
/**
 * struct dmc_count_channel - structure to hold counter values from the DDR controller
 * @access:       Number of read and write accesses
 * @clock_cycles: DDR clock cycles
 * @read_access:  number of read accesses
 * @write_access: number of write accesses
 */
struct dmc_count_channel {
	u64 access;
	u64 clock_cycles;
	u64 read_access;
	u64 write_access;
};

/* One snapshot/accumulator of counters for every possible channel. */
struct dmc_count {
	struct dmc_count_channel c[DMC_MAX_CHANNELS];
};
85 
/*
 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * for the operating points. Whenever the usage leaves these bounds an event is
 * generated to indicate the DDR frequency should be changed.
 */
struct rockchip_dfi {
	struct devfreq_event_dev *edev;
	struct devfreq_event_desc desc;
	struct dmc_count last_event_count;	/* snapshot at last devfreq get_event */

	struct dmc_count last_perf_count;	/* snapshot at last perf timer tick */
	struct dmc_count total_count;		/* accumulated 64-bit perf totals */
	seqlock_t count_seqlock; /* protects last_perf_count and total_count */

	struct device *dev;
	void __iomem *regs;		/* DDRMON register block */
	struct regmap *regmap_pmu;	/* PMU GRF; provides DDR type/width info */
	struct clk *clk;
	int usecount;			/* enable refcount, protected by @mutex */
	struct mutex mutex;
	u32 ddr_type;			/* ROCKCHIP_DDRTYPE_* read from the GRF */
	unsigned int channel_mask;	/* bitmask of populated DMC channels */
	unsigned int max_channels;
	enum cpuhp_state cpuhp_state;
	struct hlist_node node;
	struct pmu pmu;
	struct hrtimer timer;		/* periodic perf counter accumulation */
	unsigned int cpu;		/* CPU servicing the PMU events */
	int active_events;		/* number of currently added perf events */
	int burst_len;			/* DRAM burst length, for byte conversion */
	int buswidth[DMC_MAX_CHANNELS];	/* per-channel bus width factor (4 or 2) */
	int ddrmon_stride;		/* register stride between channel banks */
	bool ddrmon_ctrl_single;	/* one DDRMON_CTRL shared by all channels */
};
120 
121 static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
122 {
123 	void __iomem *dfi_regs = dfi->regs;
124 	int i, ret = 0;
125 
126 	mutex_lock(&dfi->mutex);
127 
128 	dfi->usecount++;
129 	if (dfi->usecount > 1)
130 		goto out;
131 
132 	ret = clk_prepare_enable(dfi->clk);
133 	if (ret) {
134 		dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);
135 		goto out;
136 	}
137 
138 	for (i = 0; i < dfi->max_channels; i++) {
139 		u32 ctrl = 0;
140 
141 		if (!(dfi->channel_mask & BIT(i)))
142 			continue;
143 
144 		/* clear DDRMON_CTRL setting */
145 		writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN |
146 			       DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
147 			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
148 
149 		/* set ddr type to dfi */
150 		switch (dfi->ddr_type) {
151 		case ROCKCHIP_DDRTYPE_LPDDR2:
152 		case ROCKCHIP_DDRTYPE_LPDDR3:
153 			ctrl = DDRMON_CTRL_LPDDR23;
154 			break;
155 		case ROCKCHIP_DDRTYPE_LPDDR4:
156 		case ROCKCHIP_DDRTYPE_LPDDR4X:
157 			ctrl = DDRMON_CTRL_LPDDR4;
158 			break;
159 		default:
160 			break;
161 		}
162 
163 		writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
164 			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
165 
166 		/* enable count, use software mode */
167 		writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN),
168 			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
169 
170 		if (dfi->ddrmon_ctrl_single)
171 			break;
172 	}
173 out:
174 	mutex_unlock(&dfi->mutex);
175 
176 	return ret;
177 }
178 
/*
 * rockchip_dfi_disable - reference-counted disable of the DDR monitor
 * @dfi: driver instance
 *
 * Drops one usage reference; when the last user goes away, software
 * counting is stopped on every monitored channel and the DFI clock is
 * released.
 */
static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
{
	void __iomem *dfi_regs = dfi->regs;
	int i;

	mutex_lock(&dfi->mutex);

	dfi->usecount--;

	/* Catch unbalanced enable/disable calls. */
	WARN_ON_ONCE(dfi->usecount < 0);

	if (dfi->usecount > 0)
		goto out;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;

		writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN),
			      dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		/* some SoCs share a single DDRMON_CTRL across all channels */
		if (dfi->ddrmon_ctrl_single)
			break;
	}

	clk_disable_unprepare(dfi->clk);
out:
	mutex_unlock(&dfi->mutex);
}
208 
/*
 * Snapshot the 32-bit hardware counters of every channel in channel_mask
 * into @res. Entries for channels not in the mask are left untouched, so
 * callers must not rely on them being initialized.
 */
static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res)
{
	u32 i;
	void __iomem *dfi_regs = dfi->regs;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;
		res->c[i].read_access = readl_relaxed(dfi_regs +
				DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
		res->c[i].write_access = readl_relaxed(dfi_regs +
				DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
		res->c[i].access = readl_relaxed(dfi_regs +
				DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
		res->c[i].clock_cycles = readl_relaxed(dfi_regs +
				DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
	}
}
227 
/* devfreq-event .disable hook: drop our reference on the monitor. */
static int rockchip_dfi_event_disable(struct devfreq_event_dev *edev)
{
	rockchip_dfi_disable(devfreq_event_get_drvdata(edev));

	return 0;
}
236 
/* devfreq-event .enable hook: take a reference on the monitor. */
static int rockchip_dfi_event_enable(struct devfreq_event_dev *edev)
{
	return rockchip_dfi_enable(devfreq_event_get_drvdata(edev));
}
243 
/* devfreq-event .set_event hook: nothing to configure for the DFI. */
static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
{
	return 0;
}
248 
/*
 * devfreq-event .get_event hook: report DDR utilization since the previous
 * call. Only a single load/total pair can be reported, so the channel with
 * the highest access delta is used.
 */
static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
				  struct devfreq_event_data *edata)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
	struct dmc_count count;
	struct dmc_count *last = &dfi->last_event_count;
	u32 access = 0, clock_cycles = 0;
	int i;

	rockchip_dfi_read_counters(dfi, &count);

	/* We can only report one channel, so find the busiest one */
	for (i = 0; i < dfi->max_channels; i++) {
		u32 a, c;

		if (!(dfi->channel_mask & BIT(i)))
			continue;

		/* u32 deltas: wraparound of the 32-bit counters cancels out */
		a = count.c[i].access - last->c[i].access;
		c = count.c[i].clock_cycles - last->c[i].clock_cycles;

		if (a > access) {
			access = a;
			clock_cycles = c;
		}
	}

	/*
	 * NOTE(review): the factor 4 presumably converts one DFI access into
	 * its equivalent number of DDR clock cycles - confirm against the TRM.
	 */
	edata->load_count = access * 4;
	edata->total_count = clock_cycles;

	dfi->last_event_count = count;

	return 0;
}
283 
/* devfreq-event operations exposed to the devfreq governor. */
static const struct devfreq_event_ops rockchip_dfi_ops = {
	.disable = rockchip_dfi_event_disable,
	.enable = rockchip_dfi_event_enable,
	.get_event = rockchip_dfi_get_event,
	.set_event = rockchip_dfi_set_event,
};
290 
291 #ifdef CONFIG_PERF_EVENTS
292 
/*
 * Fold a fresh hardware snapshot @now into the running 64-bit totals:
 * res = total_count + (now - last_perf_count), where each delta is taken
 * as a u32 so that wraparound of the 32-bit hardware counters between two
 * snapshots is handled correctly.
 *
 * NOTE(review): this iterates all max_channels while
 * rockchip_dfi_read_counters() only fills channels in channel_mask, so for
 * unpopulated channels the deltas operate on unwritten data - verify the
 * masked-out entries are never consumed.
 */
static void rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi,
					   const struct dmc_count *now,
					   struct dmc_count *res)
{
	const struct dmc_count *last = &dfi->last_perf_count;
	int i;

	for (i = 0; i < dfi->max_channels; i++) {
		res->c[i].read_access = dfi->total_count.c[i].read_access +
			(u32)(now->c[i].read_access - last->c[i].read_access);
		res->c[i].write_access = dfi->total_count.c[i].write_access +
			(u32)(now->c[i].write_access - last->c[i].write_access);
		res->c[i].access = dfi->total_count.c[i].access +
			(u32)(now->c[i].access - last->c[i].access);
		res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
			(u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
	}
}
311 
/* sysfs "cpumask" show: report the CPU the PMU events are bound to. */
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
}
320 
static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

/* sysfs cpumask attribute so perf knows which CPU to open events on */
static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

PMU_EVENT_ATTR_STRING(cycles, ddr_pmu_cycles, "event="__stringify(PERF_EVENT_CYCLES))

/*
 * Define an event attribute together with .unit/.scale attributes so the
 * perf tool reports byte counts in MB (scale 9.536743164e-07 == 1 / 2^20).
 */
#define DFI_PMU_EVENT_ATTR(_name, _var, _str) \
	PMU_EVENT_ATTR_STRING(_name, _var, _str); \
	PMU_EVENT_ATTR_STRING(_name.unit, _var##_unit, "MB"); \
	PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")

DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));

DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));

DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));

DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));

DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));

DFI_PMU_EVENT_ATTR(bytes, ddr_pmu_bytes, "event="__stringify(PERF_EVENT_BYTES));

/* expand to the event attribute plus its unit and scale attributes */
#define DFI_ATTR_MB(_name) 		\
	&_name.attr.attr,		\
	&_name##_unit.attr.attr,	\
	&_name##_scale.attr.attr

static struct attribute *ddr_perf_events_attrs[] = {
	&ddr_pmu_cycles.attr.attr,
	DFI_ATTR_MB(ddr_pmu_read_bytes),
	DFI_ATTR_MB(ddr_pmu_write_bytes),
	DFI_ATTR_MB(ddr_pmu_read_bytes0),
	DFI_ATTR_MB(ddr_pmu_write_bytes0),
	DFI_ATTR_MB(ddr_pmu_read_bytes1),
	DFI_ATTR_MB(ddr_pmu_write_bytes1),
	DFI_ATTR_MB(ddr_pmu_read_bytes2),
	DFI_ATTR_MB(ddr_pmu_write_bytes2),
	DFI_ATTR_MB(ddr_pmu_read_bytes3),
	DFI_ATTR_MB(ddr_pmu_write_bytes3),
	DFI_ATTR_MB(ddr_pmu_bytes),
	NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

/* "event" selects one of the PERF_EVENT_* values via attr.config bits 0-7 */
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_format_attr_group,
	NULL,
};
401 
/*
 * perf .event_init hook: accept only events of this PMU's type, opened
 * per-CPU (this is an uncore-style counter, per-task monitoring makes no
 * sense).
 */
static int rockchip_ddr_perf_event_init(struct perf_event *event)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0) {
		dev_warn(dfi->dev, "Can't provide per-task data!\n");
		return -EINVAL;
	}

	return 0;
}
419 
/*
 * Compute the current 64-bit value of @event from a fresh hardware
 * snapshot combined with the accumulated totals. The seqlock retry loop
 * guarantees a consistent view against the timer's write side. Byte
 * counts are derived as accesses * burst_len * buswidth.
 */
static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
	int blen = dfi->burst_len;
	struct dmc_count total, now;
	unsigned int seq;
	u64 count = 0;
	int i;

	rockchip_dfi_read_counters(dfi, &now);

	/* retry until total_count/last_perf_count were read consistently */
	do {
		seq = read_seqbegin(&dfi->count_seqlock);
		rockchip_ddr_perf_counters_add(dfi, &now, &total);
	} while (read_seqretry(&dfi->count_seqlock, seq));

	switch (event->attr.config) {
	case PERF_EVENT_CYCLES:
		count = total.c[0].clock_cycles;
		break;
	case PERF_EVENT_READ_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].read_access * blen * dfi->buswidth[i];
		break;
	case PERF_EVENT_WRITE_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].write_access * blen * dfi->buswidth[i];
		break;
	case PERF_EVENT_READ_BYTES0:
		count = total.c[0].read_access * blen * dfi->buswidth[0];
		break;
	case PERF_EVENT_WRITE_BYTES0:
		count = total.c[0].write_access * blen * dfi->buswidth[0];
		break;
	case PERF_EVENT_READ_BYTES1:
		count = total.c[1].read_access * blen * dfi->buswidth[1];
		break;
	case PERF_EVENT_WRITE_BYTES1:
		count = total.c[1].write_access * blen * dfi->buswidth[1];
		break;
	case PERF_EVENT_READ_BYTES2:
		count = total.c[2].read_access * blen * dfi->buswidth[2];
		break;
	case PERF_EVENT_WRITE_BYTES2:
		count = total.c[2].write_access * blen * dfi->buswidth[2];
		break;
	case PERF_EVENT_READ_BYTES3:
		count = total.c[3].read_access * blen * dfi->buswidth[3];
		break;
	case PERF_EVENT_WRITE_BYTES3:
		count = total.c[3].write_access * blen * dfi->buswidth[3];
		break;
	case PERF_EVENT_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].access * blen * dfi->buswidth[i];
		break;
	}

	return count;
}
480 
/*
 * perf .read hook: fold the delta since the last readout into the event
 * count. Out-of-range config values are silently ignored.
 */
static void rockchip_ddr_perf_event_update(struct perf_event *event)
{
	u64 now;
	s64 prev;

	if (event->attr.config >= PERF_ACCESS_TYPE_MAX)
		return;

	now = rockchip_ddr_perf_event_get_count(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}
493 
/* perf .start hook: record the current count as this event's baseline. */
static void rockchip_ddr_perf_event_start(struct perf_event *event, int flags)
{
	u64 now = rockchip_ddr_perf_event_get_count(event);

	local64_set(&event->hw.prev_count, now);
}
500 
/*
 * perf .add hook. The first active event resets the accumulated totals,
 * takes a fresh baseline snapshot and arms the 1 s accumulation timer.
 */
static int rockchip_ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	dfi->active_events++;

	if (dfi->active_events == 1) {
		dfi->total_count = (struct dmc_count){};
		rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
		hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);
	}

	if (flags & PERF_EF_START)
		rockchip_ddr_perf_event_start(event, flags);

	return 0;
}
518 
/* perf .stop hook: flush the outstanding delta into the event count. */
static void rockchip_ddr_perf_event_stop(struct perf_event *event, int flags)
{
	rockchip_ddr_perf_event_update(event);
}
523 
/*
 * perf .del hook: final update, then cancel the accumulation timer when
 * the last event goes away.
 */
static void rockchip_ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	rockchip_ddr_perf_event_stop(event, PERF_EF_UPDATE);

	dfi->active_events--;

	if (dfi->active_events == 0)
		hrtimer_cancel(&dfi->timer);
}
535 
536 static enum hrtimer_restart rockchip_dfi_timer(struct hrtimer *timer)
537 {
538 	struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer);
539 	struct dmc_count now, total;
540 
541 	rockchip_dfi_read_counters(dfi, &now);
542 
543 	write_seqlock(&dfi->count_seqlock);
544 
545 	rockchip_ddr_perf_counters_add(dfi, &now, &total);
546 	dfi->total_count = total;
547 	dfi->last_perf_count = now;
548 
549 	write_sequnlock(&dfi->count_seqlock);
550 
551 	hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));
552 
553 	return HRTIMER_RESTART;
554 };
555 
/*
 * CPU hotplug callback: if the CPU servicing the PMU goes offline, migrate
 * the perf context to any other online CPU (or do nothing if none remain).
 */
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
	int target;

	if (cpu != dfi->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&dfi->pmu, cpu, target);
	dfi->cpu = target;

	return 0;
}
573 
/* devm action: tear down the cpuhp state and drop our monitor reference. */
static void rockchip_ddr_cpuhp_remove_state(void *data)
{
	struct rockchip_dfi *dfi = data;

	cpuhp_remove_multi_state(dfi->cpuhp_state);

	rockchip_dfi_disable(dfi);
}
582 
/* devm action: unregister this instance from the cpuhp state. */
static void rockchip_ddr_cpuhp_remove_instance(void *data)
{
	struct rockchip_dfi *dfi = data;

	cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);
}
589 
/* devm action: unregister the PMU. */
static void rockchip_ddr_perf_remove(void *data)
{
	struct rockchip_dfi *dfi = data;

	perf_pmu_unregister(&dfi->pmu);
}
596 
597 static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
598 {
599 	struct pmu *pmu = &dfi->pmu;
600 	int ret;
601 
602 	seqlock_init(&dfi->count_seqlock);
603 
604 	pmu->module = THIS_MODULE;
605 	pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
606 	pmu->task_ctx_nr = perf_invalid_context;
607 	pmu->attr_groups = attr_groups;
608 	pmu->event_init  = rockchip_ddr_perf_event_init;
609 	pmu->add = rockchip_ddr_perf_event_add;
610 	pmu->del = rockchip_ddr_perf_event_del;
611 	pmu->start = rockchip_ddr_perf_event_start;
612 	pmu->stop = rockchip_ddr_perf_event_stop;
613 	pmu->read = rockchip_ddr_perf_event_update;
614 
615 	dfi->cpu = raw_smp_processor_id();
616 
617 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
618 				      "rockchip_ddr_perf_pmu",
619 				      NULL,
620 				      ddr_perf_offline_cpu);
621 
622 	if (ret < 0) {
623 		dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);
624 		return ret;
625 	}
626 
627 	dfi->cpuhp_state = ret;
628 
629 	rockchip_dfi_enable(dfi);
630 
631 	ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);
632 	if (ret)
633 		return ret;
634 
635 	ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
636 	if (ret) {
637 		dev_err(dfi->dev, "Error %d registering hotplug\n", ret);
638 		return ret;
639 	}
640 
641 	ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);
642 	if (ret)
643 		return ret;
644 
645 	hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
646 	dfi->timer.function = rockchip_dfi_timer;
647 
648 	switch (dfi->ddr_type) {
649 	case ROCKCHIP_DDRTYPE_LPDDR2:
650 	case ROCKCHIP_DDRTYPE_LPDDR3:
651 		dfi->burst_len = 8;
652 		break;
653 	case ROCKCHIP_DDRTYPE_LPDDR4:
654 	case ROCKCHIP_DDRTYPE_LPDDR4X:
655 		dfi->burst_len = 16;
656 		break;
657 	}
658 
659 	ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
660 	if (ret)
661 		return ret;
662 
663 	return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
664 }
665 #else
/* Stub when CONFIG_PERF_EVENTS is disabled: no PMU is registered. */
static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
	return 0;
}
670 #endif
671 
/*
 * RK3399 per-SoC setup: two channels, DDR type and per-channel bus width
 * probed from PMUGRF OS_REG2. A single DDRMON_CTRL covers both channels.
 *
 * NOTE(review): regmap_read() errors are ignored here - on failure @val
 * would be used uninitialized; presumably the PMU GRF read cannot fail on
 * this SoC, but worth confirming.
 */
static int rk3399_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 val;

	dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
	if (IS_ERR(dfi->clk))
		return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),
				     "Cannot get the clk pclk_ddr_mon\n");

	/* get ddr type */
	regmap_read(regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
	dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);

	dfi->channel_mask = GENMASK(1, 0);
	dfi->max_channels = 2;

	dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
	dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;

	dfi->ddrmon_stride = 0x14;
	dfi->ddrmon_ctrl_single = true;

	return 0;
};
697 
/*
 * RK3568 per-SoC setup: single channel; the DDR type is split between
 * OS_REG2 (low 3 bits) and, from sysreg version 3 on, OS_REG3 (upper
 * two bits). No clock is requested here, unlike on RK3399.
 */
static int rk3568_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 reg2, reg3;

	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG2, &reg2);
	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG3, &reg3);

	/* lower 3 bits of the DDR type */
	dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

	/*
	 * For version three and higher the upper two bits of the DDR type are
	 * in RK3568_PMUGRF_OS_REG3
	 */
	if (FIELD_GET(RK3568_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
		dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

	dfi->channel_mask = BIT(0);
	dfi->max_channels = 1;

	dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;

	dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */
	dfi->ddrmon_ctrl_single = true;

	return 0;
};
726 
727 static int rk3588_dfi_init(struct rockchip_dfi *dfi)
728 {
729 	struct regmap *regmap_pmu = dfi->regmap_pmu;
730 	u32 reg2, reg3, reg4;
731 
732 	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
733 	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
734 	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG4, &reg4);
735 
736 	/* lower 3 bits of the DDR type */
737 	dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);
738 
739 	/*
740 	 * For version three and higher the upper two bits of the DDR type are
741 	 * in RK3588_PMUGRF_OS_REG3
742 	 */
743 	if (FIELD_GET(RK3588_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
744 		dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;
745 
746 	dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
747 	dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
748 	dfi->buswidth[2] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
749 	dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
750 	dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
751 			    FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg4) << 2;
752 	dfi->max_channels = 4;
753 
754 	dfi->ddrmon_stride = 0x4000;
755 
756 	return 0;
757 };
758 
/* match data is the per-SoC init function used by probe */
static const struct of_device_id rockchip_dfi_id_match[] = {
	{ .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
	{ .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
	{ .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
	{ },
};

MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
767 
768 static int rockchip_dfi_probe(struct platform_device *pdev)
769 {
770 	struct device *dev = &pdev->dev;
771 	struct rockchip_dfi *dfi;
772 	struct devfreq_event_desc *desc;
773 	struct device_node *np = pdev->dev.of_node, *node;
774 	int (*soc_init)(struct rockchip_dfi *dfi);
775 	int ret;
776 
777 	soc_init = of_device_get_match_data(&pdev->dev);
778 	if (!soc_init)
779 		return -EINVAL;
780 
781 	dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL);
782 	if (!dfi)
783 		return -ENOMEM;
784 
785 	dfi->regs = devm_platform_ioremap_resource(pdev, 0);
786 	if (IS_ERR(dfi->regs))
787 		return PTR_ERR(dfi->regs);
788 
789 	node = of_parse_phandle(np, "rockchip,pmu", 0);
790 	if (!node)
791 		return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
792 
793 	dfi->regmap_pmu = syscon_node_to_regmap(node);
794 	of_node_put(node);
795 	if (IS_ERR(dfi->regmap_pmu))
796 		return PTR_ERR(dfi->regmap_pmu);
797 
798 	dfi->dev = dev;
799 	mutex_init(&dfi->mutex);
800 
801 	desc = &dfi->desc;
802 	desc->ops = &rockchip_dfi_ops;
803 	desc->driver_data = dfi;
804 	desc->name = np->name;
805 
806 	ret = soc_init(dfi);
807 	if (ret)
808 		return ret;
809 
810 	dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
811 	if (IS_ERR(dfi->edev)) {
812 		dev_err(&pdev->dev,
813 			"failed to add devfreq-event device\n");
814 		return PTR_ERR(dfi->edev);
815 	}
816 
817 	ret = rockchip_ddr_perf_init(dfi);
818 	if (ret)
819 		return ret;
820 
821 	platform_set_drvdata(pdev, dfi);
822 
823 	return 0;
824 }
825 
static struct platform_driver rockchip_dfi_driver = {
	.probe	= rockchip_dfi_probe,
	.driver = {
		.name	= "rockchip-dfi",
		.of_match_table = rockchip_dfi_id_match,
		/* no sane unbind: the PMU and edev lifetimes are devm-tied */
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(rockchip_dfi_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip DFI driver");
839