// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2017 NXP
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "common.h"

#define MMDC_MAPSR		0x404
#define BP_MMDC_MAPSR_PSD	0
#define BP_MMDC_MAPSR_PSS	4

#define MMDC_MDMISC		0x18
#define BM_MMDC_MDMISC_DDR_TYPE	0x18
#define BP_MMDC_MDMISC_DDR_TYPE	0x3

#define TOTAL_CYCLES		0x0
#define BUSY_CYCLES		0x1
#define READ_ACCESSES		0x2
#define WRITE_ACCESSES		0x3
#define READ_BYTES		0x4
#define WRITE_BYTES		0x5

/* MADPCR0 control bits: enable, reset, freeze, cycle overflow and profile select */
#define DBG_DIS			0x0
#define DBG_EN			0x1
#define DBG_RST			0x2
#define PRF_FRZ			0x4
#define CYC_OVF			0x8
#define PROFILE_SEL		0x10

#define MMDC_MADPCR0	0x410
#define MMDC_MADPCR1	0x414
#define MMDC_MADPSR0	0x418
#define MMDC_MADPSR1	0x41C
#define MMDC_MADPSR2	0x420
#define MMDC_MADPSR3	0x424
#define MMDC_MADPSR4	0x428
#define MMDC_MADPSR5	0x42C

#define MMDC_NUM_COUNTERS	6

#define MMDC_FLAG_PROFILE_SEL	0x1
#define MMDC_PRF_AXI_ID_CLEAR	0x0

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

struct fsl_mmdc_devtype_data {
	unsigned int flags;
};

static const struct fsl_mmdc_devtype_data imx6q_data = {
};

static const struct fsl_mmdc_devtype_data imx6qp_data = {
	.flags = MMDC_FLAG_PROFILE_SEL,
};

static const struct of_device_id imx_mmdc_dt_ids[] = {
	{ .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
	{ .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
	{ /* sentinel */ }
};

#ifdef CONFIG_PERF_EVENTS

static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");

struct mmdc_pmu {
	struct pmu pmu;
	void __iomem *mmdc_base;
	cpumask_t cpu;
	struct hrtimer hrtimer;
	unsigned int active_events;
	int id;
	struct device *dev;
	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
	struct hlist_node node;
	const struct fsl_mmdc_devtype_data *devtype_data;
	struct clk *mmdc_ipg_clk;
};

/*
 * The polling period is set to one second; overflow of total-cycles (the
 * fastest-increasing counter) takes about ten seconds, so one second is a
 * safe margin.
 */
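/*
 * Back-of-envelope check (assuming a ~528 MHz MMDC clock, which is
 * typical for i.MX6Q but not something this driver reads out): a 32-bit
 * counter wraps after 2^32 / 528,000,000 ~= 8.1 seconds, so a one
 * second poll reads every counter well before it can overflow.
 */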
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);

static ktime_t mmdc_pmu_timer_period(void)
{
	return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
	&mmdc_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
	.attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};
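
/*
 * The events above appear under /sys/bus/event_source/devices/mmdcN/,
 * so they can be counted system-wide with perf, e.g. (sketch only; the
 * "mmdc0" instance name is an assumption that depends on probe order):
 *
 *   perf stat -a -e mmdc0/read-bytes/,mmdc0/write-bytes/ -- sleep 10
 */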

PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&mmdc_pmu_events_attr_group,
	&mmdc_pmu_format_attr_group,
	&mmdc_pmu_cpumask_attr_group,
	NULL,
};

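/*
 * Map an event's config number onto its fixed-purpose MADPSR status
 * register and return the current 32-bit count.
 */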
static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;

	switch (cfg) {
	case TOTAL_CYCLES:
		reg = mmdc_base + MMDC_MADPSR0;
		break;
	case BUSY_CYCLES:
		reg = mmdc_base + MMDC_MADPSR1;
		break;
	case READ_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR2;
		break;
	case WRITE_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR3;
		break;
	case READ_BYTES:
		reg = mmdc_base + MMDC_MADPSR4;
		break;
	case WRITE_BYTES:
		reg = mmdc_base + MMDC_MADPSR5;
		break;
	default:
		return WARN_ONCE(1,
			"invalid configuration %d for mmdc counter", cfg);
	}
	return readl(reg);
}

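/*
 * CPU hotplug callback: if the CPU going offline is the one this PMU
 * instance is bound to, migrate the perf context to any other online
 * CPU.
 */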
static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_mmdc->cpu);

	return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
					  struct pmu *pmu,
					  unsigned long *used_counters)
{
	int cfg = event->attr.config;

	if (is_software_event(event))
		return true;

	if (event->pmu != pmu)
		return false;

	return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	unsigned long counter_mask = 0;

	set_bit(leader->attr.config, &counter_mask);

	if (event != leader) {
		if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
			return false;
	}

	for_each_sibling_event(sibling, leader) {
		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
			return false;
	}

	return true;
}

static int mmdc_pmu_event_init(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	if (event->attr.sample_period)
		return -EINVAL;

	if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
		return -EINVAL;

	if (!mmdc_pmu_group_is_valid(event))
		return -EINVAL;

	event->cpu = cpumask_first(&pmu_mmdc->cpu);
	return 0;
}

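/*
 * Accumulate the delta since the last read into the event count. The
 * local64_cmpxchg() loop guards against a racing update of prev_count,
 * and masking the delta to 32 bits keeps the arithmetic correct across
 * a counter wrap.
 */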
static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

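/* Reset the profiling counters, arm the polling timer and enable profiling. */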
static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *mmdc_base, *reg;
	u32 val;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/*
	 * hrtimer is required because mmdc does not provide an interrupt so
	 * polling is necessary
	 */
	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
			HRTIMER_MODE_REL_PINNED);

	local64_set(&hwc->prev_count, 0);

	writel(DBG_RST, reg);

	/*
	 * Write the AXI id parameter to MADPCR1.
	 */
	val = event->attr.config1;
	reg = mmdc_base + MMDC_MADPCR1;
	writel(val, reg);

	reg = mmdc_base + MMDC_MADPCR0;
	val = DBG_EN;
	if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
		val |= PROFILE_SEL;

	writel(val, reg);
}

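/*
 * Claim the event's fixed-purpose counter; fails with -EAGAIN if
 * another event already owns it.
 */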
static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	int cfg = event->attr.config;

	if (flags & PERF_EF_START)
		mmdc_pmu_event_start(event, flags);

	if (pmu_mmdc->mmdc_events[cfg] != NULL)
		return -EAGAIN;

	pmu_mmdc->mmdc_events[cfg] = event;
	pmu_mmdc->active_events++;

	local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

	return 0;
}

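/* Freeze profiling, clear the AXI ID filter and fold in the final count. */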
static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	writel(PRF_FRZ, reg);

	reg = mmdc_base + MMDC_MADPCR1;
	writel(MMDC_PRF_AXI_ID_CLEAR, reg);

	mmdc_pmu_event_update(event);
}

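/*
 * Release the event's counter; the polling hrtimer is cancelled once
 * the last active event goes away.
 */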
static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	pmu_mmdc->mmdc_events[cfg] = NULL;
	pmu_mmdc->active_events--;

	if (pmu_mmdc->active_events == 0)
		hrtimer_cancel(&pmu_mmdc->hrtimer);

	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}

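/* Update all active events so no counter wraps unnoticed between polls. */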
static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
	int i;

	for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
		struct perf_event *event = pmu_mmdc->mmdc_events[i];

		if (event)
			mmdc_pmu_event_update(event);
	}
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
			hrtimer);

	mmdc_pmu_overflow_handler(pmu_mmdc);
	hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

	return HRTIMER_RESTART;
}

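/*
 * Fill in the struct pmu callbacks and allocate a unique instance id;
 * returns the id (>= 0) on success or a negative errno.
 */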
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
		void __iomem *mmdc_base, struct device *dev)
{
	*pmu_mmdc = (struct mmdc_pmu) {
		.pmu = (struct pmu) {
			.parent		= dev,
			.task_ctx_nr    = perf_invalid_context,
			.attr_groups    = attr_groups,
			.event_init     = mmdc_pmu_event_init,
			.add            = mmdc_pmu_event_add,
			.del            = mmdc_pmu_event_del,
			.start          = mmdc_pmu_event_start,
			.stop           = mmdc_pmu_event_stop,
			.read           = mmdc_pmu_event_update,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		},
		.mmdc_base = mmdc_base,
		.dev = dev,
		.active_events = 0,
	};

	pmu_mmdc->id = ida_alloc(&mmdc_ida, GFP_KERNEL);

	return pmu_mmdc->id;
}

static void imx_mmdc_remove(struct platform_device *pdev)
{
	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

	ida_free(&mmdc_ida, pmu_mmdc->id);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	perf_pmu_unregister(&pmu_mmdc->pmu);
	iounmap(pmu_mmdc->mmdc_base);
	clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
	kfree(pmu_mmdc);
}

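/*
 * Set up one PMU instance per MMDC controller: the shared hotplug
 * state is registered once by the first instance, then each instance
 * gets its own id, hrtimer and perf registration.
 */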
static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
			      struct clk *mmdc_ipg_clk)
{
	struct mmdc_pmu *pmu_mmdc;
	char *name;
	int ret;

	pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
	if (!pmu_mmdc) {
		pr_err("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	/* The first instance registers the hotplug state */
	if (!cpuhp_mmdc_state) {
		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "perf/arm/mmdc:online", NULL,
					      mmdc_pmu_offline_cpu);
		if (ret < 0) {
			pr_err("cpuhp_setup_state_multi failed\n");
			goto pmu_free;
		}
		cpuhp_mmdc_state = ret;
	}

	ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
	if (ret < 0)
		goto pmu_free;

	name = devm_kasprintf(&pdev->dev,
				GFP_KERNEL, "mmdc%d", ret);
	if (!name) {
		ret = -ENOMEM;
		goto pmu_release_id;
	}

	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
	pmu_mmdc->devtype_data = device_get_match_data(&pdev->dev);

	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
	if (ret)
		goto pmu_register_err;

	platform_set_drvdata(pdev, pmu_mmdc);
	return 0;

pmu_register_err:
	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_release_id:
	ida_free(&mmdc_ida, pmu_mmdc->id);
pmu_free:
	kfree(pmu_mmdc);
	return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
#endif

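/*
 * Probe: enable the optional ipg clock, map the controller registers,
 * latch the DDR type, enable automatic power saving and register the
 * perf PMU.
 */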
static int imx_mmdc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	void __iomem *mmdc_base, *reg;
	struct clk *mmdc_ipg_clk;
	u32 val;
	int err;

	/* the ipg clock is optional */
	mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mmdc_ipg_clk))
		mmdc_ipg_clk = NULL;

	err = clk_prepare_enable(mmdc_ipg_clk);
	if (err) {
		dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
		return err;
	}

	mmdc_base = of_iomap(np, 0);
	WARN_ON(!mmdc_base);

	reg = mmdc_base + MMDC_MDMISC;
	/* Get ddr type */
	val = readl_relaxed(reg);
	ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
		 BP_MMDC_MDMISC_DDR_TYPE;

	reg = mmdc_base + MMDC_MAPSR;

	/* Enable automatic power saving */
	val = readl_relaxed(reg);
	val &= ~(1 << BP_MMDC_MAPSR_PSD);
	writel_relaxed(val, reg);

	err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
	if (err) {
		iounmap(mmdc_base);
		clk_disable_unprepare(mmdc_ipg_clk);
	}

	return err;
}

int imx_mmdc_get_ddr_type(void)
{
	return ddr_type;
}

static struct platform_driver imx_mmdc_driver = {
	.driver		= {
		.name	= "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe		= imx_mmdc_probe,
	.remove_new	= imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
	return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);