// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	u32 dmap_cfg, vg_cfg;
	unsigned long clk;

	pm_runtime_get_sync(dev->dev);

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

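	/*
	 * Pick the pipe fetch configuration (memory read burst size and
	 * number of outstanding requests, per the per-value comments below);
	 * the larger bursts are presumably only safe on newer revisions or
	 * when the core clock runs fast enough.
	 */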
	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

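	/*
	 * Start with an empty layer-mixer input configuration; the CRTC code
	 * presumably reprograms the mixer stages as planes are attached.
	 */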
	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	pm_runtime_put_sync(dev->dev);

	return 0;
}

static void mdp4_enable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
}

static void mdp4_disable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_disable(mdp4_kms);
}

static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		mdp4_crtc_wait_for_commit_done(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_TMDS:
		return mdp4_dtv_round_pixclk(encoder, rate);
	case DRM_MODE_ENCODER_LVDS:
	case DRM_MODE_ENCODER_DSI:
	default:
		return rate;
	}
}

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	mdp_kms_destroy(&mdp4_kms->base);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,
		.disable_vblank  = mdp4_disable_vblank,
		.enable_commit   = mdp4_enable_commit,
		.disable_commit  = mdp4_disable_commit,
		.flush_commit    = mdp4_flush_commit,
		.wait_flush      = mdp4_wait_flush,
		.complete_commit = mdp4_complete_commit,
		.round_pixclk    = mdp4_round_pixclk,
		.destroy         = mdp4_destroy,
	},
	.set_irqmask         = mdp4_set_irqmask,
};

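/*
 * Clock gating helpers: the enable_commit/disable_commit hooks,
 * read_mdp_hw_revision() and mdp4_kms_init() bracket register access with
 * these; they are non-static, presumably so other parts of the mdp4 driver
 * can use them as well.
 */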
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_disable_unprepare(mdp4_kms->clk);
	clk_disable_unprepare(mdp4_kms->pclk);
	clk_disable_unprepare(mdp4_kms->lut_clk);
	clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	clk_prepare_enable(mdp4_kms->pclk);
	clk_prepare_enable(mdp4_kms->lut_clk);
	clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}

static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
				  int intf_type)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_bridge *next_bridge;
	int dsi_id;
	int ret;

	switch (intf_type) {
	case DRM_MODE_ENCODER_LVDS:
		/*
		 * bail out early if there is no panel node (no need to
		 * initialize LCDC encoder and LVDS connector)
		 */
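		/*
		 * Port 0, endpoint 0 of the MDP4 DT node is assumed to
		 * describe the attached LVDS panel/bridge.
		 */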
		next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0);
		if (IS_ERR(next_bridge)) {
			ret = PTR_ERR(next_bridge);
			if (ret == -ENODEV)
				return 0;
			return ret;
		}

		encoder = mdp4_lcdc_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
			return PTR_ERR(encoder);
		}

		/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
		encoder->possible_crtcs = 1 << DMA_P;

		ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to attach LVDS panel/bridge: %d\n", ret);

			return ret;
		}

		connector = drm_bridge_connector_init(dev, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
			return PTR_ERR(connector);
		}

		ret = drm_connector_attach_encoder(connector, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to attach LVDS connector: %d\n", ret);

			return ret;
		}

		break;
	case DRM_MODE_ENCODER_TMDS:
		encoder = mdp4_dtv_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
			return PTR_ERR(encoder);
		}

		/* DTV can be hooked to DMA_E: */
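		/*
		 * Note: the bit is the CRTC index (DMA_E is registered second
		 * in modeset_init()), not the mdp4_dma enum value.
		 */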
		encoder->possible_crtcs = 1 << 1;

		if (priv->hdmi) {
			/* Construct bridge/connector for HDMI: */
			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
			if (ret) {
				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
				return ret;
			}
		}

		break;
	case DRM_MODE_ENCODER_DSI:
		/* only DSI1 supported for now */
		dsi_id = 0;

		if (!priv->dsi[dsi_id])
			break;

		encoder = mdp4_dsi_encoder_init(dev);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			DRM_DEV_ERROR(dev->dev,
				"failed to construct DSI encoder: %d\n", ret);
			return ret;
		}

		/* TODO: Add DMA_S later? */
		encoder->possible_crtcs = 1 << DMA_P;

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
				ret);
			return ret;
		}

		break;
	default:
		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
		return -EINVAL;
	}

	return 0;
}

static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;
	static const enum mdp4_pipe rgb_planes[] = {
		RGB1, RGB2,
	};
	static const enum mdp4_pipe vg_planes[] = {
		VG1, VG2,
	};
	static const enum mdp4_dma mdp4_crtcs[] = {
		DMA_P, DMA_E,
	};
	static const char * const mdp4_crtc_names[] = {
		"DMA_P", "DMA_E",
	};
	static const int mdp4_intfs[] = {
		DRM_MODE_ENCODER_LVDS,
		DRM_MODE_ENCODER_DSI,
		DRM_MODE_ENCODER_TMDS,
	};

	/* construct non-private planes: */
	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
		plane = mdp4_plane_init(dev, vg_planes[i], false);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for VG%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}
	}

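	/*
	 * Construct one RGB plane + CRTC per DMA channel; the 'true' argument
	 * to mdp4_plane_init() presumably marks the RGB pipe as the private
	 * primary plane for its CRTC.
	 */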
	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
		plane = mdp4_plane_init(dev, rgb_planes[i], true);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for RGB%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}

		crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
				mdp4_crtcs[i]);
		if (IS_ERR(crtc)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
				mdp4_crtc_names[i]);
			ret = PTR_ERR(crtc);
			goto fail;
		}

		priv->num_crtcs++;
	}

	/*
	 * we currently set up two relatively fixed paths:
	 *
	 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
	 *			or
	 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
	 *
	 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
	 */

	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
				i, ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
				 u32 *major, u32 *minor)
{
	struct drm_device *dev = mdp4_kms->dev;
	u32 version;

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	*major = FIELD(version, MDP4_VERSION_MAJOR);
	*minor = FIELD(version, MDP4_VERSION_MINOR);

	DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d\n", *major, *minor);
}

static int mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
	struct msm_kms *kms = NULL;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	int ret;
	u32 major, minor;
	unsigned long max_clk;

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	max_clk = 266667000;

	ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
		goto fail;
	}

	kms = priv->kms;

	mdp4_kms->dev = dev;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	clk_set_rate(mdp4_kms->clk, max_clk);

	read_mdp_hw_revision(mdp4_kms, &major, &minor);

	if (major != 4) {
		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
			      major, minor);
		ret = -ENXIO;
		goto fail;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev >= 2) {
		if (!mdp4_kms->lut_clk) {
			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
			ret = -ENODEV;
			goto fail;
		}
		clk_set_rate(mdp4_kms->lut_clk, max_clk);
	}

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
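	/*
	 * Wait roughly one frame (assuming 60 Hz) so any in-flight scanout
	 * finishes before the IOMMU is attached below.
	 */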
	mdelay(16);

	mmu = msm_iommu_new(&pdev->dev, 0);
	if (IS_ERR(mmu)) {
		ret = PTR_ERR(mmu);
		goto fail;
	} else if (!mmu) {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	} else {
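		/*
		 * Start the address space one page in, presumably so that
		 * iova 0 is never handed out and a NULL iova always faults.
		 */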
		aspace = msm_gem_address_space_create(mmu,
			"mdp4", 0x1000, 0x100000000 - 0x1000);

		if (IS_ERR(aspace)) {
			if (!IS_ERR(mmu))
				mmu->funcs->destroy(mmu);
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

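	/*
	 * Scratch cursor buffer: presumably scanned out by the cursor
	 * hardware whenever no user cursor image has been set.
	 */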
	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

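	/* MDP4 scanout is assumed to be limited to 2048x2048 surfaces */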
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return 0;

fail:
	if (kms)
		mdp4_destroy(kms);

	return ret;
}

static const struct dev_pm_ops mdp4_pm_ops = {
	.prepare = msm_kms_pm_prepare,
	.complete = msm_kms_pm_complete,
};

static int mdp4_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mdp4_kms *mdp4_kms;
	int irq;

	mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms)
		return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");

	mdp4_kms->mmio = msm_ioremap(pdev, NULL);
	if (IS_ERR(mdp4_kms->mmio))
		return PTR_ERR(mdp4_kms->mmio);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(dev, irq, "failed to get irq\n");

	mdp4_kms->base.base.irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk))
		return dev_err_probe(dev, PTR_ERR(mdp4_kms->clk), "failed to get core_clk\n");

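	/*
	 * The interface clock is treated as optional: errors are ignored,
	 * presumably because some platforms do not provide it.
	 */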
	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk))
		return dev_err_probe(dev, PTR_ERR(mdp4_kms->axi_clk), "failed to get axi_clk\n");

	/*
	 * This is required for revn >= 2. Handle errors here and let the kms
	 * init bail out if the clock is not provided.
	 */
	mdp4_kms->lut_clk = devm_clk_get_optional(&pdev->dev, "lut_clk");
	if (IS_ERR(mdp4_kms->lut_clk))
		return dev_err_probe(dev, PTR_ERR(mdp4_kms->lut_clk), "failed to get lut_clk\n");

	return msm_drv_probe(&pdev->dev, mdp4_kms_init, &mdp4_kms->base.base);
}

static void mdp4_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static const struct of_device_id mdp4_dt_match[] = {
	{ .compatible = "qcom,mdp4" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mdp4_dt_match);

static struct platform_driver mdp4_platform_driver = {
	.probe      = mdp4_probe,
	.remove     = mdp4_remove,
	.shutdown   = msm_kms_shutdown,
	.driver     = {
		.name   = "mdp4",
		.of_match_table = mdp4_dt_match,
		.pm     = &mdp4_pm_ops,
	},
};

void __init msm_mdp4_register(void)
{
	platform_driver_register(&mdp4_platform_driver);
}

void __exit msm_mdp4_unregister(void)
{
	platform_driver_unregister(&mdp4_platform_driver);
}