xref: /linux/drivers/gpu/drm/msm/msm_drv.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /*
2  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include <linux/kthread.h>
20 #include <uapi/linux/sched/types.h>
21 #include <drm/drm_of.h>
22 
23 #include "msm_drv.h"
24 #include "msm_debugfs.h"
25 #include "msm_fence.h"
26 #include "msm_gpu.h"
27 #include "msm_kms.h"
28 
30 /*
31  * MSM driver version:
32  * - 1.0.0 - initial interface
33  * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
34  * - 1.2.0 - adds explicit fence support for submit ioctl
35  * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
36  *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
37  *           MSM_GEM_INFO ioctl.
38  */
39 #define MSM_VERSION_MAJOR	1
40 #define MSM_VERSION_MINOR	3
41 #define MSM_VERSION_PATCHLEVEL	0
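
/*
 * A minimal userspace sketch (editorial illustration, not part of this
 * driver) of how the version above is discovered via libdrm:
 *
 *	drmVersionPtr v = drmGetVersion(fd);
 *	if (v && (v->version_major > 1 ||
 *		  (v->version_major == 1 && v->version_minor >= 3)))
 *		use_submitqueues = true;	/* SUBMITQUEUE_* is available */
 *	drmFreeVersion(v);
 *
 * (use_submitqueues is a hypothetical flag for the example.)
 */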
42 
43 static const struct drm_mode_config_funcs mode_config_funcs = {
44 	.fb_create = msm_framebuffer_create,
45 	.output_poll_changed = drm_fb_helper_output_poll_changed,
46 	.atomic_check = drm_atomic_helper_check,
47 	.atomic_commit = drm_atomic_helper_commit,
48 };
49 
50 static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
51 	.atomic_commit_tail = msm_atomic_commit_tail,
52 };
53 
54 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
55 static bool reglog = false;
56 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
57 module_param(reglog, bool, 0600);
58 #else
59 #define reglog 0
60 #endif
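
/*
 * Editorial note: reglog only exists when CONFIG_DRM_MSM_REGISTER_LOGGING is
 * enabled, and the 0600 permission makes it togglable at runtime, e.g.
 * (hypothetical shell session):
 *
 *	echo Y > /sys/module/msm/parameters/reglog
 *
 * or at boot via msm.reglog=Y on the kernel command line.
 */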
61 
62 #ifdef CONFIG_DRM_FBDEV_EMULATION
63 static bool fbdev = true;
64 MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
65 module_param(fbdev, bool, 0600);
66 #endif
67 
68 static char *vram = "16m";
69 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
70 module_param(vram, charp, 0);
71 
72 bool dumpstate = false;
73 MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
74 module_param(dumpstate, bool, 0600);
75 
76 static bool modeset = true;
77 MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
78 module_param(modeset, bool, 0600);
79 
80 /*
81  * Util/helpers:
82  */
83 
84 int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
85 {
86 	struct property *prop;
87 	const char *name;
88 	struct clk_bulk_data *local;
89 	int i = 0, ret, count;
90 
91 	count = of_property_count_strings(dev->of_node, "clock-names");
92 	if (count < 1)
93 		return 0;
94 
95 	local = devm_kcalloc(dev, count,
96 		sizeof(struct clk_bulk_data), GFP_KERNEL);
97 	if (!local)
98 		return -ENOMEM;
99 
100 	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
101 		local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
102 		if (!local[i].id) {
103 			devm_kfree(dev, local);
104 			return -ENOMEM;
105 		}
106 
107 		i++;
108 	}
109 
110 	ret = devm_clk_bulk_get(dev, count, local);
111 
112 	if (ret) {
113 		for (i = 0; i < count; i++)
114 			devm_kfree(dev, (void *) local[i].id);
115 		devm_kfree(dev, local);
116 
117 		return ret;
118 	}
119 
120 	*bulk = local;
121 	return count;
122 }
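
/*
 * A minimal caller sketch (assumed, not taken from this driver) showing the
 * intended use of the count/array pair returned above:
 *
 *	struct clk_bulk_data *bulk;
 *	int nclks = msm_clk_bulk_get(&pdev->dev, &bulk);
 *
 *	if (nclks > 0)
 *		clk_bulk_prepare_enable(nclks, bulk);
 */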
123 
124 struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
125 		const char *name)
126 {
127 	int i;
128 	char n[32];
129 
130 	snprintf(n, sizeof(n), "%s_clk", name);
131 
132 	for (i = 0; bulk && i < count; i++) {
133 		if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
134 			return bulk[i].clk;
135 	}
136 
137 
139 }
140 
141 struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
142 {
143 	struct clk *clk;
144 	char name2[32];
145 
146 	clk = devm_clk_get(&pdev->dev, name);
147 	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
148 		return clk;
149 
150 	snprintf(name2, sizeof(name2), "%s_clk", name);
151 
152 	clk = devm_clk_get(&pdev->dev, name2);
153 	if (!IS_ERR(clk))
154 		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
155 				"\"%s\" instead of \"%s\"\n", name, name2);
156 
157 	return clk;
158 }
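
/*
 * Illustration of the fallback above with two hypothetical DT fragments:
 *
 *	clock-names = "iface";		<- preferred: found on the first try
 *	clock-names = "iface_clk";	<- legacy: found via the "_clk" retry,
 *					   with a warning asking for "iface"
 */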
159 
160 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
161 		const char *dbgname)
162 {
163 	struct resource *res;
164 	unsigned long size;
165 	void __iomem *ptr;
166 
167 	if (name)
168 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
169 	else
170 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
171 
172 	if (!res) {
173 		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
174 		return ERR_PTR(-EINVAL);
175 	}
176 
177 	size = resource_size(res);
178 
179 	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
180 	if (!ptr) {
181 		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
182 		return ERR_PTR(-ENOMEM);
183 	}
184 
185 	if (reglog)
186 		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
187 
188 	return ptr;
189 }
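
/*
 * Typical usage sketch (the resource name and register offset here are
 * hypothetical):
 *
 *	void __iomem *base = msm_ioremap(pdev, "vbif", "VBIF");
 *
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *	msm_writel(0, base + REG_EXAMPLE);
 */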
190 
191 void msm_writel(u32 data, void __iomem *addr)
192 {
193 	if (reglog)
194 		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
195 	writel(data, addr);
196 }
197 
198 u32 msm_readl(const void __iomem *addr)
199 {
200 	u32 val = readl(addr);
201 	if (reglog)
202 		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
203 	return val;
204 }
205 
206 struct vblank_event {
207 	struct list_head node;
208 	int crtc_id;
209 	bool enable;
210 };
211 
212 static void vblank_ctrl_worker(struct kthread_work *work)
213 {
214 	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
215 						struct msm_vblank_ctrl, work);
216 	struct msm_drm_private *priv = container_of(vbl_ctrl,
217 					struct msm_drm_private, vblank_ctrl);
218 	struct msm_kms *kms = priv->kms;
219 	struct vblank_event *vbl_ev, *tmp;
220 	unsigned long flags;
221 
222 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
223 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
224 		list_del(&vbl_ev->node);
225 		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
226 
227 		if (vbl_ev->enable)
228 			kms->funcs->enable_vblank(kms,
229 						priv->crtcs[vbl_ev->crtc_id]);
230 		else
231 			kms->funcs->disable_vblank(kms,
232 						priv->crtcs[vbl_ev->crtc_id]);
233 
234 		kfree(vbl_ev);
235 
236 		spin_lock_irqsave(&vbl_ctrl->lock, flags);
237 	}
238 
239 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
240 }
241 
242 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
243 					int crtc_id, bool enable)
244 {
245 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
246 	struct vblank_event *vbl_ev;
247 	unsigned long flags;
248 
249 	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
250 	if (!vbl_ev)
251 		return -ENOMEM;
252 
253 	vbl_ev->crtc_id = crtc_id;
254 	vbl_ev->enable = enable;
255 
256 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
257 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
258 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
259 
260 	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
261 			&vbl_ctrl->work);
262 
263 	return 0;
264 }
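
/*
 * Editorial note on the indirection above: the DRM enable_vblank/
 * disable_vblank hooks can be called from contexts where sleeping is not
 * allowed, while the kms->funcs vblank hooks may need to sleep, so requests
 * are queued (hence GFP_ATOMIC) and serviced from vblank_ctrl_worker() on
 * the per-CRTC kthread worker instead.
 */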
265 
266 static int msm_drm_uninit(struct device *dev)
267 {
268 	struct platform_device *pdev = to_platform_device(dev);
269 	struct drm_device *ddev = platform_get_drvdata(pdev);
270 	struct msm_drm_private *priv = ddev->dev_private;
271 	struct msm_kms *kms = priv->kms;
272 	struct msm_mdss *mdss = priv->mdss;
273 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
274 	struct vblank_event *vbl_ev, *tmp;
275 	int i;
276 
277 	/* We must cancel and cleanup any pending vblank enable/disable
278 	 * work before drm_irq_uninstall() to avoid work re-enabling an
279 	 * irq after uninstall has disabled it.
280 	 */
281 	kthread_flush_work(&vbl_ctrl->work);
282 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
283 		list_del(&vbl_ev->node);
284 		kfree(vbl_ev);
285 	}
286 
287 	/* clean up display commit/event worker threads */
288 	for (i = 0; i < priv->num_crtcs; i++) {
289 		if (priv->disp_thread[i].thread) {
290 			kthread_flush_worker(&priv->disp_thread[i].worker);
291 			kthread_stop(priv->disp_thread[i].thread);
292 			priv->disp_thread[i].thread = NULL;
293 		}
294 
295 		if (priv->event_thread[i].thread) {
296 			kthread_flush_worker(&priv->event_thread[i].worker);
297 			kthread_stop(priv->event_thread[i].thread);
298 			priv->event_thread[i].thread = NULL;
299 		}
300 	}
301 
302 	msm_gem_shrinker_cleanup(ddev);
303 
304 	drm_kms_helper_poll_fini(ddev);
305 
306 	drm_dev_unregister(ddev);
307 
308 	msm_perf_debugfs_cleanup(priv);
309 	msm_rd_debugfs_cleanup(priv);
310 
311 #ifdef CONFIG_DRM_FBDEV_EMULATION
312 	if (fbdev && priv->fbdev)
313 		msm_fbdev_free(ddev);
314 #endif
315 	drm_mode_config_cleanup(ddev);
316 
317 	pm_runtime_get_sync(dev);
318 	drm_irq_uninstall(ddev);
319 	pm_runtime_put_sync(dev);
320 
321 	flush_workqueue(priv->wq);
322 	destroy_workqueue(priv->wq);
323 
324 	if (kms && kms->funcs)
325 		kms->funcs->destroy(kms);
326 
327 	if (priv->vram.paddr) {
328 		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
329 		drm_mm_takedown(&priv->vram.mm);
330 		dma_free_attrs(dev, priv->vram.size, NULL,
331 			       priv->vram.paddr, attrs);
332 	}
333 
334 	component_unbind_all(dev, ddev);
335 
336 	if (mdss && mdss->funcs)
337 		mdss->funcs->destroy(ddev);
338 
339 	ddev->dev_private = NULL;
340 	drm_dev_unref(ddev);
341 
342 	kfree(priv);
343 
344 	return 0;
345 }
346 
347 #define KMS_MDP4 4
348 #define KMS_MDP5 5
349 #define KMS_DPU  3
350 
351 static int get_mdp_ver(struct platform_device *pdev)
352 {
353 	struct device *dev = &pdev->dev;
354 
355 	return (int) (unsigned long) of_device_get_match_data(dev);
356 }
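
/*
 * Editorial note: the match data returned above is one of the KMS_* values
 * stashed in dt_match[].data at the bottom of this file, e.g.
 * { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }.
 */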
357 
358 #include <linux/of_address.h>
359 
360 static int msm_init_vram(struct drm_device *dev)
361 {
362 	struct msm_drm_private *priv = dev->dev_private;
363 	struct device_node *node;
364 	unsigned long size = 0;
365 	int ret = 0;
366 
367 	/* In the device-tree world, we could have a 'memory-region'
368 	 * phandle, which gives us a link to our "vram".  Allocating
369 	 * is all nicely abstracted behind the dma api, but we need
370 	 * to know the entire size to allocate it all in one go. There
371 	 * are two cases:
372 	 *  1) device with no IOMMU, in which case we need exclusive
373 	 *     access to a VRAM carveout big enough for all gpu
374 	 *     buffers
375 	 *  2) device with IOMMU, but where the bootloader puts up
376 	 *     a splash screen.  In this case, the VRAM carveout
377 	 *     need only be large enough for fbdev fb.  But we need
378 	 *     exclusive access to the buffer to avoid the kernel
379 	 *     using those pages for other purposes (which appears
380 	 *     as corruption on screen before we have a chance to
381 	 *     load and do initial modeset)
382 	 */
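
	/*
	 * A hypothetical DT fragment for case 2 above (all labels and
	 * addresses illustrative):
	 *
	 *	reserved-memory {
	 *		splash_region: splash@9d400000 {
	 *			reg = <0x0 0x9d400000 0x0 0x01000000>;
	 *			no-map;
	 *		};
	 *	};
	 *
	 *	&mdss {
	 *		memory-region = <&splash_region>;
	 *	};
	 */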
383 
384 	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
385 	if (node) {
386 		struct resource r;
387 		ret = of_address_to_resource(node, 0, &r);
388 		of_node_put(node);
389 		if (ret)
390 			return ret;
391 		size = resource_size(&r);
392 		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
393 
394 		/* If we have no IOMMU, then we need to use the carveout allocator.
395 		 * Grab the entire CMA chunk carved out in early startup in
396 		 * mach-msm:
397 		 */
398 	} else if (!iommu_present(&platform_bus_type)) {
399 		DRM_INFO("using %s VRAM carveout\n", vram);
400 		size = memparse(vram, NULL);
401 	}
402 
403 	if (size) {
404 		unsigned long attrs = 0;
405 		void *p;
406 
407 		priv->vram.size = size;
408 
409 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
410 		spin_lock_init(&priv->vram.lock);
411 
412 		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
413 		attrs |= DMA_ATTR_WRITE_COMBINE;
414 
415 		/* note that for no-kernel-mapping, the vaddr returned
416 		 * is bogus, but non-null if allocation succeeded:
417 		 */
418 		p = dma_alloc_attrs(dev->dev, size,
419 				&priv->vram.paddr, GFP_KERNEL, attrs);
420 		if (!p) {
421 			dev_err(dev->dev, "failed to allocate VRAM\n");
422 			priv->vram.paddr = 0;
423 			return -ENOMEM;
424 		}
425 
426 		dev_info(dev->dev, "VRAM: %08x->%08x\n",
427 				(uint32_t)priv->vram.paddr,
428 				(uint32_t)(priv->vram.paddr + size));
429 	}
430 
431 	return ret;
432 }
433 
434 static int msm_drm_init(struct device *dev, struct drm_driver *drv)
435 {
436 	struct platform_device *pdev = to_platform_device(dev);
437 	struct drm_device *ddev;
438 	struct msm_drm_private *priv;
439 	struct msm_kms *kms;
440 	struct msm_mdss *mdss;
441 	int ret, i;
442 	struct sched_param param;
443 
444 	ddev = drm_dev_alloc(drv, dev);
445 	if (IS_ERR(ddev)) {
446 		dev_err(dev, "failed to allocate drm_device\n");
447 		return PTR_ERR(ddev);
448 	}
449 
450 	platform_set_drvdata(pdev, ddev);
451 
452 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
453 	if (!priv) {
454 		ret = -ENOMEM;
455 		goto err_unref_drm_dev;
456 	}
457 
458 	ddev->dev_private = priv;
459 	priv->dev = ddev;
460 
461 	switch (get_mdp_ver(pdev)) {
462 	case KMS_MDP5:
463 		ret = mdp5_mdss_init(ddev);
464 		break;
465 	case KMS_DPU:
466 		ret = dpu_mdss_init(ddev);
467 		break;
468 	default:
469 		ret = 0;
470 		break;
471 	}
472 	if (ret)
473 		goto err_free_priv;
474 
475 	mdss = priv->mdss;
476 
477 	priv->wq = alloc_ordered_workqueue("msm", 0);
478 
479 	INIT_LIST_HEAD(&priv->inactive_list);
480 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
481 	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
482 	spin_lock_init(&priv->vblank_ctrl.lock);
483 
484 	drm_mode_config_init(ddev);
485 
486 	/* Bind all our sub-components: */
487 	ret = component_bind_all(dev, ddev);
488 	if (ret)
489 		goto err_destroy_mdss;
490 
491 	ret = msm_init_vram(ddev);
492 	if (ret)
493 		goto err_msm_uninit;
494 
495 	msm_gem_shrinker_init(ddev);
496 
497 	switch (get_mdp_ver(pdev)) {
498 	case KMS_MDP4:
499 		kms = mdp4_kms_init(ddev);
500 		priv->kms = kms;
501 		break;
502 	case KMS_MDP5:
503 		kms = mdp5_kms_init(ddev);
504 		break;
505 	case KMS_DPU:
506 		kms = dpu_kms_init(ddev);
507 		priv->kms = kms;
508 		break;
509 	default:
510 		kms = ERR_PTR(-ENODEV);
511 		break;
512 	}
513 
514 	if (IS_ERR(kms)) {
515 		/*
516 		 * NOTE: once we have GPU support, having no KMS should not
517 		 * be considered fatal. Ideally we would still support the GPU
518 		 * and (for example) use dmabuf/prime to share buffers with
519 		 * the imx drm driver on iMX5.
520 		 */
521 		dev_err(dev, "failed to load kms\n");
522 		ret = PTR_ERR(kms);
523 		goto err_msm_uninit;
524 	}
525 
526 	/* Enable normalization of plane zpos */
527 	ddev->mode_config.normalize_zpos = true;
528 
529 	if (kms) {
530 		ret = kms->funcs->hw_init(kms);
531 		if (ret) {
532 			dev_err(dev, "kms hw init failed: %d\n", ret);
533 			goto err_msm_uninit;
534 		}
535 	}
536 
537 	ddev->mode_config.funcs = &mode_config_funcs;
538 	ddev->mode_config.helper_private = &mode_config_helper_funcs;
539 
540 	/*
541 	 * This priority was found during empirical testing to provide
542 	 * appropriate realtime scheduling for processing display updates
543 	 * while interacting with other realtime and normal-priority tasks.
544 	 */
545 	param.sched_priority = 16;
546 	for (i = 0; i < priv->num_crtcs; i++) {
547 
548 		/* initialize display thread */
549 		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
550 		kthread_init_worker(&priv->disp_thread[i].worker);
551 		priv->disp_thread[i].dev = ddev;
552 		priv->disp_thread[i].thread =
553 			kthread_run(kthread_worker_fn,
554 				&priv->disp_thread[i].worker,
555 				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
556 		if (IS_ERR(priv->disp_thread[i].thread)) {
557 			dev_err(dev, "failed to create crtc_commit kthread\n");
558 			priv->disp_thread[i].thread = NULL;
559 		} else {
560 			ret = sched_setscheduler(priv->disp_thread[i].thread,
561 						SCHED_FIFO, &param);
562 			if (ret)
563 				pr_warn("display thread priority update failed: %d\n",
564 									ret);
565 		}
566 
567 		/* initialize event thread */
568 		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
569 		kthread_init_worker(&priv->event_thread[i].worker);
570 		priv->event_thread[i].dev = ddev;
571 		priv->event_thread[i].thread =
572 			kthread_run(kthread_worker_fn,
573 				&priv->event_thread[i].worker,
574 				"crtc_event:%d", priv->event_thread[i].crtc_id);
575 		/*
576 		 * The event thread must run at the same priority as
577 		 * disp_thread because it handles frame_done events. A
578 		 * lower-priority event thread combined with a higher-priority
579 		 * disp_thread can push the frame_pending counter beyond 2,
580 		 * which can lead to commit failures at the CRTC commit level.
581 		 */
582 		if (IS_ERR(priv->event_thread[i].thread)) {
583 			dev_err(dev, "failed to create crtc_event kthread\n");
584 			priv->event_thread[i].thread = NULL;
585 		} else {
586 			ret = sched_setscheduler(priv->event_thread[i].thread,
587 						SCHED_FIFO, &param);
588 			if (ret)
589 				pr_warn("display event thread priority update failed: %d\n",
590 									ret);
591 		}
592 
593 		if (!priv->disp_thread[i].thread ||
594 				!priv->event_thread[i].thread) {
595 			/* clean up previously created threads if any */
596 			for ( ; i >= 0; i--) {
597 				if (priv->disp_thread[i].thread) {
598 					kthread_stop(
599 						priv->disp_thread[i].thread);
600 					priv->disp_thread[i].thread = NULL;
601 				}
602 
603 				if (priv->event_thread[i].thread) {
604 					kthread_stop(
605 						priv->event_thread[i].thread);
606 					priv->event_thread[i].thread = NULL;
607 				}
608 			}
609 			goto err_msm_uninit;
610 		}
611 	}
612 
613 	ret = drm_vblank_init(ddev, priv->num_crtcs);
614 	if (ret < 0) {
615 		dev_err(dev, "failed to initialize vblank\n");
616 		goto err_msm_uninit;
617 	}
618 
619 	if (kms) {
620 		pm_runtime_get_sync(dev);
621 		ret = drm_irq_install(ddev, kms->irq);
622 		pm_runtime_put_sync(dev);
623 		if (ret < 0) {
624 			dev_err(dev, "failed to install IRQ handler\n");
625 			goto err_msm_uninit;
626 		}
627 	}
628 
629 	ret = drm_dev_register(ddev, 0);
630 	if (ret)
631 		goto err_msm_uninit;
632 
633 	drm_mode_config_reset(ddev);
634 
635 #ifdef CONFIG_DRM_FBDEV_EMULATION
636 	if (fbdev)
637 		priv->fbdev = msm_fbdev_init(ddev);
638 #endif
639 
640 	ret = msm_debugfs_late_init(ddev);
641 	if (ret)
642 		goto err_msm_uninit;
643 
644 	drm_kms_helper_poll_init(ddev);
645 
646 	return 0;
647 
648 err_msm_uninit:
649 	msm_drm_uninit(dev);
650 	return ret;
651 err_destroy_mdss:
652 	if (mdss && mdss->funcs)
653 		mdss->funcs->destroy(ddev);
654 err_free_priv:
655 	kfree(priv);
656 err_unref_drm_dev:
657 	drm_dev_unref(ddev);
658 	return ret;
659 }
660 
661 /*
662  * DRM operations:
663  */
664 
665 static void load_gpu(struct drm_device *dev)
666 {
667 	static DEFINE_MUTEX(init_lock);
668 	struct msm_drm_private *priv = dev->dev_private;
669 
670 	mutex_lock(&init_lock);
671 
672 	if (!priv->gpu)
673 		priv->gpu = adreno_load_gpu(dev);
674 
675 	mutex_unlock(&init_lock);
676 }
677 
678 static int context_init(struct drm_device *dev, struct drm_file *file)
679 {
680 	struct msm_file_private *ctx;
681 
682 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
683 	if (!ctx)
684 		return -ENOMEM;
685 
686 	msm_submitqueue_init(dev, ctx);
687 
688 	file->driver_priv = ctx;
689 
690 	return 0;
691 }
692 
693 static int msm_open(struct drm_device *dev, struct drm_file *file)
694 {
695 	/* For now, load the GPU on open, to avoid requiring firmware in
696 	 * the initrd.
697 	 */
698 	load_gpu(dev);
699 
700 	return context_init(dev, file);
701 }
702 
703 static void context_close(struct msm_file_private *ctx)
704 {
705 	msm_submitqueue_close(ctx);
706 	kfree(ctx);
707 }
708 
709 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
710 {
711 	struct msm_drm_private *priv = dev->dev_private;
712 	struct msm_file_private *ctx = file->driver_priv;
713 
714 	mutex_lock(&dev->struct_mutex);
715 	if (ctx == priv->lastctx)
716 		priv->lastctx = NULL;
717 	mutex_unlock(&dev->struct_mutex);
718 
719 	context_close(ctx);
720 }
721 
722 static irqreturn_t msm_irq(int irq, void *arg)
723 {
724 	struct drm_device *dev = arg;
725 	struct msm_drm_private *priv = dev->dev_private;
726 	struct msm_kms *kms = priv->kms;
727 	BUG_ON(!kms);
728 	return kms->funcs->irq(kms);
729 }
730 
731 static void msm_irq_preinstall(struct drm_device *dev)
732 {
733 	struct msm_drm_private *priv = dev->dev_private;
734 	struct msm_kms *kms = priv->kms;
735 	BUG_ON(!kms);
736 	kms->funcs->irq_preinstall(kms);
737 }
738 
739 static int msm_irq_postinstall(struct drm_device *dev)
740 {
741 	struct msm_drm_private *priv = dev->dev_private;
742 	struct msm_kms *kms = priv->kms;
743 	BUG_ON(!kms);
744 	return kms->funcs->irq_postinstall(kms);
745 }
746 
747 static void msm_irq_uninstall(struct drm_device *dev)
748 {
749 	struct msm_drm_private *priv = dev->dev_private;
750 	struct msm_kms *kms = priv->kms;
751 	BUG_ON(!kms);
752 	kms->funcs->irq_uninstall(kms);
753 }
754 
755 static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
756 {
757 	struct msm_drm_private *priv = dev->dev_private;
758 	struct msm_kms *kms = priv->kms;
759 	if (!kms)
760 		return -ENXIO;
761 	DBG("dev=%p, crtc=%u", dev, pipe);
762 	return vblank_ctrl_queue_work(priv, pipe, true);
763 }
764 
765 static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
766 {
767 	struct msm_drm_private *priv = dev->dev_private;
768 	struct msm_kms *kms = priv->kms;
769 	if (!kms)
770 		return;
771 	DBG("dev=%p, crtc=%u", dev, pipe);
772 	vblank_ctrl_queue_work(priv, pipe, false);
773 }
774 
775 /*
776  * DRM ioctls:
777  */
778 
779 static int msm_ioctl_get_param(struct drm_device *dev, void *data,
780 		struct drm_file *file)
781 {
782 	struct msm_drm_private *priv = dev->dev_private;
783 	struct drm_msm_param *args = data;
784 	struct msm_gpu *gpu;
785 
786 	/* For now, we just have the 3D pipe; eventually this will need to
787 	 * be smarter about dispatching to the appropriate GPU module:
788 	 */
789 	if (args->pipe != MSM_PIPE_3D0)
790 		return -EINVAL;
791 
792 	gpu = priv->gpu;
793 
794 	if (!gpu)
795 		return -ENXIO;
796 
797 	return gpu->funcs->get_param(gpu, args->param, &args->value);
798 }
799 
800 static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
801 		struct drm_file *file)
802 {
803 	struct drm_msm_gem_new *args = data;
804 
805 	if (args->flags & ~MSM_BO_FLAGS) {
806 		DRM_ERROR("invalid flags: %08x\n", args->flags);
807 		return -EINVAL;
808 	}
809 
810 	return msm_gem_new_handle(dev, file, args->size,
811 			args->flags, &args->handle);
812 }
813 
814 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
815 {
816 	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
817 }
818 
819 static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
820 		struct drm_file *file)
821 {
822 	struct drm_msm_gem_cpu_prep *args = data;
823 	struct drm_gem_object *obj;
824 	ktime_t timeout = to_ktime(args->timeout);
825 	int ret;
826 
827 	if (args->op & ~MSM_PREP_FLAGS) {
828 		DRM_ERROR("invalid op: %08x\n", args->op);
829 		return -EINVAL;
830 	}
831 
832 	obj = drm_gem_object_lookup(file, args->handle);
833 	if (!obj)
834 		return -ENOENT;
835 
836 	ret = msm_gem_cpu_prep(obj, args->op, &timeout);
837 
838 	drm_gem_object_put_unlocked(obj);
839 
840 	return ret;
841 }
842 
843 static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
844 		struct drm_file *file)
845 {
846 	struct drm_msm_gem_cpu_fini *args = data;
847 	struct drm_gem_object *obj;
848 	int ret;
849 
850 	obj = drm_gem_object_lookup(file, args->handle);
851 	if (!obj)
852 		return -ENOENT;
853 
854 	ret = msm_gem_cpu_fini(obj);
855 
856 	drm_gem_object_put_unlocked(obj);
857 
858 	return ret;
859 }
860 
861 static int msm_ioctl_gem_info_iova(struct drm_device *dev,
862 		struct drm_gem_object *obj, uint64_t *iova)
863 {
864 	struct msm_drm_private *priv = dev->dev_private;
865 
866 	if (!priv->gpu)
867 		return -EINVAL;
868 
869 	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
870 }
871 
872 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
873 		struct drm_file *file)
874 {
875 	struct drm_msm_gem_info *args = data;
876 	struct drm_gem_object *obj;
877 	int ret = 0;
878 
879 	if (args->flags & ~MSM_INFO_FLAGS)
880 		return -EINVAL;
881 
882 	obj = drm_gem_object_lookup(file, args->handle);
883 	if (!obj)
884 		return -ENOENT;
885 
886 	if (args->flags & MSM_INFO_IOVA) {
887 		uint64_t iova;
888 
889 		ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
890 		if (!ret)
891 			args->offset = iova;
892 	} else {
893 		args->offset = msm_gem_mmap_offset(obj);
894 	}
895 
896 	drm_gem_object_put_unlocked(obj);
897 
898 	return ret;
899 }
900 
901 static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
902 		struct drm_file *file)
903 {
904 	struct msm_drm_private *priv = dev->dev_private;
905 	struct drm_msm_wait_fence *args = data;
906 	ktime_t timeout = to_ktime(args->timeout);
907 	struct msm_gpu_submitqueue *queue;
908 	struct msm_gpu *gpu = priv->gpu;
909 	int ret;
910 
911 	if (args->pad) {
912 		DRM_ERROR("invalid pad: %08x\n", args->pad);
913 		return -EINVAL;
914 	}
915 
916 	if (!gpu)
917 		return 0;
918 
919 	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
920 	if (!queue)
921 		return -ENOENT;
922 
923 	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
924 		true);
925 
926 	msm_submitqueue_put(queue);
927 	return ret;
928 }
929 
930 static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
931 		struct drm_file *file)
932 {
933 	struct drm_msm_gem_madvise *args = data;
934 	struct drm_gem_object *obj;
935 	int ret;
936 
937 	switch (args->madv) {
938 	case MSM_MADV_DONTNEED:
939 	case MSM_MADV_WILLNEED:
940 		break;
941 	default:
942 		return -EINVAL;
943 	}
944 
945 	ret = mutex_lock_interruptible(&dev->struct_mutex);
946 	if (ret)
947 		return ret;
948 
949 	obj = drm_gem_object_lookup(file, args->handle);
950 	if (!obj) {
951 		ret = -ENOENT;
952 		goto unlock;
953 	}
954 
955 	ret = msm_gem_madvise(obj, args->madv);
956 	if (ret >= 0) {
957 		args->retained = ret;
958 		ret = 0;
959 	}
960 
961 	drm_gem_object_put(obj);
962 
963 unlock:
964 	mutex_unlock(&dev->struct_mutex);
965 	return ret;
966 }
967 
969 static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
970 		struct drm_file *file)
971 {
972 	struct drm_msm_submitqueue *args = data;
973 
974 	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
975 		return -EINVAL;
976 
977 	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
978 		args->flags, &args->id);
979 }
980 
982 static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
983 		struct drm_file *file)
984 {
985 	u32 id = *(u32 *) data;
986 
987 	return msm_submitqueue_remove(file->driver_priv, id);
988 }
989 
990 static const struct drm_ioctl_desc msm_ioctls[] = {
991 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
992 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
993 	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
994 	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
995 	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
996 	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
997 	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
998 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
999 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
1000 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
1001 };
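
/*
 * Userspace sketch (editorial illustration) of invoking one of the ioctls
 * above through libdrm's generic command helpers, here MSM_GEM_NEW:
 *
 *	struct drm_msm_gem_new req = { .size = len, .flags = MSM_BO_WC };
 *
 *	ret = drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req, sizeof(req));
 *	// on success, req.handle holds the new GEM handle
 */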
1002 
1003 static const struct vm_operations_struct vm_ops = {
1004 	.fault = msm_gem_fault,
1005 	.open = drm_gem_vm_open,
1006 	.close = drm_gem_vm_close,
1007 };
1008 
1009 static const struct file_operations fops = {
1010 	.owner              = THIS_MODULE,
1011 	.open               = drm_open,
1012 	.release            = drm_release,
1013 	.unlocked_ioctl     = drm_ioctl,
1014 	.compat_ioctl       = drm_compat_ioctl,
1015 	.poll               = drm_poll,
1016 	.read               = drm_read,
1017 	.llseek             = no_llseek,
1018 	.mmap               = msm_gem_mmap,
1019 };
1020 
1021 static struct drm_driver msm_driver = {
1022 	.driver_features    = DRIVER_HAVE_IRQ |
1023 				DRIVER_GEM |
1024 				DRIVER_PRIME |
1025 				DRIVER_RENDER |
1026 				DRIVER_ATOMIC |
1027 				DRIVER_MODESET,
1028 	.open               = msm_open,
1029 	.postclose           = msm_postclose,
1030 	.lastclose          = drm_fb_helper_lastclose,
1031 	.irq_handler        = msm_irq,
1032 	.irq_preinstall     = msm_irq_preinstall,
1033 	.irq_postinstall    = msm_irq_postinstall,
1034 	.irq_uninstall      = msm_irq_uninstall,
1035 	.enable_vblank      = msm_enable_vblank,
1036 	.disable_vblank     = msm_disable_vblank,
1037 	.gem_free_object    = msm_gem_free_object,
1038 	.gem_vm_ops         = &vm_ops,
1039 	.dumb_create        = msm_gem_dumb_create,
1040 	.dumb_map_offset    = msm_gem_dumb_map_offset,
1041 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1042 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1043 	.gem_prime_export   = drm_gem_prime_export,
1044 	.gem_prime_import   = drm_gem_prime_import,
1045 	.gem_prime_res_obj  = msm_gem_prime_res_obj,
1046 	.gem_prime_pin      = msm_gem_prime_pin,
1047 	.gem_prime_unpin    = msm_gem_prime_unpin,
1048 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
1049 	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
1050 	.gem_prime_vmap     = msm_gem_prime_vmap,
1051 	.gem_prime_vunmap   = msm_gem_prime_vunmap,
1052 	.gem_prime_mmap     = msm_gem_prime_mmap,
1053 #ifdef CONFIG_DEBUG_FS
1054 	.debugfs_init       = msm_debugfs_init,
1055 #endif
1056 	.ioctls             = msm_ioctls,
1057 	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
1058 	.fops               = &fops,
1059 	.name               = "msm",
1060 	.desc               = "MSM Snapdragon DRM",
1061 	.date               = "20130625",
1062 	.major              = MSM_VERSION_MAJOR,
1063 	.minor              = MSM_VERSION_MINOR,
1064 	.patchlevel         = MSM_VERSION_PATCHLEVEL,
1065 };
1066 
1067 #ifdef CONFIG_PM_SLEEP
1068 static int msm_pm_suspend(struct device *dev)
1069 {
1070 	struct drm_device *ddev = dev_get_drvdata(dev);
1071 	struct msm_drm_private *priv = ddev->dev_private;
1072 	struct msm_kms *kms = priv->kms;
1073 
1074 	/* TODO: Use atomic helper suspend/resume */
1075 	if (kms && kms->funcs && kms->funcs->pm_suspend)
1076 		return kms->funcs->pm_suspend(dev);
1077 
1078 	drm_kms_helper_poll_disable(ddev);
1079 
1080 	priv->pm_state = drm_atomic_helper_suspend(ddev);
1081 	if (IS_ERR(priv->pm_state)) {
1082 		drm_kms_helper_poll_enable(ddev);
1083 		return PTR_ERR(priv->pm_state);
1084 	}
1085 
1086 	return 0;
1087 }
1088 
1089 static int msm_pm_resume(struct device *dev)
1090 {
1091 	struct drm_device *ddev = dev_get_drvdata(dev);
1092 	struct msm_drm_private *priv = ddev->dev_private;
1093 	struct msm_kms *kms = priv->kms;
1094 
1095 	/* TODO: Use atomic helper suspend/resume */
1096 	if (kms && kms->funcs && kms->funcs->pm_resume)
1097 		return kms->funcs->pm_resume(dev);
1098 
1099 	drm_atomic_helper_resume(ddev, priv->pm_state);
1100 	drm_kms_helper_poll_enable(ddev);
1101 
1102 	return 0;
1103 }
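
/*
 * Editorial sketch: the TODO above could plausibly be addressed with the
 * generic helpers drm_mode_config_helper_suspend()/resume(), roughly:
 *
 *	static int msm_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 * This is a sketch under that assumption, not the driver's current choice.
 */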
1104 #endif
1105 
1106 #ifdef CONFIG_PM
1107 static int msm_runtime_suspend(struct device *dev)
1108 {
1109 	struct drm_device *ddev = dev_get_drvdata(dev);
1110 	struct msm_drm_private *priv = ddev->dev_private;
1111 	struct msm_mdss *mdss = priv->mdss;
1112 
1113 	DBG("");
1114 
1115 	if (mdss && mdss->funcs)
1116 		return mdss->funcs->disable(mdss);
1117 
1118 	return 0;
1119 }
1120 
1121 static int msm_runtime_resume(struct device *dev)
1122 {
1123 	struct drm_device *ddev = dev_get_drvdata(dev);
1124 	struct msm_drm_private *priv = ddev->dev_private;
1125 	struct msm_mdss *mdss = priv->mdss;
1126 
1127 	DBG("");
1128 
1129 	if (mdss && mdss->funcs)
1130 		return mdss->funcs->enable(mdss);
1131 
1132 	return 0;
1133 }
1134 #endif
1135 
1136 static const struct dev_pm_ops msm_pm_ops = {
1137 	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
1138 	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
1139 };
1140 
1141 /*
1142  * Componentized driver support:
1143  */
1144 
1145 /*
1146  * NOTE: this duplicates the same code as exynos or imx (and probably any
1147  * other), so there is probably room for some common helpers.
1148  */
1149 static int compare_of(struct device *dev, void *data)
1150 {
1151 	return dev->of_node == data;
1152 }
1153 
1154 /*
1155  * Identify what components need to be added by parsing what remote-endpoints
1156  * our MDP output ports are connected to. In the case of LVDS on MDP4, there
1157  * is no external component that we need to add since LVDS is within MDP4
1158  * itself.
1159  */
1160 static int add_components_mdp(struct device *mdp_dev,
1161 			      struct component_match **matchptr)
1162 {
1163 	struct device_node *np = mdp_dev->of_node;
1164 	struct device_node *ep_node;
1165 	struct device *master_dev;
1166 
1167 	/*
1168 	 * on MDP4 based platforms, the MDP platform device is the component
1169 	 * master that adds other display interface components to itself.
1170 	 *
1171 	 * on MDP5 based platforms, the MDSS platform device is the component
1172 	 * master that adds MDP5 and other display interface components to
1173 	 * itself.
1174 	 */
1175 	if (of_device_is_compatible(np, "qcom,mdp4"))
1176 		master_dev = mdp_dev;
1177 	else
1178 		master_dev = mdp_dev->parent;
1179 
1180 	for_each_endpoint_of_node(np, ep_node) {
1181 		struct device_node *intf;
1182 		struct of_endpoint ep;
1183 		int ret;
1184 
1185 		ret = of_graph_parse_endpoint(ep_node, &ep);
1186 		if (ret) {
1187 			dev_err(mdp_dev, "unable to parse port endpoint\n");
1188 			of_node_put(ep_node);
1189 			return ret;
1190 		}
1191 
1192 		/*
1193 		 * The LCDC/LVDS port on MDP4 is a special case where the
1194 		 * remote-endpoint isn't a component that we need to add.
1195 		 */
1196 		if (of_device_is_compatible(np, "qcom,mdp4") &&
1197 		    ep.port == 0)
1198 			continue;
1199 
1200 		/*
1201 		 * It's okay if some of the ports don't have a remote endpoint
1202 		 * specified. It just means that the port isn't connected to
1203 		 * any external interface.
1204 		 */
1205 		intf = of_graph_get_remote_port_parent(ep_node);
1206 		if (!intf)
1207 			continue;
1208 
1209 		drm_of_component_match_add(master_dev, matchptr, compare_of,
1210 					   intf);
1211 		of_node_put(intf);
1212 	}
1213 
1214 	return 0;
1215 }
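
/*
 * A hypothetical DT graph fragment of what the walk above consumes (all
 * labels illustrative):
 *
 *	mdp: mdp@1a01000 {
 *		ports {
 *			port@0 {
 *				mdp5_intf1_out: endpoint {
 *					remote-endpoint = <&dsi0_in>;
 *				};
 *			};
 *		};
 *	};
 *
 * Here the parent of the remote endpoint (the DSI controller node) is what
 * gets added as a component match.
 */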
1216 
1217 static int compare_name_mdp(struct device *dev, void *data)
1218 {
1219 	return (strstr(dev_name(dev), "mdp") != NULL);
1220 }
1221 
1222 static int add_display_components(struct device *dev,
1223 				  struct component_match **matchptr)
1224 {
1225 	struct device *mdp_dev;
1226 	int ret;
1227 
1228 	/*
1229 	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
1230 	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
1231 	 * Populate the children devices, find the MDP5/DPU node, and then add
1232 	 * the interfaces to our components list.
1233 	 */
1234 	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
1235 	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
1236 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
1237 		if (ret) {
1238 			dev_err(dev, "failed to populate children devices\n");
1239 			return ret;
1240 		}
1241 
1242 		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
1243 		if (!mdp_dev) {
1244 			dev_err(dev, "failed to find MDSS MDP node\n");
1245 			of_platform_depopulate(dev);
1246 			return -ENODEV;
1247 		}
1248 
1249 		put_device(mdp_dev);
1250 
1251 		/* add the MDP component itself */
1252 		drm_of_component_match_add(dev, matchptr, compare_of,
1253 					   mdp_dev->of_node);
1254 	} else {
1255 		/* MDP4 */
1256 		mdp_dev = dev;
1257 	}
1258 
1259 	ret = add_components_mdp(mdp_dev, matchptr);
1260 	if (ret)
1261 		of_platform_depopulate(dev);
1262 
1263 	return ret;
1264 }
1265 
1266 /*
1267  * We don't know what the best binding to link the GPU with the DRM device
1268  * is. For now, we just hunt for all the possible GPUs that we support, and
1269  * add them as components.
1270  */
1271 static const struct of_device_id msm_gpu_match[] = {
1272 	{ .compatible = "qcom,adreno" },
1273 	{ .compatible = "qcom,adreno-3xx" },
1274 	{ .compatible = "qcom,kgsl-3d0" },
1275 	{ },
1276 };
1277 
1278 static int add_gpu_components(struct device *dev,
1279 			      struct component_match **matchptr)
1280 {
1281 	struct device_node *np;
1282 
1283 	np = of_find_matching_node(NULL, msm_gpu_match);
1284 	if (!np)
1285 		return 0;
1286 
1287 	drm_of_component_match_add(dev, matchptr, compare_of, np);
1288 
1289 	of_node_put(np);
1290 
1291 	return 0;
1292 }
1293 
1294 static int msm_drm_bind(struct device *dev)
1295 {
1296 	return msm_drm_init(dev, &msm_driver);
1297 }
1298 
1299 static void msm_drm_unbind(struct device *dev)
1300 {
1301 	msm_drm_uninit(dev);
1302 }
1303 
1304 static const struct component_master_ops msm_drm_ops = {
1305 	.bind = msm_drm_bind,
1306 	.unbind = msm_drm_unbind,
1307 };
1308 
1309 /*
1310  * Platform driver:
1311  */
1312 
1313 static int msm_pdev_probe(struct platform_device *pdev)
1314 {
1315 	struct component_match *match = NULL;
1316 	int ret;
1317 
1318 	ret = add_display_components(&pdev->dev, &match);
1319 	if (ret)
1320 		return ret;
1321 
1322 	ret = add_gpu_components(&pdev->dev, &match);
1323 	if (ret)
1324 		return ret;
1325 
1326 	/* On all devices that I am aware of, the IOMMUs in use can map
1327 	 * any address the CPU can see:
1328 	 */
1329 	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
1330 	if (ret)
1331 		return ret;
1332 
1333 	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
1334 }
1335 
1336 static int msm_pdev_remove(struct platform_device *pdev)
1337 {
1338 	component_master_del(&pdev->dev, &msm_drm_ops);
1339 	of_platform_depopulate(&pdev->dev);
1340 
1341 	return 0;
1342 }
1343 
1344 static const struct of_device_id dt_match[] = {
1345 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
1346 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
1347 	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
1348 	{}
1349 };
1350 MODULE_DEVICE_TABLE(of, dt_match);
1351 
1352 static struct platform_driver msm_platform_driver = {
1353 	.probe      = msm_pdev_probe,
1354 	.remove     = msm_pdev_remove,
1355 	.driver     = {
1356 		.name   = "msm",
1357 		.of_match_table = dt_match,
1358 		.pm     = &msm_pm_ops,
1359 	},
1360 };
1361 
1362 static int __init msm_drm_register(void)
1363 {
1364 	if (!modeset)
1365 		return -EINVAL;
1366 
1367 	DBG("init");
1368 	msm_mdp_register();
1369 	msm_dpu_register();
1370 	msm_dsi_register();
1371 	msm_edp_register();
1372 	msm_hdmi_register();
1373 	adreno_register();
1374 	return platform_driver_register(&msm_platform_driver);
1375 }
1376 
1377 static void __exit msm_drm_unregister(void)
1378 {
1379 	DBG("fini");
1380 	platform_driver_unregister(&msm_platform_driver);
1381 	msm_hdmi_unregister();
1382 	adreno_unregister();
1383 	msm_edp_unregister();
1384 	msm_dsi_unregister();
1385 	msm_mdp_unregister();
1386 	msm_dpu_unregister();
1387 }
1388 
1389 module_init(msm_drm_register);
1390 module_exit(msm_drm_unregister);
1391 
1392 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
1393 MODULE_DESCRIPTION("MSM DRM Driver");
1394 MODULE_LICENSE("GPL");
1395