xref: /linux/drivers/gpu/drm/msm/msm_drv.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
#endif
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
};

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}
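
/*
 * A minimal usage sketch (illustrative, not part of this file): a KMS or
 * GPU backend creates its MMU wrapper and registers it here, keeping the
 * returned index as the id later passed to msm_gem_get_iova().  Error
 * handling abbreviated:
 *
 *	struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, domain);
 *	int id;
 *
 *	if (IS_ERR(mmu))
 *		return PTR_ERR(mmu);
 *	id = msm_register_mmu(dev, mmu);
 *	if (id < 0)
 *		return id;
 */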

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_MSM_FBDEV
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
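
/*
 * The vram string is parsed with memparse(), so the usual k/m/g suffixes
 * work.  For example, a 64 MiB carveout can be requested at module load:
 *
 *	modprobe msm vram=64m
 *
 * or, with the driver built in, on the kernel command line:
 *
 *	msm.vram=64m
 */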

/*
 * Util/helpers:
 */

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
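
/*
 * Note the ERR_PTR()-style return: callers must check with IS_ERR(), not
 * against NULL.  A typical sub-device probe would do something like the
 * following (resource/debug names illustrative):
 *
 *	void __iomem *mmio = msm_ioremap(pdev, "mdp_phys", "MDP");
 *
 *	if (IS_ERR(mmio))
 *		return PTR_ERR(mmio);
 */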

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
	return val;
}
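
/*
 * With reglog enabled, these wrappers make all register traffic visible
 * in dmesg, which is handy when comparing against downstream driver
 * traces.  A hypothetical read-modify-write through the wrappers logs
 * both halves (REG_CTRL is a made-up register offset):
 *
 *	u32 val = msm_readl(mmio + REG_CTRL);       (logs "IO:R ...")
 *	msm_writel(val | BIT(0), mmio + REG_CTRL);  (logs "IO:W ...")
 */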

/*
 * DRM operations:
 */

static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&dev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}

static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	static const struct of_device_id match_types[] = { {
		.compatible = "qcom,mdss_mdp",
		.data = (void *)5,
	}, {
		/* end node */
	} };
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	match = of_match_node(match_types, dev->of_node);
	if (match)
		return (int)(unsigned long)match->data;
#endif
	return 4;
}
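
/*
 * In device-tree terms: a node like the (abbreviated, illustrative)
 * example below selects MDP5, anything else falls back to MDP4:
 *
 *	mdp: mdp@fd900000 {
 *		compatible = "qcom,mdss_mdp";
 *		...
 *	};
 */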

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long size = 0;
	int ret = 0;

#ifdef CONFIG_OF
	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	struct device_node *node;

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		if (ret)
			return ret;
		size = resource_size(&r);
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
	} else
#endif

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		DEFINE_DMA_ATTRS(attrs);
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
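
/*
 * Once the carveout exists, GEM objects are allocated out of it with the
 * drm_mm range allocator instead of going through the IOMMU.  Roughly
 * (see msm_gem.c for the real version; error handling omitted):
 *
 *	int npages = obj->size >> PAGE_SHIFT;
 *
 *	ret = drm_mm_insert_node(&priv->vram.mm, vram_node, npages,
 *			0, DRM_MM_SEARCH_DEFAULT);
 *	paddr = priv->vram.paddr + (vram_node->start << PAGE_SHIFT);
 */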

static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);

	drm_mode_config_init(dev);

	platform_set_drvdata(pdev, dev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		return ret;

	ret = msm_init_vram(dev);
	if (ret)
		goto fail;

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	if (fbdev)
		priv->fbdev = msm_fbdev_init(dev);
#endif

	ret = msm_debugfs_late_init(dev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
#endif
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}

static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}

static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}

static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}

static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static struct drm_info_list msm_debugfs_list[] = {
		{ "gpu", show_locked, 0, msm_gpu_show },
		{ "gem", show_locked, 0, msm_gem_show },
		{ "mm", show_locked, 0, msm_mm_show },
		{ "fb", show_locked, 0, msm_fb_show },
};
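
/*
 * With debugfs mounted in the usual place, these show up under the DRM
 * debugfs root for each minor, e.g. on minor 0:
 *
 *	cat /sys/kernel/debug/dri/0/gpu
 *	cat /sys/kernel/debug/dri/0/gem
 */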

static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;
	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}

static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return 0;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
	if (!minor->dev->dev_private)
		return;
	msm_rd_debugfs_cleanup(minor);
	msm_perf_debugfs_cleanup(minor);
}
#endif

/*
 * Fences:
 */

int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		ktime_t *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		ktime_t now = ktime_get();
		unsigned long remaining_jiffies;

		if (ktime_compare(*timeout, now) < 0) {
			remaining_jiffies = 0;
		} else {
			ktime_t rem = ktime_sub(*timeout, now);
			struct timespec ts = ktime_to_timespec(rem);
			remaining_jiffies = timespec_to_jiffies(&ts);
		}

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}
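
/*
 * The timeout is an absolute CLOCK_MONOTONIC time, not a duration, so an
 * in-kernel caller wanting "up to one second from now" would pass e.g.:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
 *
 *	ret = msm_wait_fence_interruptable(dev, fence, &timeout);
 */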

int msm_queue_fence_cb(struct drm_device *dev,
		struct msm_fence_cb *cb, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (fence > priv->completed_fence) {
		cb->fence = fence;
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}
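
/*
 * A sketch of hooking a fence callback, assuming the INIT_FENCE_CB()
 * helper from msm_drv.h (which points the embedded work at
 * __msm_fence_worker above):
 *
 *	static void my_cb_func(struct msm_fence_cb *cb) { ... }
 *	static struct msm_fence_cb my_cb;
 *
 *	INIT_FENCE_CB(&my_cb, my_cb_func);
 *	ret = msm_queue_fence_cb(dev, &my_cb, fence);
 */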

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	return msm_wait_fence_interruptable(dev, args->fence, &timeout);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
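
/*
 * Userspace normally reaches these through libdrm.  A sketch of waiting
 * on a fence with an absolute timeout (fields per msm_drm.h, error
 * handling omitted):
 *
 *	struct drm_msm_wait_fence req = {
 *		.fence = fence,
 *		.timeout = { .tv_sec = abs_sec, .tv_nsec = abs_nsec },
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_MSM_WAIT_FENCE, &req, sizeof(req));
 */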

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl       = drm_compat_ioctl,
#endif
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.load               = msm_load,
	.unload             = msm_unload,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.set_busid          = drm_platform_set_busid,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = 1,
	.minor              = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Componentized driver support:
 */

#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
 * (or probably any other).. so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int add_components(struct device *dev, struct component_match **matchptr,
		const char *name)
{
	struct device_node *np = dev->of_node;
	unsigned i;

	for (i = 0; ; i++) {
		struct device_node *node;

		node = of_parse_phandle(np, name, i);
		if (!node)
			break;

		component_match_add(dev, matchptr, compare_of, node);
	}

	return 0;
}
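
/*
 * The property names handed to add_components() by msm_pdev_probe()
 * below ("connectors", "gpus") are plain phandle lists on the mdp node,
 * e.g. (abbreviated, illustrative):
 *
 *	mdp: mdp@fd900000 {
 *		compatible = "qcom,mdss_mdp";
 *		connectors = <&hdmi>, <&dsi0>;
 *		gpus = <&gpu>;
 *	};
 */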
#else
static int compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
#endif

static int msm_drm_bind(struct device *dev)
{
	return drm_platform_init(&msm_driver, to_platform_device(dev));
}

static void msm_drm_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
#ifdef CONFIG_OF
	add_components(&pdev->dev, &match, "connectors");
	add_components(&pdev->dev, &match, "gpus");
#else
	/* For non-DT case, it kinda sucks.  We don't actually have a way
	 * to know whether or not we are waiting for certain devices (or if
	 * they are simply not present).  But for non-DT we only need to
	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
	 */
	static const char *devnames[] = {
			"hdmi_msm.0", "kgsl-3d0.0",
	};
	int i;

	DBG("Adding components..");

	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
		struct device *dev;

		dev = bus_find_device_by_name(&platform_bus_type,
				NULL, devnames[i]);
		if (!dev) {
			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
			return -EPROBE_DEFER;
		}

		component_match_add(&pdev->dev, &match, compare_dev, dev);
	}
#endif

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp" },      /* mdp4 */
	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
	.id_table   = msm_id,
};

static int __init msm_drm_register(void)
{
	DBG("init");
	msm_dsi_register();
	msm_edp_register();
	hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");