xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_drv.c (revision 74acee309fb2a434dce215d44014e6f8e06975ae)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015-2018 Etnaviv Project
4  */
5 
6 #include <linux/component.h>
7 #include <linux/of_platform.h>
8 #include <drm/drm_of.h>
9 
10 #include "etnaviv_cmdbuf.h"
11 #include "etnaviv_drv.h"
12 #include "etnaviv_gpu.h"
13 #include "etnaviv_gem.h"
14 #include "etnaviv_mmu.h"
15 #include "etnaviv_perfmon.h"
16 
17 /*
18  * DRM operations:
19  */
20 
21 
22 static void load_gpu(struct drm_device *dev)
23 {
24 	struct etnaviv_drm_private *priv = dev->dev_private;
25 	unsigned int i;
26 
27 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
28 		struct etnaviv_gpu *g = priv->gpu[i];
29 
30 		if (g) {
31 			int ret;
32 
33 			ret = etnaviv_gpu_init(g);
34 			if (ret)
35 				priv->gpu[i] = NULL;
36 		}
37 	}
38 }
39 
40 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
41 {
42 	struct etnaviv_drm_private *priv = dev->dev_private;
43 	struct etnaviv_file_private *ctx;
44 	int i;
45 
46 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
47 	if (!ctx)
48 		return -ENOMEM;
49 
50 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
51 		struct etnaviv_gpu *gpu = priv->gpu[i];
52 		struct drm_sched_rq *rq;
53 
54 		if (gpu) {
55 			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
56 			drm_sched_entity_init(&ctx->sched_entity[i],
57 					      &rq, 1, NULL);
58 			}
59 	}
60 
61 	file->driver_priv = ctx;
62 
63 	return 0;
64 }
65 
66 static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
67 {
68 	struct etnaviv_drm_private *priv = dev->dev_private;
69 	struct etnaviv_file_private *ctx = file->driver_priv;
70 	unsigned int i;
71 
72 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
73 		struct etnaviv_gpu *gpu = priv->gpu[i];
74 
75 		if (gpu)
76 			drm_sched_entity_destroy(&ctx->sched_entity[i]);
77 	}
78 
79 	kfree(ctx);
80 }
81 
82 /*
83  * DRM debugfs:
84  */
85 
86 #ifdef CONFIG_DEBUG_FS
87 static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
88 {
89 	struct etnaviv_drm_private *priv = dev->dev_private;
90 
91 	etnaviv_gem_describe_objects(priv, m);
92 
93 	return 0;
94 }
95 
96 static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
97 {
98 	struct drm_printer p = drm_seq_file_printer(m);
99 
100 	read_lock(&dev->vma_offset_manager->vm_lock);
101 	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
102 	read_unlock(&dev->vma_offset_manager->vm_lock);
103 
104 	return 0;
105 }
106 
107 static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
108 {
109 	struct drm_printer p = drm_seq_file_printer(m);
110 
111 	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
112 
113 	mutex_lock(&gpu->mmu->lock);
114 	drm_mm_print(&gpu->mmu->mm, &p);
115 	mutex_unlock(&gpu->mmu->lock);
116 
117 	return 0;
118 }
119 
120 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
121 {
122 	struct etnaviv_cmdbuf *buf = &gpu->buffer;
123 	u32 size = buf->size;
124 	u32 *ptr = buf->vaddr;
125 	u32 i;
126 
127 	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
128 			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
129 			size - buf->user_size);
130 
131 	for (i = 0; i < size / 4; i++) {
132 		if (i && !(i % 4))
133 			seq_puts(m, "\n");
134 		if (i % 4 == 0)
135 			seq_printf(m, "\t0x%p: ", ptr + i);
136 		seq_printf(m, "%08x ", *(ptr + i));
137 	}
138 	seq_puts(m, "\n");
139 }
140 
/* debugfs "ring": dump the GPU ring buffer, serialized against submits. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	/* gpu->lock keeps the buffer stable while we walk it. */
	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
151 
152 static int show_unlocked(struct seq_file *m, void *arg)
153 {
154 	struct drm_info_node *node = (struct drm_info_node *) m->private;
155 	struct drm_device *dev = node->minor->dev;
156 	int (*show)(struct drm_device *dev, struct seq_file *m) =
157 			node->info_ent->data;
158 
159 	return show(dev, m);
160 }
161 
162 static int show_each_gpu(struct seq_file *m, void *arg)
163 {
164 	struct drm_info_node *node = (struct drm_info_node *) m->private;
165 	struct drm_device *dev = node->minor->dev;
166 	struct etnaviv_drm_private *priv = dev->dev_private;
167 	struct etnaviv_gpu *gpu;
168 	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
169 			node->info_ent->data;
170 	unsigned int i;
171 	int ret = 0;
172 
173 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
174 		gpu = priv->gpu[i];
175 		if (!gpu)
176 			continue;
177 
178 		ret = show(gpu, m);
179 		if (ret < 0)
180 			break;
181 	}
182 
183 	return ret;
184 }
185 
186 static struct drm_info_list etnaviv_debugfs_list[] = {
187 		{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
188 		{"gem", show_unlocked, 0, etnaviv_gem_show},
189 		{ "mm", show_unlocked, 0, etnaviv_mm_show },
190 		{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
191 		{"ring", show_each_gpu, 0, etnaviv_ring_show},
192 };
193 
194 static int etnaviv_debugfs_init(struct drm_minor *minor)
195 {
196 	struct drm_device *dev = minor->dev;
197 	int ret;
198 
199 	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
200 			ARRAY_SIZE(etnaviv_debugfs_list),
201 			minor->debugfs_root, minor);
202 
203 	if (ret) {
204 		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
205 		return ret;
206 	}
207 
208 	return ret;
209 }
210 #endif
211 
212 /*
213  * DRM ioctls:
214  */
215 
216 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
217 		struct drm_file *file)
218 {
219 	struct etnaviv_drm_private *priv = dev->dev_private;
220 	struct drm_etnaviv_param *args = data;
221 	struct etnaviv_gpu *gpu;
222 
223 	if (args->pipe >= ETNA_MAX_PIPES)
224 		return -EINVAL;
225 
226 	gpu = priv->gpu[args->pipe];
227 	if (!gpu)
228 		return -ENXIO;
229 
230 	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
231 }
232 
233 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
234 		struct drm_file *file)
235 {
236 	struct drm_etnaviv_gem_new *args = data;
237 
238 	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
239 			    ETNA_BO_FORCE_MMU))
240 		return -EINVAL;
241 
242 	return etnaviv_gem_new_handle(dev, file, args->size,
243 			args->flags, &args->handle);
244 }
245 
/*
 * Build a struct timespec compound literal from any object that has
 * tv_sec/tv_nsec members (here: the UAPI timeout structs), so the wait
 * helpers below can take it by pointer.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
250 
251 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
252 		struct drm_file *file)
253 {
254 	struct drm_etnaviv_gem_cpu_prep *args = data;
255 	struct drm_gem_object *obj;
256 	int ret;
257 
258 	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
259 		return -EINVAL;
260 
261 	obj = drm_gem_object_lookup(file, args->handle);
262 	if (!obj)
263 		return -ENOENT;
264 
265 	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
266 
267 	drm_gem_object_put_unlocked(obj);
268 
269 	return ret;
270 }
271 
272 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
273 		struct drm_file *file)
274 {
275 	struct drm_etnaviv_gem_cpu_fini *args = data;
276 	struct drm_gem_object *obj;
277 	int ret;
278 
279 	if (args->flags)
280 		return -EINVAL;
281 
282 	obj = drm_gem_object_lookup(file, args->handle);
283 	if (!obj)
284 		return -ENOENT;
285 
286 	ret = etnaviv_gem_cpu_fini(obj);
287 
288 	drm_gem_object_put_unlocked(obj);
289 
290 	return ret;
291 }
292 
293 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
294 		struct drm_file *file)
295 {
296 	struct drm_etnaviv_gem_info *args = data;
297 	struct drm_gem_object *obj;
298 	int ret;
299 
300 	if (args->pad)
301 		return -EINVAL;
302 
303 	obj = drm_gem_object_lookup(file, args->handle);
304 	if (!obj)
305 		return -ENOENT;
306 
307 	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
308 	drm_gem_object_put_unlocked(obj);
309 
310 	return ret;
311 }
312 
313 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
314 		struct drm_file *file)
315 {
316 	struct drm_etnaviv_wait_fence *args = data;
317 	struct etnaviv_drm_private *priv = dev->dev_private;
318 	struct timespec *timeout = &TS(args->timeout);
319 	struct etnaviv_gpu *gpu;
320 
321 	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
322 		return -EINVAL;
323 
324 	if (args->pipe >= ETNA_MAX_PIPES)
325 		return -EINVAL;
326 
327 	gpu = priv->gpu[args->pipe];
328 	if (!gpu)
329 		return -ENXIO;
330 
331 	if (args->flags & ETNA_WAIT_NONBLOCK)
332 		timeout = NULL;
333 
334 	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
335 						    timeout);
336 }
337 
338 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
339 	struct drm_file *file)
340 {
341 	struct drm_etnaviv_gem_userptr *args = data;
342 
343 	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
344 	    args->flags == 0)
345 		return -EINVAL;
346 
347 	if (offset_in_page(args->user_ptr | args->user_size) ||
348 	    (uintptr_t)args->user_ptr != args->user_ptr ||
349 	    (u32)args->user_size != args->user_size ||
350 	    args->user_ptr & ~PAGE_MASK)
351 		return -EINVAL;
352 
353 	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
354 		       args->user_size))
355 		return -EFAULT;
356 
357 	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
358 				       args->user_size, args->flags,
359 				       &args->handle);
360 }
361 
362 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
363 	struct drm_file *file)
364 {
365 	struct etnaviv_drm_private *priv = dev->dev_private;
366 	struct drm_etnaviv_gem_wait *args = data;
367 	struct timespec *timeout = &TS(args->timeout);
368 	struct drm_gem_object *obj;
369 	struct etnaviv_gpu *gpu;
370 	int ret;
371 
372 	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
373 		return -EINVAL;
374 
375 	if (args->pipe >= ETNA_MAX_PIPES)
376 		return -EINVAL;
377 
378 	gpu = priv->gpu[args->pipe];
379 	if (!gpu)
380 		return -ENXIO;
381 
382 	obj = drm_gem_object_lookup(file, args->handle);
383 	if (!obj)
384 		return -ENOENT;
385 
386 	if (args->flags & ETNA_WAIT_NONBLOCK)
387 		timeout = NULL;
388 
389 	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
390 
391 	drm_gem_object_put_unlocked(obj);
392 
393 	return ret;
394 }
395 
396 static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
397 	struct drm_file *file)
398 {
399 	struct etnaviv_drm_private *priv = dev->dev_private;
400 	struct drm_etnaviv_pm_domain *args = data;
401 	struct etnaviv_gpu *gpu;
402 
403 	if (args->pipe >= ETNA_MAX_PIPES)
404 		return -EINVAL;
405 
406 	gpu = priv->gpu[args->pipe];
407 	if (!gpu)
408 		return -ENXIO;
409 
410 	return etnaviv_pm_query_dom(gpu, args);
411 }
412 
413 static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
414 	struct drm_file *file)
415 {
416 	struct etnaviv_drm_private *priv = dev->dev_private;
417 	struct drm_etnaviv_pm_signal *args = data;
418 	struct etnaviv_gpu *gpu;
419 
420 	if (args->pipe >= ETNA_MAX_PIPES)
421 		return -EINVAL;
422 
423 	gpu = priv->gpu[args->pipe];
424 	if (!gpu)
425 		return -ENXIO;
426 
427 	return etnaviv_pm_query_sig(gpu, args);
428 }
429 
/* etnaviv ioctl table; every entry is allowed on render nodes. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};
445 
/* VM operations for userspace mappings of etnaviv GEM objects. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
451 
/* File operations: stock DRM helpers plus driver-specific GEM mmap. */
static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
	.compat_ioctl       = drm_compat_ioctl,
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = etnaviv_gem_mmap,
};
463 
464 static struct drm_driver etnaviv_drm_driver = {
465 	.driver_features    = DRIVER_GEM |
466 				DRIVER_PRIME |
467 				DRIVER_RENDER,
468 	.open               = etnaviv_open,
469 	.postclose           = etnaviv_postclose,
470 	.gem_free_object_unlocked = etnaviv_gem_free_object,
471 	.gem_vm_ops         = &vm_ops,
472 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
473 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
474 	.gem_prime_export   = drm_gem_prime_export,
475 	.gem_prime_import   = drm_gem_prime_import,
476 	.gem_prime_pin      = etnaviv_gem_prime_pin,
477 	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
478 	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
479 	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
480 	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
481 	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
482 	.gem_prime_mmap     = etnaviv_gem_prime_mmap,
483 #ifdef CONFIG_DEBUG_FS
484 	.debugfs_init       = etnaviv_debugfs_init,
485 #endif
486 	.ioctls             = etnaviv_ioctls,
487 	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
488 	.fops               = &fops,
489 	.name               = "etnaviv",
490 	.desc               = "etnaviv DRM",
491 	.date               = "20151214",
492 	.major              = 1,
493 	.minor              = 2,
494 };
495 
496 /*
497  * Platform driver:
498  */
499 static int etnaviv_bind(struct device *dev)
500 {
501 	struct etnaviv_drm_private *priv;
502 	struct drm_device *drm;
503 	int ret;
504 
505 	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
506 	if (IS_ERR(drm))
507 		return PTR_ERR(drm);
508 
509 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
510 	if (!priv) {
511 		dev_err(dev, "failed to allocate private data\n");
512 		ret = -ENOMEM;
513 		goto out_put;
514 	}
515 	drm->dev_private = priv;
516 
517 	dev->dma_parms = &priv->dma_parms;
518 	dma_set_max_seg_size(dev, SZ_2G);
519 
520 	mutex_init(&priv->gem_lock);
521 	INIT_LIST_HEAD(&priv->gem_list);
522 	priv->num_gpus = 0;
523 
524 	dev_set_drvdata(dev, drm);
525 
526 	ret = component_bind_all(dev, drm);
527 	if (ret < 0)
528 		goto out_bind;
529 
530 	load_gpu(drm);
531 
532 	ret = drm_dev_register(drm, 0);
533 	if (ret)
534 		goto out_register;
535 
536 	return 0;
537 
538 out_register:
539 	component_unbind_all(dev, drm);
540 out_bind:
541 	kfree(priv);
542 out_put:
543 	drm_dev_put(drm);
544 
545 	return ret;
546 }
547 
/*
 * Component-master unbind: tear down in reverse order of etnaviv_bind().
 * The ordering matters: userspace access is cut first, then components
 * are unbound, and only then is the private data freed.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	/* dma_parms points into priv; detach it before freeing priv. */
	dev->dma_parms = NULL;

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}
564 
/* Component-master callbacks tying the GPU cores to the DRM device. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
569 
570 static int compare_of(struct device *dev, void *data)
571 {
572 	struct device_node *np = data;
573 
574 	return dev->of_node == np;
575 }
576 
/* Component match: the device name equals the string in @data. */
static int compare_str(struct device *dev, void *data)
{
	return strcmp(dev_name(dev), data) == 0;
}
581 
582 static int etnaviv_pdev_probe(struct platform_device *pdev)
583 {
584 	struct device *dev = &pdev->dev;
585 	struct component_match *match = NULL;
586 
587 	if (!dev->platform_data) {
588 		struct device_node *core_node;
589 
590 		for_each_compatible_node(core_node, NULL, "vivante,gc") {
591 			if (!of_device_is_available(core_node))
592 				continue;
593 
594 			drm_of_component_match_add(&pdev->dev, &match,
595 						   compare_of, core_node);
596 		}
597 	} else {
598 		char **names = dev->platform_data;
599 		unsigned i;
600 
601 		for (i = 0; names[i]; i++)
602 			component_match_add(dev, &match, compare_str, names[i]);
603 	}
604 
605 	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
606 }
607 
608 static int etnaviv_pdev_remove(struct platform_device *pdev)
609 {
610 	component_master_del(&pdev->dev, &etnaviv_master_ops);
611 
612 	return 0;
613 }
614 
/* Platform driver for the virtual "etnaviv" master device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
	},
};

/* The virtual master platform device created in etnaviv_init(). */
static struct platform_device *etnaviv_drm;
624 
/*
 * Module init: register the GPU and master platform drivers, then, if
 * the device tree contains at least one enabled Vivante core, create
 * the virtual "etnaviv" master platform device that aggregates them.
 */
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", -1);
		if (!pdev) {
			ret = -ENOMEM;
			/* drop the reference held by the node iterator */
			of_node_put(np);
			goto unregister_platform_driver;
		}
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

		/*
		 * Apply the same DMA configuration to the virtual etnaviv
		 * device as the GPU we found. This assumes that all Vivante
		 * GPUs in the system share the same DMA constraints.
		 */
		of_dma_configure(&pdev->dev, np, true);

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);
686 
/* Module exit: undo etnaviv_init() in reverse order. */
static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
694 
695 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
696 MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
697 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
698 MODULE_DESCRIPTION("etnaviv DRM Driver");
699 MODULE_LICENSE("GPL v2");
700 MODULE_ALIAS("platform:etnaviv");
701