// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
 * - 1.13.0 - Add VM_BIND
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	13
#define MSM_VERSION_PATCHLEVEL	0

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

static bool separate_gpu_kms;
MODULE_PARM_DESC(separate_gpu_kms, "Use separate DRM device for the GPU (0=single DRM device for both GPU and display (default), 1=two DRM devices)");
module_param(separate_gpu_kms, bool, 0400);
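
/*
 * Each of the above can also be set on the kernel command line with the
 * module prefix, e.g. msm.dumpstate=1.  Note that separate_gpu_kms uses
 * perms 0400, so it is only settable at load time, not writable at runtime.
 */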
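
/*
 * Fault-injection points for GEM allocation and iova assignment, used to
 * exercise error paths (exposed via debugfs when CONFIG_FAULT_INJECTION
 * is enabled):
 */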
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);

bool msm_gpu_no_components(void)
{
	return separate_gpu_kms;
}

static int msm_drm_uninit(struct device *dev, const struct component_ops *gpu_ops)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		if (priv->kms)
			msm_drm_kms_unregister(dev);
	}

	msm_gem_shrinker_cleanup(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	if (priv->kms)
		msm_drm_kms_uninit(dev);

	if (gpu_ops)
		gpu_ops->unbind(dev, dev, NULL);
	else
		component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	return 0;
}

static int msm_drm_init(struct device *dev, const struct drm_driver *drv,
			const struct component_ops *gpu_ops)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Initialize stall-on-fault */
	spin_lock_init(&priv->fault_stall_lock);
	priv->stall_enabled = true;

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);
		if (ret)
			goto err_put_dev;
	}

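	/* No practical limit on dma segment size; avoids sg-table splitting: */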
	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	if (gpu_ops)
		ret = gpu_ops->bind(dev, dev, NULL);
	else
		ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_put_dev;

	ret = msm_gem_shrinker_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
		if (ret)
			goto err_msm_uninit;
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init)
		msm_drm_kms_post_init(dev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev, gpu_ops);

	return ret;

err_put_dev:
	drm_dev_put(ddev);

	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

/**
 * msm_context_vm - lazily create the context's VM
 *
 * @dev: the drm device
 * @ctx: the context
 *
 * The VM is lazily created, so that userspace has a chance to opt-in to having
 * a userspace managed VM before the VM is created.
 *
 * Note that this does not return a reference to the VM. Once the VM is created,
 * it exists for the lifetime of the context.
 */
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	/* Once ctx->vm is created it is valid for the lifetime of the context: */
	if (ctx->vm)
		return ctx->vm;

	mutex_lock(&init_lock);
	if (!ctx->vm) {
		ctx->vm = msm_gpu_create_private_vm(
			priv->gpu, current, !ctx->userspace_managed_vm);
	}
	mutex_unlock(&init_lock);

	return ctx->vm;
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_context *ctx;

	ctx = kzalloc_obj(*ctx);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	file->driver_priv = ctx;

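	/* Assign the context a distinct seqno, so it can be identified (e.g. in
	 * logging and state dumps):
	 */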
	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_context *ctx)
{
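	/* Mark the context closed, so paths racing with teardown can bail: */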
	ctx->closed = true;
	msm_submitqueue_close(ctx);
	msm_context_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_context *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_context_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	/* Pass the (possibly WC-promoted) flags, not args->flags: */
	return msm_gem_new_handle(dev, file, args->size,
				  flags, &args->handle, NULL);
}

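/*
 * The uabi timespec is an absolute CLOCK_MONOTONIC time; convert it to a
 * ktime for the fence-wait helpers:
 */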
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
				   struct drm_file *file, struct drm_gem_object *obj,
				   uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_context *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (msm_context_is_vmbind(ctx))
		return UERR(EINVAL, dev, "VM_BIND is enabled");

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
				       struct drm_file *file, struct drm_gem_object *obj,
				       uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_context *ctx = file->driver_priv;
	struct drm_gpuvm *vm = msm_context_vm(dev, ctx);

	if (!priv->gpu)
		return -EINVAL;

	if (msm_context_is_vmbind(ctx))
		return UERR(EINVAL, dev, "VM_BIND is enabled");

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->vm == vm)
		return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, vm, iova);
}

static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *new_metadata;
	void *buf;
	int ret;

	/* Impose a moderate upper bound on metadata size: */
	if (metadata_size > 128) {
		return -EOVERFLOW;
	}

	/* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
	buf = memdup_user(metadata, metadata_size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		goto out;

	new_metadata =
		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
	if (!new_metadata) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	msm_obj->metadata = new_metadata;
	msm_obj->metadata_size = metadata_size;
	memcpy(msm_obj->metadata, buf, metadata_size);

out_unlock:
	msm_gem_unlock(obj);
out:
	kfree(buf);

	return ret;
}

static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 *metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *buf;
	int ret, len;

	if (!metadata) {
		/*
		 * Querying the size is inherently racy, but
		 * EXT_external_objects expects the app to confirm
		 * via device and driver UUIDs that the exporter and
		 * importer versions match. All we can do from the
		 * kernel side is check the length under obj lock
		 * when userspace tries to retrieve the metadata
		 */
		*metadata_size = msm_obj->metadata_size;
		return 0;
	}

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		return ret;

	/* Avoid copy_to_user() under gem obj lock: */
	len = msm_obj->metadata_size;
	buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);

	msm_gem_unlock(obj);

	if (!buf) {
		ret = -ENOMEM;
	} else if (*metadata_size < len) {
		ret = -ETOOSMALL;
	} else if (copy_to_user(metadata, buf, len)) {
		ret = -EFAULT;
	} else {
		*metadata_size = len;
	}

	kfree(buf);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
	case MSM_INFO_SET_METADATA:
	case MSM_INFO_GET_METADATA:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		ret = drm_gem_create_mmap_offset(obj);
		if (ret == 0)
			args->value = drm_vma_node_offset_addr(&obj->vma_node);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (drm_gem_is_imported(obj)) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		ret = 0;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
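		/* Truncate at the first non-printable character: */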
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -ETOOSMALL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	case MSM_INFO_SET_METADATA:
		ret = msm_ioctl_gem_info_set_metadata(
			obj, u64_to_user_ptr(args->value), args->len);
		break;
	case MSM_INFO_GET_METADATA:
		ret = msm_ioctl_gem_info_get_metadata(
			obj, u64_to_user_ptr(args->value), &args->len);
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

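	/*
	 * An immediate deadline hints to the fence signaller that someone is
	 * actively waiting, e.g. so devfreq can boost GPU clocks:
	 */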
	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());

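	/*
	 * dma_fence_wait_timeout() returns remaining jiffies on success,
	 * zero on timeout, or a negative error; normalize that to 0/-errno
	 * for the ioctl:
	 */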
	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		return -ENOENT;
	}

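	/*
	 * On success msm_gem_madvise() returns whether the backing pages are
	 * still retained (i.e. not already purged):
	 */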
	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}


static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
				      args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW),
};

static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

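/*
 * Feature sets for the GPU-only and KMS-only drm_devices; the default
 * combined device (see separate_gpu_kms) advertises the union of both.
 */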
#define DRIVER_FEATURES_GPU ( \
			DRIVER_GEM | \
			DRIVER_GEM_GPUVA | \
			DRIVER_RENDER | \
			DRIVER_SYNCOBJ | \
			DRIVER_SYNCOBJ_TIMELINE | \
			0 )

#define DRIVER_FEATURES_KMS ( \
			DRIVER_GEM | \
			DRIVER_GEM_GPUVA | \
			DRIVER_ATOMIC | \
			DRIVER_MODESET | \
			0 )

static const struct drm_driver msm_driver = {
	.driver_features = DRIVER_FEATURES_GPU | DRIVER_FEATURES_KMS,
	.open = msm_open,
	.postclose = msm_postclose,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = drm_gem_dumb_map_offset,
	.gem_prime_import = msm_gem_prime_import,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	MSM_FBDEV_DRIVER_OPS,
	.show_fdinfo = msm_show_fdinfo,
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

static const struct drm_driver msm_kms_driver = {
	.driver_features = DRIVER_FEATURES_KMS,
	.open = msm_open,
	.postclose = msm_postclose,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = drm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	MSM_FBDEV_DRIVER_OPS,
	.show_fdinfo = msm_show_fdinfo,
	.fops = &fops,
	.name = "msm-kms",
	.desc = "MSM Snapdragon DRM",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

static const struct drm_driver msm_gpu_driver = {
	.driver_features = DRIVER_FEATURES_GPU,
	.open = msm_open,
	.postclose = msm_postclose,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	.show_fdinfo = msm_show_fdinfo,
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_mdp_components(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

#if !IS_REACHABLE(CONFIG_DRM_MSM_MDP5) || !IS_REACHABLE(CONFIG_DRM_MSM_DPU)
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{
	/* If just a single driver is enabled, use it no matter what */
	return true;
}
#else

static bool prefer_mdp5 = true;
MODULE_PARM_DESC(prefer_mdp5, "Select whether MDP5 or DPU driver should be preferred");
module_param(prefer_mdp5, bool, 0444);

/* list all platforms that have been migrated from mdp5 to dpu driver */
static const char *const msm_mdp5_dpu_migrated[] = {
	/* there never was qcom,msm8998-mdp5 */
	"qcom,sdm630-mdp5",
	"qcom,sdm660-mdp5",
	NULL,
};

/* list all platforms supported by both mdp5 and dpu drivers */
static const char *const msm_mdp5_dpu_migration[] = {
	"qcom,msm8917-mdp5",
	"qcom,msm8937-mdp5",
	"qcom,msm8953-mdp5",
	"qcom,msm8996-mdp5",
	NULL,
};

bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{
	/* If it is not an MDP5 device, use DPU */
	if (!of_device_is_compatible(dev->of_node, "qcom,mdp5"))
		return dpu_driver;

	/* If it is no longer supported by MDP5, use DPU */
	if (of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migrated))
		return dpu_driver;

	/* If it is not in the migration list, use MDP5 */
	if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration))
		return !dpu_driver;

	return prefer_mdp5 ? !dpu_driver : dpu_driver;
}
#endif

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np) && adreno_has_gpu(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev,
			    msm_gpu_no_components() ?
				&msm_kms_driver :
				&msm_driver,
			    NULL);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev, NULL);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev),
		  struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms = kms;
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_mdp_components(master_dev, &match);
		if (ret)
			return ret;
	}

	if (!msm_gpu_no_components()) {
		ret = add_gpu_components(master_dev, &match);
		if (ret)
			return ret;
	}

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_probe(struct platform_device *pdev,
		  const struct component_ops *ops)
{
	struct msm_drm_private *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return msm_drm_init(&pdev->dev, &msm_gpu_driver, ops);
}

void msm_gpu_remove(struct platform_device *pdev,
		    const struct component_ops *ops)
{
	msm_drm_uninit(&pdev->dev, ops);
}

static int __init msm_drm_register(void)
{
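	/* The driver is useless with modesetting disabled; refuse to load: */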
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();

	return 0;
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");