xref: /linux/drivers/gpu/drm/xe/display/xe_display_bo.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612)
1 // SPDX-License-Identifier: MIT
2 /* Copyright © 2024 Intel Corporation */
3 
4 #include <drm/drm_gem.h>
5 #include <drm/intel/display_parent_interface.h>
6 
7 #include "intel_fb.h"
8 #include "xe_bo.h"
9 #include "xe_display_bo.h"
10 #include "xe_pxp.h"
11 
12 static bool xe_display_bo_is_protected(struct drm_gem_object *obj)
13 {
14 	return xe_bo_is_protected(gem_to_xe_bo(obj));
15 }
16 
17 static int xe_display_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
18 {
19 	struct xe_bo *bo = gem_to_xe_bo(obj);
20 
21 	return xe_bo_read(bo, offset, dst, size);
22 }
23 
24 static int xe_display_bo_framebuffer_init(struct drm_gem_object *obj,
25 					  struct drm_mode_fb_cmd2 *mode_cmd)
26 {
27 	struct xe_bo *bo = gem_to_xe_bo(obj);
28 	struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
29 	int ret;
30 
31 	/*
32 	 * Some modifiers require physical alignment of 64KiB VRAM pages;
33 	 * require that the BO in those cases is created correctly.
34 	 */
35 	if (XE_IOCTL_DBG(xe, intel_fb_needs_64k_phys(mode_cmd->modifier[0]) &&
36 			     !(bo->flags & XE_BO_FLAG_NEEDS_64K)))
37 		return -EINVAL;
38 
39 	xe_bo_get(bo);
40 
41 	ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
42 	if (ret)
43 		goto err;
44 
45 	if (!(bo->flags & XE_BO_FLAG_FORCE_WC)) {
46 		/*
47 		 * XE_BO_FLAG_FORCE_WC should ideally be set at creation, or is
48 		 * automatically set when creating FB. We cannot change caching
49 		 * mode when the bo is VM_BINDed, so we can only set
50 		 * coherency with display when unbound.
51 		 */
52 		if (XE_IOCTL_DBG(xe, xe_bo_is_vm_bound(bo))) {
53 			ttm_bo_unreserve(&bo->ttm);
54 			ret = -EINVAL;
55 			goto err;
56 		}
57 		bo->flags |= XE_BO_FLAG_FORCE_WC;
58 	}
59 	ttm_bo_unreserve(&bo->ttm);
60 	return 0;
61 
62 err:
63 	xe_bo_put(bo);
64 	return ret;
65 }
66 
67 static void xe_display_bo_framebuffer_fini(struct drm_gem_object *obj)
68 {
69 	struct xe_bo *bo = gem_to_xe_bo(obj);
70 
71 	if (bo->flags & XE_BO_FLAG_PINNED) {
72 		/* Unpin our kernel fb first */
73 		xe_bo_lock(bo, false);
74 		xe_bo_unpin(bo);
75 		xe_bo_unlock(bo);
76 	}
77 	xe_bo_put(bo);
78 }
79 
80 static struct drm_gem_object *
81 xe_display_bo_framebuffer_lookup(struct drm_device *drm,
82 				 struct drm_file *filp,
83 				 const struct drm_mode_fb_cmd2 *mode_cmd)
84 {
85 	struct xe_device *xe = to_xe_device(drm);
86 	struct xe_bo *bo;
87 	struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
88 
89 	if (!gem)
90 		return ERR_PTR(-ENOENT);
91 
92 	bo = gem_to_xe_bo(gem);
93 	/* Require vram placement or dma-buf import */
94 	if (IS_DGFX(xe) &&
95 	    !xe_bo_can_migrate(bo, XE_PL_VRAM0) &&
96 	    bo->ttm.type != ttm_bo_type_sg) {
97 		drm_gem_object_put(gem);
98 		return ERR_PTR(-EREMOTE);
99 	}
100 
101 	return gem;
102 }
103 
/*
 * BO operations exported to the shared display code; xe-specific
 * implementations of the intel_display_bo_interface hooks.
 */
const struct intel_display_bo_interface xe_display_bo_interface = {
	.is_protected = xe_display_bo_is_protected,
	.key_check = xe_pxp_obj_key_check,
	/* fb mmap goes through the generic GEM PRIME path */
	.fb_mmap = drm_gem_prime_mmap,
	.read_from_page = xe_display_bo_read_from_page,
	.framebuffer_init = xe_display_bo_framebuffer_init,
	.framebuffer_fini = xe_display_bo_framebuffer_fini,
	.framebuffer_lookup = xe_display_bo_framebuffer_lookup,
};
113