xref: /linux/drivers/gpu/drm/xe/display/xe_display_bo.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612)
149464bb6SJani Nikula // SPDX-License-Identifier: MIT
249464bb6SJani Nikula /* Copyright © 2024 Intel Corporation */
349464bb6SJani Nikula 
449464bb6SJani Nikula #include <drm/drm_gem.h>
537a6ed2cSJani Nikula #include <drm/intel/display_parent_interface.h>
649464bb6SJani Nikula 
79876394fSJani Nikula #include "intel_fb.h"
849464bb6SJani Nikula #include "xe_bo.h"
937a6ed2cSJani Nikula #include "xe_display_bo.h"
1049464bb6SJani Nikula #include "xe_pxp.h"
1149464bb6SJani Nikula 
1237a6ed2cSJani Nikula static bool xe_display_bo_is_protected(struct drm_gem_object *obj)
1349464bb6SJani Nikula {
1449464bb6SJani Nikula 	return xe_bo_is_protected(gem_to_xe_bo(obj));
1549464bb6SJani Nikula }
1649464bb6SJani Nikula 
1737a6ed2cSJani Nikula static int xe_display_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
1849464bb6SJani Nikula {
1949464bb6SJani Nikula 	struct xe_bo *bo = gem_to_xe_bo(obj);
2049464bb6SJani Nikula 
2149464bb6SJani Nikula 	return xe_bo_read(bo, offset, dst, size);
2249464bb6SJani Nikula }
2349464bb6SJani Nikula 
249876394fSJani Nikula static int xe_display_bo_framebuffer_init(struct drm_gem_object *obj,
259876394fSJani Nikula 					  struct drm_mode_fb_cmd2 *mode_cmd)
269876394fSJani Nikula {
279876394fSJani Nikula 	struct xe_bo *bo = gem_to_xe_bo(obj);
289876394fSJani Nikula 	struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
299876394fSJani Nikula 	int ret;
309876394fSJani Nikula 
319876394fSJani Nikula 	/*
329876394fSJani Nikula 	 * Some modifiers require physical alignment of 64KiB VRAM pages;
339876394fSJani Nikula 	 * require that the BO in those cases is created correctly.
349876394fSJani Nikula 	 */
359876394fSJani Nikula 	if (XE_IOCTL_DBG(xe, intel_fb_needs_64k_phys(mode_cmd->modifier[0]) &&
369876394fSJani Nikula 			     !(bo->flags & XE_BO_FLAG_NEEDS_64K)))
379876394fSJani Nikula 		return -EINVAL;
389876394fSJani Nikula 
399876394fSJani Nikula 	xe_bo_get(bo);
409876394fSJani Nikula 
419876394fSJani Nikula 	ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
429876394fSJani Nikula 	if (ret)
439876394fSJani Nikula 		goto err;
449876394fSJani Nikula 
45*2bb026f3STvrtko Ursulin 	if (!(bo->flags & XE_BO_FLAG_FORCE_WC)) {
469876394fSJani Nikula 		/*
47*2bb026f3STvrtko Ursulin 		 * XE_BO_FLAG_FORCE_WC should ideally be set at creation, or is
489876394fSJani Nikula 		 * automatically set when creating FB. We cannot change caching
499876394fSJani Nikula 		 * mode when the bo is VM_BINDed, so we can only set
509876394fSJani Nikula 		 * coherency with display when unbound.
519876394fSJani Nikula 		 */
529876394fSJani Nikula 		if (XE_IOCTL_DBG(xe, xe_bo_is_vm_bound(bo))) {
539876394fSJani Nikula 			ttm_bo_unreserve(&bo->ttm);
549876394fSJani Nikula 			ret = -EINVAL;
559876394fSJani Nikula 			goto err;
569876394fSJani Nikula 		}
57*2bb026f3STvrtko Ursulin 		bo->flags |= XE_BO_FLAG_FORCE_WC;
589876394fSJani Nikula 	}
599876394fSJani Nikula 	ttm_bo_unreserve(&bo->ttm);
609876394fSJani Nikula 	return 0;
619876394fSJani Nikula 
629876394fSJani Nikula err:
639876394fSJani Nikula 	xe_bo_put(bo);
649876394fSJani Nikula 	return ret;
659876394fSJani Nikula }
669876394fSJani Nikula 
679876394fSJani Nikula static void xe_display_bo_framebuffer_fini(struct drm_gem_object *obj)
689876394fSJani Nikula {
699876394fSJani Nikula 	struct xe_bo *bo = gem_to_xe_bo(obj);
709876394fSJani Nikula 
719876394fSJani Nikula 	if (bo->flags & XE_BO_FLAG_PINNED) {
729876394fSJani Nikula 		/* Unpin our kernel fb first */
739876394fSJani Nikula 		xe_bo_lock(bo, false);
749876394fSJani Nikula 		xe_bo_unpin(bo);
759876394fSJani Nikula 		xe_bo_unlock(bo);
769876394fSJani Nikula 	}
779876394fSJani Nikula 	xe_bo_put(bo);
789876394fSJani Nikula }
799876394fSJani Nikula 
809876394fSJani Nikula static struct drm_gem_object *
819876394fSJani Nikula xe_display_bo_framebuffer_lookup(struct drm_device *drm,
829876394fSJani Nikula 				 struct drm_file *filp,
839876394fSJani Nikula 				 const struct drm_mode_fb_cmd2 *mode_cmd)
849876394fSJani Nikula {
859876394fSJani Nikula 	struct xe_device *xe = to_xe_device(drm);
869876394fSJani Nikula 	struct xe_bo *bo;
879876394fSJani Nikula 	struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
889876394fSJani Nikula 
899876394fSJani Nikula 	if (!gem)
909876394fSJani Nikula 		return ERR_PTR(-ENOENT);
919876394fSJani Nikula 
929876394fSJani Nikula 	bo = gem_to_xe_bo(gem);
939876394fSJani Nikula 	/* Require vram placement or dma-buf import */
949876394fSJani Nikula 	if (IS_DGFX(xe) &&
959876394fSJani Nikula 	    !xe_bo_can_migrate(bo, XE_PL_VRAM0) &&
969876394fSJani Nikula 	    bo->ttm.type != ttm_bo_type_sg) {
979876394fSJani Nikula 		drm_gem_object_put(gem);
989876394fSJani Nikula 		return ERR_PTR(-EREMOTE);
999876394fSJani Nikula 	}
1009876394fSJani Nikula 
1019876394fSJani Nikula 	return gem;
1029876394fSJani Nikula }
1039876394fSJani Nikula 
/*
 * BO callbacks handed to the shared display code so it can operate on
 * xe buffer objects without knowing about xe internals.
 */
const struct intel_display_bo_interface xe_display_bo_interface = {
	/* PXP protection status of the backing xe_bo */
	.is_protected = xe_display_bo_is_protected,
	/* PXP key validity check; implemented by xe_pxp directly */
	.key_check = xe_pxp_obj_key_check,
	/* fb mmap goes through the generic PRIME mmap helper */
	.fb_mmap = drm_gem_prime_mmap,
	/* bounded read out of the BO's backing store */
	.read_from_page = xe_display_bo_read_from_page,
	/* validate BO for fb use and take a reference */
	.framebuffer_init = xe_display_bo_framebuffer_init,
	/* drop the fb's reference (unpinning first if pinned) */
	.framebuffer_fini = xe_display_bo_framebuffer_fini,
	/* handle -> GEM object lookup with DGFX placement checks */
	.framebuffer_lookup = xe_display_bo_framebuffer_lookup,
};
113