xref: /linux/drivers/gpu/drm/xe/display/xe_panic.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: MIT
2 /* Copyright © 2025 Intel Corporation */
3 
4 #include <drm/drm_cache.h>
5 #include <drm/drm_panic.h>
6 #include <drm/intel/display_parent_interface.h>
7 
8 #include "intel_display_types.h"
9 #include "intel_fb.h"
10 #include "xe_bo.h"
11 #include "xe_panic.h"
12 #include "xe_res_cursor.h"
13 
/* Per-framebuffer state for writing the panic screen one page at a time. */
struct intel_panic {
	/* Cursor into the BO's VRAM resource; tracks the current page (VRAM path only) */
	struct xe_res_cursor res;
	/* Mapping of the currently mapped page: iomem for VRAM, kmap_local otherwise */
	struct iosys_map vmap;

	/* Page index currently mapped in @vmap; -1 when no page is mapped */
	int page;
};
20 
21 static void xe_panic_kunmap(struct intel_panic *panic)
22 {
23 	if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) {
24 		drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
25 		kunmap_local(panic->vmap.vaddr);
26 	}
27 	iosys_map_clear(&panic->vmap);
28 	panic->page = -1;
29 }
30 
31 /*
32  * The scanout buffer pages are not mapped, so for each pixel,
33  * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
34  * Try to keep the map from the previous pixel, to avoid too much map/unmap.
35  */
/*
 * The scanout buffer pages are not mapped, so for each pixel,
 * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
 * Try to keep the map from the previous pixel, to avoid too much map/unmap.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct intel_panic *panic = fb->panic;
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int new_page;
	unsigned int offset;

	/* Byte offset of the pixel within the framebuffer BO */
	if (fb->panic_tiling)
		offset = fb->panic_tiling(sb->width, x, y);
	else
		offset = y * sb->pitch[0] + x * sb->format->cpp[0];

	new_page = offset >> PAGE_SHIFT;
	offset = offset % PAGE_SIZE;
	/*
	 * panic->page starts at -1 (see xe_panic_setup()), so the first
	 * pixel always takes this path and establishes the mapping.
	 */
	if (new_page != panic->page) {
		if (xe_bo_is_vram(bo)) {
			/* Display is always mapped on root tile */
			struct xe_vram_region *vram = xe_bo_device(bo)->mem.vram;

			/*
			 * The res cursor is only advanced forward with
			 * xe_res_next(); when seeking to an earlier page,
			 * restart it from the new page instead.
			 */
			if (panic->page < 0 || new_page < panic->page) {
				xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE,
					     bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res);
			} else {
				xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page));
			}
			iosys_map_set_vaddr_iomem(&panic->vmap,
						  vram->mapping + panic->res.start);
		} else {
			/* Drop the previous kmap before mapping the new page */
			xe_panic_kunmap(panic);
			iosys_map_set_vaddr(&panic->vmap,
					    ttm_bo_kmap_try_from_panic(&bo->ttm,
								       new_page));
		}
		panic->page = new_page;
	}

	/* Mapping may fail in the panic path; skip the pixel in that case */
	if (iosys_map_is_set(&panic->vmap))
		iosys_map_wr(&panic->vmap, offset, u32, color);
}
77 
78 static struct intel_panic *xe_panic_alloc(void)
79 {
80 	struct intel_panic *panic;
81 
82 	panic = kzalloc_obj(*panic, GFP_KERNEL);
83 
84 	return panic;
85 }
86 
87 static int xe_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
88 {
89 	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
90 	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
91 
92 	if (xe_bo_is_vram(bo) && !xe_bo_is_visible_vram(bo))
93 		return -ENODEV;
94 
95 	panic->page = -1;
96 	sb->set_pixel = xe_panic_page_set_pixel;
97 	return 0;
98 }
99 
/* Panic-screen hooks exported to the shared Intel display code. */
const struct intel_display_panic_interface xe_display_panic_interface = {
	.alloc = xe_panic_alloc,
	.setup = xe_panic_setup,
	.finish = xe_panic_kunmap,
};
105