// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */

#include <drm/drm_cache.h>
#include <drm/drm_panic.h>

#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_panic.h"
#include "xe_bo.h"
#include "xe_res_cursor.h"

struct intel_panic {
	struct xe_res_cursor res;	/* cursor into the BO's VRAM placement */
	struct iosys_map vmap;		/* mapping of the currently accessed page */

	int page;			/* page index backing @vmap, or -1 if none */
};

static void xe_panic_kunmap(struct intel_panic *panic)
{
	if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) {
		/* Flush CPU caches so the scanout engine sees the writes */
		drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
		kunmap_local(panic->vmap.vaddr);
	}
	iosys_map_clear(&panic->vmap);
	panic->page = -1;
}

/*
 * The scanout buffer pages are not kept mapped, so map them one page at a
 * time while drawing: pixels in VRAM are written through the BAR mapping,
 * and pixels in system memory go through a page mapped with
 * ttm_bo_kmap_try_from_panic(). Keep the mapping from the previous pixel
 * to avoid a map/unmap on every single write.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct intel_panic *panic = fb->panic;
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int new_page;
	unsigned int offset;

	if (fb->panic_tiling)
		offset = fb->panic_tiling(sb->width, x, y);
	else
		offset = y * sb->pitch[0] + x * sb->format->cpp[0];

	new_page = offset >> PAGE_SHIFT;
	offset = offset % PAGE_SIZE;
	if (new_page != panic->page) {
		if (xe_bo_is_vram(bo)) {
			/* Display is always mapped on the root tile */
			struct xe_vram_region *vram = xe_bo_device(bo)->mem.vram;

			if (panic->page < 0 || new_page < panic->page) {
				/* No cursor yet, or moving backwards: restart */
				xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE,
					     bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res);
			} else {
				/* Moving forward: advance the existing cursor */
				xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page));
			}
			iosys_map_set_vaddr_iomem(&panic->vmap,
						  vram->mapping + panic->res.start);
		} else {
			/* System memory: drop the old kmap, map the new page */
			xe_panic_kunmap(panic);
			iosys_map_set_vaddr(&panic->vmap,
					    ttm_bo_kmap_try_from_panic(&bo->ttm,
								       new_page));
		}
		panic->page = new_page;
	}

	if (iosys_map_is_set(&panic->vmap))
		iosys_map_wr(&panic->vmap, offset, u32, color);
}
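
/*
 * For context: the drm_panic core consumes this hook one pixel at a time.
 * A minimal sketch of the consuming side (simplified; the real drawing
 * code lives in drivers/gpu/drm/drm_panic.c and blits text and logo
 * glyphs rather than filling the whole screen):
 *
 *	for (y = 0; y < sb->height; y++)
 *		for (x = 0; x < sb->width; x++)
 *			sb->set_pixel(sb, x, y, color);
 *
 * Caching the previously mapped page in struct intel_panic pays off here
 * because consecutive pixels of a scanline almost always land in the
 * same page.
 */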

struct intel_panic *intel_panic_alloc(void)
{
	struct intel_panic *panic;

	panic = kzalloc(sizeof(*panic), GFP_KERNEL);

	return panic;
}
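
/*
 * Usage sketch (illustrative, not a call site in this file): fb->panic,
 * dereferenced in the set_pixel path above, is expected to be populated
 * once at framebuffer creation time, from a context that may sleep, so
 * that the panic path itself never has to allocate:
 *
 *	fb->panic = intel_panic_alloc();
 *	if (!fb->panic)
 *		return -ENOMEM;
 */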

int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));

	/*
	 * The CPU can only reach VRAM through the BAR window, so a
	 * framebuffer outside the CPU-visible region cannot be drawn to
	 * at panic time.
	 */
	if (xe_bo_is_vram(bo) && !xe_bo_is_visible_vram(bo))
		return -ENODEV;

	panic->page = -1;
	sb->set_pixel = xe_panic_page_set_pixel;
	return 0;
}

void intel_panic_finish(struct intel_panic *panic)
{
	xe_panic_kunmap(panic);
}
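
/*
 * Putting it together, the expected flow (a sketch, assuming the shared
 * intel display code wires these helpers into the drm_panic plane hooks):
 *
 *	fb->panic = intel_panic_alloc();	// at framebuffer creation
 *	...
 *	// at panic time, from the get_scanout_buffer hook:
 *	intel_panic_setup(fb->panic, sb);	// validates the BO, installs set_pixel
 *	// drm_panic draws the panic screen via sb->set_pixel ...
 *	intel_panic_finish(fb->panic);		// unmap and flush the last page
 */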