xref: /linux/drivers/gpu/drm/xe/display/xe_fb_pin.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo.h>

#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_display_vma.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
#include "xe_vram_types.h"

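/*
 * Write the DPT PTEs for one rotated plane: each destination column is
 * filled by walking the source rows bottom-up, so consecutive DPT entries
 * describe the rotated layout. Padding entries at the end of each column
 * are skipped, and the final offset is aligned to the next page.
 */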
static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
		  u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	u32 column, row;
	u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);

	/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
	 * by writing dpt/ggtt in a different order?
	 */

	for (column = 0; column < width; column++) {
		u32 src_idx = src_stride * (height - 1) + column + bo_ofs;

		for (row = 0; row < height; row++) {
			u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);

			iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
			*dpt_ofs += 8;
			src_idx -= src_stride;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*dpt_ofs += (dst_stride - height) * 8;
	}

	/* Align to next page */
	*dpt_ofs = ALIGN(*dpt_ofs, 4096);
}

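/*
 * Advance @dest past @pad PTEs without writing them: the DE never reads the
 * PTEs covering padding tiles, so their contents don't matter.
 */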
static unsigned int
write_dpt_padding(struct iosys_map *map, unsigned int dest, unsigned int pad)
{
	/* The DE ignores the PTEs for the padding tiles */
	return dest + pad * sizeof(u64);
}

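/* Write the PTEs for a linearly remapped plane: one entry per page, in order. */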
static unsigned int
write_dpt_remapped_linear(struct xe_bo *bo, struct iosys_map *map,
			  unsigned int dest,
			  const struct intel_remapped_plane_info *plane)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	const u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo,
						 xe->pat.idx[XE_CACHE_NONE]);
	unsigned int offset = plane->offset * XE_PAGE_SIZE;
	unsigned int size = plane->size;

	while (size--) {
		u64 addr = xe_bo_addr(bo, offset, XE_PAGE_SIZE);

		iosys_map_wr(map, dest, u64, addr | pte);
		dest += sizeof(u64);
		offset += XE_PAGE_SIZE;
	}

	return dest;
}

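/*
 * Write the PTEs for a tiled remapped plane, row by row: rows are
 * plane->src_stride pages apart in the bo and plane->dst_stride entries
 * apart in the DPT, with the tail of each row left as padding.
 */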
static unsigned int
write_dpt_remapped_tiled(struct xe_bo *bo, struct iosys_map *map,
			 unsigned int dest,
			 const struct intel_remapped_plane_info *plane)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	const u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo,
						 xe->pat.idx[XE_CACHE_NONE]);
	unsigned int offset, column, row;

	for (row = 0; row < plane->height; row++) {
		offset = (plane->offset + plane->src_stride * row) *
			 XE_PAGE_SIZE;

		for (column = 0; column < plane->width; column++) {
			u64 addr = xe_bo_addr(bo, offset, XE_PAGE_SIZE);

			iosys_map_wr(map, dest, u64, addr | pte);
			dest += sizeof(u64);
			offset += XE_PAGE_SIZE;
		}

		dest = write_dpt_padding(map, dest,
					 plane->dst_stride - plane->width);
	}

	return dest;
}

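/*
 * Write a complete DPT for a remapped view: emit each populated plane in
 * turn, inserting alignment padding between planes when requested.
 */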
static void
write_dpt_remapped(struct xe_bo *bo,
		   const struct intel_remapped_info *remap_info,
		   struct iosys_map *map)
{
	unsigned int i, dest = 0;

	for (i = 0; i < ARRAY_SIZE(remap_info->plane); i++) {
		const struct intel_remapped_plane_info *plane =
				&remap_info->plane[i];

		if (!plane->linear && !plane->width && !plane->height)
			continue;

		if (dest && remap_info->plane_alignment) {
			const unsigned int index = dest / sizeof(u64);
			const unsigned int pad =
				ALIGN(index, remap_info->plane_alignment) -
				index;

			dest = write_dpt_padding(map, dest, pad);
		}

		if (plane->linear)
			dest = write_dpt_remapped_linear(bo, map, dest, plane);
		else
			dest = write_dpt_remapped_tiled(bo, map, dest, plane);
	}
}

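/*
 * Pin a framebuffer through a DPT: allocate a page-table bo (in VRAM or
 * stolen memory, falling back to system memory), fill it with PTEs matching
 * the requested view and publish its GGTT node through @vma.
 */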
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
			       const struct i915_gtt_view *view,
			       struct i915_vma *vma,
			       unsigned int alignment)
{
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
	struct xe_ggtt *ggtt = tile0->mem.ggtt;
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
	u32 dpt_size, size = bo->ttm.base.size;

	if (view->type == I915_GTT_VIEW_NORMAL)
		dpt_size = ALIGN(size / XE_PAGE_SIZE * 8, XE_PAGE_SIZE);
	else if (view->type == I915_GTT_VIEW_REMAPPED)
		dpt_size = ALIGN(intel_remapped_info_size(&fb->remapped_view.gtt.remapped) * 8,
				 XE_PAGE_SIZE);
	else
		/* display uses 4K tiles instead of bytes here, convert to entries.. */
		dpt_size = ALIGN(intel_rotation_info_size(&view->rotated) * 8,
				 XE_PAGE_SIZE);

	if (IS_DGFX(xe))
		dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
						   dpt_size, ~0ull,
						   ttm_bo_type_kernel,
						   XE_BO_FLAG_VRAM0 |
						   XE_BO_FLAG_GGTT |
						   XE_BO_FLAG_PAGETABLE,
						   alignment, false);
	else
		dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
						   dpt_size, ~0ull,
						   ttm_bo_type_kernel,
						   XE_BO_FLAG_STOLEN |
						   XE_BO_FLAG_GGTT |
						   XE_BO_FLAG_PAGETABLE,
						   alignment, false);
	if (IS_ERR(dpt))
		dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
						   dpt_size, ~0ull,
						   ttm_bo_type_kernel,
						   XE_BO_FLAG_SYSTEM |
						   XE_BO_FLAG_GGTT |
						   XE_BO_FLAG_PAGETABLE |
						   XE_BO_FLAG_FORCE_WC,
						   alignment, false);
	if (IS_ERR(dpt))
		return PTR_ERR(dpt);

	if (view->type == I915_GTT_VIEW_NORMAL) {
		u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
		u32 x;

		for (x = 0; x < size / XE_PAGE_SIZE; x++) {
			u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE);

			iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr);
		}
	} else if (view->type == I915_GTT_VIEW_REMAPPED) {
		write_dpt_remapped(bo, &view->remapped, &dpt->vmap);
	} else {
		const struct intel_rotation_info *rot_info = &view->rotated;
		u32 i, dpt_ofs = 0;

		for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
			write_dpt_rotated(bo, &dpt->vmap, &dpt_ofs,
					  rot_info->plane[i].offset,
					  rot_info->plane[i].width,
					  rot_info->plane[i].height,
					  rot_info->plane[i].src_stride,
					  rot_info->plane[i].dst_stride);
	}

	vma->dpt = dpt;
	vma->node = dpt->ggtt_node[tile0->id];

	/* Ensure DPT writes are flushed */
	xe_device_l2_flush(xe);
	return 0;
}

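/*
 * GGTT counterpart of write_dpt_rotated(): emit the PTEs for one rotated
 * plane directly into the GGTT through the @write_pte callback.
 */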
static void
write_ggtt_rotated(struct xe_ggtt *ggtt, u32 *ggtt_ofs,
		   u64 pte_flags,
		   xe_ggtt_set_pte_fn write_pte,
		   struct xe_bo *bo, u32 bo_ofs,
		   u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
	u32 column, row;

	for (column = 0; column < width; column++) {
		u32 src_idx = src_stride * (height - 1) + column + bo_ofs;

		for (row = 0; row < height; row++) {
			u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);

			write_pte(ggtt, *ggtt_ofs, pte_flags | addr);
			*ggtt_ofs += XE_PAGE_SIZE;
			src_idx -= src_stride;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*ggtt_ofs += (dst_stride - height) * XE_PAGE_SIZE;
	}
}

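/* Arguments passed through xe_ggtt_insert_node_transform() to the transform callback */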
struct fb_rotate_args {
	const struct i915_gtt_view *view;
	struct xe_bo *bo;
};

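/* Transform callback: write the rotated PTEs for every plane of the view */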
static void write_ggtt_rotated_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
				    u64 pte_flags, xe_ggtt_set_pte_fn write_pte, void *data)
{
	struct fb_rotate_args *args = data;
	struct xe_bo *bo = args->bo;
	const struct intel_rotation_info *rot_info = &args->view->rotated;
	u32 ggtt_ofs = xe_ggtt_node_addr(node);

	for (u32 i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		write_ggtt_rotated(ggtt, &ggtt_ofs, pte_flags, write_pte,
				   bo, rot_info->plane[i].offset,
				   rot_info->plane[i].width,
				   rot_info->plane[i].height,
				   rot_info->plane[i].src_stride,
				   rot_info->plane[i].dst_stride);
}

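/*
 * Pin a framebuffer directly into the GGTT. A normal view reuses the bo's
 * preallocated GGTT node when one exists; otherwise a new node is inserted,
 * with rotated views written out via write_ggtt_rotated_node().
 */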
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
				const struct i915_gtt_view *view,
				struct i915_vma *vma,
				unsigned int alignment)
{
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
	struct xe_ggtt *ggtt = tile0->mem.ggtt;
	u64 pte, size;
	u32 align;
	int ret = 0;

	/* TODO: Consider sharing framebuffer mapping?
	 * embed i915_vma inside intel_framebuffer
	 */
	guard(xe_pm_runtime_noresume)(xe);

	align = XE_PAGE_SIZE;
	if (xe_bo_is_vram(bo) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		align = max(align, SZ_64K);

	/* Fast case, preallocated GGTT view? */
	if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) {
		vma->node = bo->ggtt_node[tile0->id];
		return 0;
	}

	if (view->type == I915_GTT_VIEW_NORMAL)
		size = xe_bo_size(bo);
	else
		/* display uses tiles instead of bytes here, so convert it back.. */
		size = intel_rotation_info_size(&view->rotated) * XE_PAGE_SIZE;

	pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
	vma->node = xe_ggtt_insert_node_transform(ggtt, bo, pte,
						  ALIGN(size, align), align,
						  view->type == I915_GTT_VIEW_NORMAL ?
						  NULL : write_ggtt_rotated_node,
						  &(struct fb_rotate_args){view, bo});
	if (IS_ERR(vma->node))
		ret = PTR_ERR(vma->node);

	return ret;
}

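/*
 * Common pinning path: validate and pin the backing bo (migrating it to
 * VRAM on discrete devices), then map it through either a DPT or the GGTT.
 */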
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
					const struct i915_gtt_view *view,
					unsigned int alignment)
{
	struct drm_device *dev = fb->base.dev;
	struct xe_device *xe = to_xe_device(dev);
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	if (!vma)
		return ERR_PTR(-ENODEV);

	refcount_set(&vma->ref, 1);
	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
		struct xe_vram_region *vram = xe_device_get_root_tile(xe)->mem.vram;

		/*
		 * If we need to be able to access the clear-color value stored
		 * in the buffer, then we require that such buffers are also
		 * CPU accessible. This is important on small-bar systems where
		 * only some subset of VRAM is CPU accessible.
		 */
		if (xe_vram_region_io_size(vram) < xe_vram_region_usable_size(vram)) {
			ret = -EINVAL;
			goto err;
		}
	}

	/*
	 * Pin the framebuffer, we can't use xe_bo_(un)pin functions as the
	 * assumptions are incorrect for framebuffers
	 */
	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
			    ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		if (IS_DGFX(xe))
			ret = xe_bo_migrate(bo, XE_PL_VRAM0, NULL, &exec);
		else
			ret = xe_bo_validate(bo, NULL, true, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
		if (!ret)
			ttm_bo_pin(&bo->ttm);
	}
	if (ret)
		goto err;

	vma->bo = bo;
	if (intel_fb_uses_dpt(&fb->base))
		ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
	else
		ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
	if (ret)
		goto err_unpin;

	return vma;

err_unpin:
	ttm_bo_reserve(&bo->ttm, false, false, NULL);
	ttm_bo_unpin(&bo->ttm);
	ttm_bo_unreserve(&bo->ttm);
err:
	kfree(vma);
	return ERR_PTR(ret);
}

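/*
 * Drop one reference to a pinned framebuffer vma; the last reference tears
 * down the DPT or GGTT mapping and unpins the backing bo.
 */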
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
	u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id;

	if (!refcount_dec_and_test(&vma->ref))
		return;

	if (vma->dpt)
		xe_bo_unpin_map_no_vm(vma->dpt);
	else if (vma->bo->ggtt_node[tile_id] != vma->node)
		xe_ggtt_node_remove(vma->node, false);

	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
	ttm_bo_unpin(&vma->bo->ttm);
	ttm_bo_unreserve(&vma->bo->ttm);
	kfree(vma);
}

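/*
 * Display entry point for pinning; on xe the phys/vtd/fence arguments are
 * unused and no pin flags are returned.
 */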
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
		     const struct i915_gtt_view *view,
		     unsigned int alignment,
		     unsigned int phys_alignment,
		     unsigned int vtd_guard,
		     bool uses_fence,
		     unsigned long *out_flags)
{
	*out_flags = 0;

	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, alignment);
}

void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
{
	__xe_unpin_fb_vma(vma);
}

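/*
 * Try to reuse an already pinned vma, either from the old plane state (same
 * fb and GTT view) or from fbdev, instead of pinning the framebuffer again.
 */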
static bool reuse_vma(struct intel_plane_state *new_plane_state,
		      const struct intel_plane_state *old_plane_state)
{
	struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct intel_display *display = xe->display;
	struct i915_vma *vma;

	if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
	    !memcmp(&old_plane_state->view.gtt,
		    &new_plane_state->view.gtt,
		    sizeof(new_plane_state->view.gtt))) {
		vma = old_plane_state->ggtt_vma;
		goto found;
	}

	if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
		vma = intel_fbdev_vma_pointer(display->fbdev.fbdev);
		if (vma)
			goto found;
	}

	return false;

found:
	refcount_inc(&vma->ref);
	new_plane_state->ggtt_vma = vma;

	new_plane_state->surf = xe_ggtt_node_addr(new_plane_state->ggtt_vma->node) +
		plane->surf_offset(new_plane_state);

	return true;
}

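/*
 * Pin the framebuffer for a plane update, reusing the previous mapping when
 * possible, and compute the plane's surface address.
 */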
int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
		       const struct intel_plane_state *old_plane_state)
{
	struct drm_framebuffer *fb = new_plane_state->hw.fb;
	struct drm_gem_object *obj = intel_fb_bo(fb);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct i915_vma *vma;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	unsigned int alignment = plane->min_alignment(plane, fb, 0);

	if (reuse_vma(new_plane_state, old_plane_state))
		return 0;

	/* We reject creating !SCANOUT fb's, so this is weird.. */
	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));

	vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	new_plane_state->ggtt_vma = vma;

	new_plane_state->surf = xe_ggtt_node_addr(new_plane_state->ggtt_vma->node) +
		plane->surf_offset(new_plane_state);

	return 0;
}

void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	__xe_unpin_fb_vma(old_plane_state->ggtt_vma);
	old_plane_state->ggtt_vma = NULL;
}

void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
{
	*map = vma->bo->vmap;
}