// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_ggtt.h"

#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
#include "xe_tile_printk.h"
#include "xe_tile_sriov_vf.h"
#include "xe_tlb_inval.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/**
 * DOC: Global Graphics Translation Table (GGTT)
 *
 * Xe GGTT implements support for a Global Virtual Address space that is used
 * for resources that are accessible to privileged (i.e. kernel-mode) processes,
 * and not tied to a specific user-level process. For example, the Graphics
 * micro-Controller (GuC) and Display Engine (if present) utilize this Global
 * address space.
 *
 * The Global GTT (GGTT) translates from the Global virtual address to a physical
 * address that can be accessed by HW. The GGTT is a flat, single-level table.
 *
 * Xe implements a simplified version of the GGTT, specifically managing only the
 * range that goes from the top of the Write Once Protected Content Memory (WOPCM)
 * to a predefined GUC_GGTT_TOP. This approach avoids complications related to
 * GuC hardware limitations. The GuC address space is limited on both ends of
 * the GGTT, because the GuC shim HW redirects accesses to those addresses to
 * other HW areas instead of going through the GGTT. On the bottom end, the GuC
 * can't access offsets below the WOPCM size, while on the top side the limit is
 * fixed at GUC_GGTT_TOP. To keep things simple, instead of checking each object
 * to see if it is accessed by the GuC or not, we just exclude those areas from
 * the allocator. Additionally, to simplify driver load, we use the maximum WOPCM
 * size in this logic instead of the programmed one, so we don't need to wait
 * until the actual programmed size is determined (which requires a FW fetch)
 * before initializing the GGTT. These simplifications might waste space in the
 * GGTT (about 20-25 MiB depending on the platform) but we can live with this.
 * Another benefit is that the GuC bootrom can't access anything below the WOPCM
 * max size, so anything the bootrom needs to access (e.g. an RSA key) must be
 * placed in the GGTT above the WOPCM max size. Starting the GGTT allocations
 * above the WOPCM max size gives us the correct placement for free.
 */
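/*
 * Illustrative layout of the managed range (example values only; the actual
 * WOPCM max size and GGTT end are platform-dependent):
 *
 *	0 .. WOPCM max			excluded (GuC shim redirects these accesses)
 *	WOPCM max .. GUC_GGTT_TOP	managed by the drm_mm allocator below
 *	GUC_GGTT_TOP .. GGTT end	excluded (GuC shim redirects these accesses)
 */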

/**
 * struct xe_ggtt_node - A node in GGTT.
 *
 * This struct needs to be initialized (only once) with xe_ggtt_node_init()
 * before any node insertion, reservation, or 'ballooning'.
 * It will then be finalized by either xe_ggtt_node_remove() or
 * xe_ggtt_node_remove_balloon_locked().
 */
struct xe_ggtt_node {
	/** @ggtt: Back pointer to the xe_ggtt where this node will be inserted */
	struct xe_ggtt *ggtt;
	/** @base: A drm_mm_node */
	struct drm_mm_node base;
	/** @delayed_removal_work: The work struct for the delayed removal */
	struct work_struct delayed_removal_work;
	/** @invalidate_on_remove: If it needs invalidation upon removal */
	bool invalidate_on_remove;
};

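/*
 * Encode the non-address PTE bits for pre-Xe_LPG platforms: mark the entry
 * present and, for objects in VRAM or stolen device memory, set the
 * device-memory bit. The pat_index doesn't affect this encoding.
 */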
static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
	u64 pte = XE_PAGE_PRESENT;

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_GGTT_PTE_DM;

	return pte;
}

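/*
 * Xe_LPG additionally encodes a 2-bit PAT index directly in the PTE, hence
 * the assert that pat_index fits in two bits.
 */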
static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xelp_ggtt_pte_flags(bo, pat_index);

	xe_assert(xe, pat_index <= 3);

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;

	return pte;
}

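/*
 * Read the GGMS (GGTT Memory Size) field from the GMCH control register in
 * PCI config space; the encoded value selects a power-of-two GSM size in MiB,
 * with 0 meaning no GSM was preallocated.
 */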
static unsigned int probe_gsm_size(struct pci_dev *pdev)
{
	u16 gmch_ctl, ggms;

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
	ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
	return ggms ? SZ_1M << ggms : 0;
}

static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
	struct xe_tile *tile = ggtt->tile;
	struct xe_gt *affected_gt;
	u32 max_gtt_writes;

	if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 22019338487)) {
		affected_gt = tile->primary_gt;
		max_gtt_writes = 1100;

		/* Only expected to apply to primary GT on dgpu platforms */
		xe_tile_assert(tile, IS_DGFX(tile_to_xe(tile)));
	} else {
		affected_gt = tile->media_gt;
		max_gtt_writes = 63;

		/* Only expected to apply to media GT on igpu platforms */
		xe_tile_assert(tile, !IS_DGFX(tile_to_xe(tile)));
	}

	/*
	 * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
	 * to wait for completion of prior GTT writes before letting this through.
	 * This needs to be done for all GGTT writes originating from the CPU.
	 */
	lockdep_assert_held(&ggtt->lock);

	if ((++ggtt->access_count % max_gtt_writes) == 0) {
		xe_mmio_write32(&affected_gt->mmio, GMD_ID, 0x0);
		ggtt->access_count = 0;
	}
}

/**
 * xe_ggtt_start - Get starting offset of GGTT.
 * @ggtt: &xe_ggtt
 *
 * Returns: Starting offset for this &xe_ggtt.
 */
u64 xe_ggtt_start(struct xe_ggtt *ggtt)
{
	return ggtt->start;
}

/**
 * xe_ggtt_size - Get size of GGTT.
 * @ggtt: &xe_ggtt
 *
 * Returns: Total usable size of this &xe_ggtt.
 */
u64 xe_ggtt_size(struct xe_ggtt *ggtt)
{
	return ggtt->size;
}

static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->start + ggtt->size);

	writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}

static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_ggtt_set_pte(ggtt, addr, pte);
	ggtt_update_access_counter(ggtt);
}

static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->size);

	return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]);
}

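/*
 * Point every PTE in [start, start + size) at the scratch page, or zero the
 * PTEs when no scratch page exists yet (as during early init).
 */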
static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
	u64 end = start + size - 1;
	u64 scratch_pte;

	xe_tile_assert(ggtt->tile, start < end);

	if (ggtt->scratch)
		scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
			      ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
							     pat_index);
	else
		scratch_pte = 0;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
		start += XE_PAGE_SIZE;
	}
}

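/*
 * Record up front that ggtt->lock may be taken in the memory-reclaim path
 * (the removal work runs on a WQ_MEM_RECLAIM workqueue), so lockdep can
 * report ordering violations at init time instead of on a rare runtime path.
 */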
static void primelockdep(struct xe_ggtt *ggtt)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ggtt->lock);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
 * @tile: &xe_tile
 *
 * Allocates a &xe_ggtt for a given tile.
 *
 * Return: &xe_ggtt on success, or NULL when out of memory.
 */
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_ggtt *ggtt;

	ggtt = drmm_kzalloc(&xe->drm, sizeof(*ggtt), GFP_KERNEL);
	if (!ggtt)
		return NULL;

	if (drmm_mutex_init(&xe->drm, &ggtt->lock))
		return NULL;

	primelockdep(ggtt);
	ggtt->tile = tile;

	return ggtt;
}

static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
	struct xe_ggtt *ggtt = arg;

	destroy_workqueue(ggtt->wq);
	drm_mm_takedown(&ggtt->mm);
}

static void ggtt_fini(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	ggtt->scratch = NULL;
}

#ifdef CONFIG_LOCKDEP
void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
{
	might_lock(&ggtt->lock);
}
#endif

static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_flags = xelp_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
	.ggtt_get_pte = xe_ggtt_get_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
	.ggtt_get_pte = xe_ggtt_get_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
	.ggtt_get_pte = xe_ggtt_get_pte,
};

static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	ggtt->start = start;
	ggtt->size = size;
	drm_mm_init(&ggtt->mm, start, size);
}

int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 start, u32 size)
{
	__xe_ggtt_init_early(ggtt, start, size);
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);

static void dev_fini_ggtt(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	drain_workqueue(ggtt->wq);
}

/**
 * xe_ggtt_init_early - Early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * This makes it possible to create new mappings usable by the GuC.
 * Mappings are not usable by the HW engines, as the GGTT doesn't have its
 * scratch page set up nor its initial clear done yet. That will happen in the
 * regular, non-early GGTT initialization.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int gsm_size;
	u64 ggtt_start, wopcm = xe_wopcm_size(xe), ggtt_size;
	int err;

	if (!IS_SRIOV_VF(xe)) {
		if (GRAPHICS_VERx100(xe) >= 1250)
			gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
		else
			gsm_size = probe_gsm_size(pdev);
		if (gsm_size == 0) {
			xe_tile_err(ggtt->tile, "Hardware reported no preallocated GSM\n");
			return -ENOMEM;
		}
		ggtt_start = wopcm;
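		/* Each 8-byte PTE in the GSM maps one XE_PAGE_SIZE page */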
		ggtt_size = (gsm_size / 8) * (u64)XE_PAGE_SIZE - ggtt_start;
	} else {
		/* GGTT is expected to be 4GiB */
		ggtt_start = wopcm;
		ggtt_size = SZ_4G - ggtt_start;
	}

	ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		ggtt->flags |= XE_GGTT_FLAGS_64K;

	if (ggtt_size + ggtt_start > GUC_GGTT_TOP)
		ggtt_size = GUC_GGTT_TOP - ggtt_start;

	if (GRAPHICS_VERx100(xe) >= 1270)
		ggtt->pt_ops =
			(ggtt->tile->media_gt && XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
			(ggtt->tile->primary_gt && XE_GT_WA(ggtt->tile->primary_gt, 22019338487)) ?
			&xelpg_pt_wa_ops : &xelpg_pt_ops;
	else
		ggtt->pt_ops = &xelp_pt_ops;

	ggtt->wq = alloc_workqueue("xe-ggtt-wq", WQ_MEM_RECLAIM, 0);
	if (!ggtt->wq)
		return -ENOMEM;

	__xe_ggtt_init_early(ggtt, ggtt_start, ggtt_size);

	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
	if (err)
		return err;

	err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
	if (err)
		return err;

	if (IS_SRIOV_VF(xe)) {
		err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
		if (err)
			return err;
	}

	return 0;
}
ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);

static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
	struct drm_mm_node *hole;
	u64 start, end;

	/* Display may have allocated inside ggtt, so be careful with clearing here */
	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
		xe_ggtt_clear(ggtt, start, end - start);

	xe_ggtt_invalidate(ggtt);
	mutex_unlock(&ggtt->lock);
}

static void ggtt_node_remove(struct xe_ggtt_node *node)
{
	struct xe_ggtt *ggtt = node->ggtt;
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	bool bound;
	int idx;

	bound = drm_dev_enter(&xe->drm, &idx);

	mutex_lock(&ggtt->lock);
	if (bound)
		xe_ggtt_clear(ggtt, node->base.start, node->base.size);
	drm_mm_remove_node(&node->base);
	node->base.size = 0;
	mutex_unlock(&ggtt->lock);

	if (!bound)
		goto free_node;

	if (node->invalidate_on_remove)
		xe_ggtt_invalidate(ggtt);

	drm_dev_exit(idx);

free_node:
	xe_ggtt_node_fini(node);
}

static void ggtt_node_remove_work_func(struct work_struct *work)
{
	struct xe_ggtt_node *node = container_of(work, typeof(*node),
						 delayed_removal_work);
	struct xe_device *xe = tile_to_xe(node->ggtt->tile);

	guard(xe_pm_runtime)(xe);
	ggtt_node_remove(node);
}

/**
 * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
 * @node: the &xe_ggtt_node to be removed
 * @invalidate: if node needs invalidation upon removal
 */
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
{
	struct xe_ggtt *ggtt;
	struct xe_device *xe;

	if (!node || !node->ggtt)
		return;

	ggtt = node->ggtt;
	xe = tile_to_xe(ggtt->tile);

	node->invalidate_on_remove = invalidate;

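	/*
	 * If the device isn't runtime-active, defer the removal (and the
	 * TLB invalidation) to the dedicated workqueue instead of waking
	 * the device up here.
	 */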
	if (xe_pm_runtime_get_if_active(xe)) {
		ggtt_node_remove(node);
		xe_pm_runtime_put(xe);
	} else {
		queue_work(ggtt->wq, &node->delayed_removal_work);
	}
}

/**
 * xe_ggtt_init - Regular non-early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	unsigned int flags;
	int err;

	/*
	 * So we don't need to worry about 64K GGTT layout when dealing with
	 * scratch entries, rather keep the scratch page in system memory on
	 * platforms where 64K pages are needed for VRAM.
	 */
	flags = 0;
	if (ggtt->flags & XE_GGTT_FLAGS_64K)
		flags |= XE_BO_FLAG_SYSTEM;
	else
		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);

	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
	if (IS_ERR(ggtt->scratch)) {
		err = PTR_ERR(ggtt->scratch);
		goto err;
	}

	xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch));

	xe_ggtt_initial_clear(ggtt);

	return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
	ggtt->scratch = NULL;
	return err;
}

static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
	int err;

	if (!gt)
		return;

	err = xe_tlb_inval_ggtt(&gt->tlb_inval);
	xe_gt_WARN(gt, err, "Failed to invalidate GGTT (%pe)", ERR_PTR(err));
}

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);

	/*
	 * XXX: Barrier for GGTT pages. Unsure exactly why this is required, but
	 * without it LNL has issues with the GuC reading the scratch page
	 * instead of the correct GGTT page. Not particularly a hot code path,
	 * so blindly do a mmio read here, which results in the GuC reading the
	 * correct GGTT page.
	 */
	xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);

	/* Each GT in a tile has its own TLB to cache GGTT lookups */
	ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
	ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}

static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
			      const struct drm_mm_node *node, const char *description)
{
	char buf[10];

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
		xe_tile_dbg(ggtt->tile, "GGTT %#llx-%#llx (%s) %s\n",
			    node->start, node->start + node->size, buf, description);
	}
}

/**
 * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
 * @node: the &xe_ggtt_node to hold reserved GGTT node
 * @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
 *
 * To be used in cases where ggtt->lock is already taken.
 * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
	struct xe_ggtt *ggtt = node->ggtt;
	int err;

	xe_tile_assert(ggtt->tile, start < end);
	xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
	lockdep_assert_held(&ggtt->lock);

	node->base.color = 0;
	node->base.start = start;
	node->base.size = end - start;

	err = drm_mm_reserve_node(&ggtt->mm, &node->base);

	if (xe_tile_WARN(ggtt->tile, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
			 node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
		return err;

	xe_ggtt_dump_node(ggtt, &node->base, "balloon");
	return 0;
}

/**
 * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
 * @node: the &xe_ggtt_node with reserved GGTT region
 *
 * To be used in cases where ggtt->lock is already taken.
 * See xe_ggtt_node_insert_balloon_locked() for details.
 */
void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
	if (!xe_ggtt_node_allocated(node))
		return;

	lockdep_assert_held(&node->ggtt->lock);

	xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");

	drm_mm_remove_node(&node->base);
}

static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	struct xe_tile *tile = ggtt->tile;

	xe_tile_assert(tile, start >= ggtt->start);
	xe_tile_assert(tile, start + size <= ggtt->start + ggtt->size);
}

/**
 * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
 * @ggtt: the &xe_ggtt struct instance
 * @shift: change to the location of area provisioned for current VF
 *
 * This function moves all nodes from the GGTT VM to a temporary list. These nodes
 * are expected to represent allocations in the range formerly assigned to the
 * current VF, before the range changed. When the GGTT VM is completely clear of
 * any nodes, they are re-added with shifted offsets.
 *
 * This function cannot fail, because it shifts existing nodes without any
 * additional processing. If the nodes could exist at the old addresses, they will
 * do the same at the new ones. A failure inside this function would indicate that
 * the list of nodes was either already damaged, or that the shift brings the
 * address range outside of valid bounds. Both cases justify an assert rather than
 * an error code.
 */
void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
{
	struct xe_tile *tile __maybe_unused = ggtt->tile;
	struct drm_mm_node *node, *tmpn;
	LIST_HEAD(temp_list_head);

	lockdep_assert_held(&ggtt->lock);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
			xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);

	drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
		drm_mm_remove_node(node);
		list_add(&node->node_list, &temp_list_head);
	}

	list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
		list_del(&node->node_list);
		node->start += shift;
		drm_mm_reserve_node(&ggtt->mm, node);
		xe_tile_assert(tile, drm_mm_node_allocated(node));
	}
}

static int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
				      u32 size, u32 align, u32 mm_flags)
{
	return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
					  mm_flags);
}

/**
 * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
	int ret;

	if (!node || !node->ggtt)
		return -ENOENT;

	mutex_lock(&node->ggtt->lock);
	ret = xe_ggtt_node_insert_locked(node, size, align,
					 DRM_MM_INSERT_HIGH);
	mutex_unlock(&node->ggtt->lock);

	return ret;
}

/**
 * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
 * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
 *
 * This function will allocate the struct %xe_ggtt_node and return its pointer.
 * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
 * or xe_ggtt_node_remove_balloon_locked().
 *
 * Having %xe_ggtt_node struct allocated doesn't mean that the node is already
 * allocated in GGTT. Only xe_ggtt_node_insert(), allocation through
 * xe_ggtt_node_insert_transform(), or xe_ggtt_node_insert_balloon_locked()
 * will ensure the node is inserted or reserved in GGTT.
 *
 * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
 */
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
{
	struct xe_ggtt_node *node = kzalloc_obj(*node, GFP_NOFS);

	if (!node)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
	node->ggtt = ggtt;

	return node;
}

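/*
 * Typical lifecycle (sketch):
 *
 *	node = xe_ggtt_node_init(ggtt);
 *	err = xe_ggtt_node_insert(node, size, align);
 *	...
 *	xe_ggtt_node_remove(node, true);  (also frees the struct)
 *
 * If insertion fails and the node won't be reused, the caller must free the
 * struct with xe_ggtt_node_fini() instead.
 */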
/**
 * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
 * @node: the &xe_ggtt_node to be freed
 *
 * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
 * or xe_ggtt_node_insert_balloon_locked(), and this @node is not going to be reused, then
 * this function needs to be called to free the %xe_ggtt_node struct.
 */
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
{
	kfree(node);
}

/**
 * xe_ggtt_node_allocated - Check if node is allocated in GGTT
 * @node: the &xe_ggtt_node to be inspected
 *
 * Return: True if allocated, False otherwise.
 */
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return false;

	return drm_mm_node_allocated(&node->base);
}

/**
 * xe_ggtt_node_pt_size() - Get the size of page table entries needed to map a GGTT node.
 * @node: the &xe_ggtt_node
 *
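 * For example, with 4 KiB GGTT pages, a 2 MiB node needs
 * (2 MiB / 4 KiB) * 8 = 4 KiB of PTE storage.
 *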
 * Return: GGTT node page table entries size in bytes.
 */
size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node)
{
	if (!node)
		return 0;

	return node->base.size / XE_PAGE_SIZE * sizeof(u64);
}

/**
 * xe_ggtt_map_bo - Map the BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @node: the &xe_ggtt_node where this BO is mapped
 * @bo: the &xe_bo to be mapped
 * @pte: The pte flags to append.
 */
static void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
			   struct xe_bo *bo, u64 pte)
{
	u64 start, end;
	struct xe_res_cursor cur;

	if (XE_WARN_ON(!node))
		return;

	start = node->base.start;
	end = start + xe_bo_size(bo);

	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe_bo_device(bo), bo->ttm.ttm);

		for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
						   pte | xe_res_dma(&cur));
	} else {
		/* Prepend GPU offset */
		pte |= vram_region_gpu_offset(bo->ttm.resource);

		for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
						   pte + cur.start);
	}
}

/**
 * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @bo: the &xe_bo to be mapped
 *
 * This is used to restore a GGTT mapping after suspend.
 */
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
	u64 pte;

	mutex_lock(&ggtt->lock);
	pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
	xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pte);
	mutex_unlock(&ggtt->lock);
}

/**
 * xe_ggtt_node_insert_transform - Insert a newly allocated &xe_ggtt_node into the GGTT
 * @ggtt: the &xe_ggtt where the node will be inserted/reserved.
 * @bo: The bo to be transformed
 * @pte_flags: The extra GGTT flags to add to mapping.
 * @size: size of the node
 * @align: required alignment for node
 * @transform: transformation function that will populate the GGTT node, or NULL for linear mapping.
 * @arg: Extra argument to pass to the transformation function.
 *
 * This function allows inserting a GGTT node with a custom transformation function.
 * This is useful for display to allow inserting rotated framebuffers to GGTT.
 *
 * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
 */
struct xe_ggtt_node *xe_ggtt_node_insert_transform(struct xe_ggtt *ggtt,
						   struct xe_bo *bo, u64 pte_flags,
						   u64 size, u32 align,
						   xe_ggtt_transform_cb transform, void *arg)
{
	struct xe_ggtt_node *node;
	int ret;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return ERR_CAST(node);

	if (mutex_lock_interruptible(&ggtt->lock) < 0) {
		ret = -ERESTARTSYS;
		goto err;
	}

	ret = xe_ggtt_node_insert_locked(node, size, align, 0);
	if (ret)
		goto err_unlock;

	if (transform)
		transform(ggtt, node, pte_flags, ggtt->pt_ops->ggtt_set_pte, arg);
	else
		xe_ggtt_map_bo(ggtt, node, bo, pte_flags);

	mutex_unlock(&ggtt->lock);
	return node;

err_unlock:
	mutex_unlock(&ggtt->lock);
err:
	xe_ggtt_node_fini(node);
	return ERR_PTR(ret);
}

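/*
 * Validate the BO, allocate a node for it in [start, end), write its PTEs
 * and, when the BO requires it, invalidate the GT TLBs. A BO that already
 * has a node in this tile's GGTT is treated as success.
 */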
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
				  u64 start, u64 end, struct drm_exec *exec)
{
	u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
	u8 tile_id = ggtt->tile->id;
	int err;

	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
		/* Someone's already inserted this BO in the GGTT */
		xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
		return 0;
	}

	err = xe_bo_validate(bo, NULL, false, exec);
	if (err)
		return err;

	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));

	bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
	if (IS_ERR(bo->ggtt_node[tile_id])) {
		err = PTR_ERR(bo->ggtt_node[tile_id]);
		bo->ggtt_node[tile_id] = NULL;
		goto out;
	}

	mutex_lock(&ggtt->lock);
	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
					  xe_bo_size(bo), alignment, 0, start, end, 0);
	if (err) {
		xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
		bo->ggtt_node[tile_id] = NULL;
	} else {
		u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
		u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
		u64 pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);

		xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pte);
	}
	mutex_unlock(&ggtt->lock);

	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
		xe_ggtt_invalidate(ggtt);

out:
	xe_pm_runtime_put(tile_to_xe(ggtt->tile));

	return err;
}

/**
 * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 * @start: address where it will be inserted
 * @end: end of the range where it will be inserted
 * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end, struct drm_exec *exec)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end, exec);
}

/**
 * xe_ggtt_insert_bo - Insert BO into GGTT
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo,
		      struct drm_exec *exec)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, exec);
}

/**
 * xe_ggtt_remove_bo - Remove a BO from the GGTT
 * @ggtt: the &xe_ggtt where node will be removed
 * @bo: the &xe_bo to be removed
 */
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u8 tile_id = ggtt->tile->id;

	/* This BO is not currently in the GGTT */
	if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
		return;

	xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));

	xe_ggtt_node_remove(bo->ggtt_node[tile_id],
			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}

/**
 * xe_ggtt_largest_hole - Largest GGTT hole
 * @ggtt: the &xe_ggtt that will be inspected
 * @alignment: minimum alignment
 * @spare: If not NULL: in: desired memory size to be spared / out: Adjusted possible spare
 *
 * Return: size of the largest continuous GGTT region
 */
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, ggtt->start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		if (spare)
			*spare -= min3(*spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	return max_hole;
}

#ifdef CONFIG_PCI_IOV
static u64 xe_encode_vfid_pte(u16 vfid)
{
	return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
}

static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
{
	u64 start = node->start;
	u64 size = node->size;
	u64 end = start + size - 1;
	u64 pte = xe_encode_vfid_pte(vfid);

	lockdep_assert_held(&ggtt->lock);

	if (!drm_mm_node_allocated(node))
		return;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
		start += XE_PAGE_SIZE;
	}

	xe_ggtt_invalidate(ggtt);
}

/**
 * xe_ggtt_assign - assign a GGTT region to the VF
 * @node: the &xe_ggtt_node to update
 * @vfid: the VF identifier
 *
 * This function is used by the PF driver to assign a GGTT region to the VF.
 * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
 * on some platforms VFs can't modify that either.
 */
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
	mutex_lock(&node->ggtt->lock);
	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
	mutex_unlock(&node->ggtt->lock);
}

/**
 * xe_ggtt_node_save() - Save a &xe_ggtt_node to a buffer.
 * @node: the &xe_ggtt_node to be saved
 * @dst: destination buffer
 * @size: destination buffer size in bytes
 * @vfid: VF identifier
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid)
{
	struct xe_ggtt *ggtt;
	u64 start, end;
	u64 *buf = dst;
	u64 pte;

	if (!node)
		return -ENOENT;

	guard(mutex)(&node->ggtt->lock);

	if (xe_ggtt_node_pt_size(node) != size)
		return -EINVAL;

	ggtt = node->ggtt;
	start = node->base.start;
	end = start + node->base.size - 1;

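	/* Only PTEs owned by @vfid may be saved; strip the VFID on the way out */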
	while (start < end) {
		pte = ggtt->pt_ops->ggtt_get_pte(ggtt, start);
		if (vfid != u64_get_bits(pte, GGTT_PTE_VFID))
			return -EPERM;

		*buf++ = u64_replace_bits(pte, 0, GGTT_PTE_VFID);
		start += XE_PAGE_SIZE;
	}

	return 0;
}

/**
 * xe_ggtt_node_load() - Load a &xe_ggtt_node from a buffer.
 * @node: the &xe_ggtt_node to be loaded
 * @src: source buffer
 * @size: source buffer size in bytes
 * @vfid: VF identifier
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid)
{
	u64 vfid_pte = xe_encode_vfid_pte(vfid);
	const u64 *buf = src;
	struct xe_ggtt *ggtt;
	u64 start, end;

	if (!node)
		return -ENOENT;

	guard(mutex)(&node->ggtt->lock);

	if (xe_ggtt_node_pt_size(node) != size)
		return -EINVAL;

	ggtt = node->ggtt;
	start = node->base.start;
	end = start + node->base.size - 1;

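	/* Re-stamp this VF's ID into each saved PTE as it is written back */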
	while (start < end) {
		vfid_pte = u64_replace_bits(*buf++, vfid, GGTT_PTE_VFID);
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, vfid_pte);
		start += XE_PAGE_SIZE;
	}
	xe_ggtt_invalidate(ggtt);

	return 0;
}

#endif

/**
 * xe_ggtt_dump - Dump GGTT for debug
 * @ggtt: the &xe_ggtt to be dumped
 * @p: the &drm_printer helper handle to be used to dump the information
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->lock);
	if (err)
		return err;

	drm_mm_print(&ggtt->mm, p);
	mutex_unlock(&ggtt->lock);
	return err;
}

/**
 * xe_ggtt_print_holes - Print holes
 * @ggtt: the &xe_ggtt to be inspected
 * @alignment: min alignment
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available and return total size available.
 *
 * Return: Total available size.
 */
u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_start, hole_end, hole_size;
	u64 total = 0;
	char buf[10];

	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, ggtt->start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);

	return total;
}

/**
 * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
 * @ggtt: &xe_ggtt
 * @bo: &xe_bo
 * @pat_index: The pat_index for the PTE.
 *
 * This function returns the pte_flags for a given BO, without the address.
 * It's used for DPT to fill a GGTT mapped BO with a linear lookup table.
 */
u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
			     struct xe_bo *bo, u16 pat_index)
{
	return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
}

/**
 * xe_ggtt_read_pte - Read a PTE from the GGTT
 * @ggtt: &xe_ggtt
 * @offset: the offset for which the mapping should be read.
 *
 * Used by testcases, and by display reading out an inherited bios FB.
 */
u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
{
	return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
}

/**
 * xe_ggtt_node_addr - Get @node offset in GGTT.
 * @node: &xe_ggtt_node
 *
 * Get the GGTT offset for allocated node.
 */
u64 xe_ggtt_node_addr(const struct xe_ggtt_node *node)
{
	return node->base.start;
}

/**
 * xe_ggtt_node_size - Get @node allocation size.
 * @node: &xe_ggtt_node
 *
 * Get the allocated node's size.
 */
u64 xe_ggtt_node_size(const struct xe_ggtt_node *node)
{
	return node->base.size;
}