xref: /linux/drivers/gpu/drm/xe/xe_ggtt.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_ggtt.h"
7 
8 #include <kunit/visibility.h>
9 #include <linux/fault-inject.h>
10 #include <linux/io-64-nonatomic-lo-hi.h>
11 #include <linux/sizes.h>
12 
13 #include <drm/drm_drv.h>
14 #include <drm/drm_managed.h>
15 #include <drm/intel/i915_drm.h>
16 #include <generated/xe_wa_oob.h>
17 
18 #include "regs/xe_gt_regs.h"
19 #include "regs/xe_gtt_defs.h"
20 #include "regs/xe_regs.h"
21 #include "xe_assert.h"
22 #include "xe_bo.h"
23 #include "xe_device.h"
24 #include "xe_gt.h"
25 #include "xe_gt_printk.h"
26 #include "xe_map.h"
27 #include "xe_mmio.h"
28 #include "xe_pm.h"
29 #include "xe_res_cursor.h"
30 #include "xe_sriov.h"
31 #include "xe_tile_printk.h"
32 #include "xe_tile_sriov_vf.h"
33 #include "xe_tlb_inval.h"
34 #include "xe_wa.h"
35 #include "xe_wopcm.h"
36 
37 /**
38  * DOC: Global Graphics Translation Table (GGTT)
39  *
40  * Xe GGTT implements support for a Global Virtual Address space that is used
41  * for resources that are accessible to privileged (i.e. kernel-mode) processes
42  * and are not tied to a specific user-level process. For example, the Graphics
43  * micro-Controller (GuC) and Display Engine (if present) utilize this Global
44  * address space.
45  *
46  * The Global GTT (GGTT) translates from the Global virtual address to a physical
47  * address that can be accessed by HW. The GGTT is a flat, single-level table.
48  *
49  * Xe implements a simplified version of the GGTT specifically managing only a
50  * certain range of it that goes from the Write Once Protected Content Memory (WOPCM)
51  * Layout to a predefined GUC_GGTT_TOP. This approach avoids complications related to
52  * the GuC (Graphics Microcontroller) hardware limitations. The GuC address space
53  * is limited on both ends of the GGTT, because the GuC shim HW redirects
54  * accesses to those addresses to other HW areas instead of going through the
55  * GGTT. On the bottom end, the GuC can't access offsets below the WOPCM size,
56  * while on the top side the limit is fixed at GUC_GGTT_TOP. To keep things
57  * simple, instead of checking each object to see if it is accessed by the GuC
58  * or not, we just exclude those areas from the allocator. Additionally, to simplify
59  * the driver load, we use the maximum WOPCM size in this logic instead of the
60  * programmed one, so we don't need to wait until the actual size to be
61  * programmed is determined (which requires a FW fetch) before initializing the
62  * GGTT. These simplifications might waste space in the GGTT (about 20-25 MB
63  * depending on the platform), but we can live with this. Another benefit is
64  * that the GuC bootrom can't access anything below the WOPCM max size, so anything
65  * the bootrom needs to access (e.g. an RSA key) needs to be placed in the GGTT
66  * above the WOPCM max size. Starting the GGTT allocations above the WOPCM max
67  * size gives us the correct placement for free.
68  */
69 
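/*
 * Illustrative sketch (not actual driver code): the range handed to the
 * allocator mirrors what xe_ggtt_init_early() sets up below, roughly:
 *
 *	u64 start = xe_wopcm_size(xe);
 *	u64 end = min_t(u64, ggtt->size, GUC_GGTT_TOP);
 *
 *	drm_mm_init(&ggtt->mm, start, end - start);
 *
 * where start is the maximum WOPCM size and end is the GuC-accessible top.
 * Anything the GuC bootrom must read (e.g. firmware signatures) therefore
 * lands above the WOPCM maximum without any per-object checks.
 */
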
70 static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
71 {
72 	u64 pte = XE_PAGE_PRESENT;
73 
74 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
75 		pte |= XE_GGTT_PTE_DM;
76 
77 	return pte;
78 }
79 
80 static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
81 {
82 	struct xe_device *xe = xe_bo_device(bo);
83 	u64 pte;
84 
85 	pte = xelp_ggtt_pte_flags(bo, pat_index);
86 
87 	xe_assert(xe, pat_index <= 3);
88 
89 	if (pat_index & BIT(0))
90 		pte |= XELPG_GGTT_PTE_PAT0;
91 
92 	if (pat_index & BIT(1))
93 		pte |= XELPG_GGTT_PTE_PAT1;
94 
95 	return pte;
96 }
97 
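/*
 * Worked example (illustrative): on XeLPG the 2-bit PAT index is spread over
 * two separate PTE bits, so for a system memory BO:
 *
 *	xelpg_ggtt_pte_flags(bo, 0) == XE_PAGE_PRESENT
 *	xelpg_ggtt_pte_flags(bo, 2) == XE_PAGE_PRESENT | XELPG_GGTT_PTE_PAT1
 *	xelpg_ggtt_pte_flags(bo, 3) == XE_PAGE_PRESENT | XELPG_GGTT_PTE_PAT0 |
 *				       XELPG_GGTT_PTE_PAT1
 *
 * A VRAM or stolen-devmem BO additionally carries XE_GGTT_PTE_DM.
 */
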
98 static unsigned int probe_gsm_size(struct pci_dev *pdev)
99 {
100 	u16 gmch_ctl, ggms;
101 
102 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
103 	ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
104 	return ggms ? SZ_1M << ggms : 0;
105 }
106 
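/*
 * Worked example (illustrative): a GGMS field of 3 yields SZ_1M << 3 = 8 MiB
 * of GSM. With 8-byte PTEs that is 1M entries, mapping 1M * XE_PAGE_SIZE =
 * 4 GiB of GGTT address space; see the (gsm_size / 8) * XE_PAGE_SIZE
 * computation in xe_ggtt_init_early() below.
 */
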
107 static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
108 {
109 	struct xe_tile *tile = ggtt->tile;
110 	struct xe_gt *affected_gt;
111 	u32 max_gtt_writes;
112 
113 	if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 22019338487)) {
114 		affected_gt = tile->primary_gt;
115 		max_gtt_writes = 1100;
116 
117 		/* Only expected to apply to primary GT on dgpu platforms */
118 		xe_tile_assert(tile, IS_DGFX(tile_to_xe(tile)));
119 	} else {
120 		affected_gt = tile->media_gt;
121 		max_gtt_writes = 63;
122 
123 		/* Only expected to apply to media GT on igpu platforms */
124 		xe_tile_assert(tile, !IS_DGFX(tile_to_xe(tile)));
125 	}
126 
127 	/*
128 	 * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
129 	 * to wait for completion of prior GTT writes before letting this through.
130 	 * This needs to be done for all GGTT writes originating from the CPU.
131 	 */
132 	lockdep_assert_held(&ggtt->lock);
133 
134 	if ((++ggtt->access_count % max_gtt_writes) == 0) {
135 		xe_mmio_write32(&affected_gt->mmio, GMD_ID, 0x0);
136 		ggtt->access_count = 0;
137 	}
138 }
139 
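/*
 * Illustrative cadence: with max_gtt_writes == 63, every 63rd CPU-issued PTE
 * write triggers one dummy GMD_ID write, forcing the gunit to wait for the
 * preceding GTT writes to complete before further ones are posted.
 */
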
140 static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
141 {
142 	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
143 	xe_tile_assert(ggtt->tile, addr < ggtt->size);
144 
145 	writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
146 }
147 
148 static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
149 {
150 	xe_ggtt_set_pte(ggtt, addr, pte);
151 	ggtt_update_access_counter(ggtt);
152 }
153 
154 static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
155 {
156 	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
157 	u64 end = start + size - 1;
158 	u64 scratch_pte;
159 
160 	xe_tile_assert(ggtt->tile, start < end);
161 
162 	if (ggtt->scratch)
163 		scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
164 			      ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
165 							     pat_index);
166 	else
167 		scratch_pte = 0;
168 
169 	while (start < end) {
170 		ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
171 		start += XE_PAGE_SIZE;
172 	}
173 }
174 
175 static void primelockdep(struct xe_ggtt *ggtt)
176 {
177 	if (!IS_ENABLED(CONFIG_LOCKDEP))
178 		return;
179 
180 	fs_reclaim_acquire(GFP_KERNEL);
181 	might_lock(&ggtt->lock);
182 	fs_reclaim_release(GFP_KERNEL);
183 }
184 
185 /**
186  * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
187  * @tile: &xe_tile
188  *
189  * Allocates a &xe_ggtt for a given tile.
190  *
191  * Return: &xe_ggtt on success, or NULL when out of memory.
192  */
193 struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
194 {
195 	struct xe_device *xe = tile_to_xe(tile);
196 	struct xe_ggtt *ggtt;
197 
198 	ggtt = drmm_kzalloc(&xe->drm, sizeof(*ggtt), GFP_KERNEL);
199 	if (!ggtt)
200 		return NULL;
201 
202 	if (drmm_mutex_init(&xe->drm, &ggtt->lock))
203 		return NULL;
204 
205 	primelockdep(ggtt);
206 	ggtt->tile = tile;
207 
208 	return ggtt;
209 }
210 
211 static void ggtt_fini_early(struct drm_device *drm, void *arg)
212 {
213 	struct xe_ggtt *ggtt = arg;
214 
215 	destroy_workqueue(ggtt->wq);
216 	drm_mm_takedown(&ggtt->mm);
217 }
218 
219 static void ggtt_fini(void *arg)
220 {
221 	struct xe_ggtt *ggtt = arg;
222 
223 	ggtt->scratch = NULL;
224 }
225 
226 #ifdef CONFIG_LOCKDEP
227 void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
228 {
229 	might_lock(&ggtt->lock);
230 }
231 #endif
232 
233 static const struct xe_ggtt_pt_ops xelp_pt_ops = {
234 	.pte_encode_flags = xelp_ggtt_pte_flags,
235 	.ggtt_set_pte = xe_ggtt_set_pte,
236 };
237 
238 static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
239 	.pte_encode_flags = xelpg_ggtt_pte_flags,
240 	.ggtt_set_pte = xe_ggtt_set_pte,
241 };
242 
243 static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
244 	.pte_encode_flags = xelpg_ggtt_pte_flags,
245 	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
246 };
247 
248 static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
249 {
250 	drm_mm_init(&ggtt->mm, reserved,
251 		    ggtt->size - reserved);
252 }
253 
254 int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
255 {
256 	ggtt->size = size;
257 	__xe_ggtt_init_early(ggtt, reserved);
258 	return 0;
259 }
260 EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);
261 
262 static void dev_fini_ggtt(void *arg)
263 {
264 	struct xe_ggtt *ggtt = arg;
265 
266 	drain_workqueue(ggtt->wq);
267 }
268 
269 /**
270  * xe_ggtt_init_early - Early GGTT initialization
271  * @ggtt: the &xe_ggtt to be initialized
272  *
273  * It allows the creation of new mappings usable by the GuC.
274  * Mappings are not usable by the HW engines yet, as the GGTT doesn't have a
275  * scratch page nor an initial clear done to it at this point. That will happen
276  * in the regular, non-early GGTT initialization.
277  *
278  * Return: 0 on success or a negative error code on failure.
279  */
280 int xe_ggtt_init_early(struct xe_ggtt *ggtt)
281 {
282 	struct xe_device *xe = tile_to_xe(ggtt->tile);
283 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
284 	unsigned int gsm_size;
285 	int err;
286 
287 	if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
288 		gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
289 	else
290 		gsm_size = probe_gsm_size(pdev);
291 
292 	if (gsm_size == 0) {
293 		xe_tile_err(ggtt->tile, "Hardware reported no preallocated GSM\n");
294 		return -ENOMEM;
295 	}
296 
297 	ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
298 	ggtt->size = (gsm_size / 8) * (u64) XE_PAGE_SIZE;
299 
300 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
301 		ggtt->flags |= XE_GGTT_FLAGS_64K;
302 
303 	if (ggtt->size > GUC_GGTT_TOP)
304 		ggtt->size = GUC_GGTT_TOP;
305 
306 	if (GRAPHICS_VERx100(xe) >= 1270)
307 		ggtt->pt_ops =
308 			(ggtt->tile->media_gt && XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
309 			(ggtt->tile->primary_gt && XE_GT_WA(ggtt->tile->primary_gt, 22019338487)) ?
310 			&xelpg_pt_wa_ops : &xelpg_pt_ops;
311 	else
312 		ggtt->pt_ops = &xelp_pt_ops;
313 
314 	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
315 	if (!ggtt->wq)
316 		return -ENOMEM;
317 
318 	__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
319 
320 	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
321 	if (err)
322 		return err;
323 
324 	err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
325 	if (err)
326 		return err;
327 
328 	if (IS_SRIOV_VF(xe)) {
329 		err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
330 		if (err)
331 			return err;
332 	}
333 
334 	return 0;
335 }
336 ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */
337 
338 static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
339 
340 static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
341 {
342 	struct drm_mm_node *hole;
343 	u64 start, end;
344 
345 	/* Display may have allocated inside ggtt, so be careful with clearing here */
346 	mutex_lock(&ggtt->lock);
347 	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
348 		xe_ggtt_clear(ggtt, start, end - start);
349 
350 	xe_ggtt_invalidate(ggtt);
351 	mutex_unlock(&ggtt->lock);
352 }
353 
354 static void ggtt_node_remove(struct xe_ggtt_node *node)
355 {
356 	struct xe_ggtt *ggtt = node->ggtt;
357 	struct xe_device *xe = tile_to_xe(ggtt->tile);
358 	bool bound;
359 	int idx;
360 
361 	bound = drm_dev_enter(&xe->drm, &idx);
362 
363 	mutex_lock(&ggtt->lock);
364 	if (bound)
365 		xe_ggtt_clear(ggtt, node->base.start, node->base.size);
366 	drm_mm_remove_node(&node->base);
367 	node->base.size = 0;
368 	mutex_unlock(&ggtt->lock);
369 
370 	if (!bound)
371 		goto free_node;
372 
373 	if (node->invalidate_on_remove)
374 		xe_ggtt_invalidate(ggtt);
375 
376 	drm_dev_exit(idx);
377 
378 free_node:
379 	xe_ggtt_node_fini(node);
380 }
381 
382 static void ggtt_node_remove_work_func(struct work_struct *work)
383 {
384 	struct xe_ggtt_node *node = container_of(work, typeof(*node),
385 						 delayed_removal_work);
386 	struct xe_device *xe = tile_to_xe(node->ggtt->tile);
387 
388 	xe_pm_runtime_get(xe);
389 	ggtt_node_remove(node);
390 	xe_pm_runtime_put(xe);
391 }
392 
393 /**
394  * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
395  * @node: the &xe_ggtt_node to be removed
396  * @invalidate: if node needs invalidation upon removal
397  */
398 void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
399 {
400 	struct xe_ggtt *ggtt;
401 	struct xe_device *xe;
402 
403 	if (!node || !node->ggtt)
404 		return;
405 
406 	ggtt = node->ggtt;
407 	xe = tile_to_xe(ggtt->tile);
408 
409 	node->invalidate_on_remove = invalidate;
410 
411 	if (xe_pm_runtime_get_if_active(xe)) {
412 		ggtt_node_remove(node);
413 		xe_pm_runtime_put(xe);
414 	} else {
415 		queue_work(ggtt->wq, &node->delayed_removal_work);
416 	}
417 }
418 
419 /**
420  * xe_ggtt_init - Regular non-early GGTT initialization
421  * @ggtt: the &xe_ggtt to be initialized
422  *
423  * Return: 0 on success or a negative error code on failure.
424  */
425 int xe_ggtt_init(struct xe_ggtt *ggtt)
426 {
427 	struct xe_device *xe = tile_to_xe(ggtt->tile);
428 	unsigned int flags;
429 	int err;
430 
431 	/*
432 	 * So that we don't need to worry about the 64K GGTT layout when dealing
433 	 * with scratch entries, keep the scratch page in system memory on
434 	 * platforms where 64K pages are needed for VRAM.
435 	 */
436 	flags = 0;
437 	if (ggtt->flags & XE_GGTT_FLAGS_64K)
438 		flags |= XE_BO_FLAG_SYSTEM;
439 	else
440 		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
441 
442 	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
443 	if (IS_ERR(ggtt->scratch)) {
444 		err = PTR_ERR(ggtt->scratch);
445 		goto err;
446 	}
447 
448 	xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch));
449 
450 	xe_ggtt_initial_clear(ggtt);
451 
452 	return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
453 err:
454 	ggtt->scratch = NULL;
455 	return err;
456 }
457 
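/*
 * Illustrative bring-up sequence (a sketch; the tile->mem.ggtt field name is
 * assumed here and the real call sites live in the tile/device init paths):
 *
 *	tile->mem.ggtt = xe_ggtt_alloc(tile);
 *	if (!tile->mem.ggtt)
 *		return -ENOMEM;
 *
 *	err = xe_ggtt_init_early(tile->mem.ggtt);
 *	if (err)
 *		return err;
 *
 *	err = xe_ggtt_init(tile->mem.ggtt);
 *
 * xe_ggtt_init_early() only makes GuC-usable mappings possible; HW engines can
 * use the GGTT only after xe_ggtt_init() installs the scratch page and does
 * the initial clear.
 */
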
458 static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
459 {
460 	int err;
461 
462 	if (!gt)
463 		return;
464 
465 	err = xe_tlb_inval_ggtt(&gt->tlb_inval);
466 	xe_gt_WARN(gt, err, "Failed to invalidate GGTT (%pe)", ERR_PTR(err));
467 }
468 
469 static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
470 {
471 	struct xe_device *xe = tile_to_xe(ggtt->tile);
472 
473 	/*
474 	 * XXX: Barrier for GGTT pages. Unsure exactly why this is required, but
475 	 * without it LNL has issues with the GuC reading the scratch page instead
476 	 * of the correct GGTT page. This is not a particularly hot code path, so
477 	 * blindly do an MMIO read here, which makes the GuC read the correct GGTT page.
478 	 */
479 	xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);
480 
481 	/* Each GT in a tile has its own TLB to cache GGTT lookups */
482 	ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
483 	ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
484 }
485 
486 static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
487 			      const struct drm_mm_node *node, const char *description)
488 {
489 	char buf[10];
490 
491 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
492 		string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
493 		xe_tile_dbg(ggtt->tile, "GGTT %#llx-%#llx (%s) %s\n",
494 			    node->start, node->start + node->size, buf, description);
495 	}
496 }
497 
498 /**
499  * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
500  * @node: the &xe_ggtt_node to hold reserved GGTT node
501  * @start: the starting GGTT address of the reserved region
502  * @end: then end GGTT address of the reserved region
503  *
504  * To be used in cases where ggtt->lock is already taken.
505  * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
506  *
507  * Return: 0 on success or a negative error code on failure.
508  */
509 int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
510 {
511 	struct xe_ggtt *ggtt = node->ggtt;
512 	int err;
513 
514 	xe_tile_assert(ggtt->tile, start < end);
515 	xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
516 	xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
517 	xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
518 	lockdep_assert_held(&ggtt->lock);
519 
520 	node->base.color = 0;
521 	node->base.start = start;
522 	node->base.size = end - start;
523 
524 	err = drm_mm_reserve_node(&ggtt->mm, &node->base);
525 
526 	if (xe_tile_WARN(ggtt->tile, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
527 			 node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
528 		return err;
529 
530 	xe_ggtt_dump_node(ggtt, &node->base, "balloon");
531 	return 0;
532 }
533 
534 /**
535  * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
536  * @node: the &xe_ggtt_node with reserved GGTT region
537  *
538  * To be used in cases where ggtt->lock is already taken.
539  * See xe_ggtt_node_insert_balloon_locked() for details.
540  */
541 void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
542 {
543 	if (!xe_ggtt_node_allocated(node))
544 		return;
545 
546 	lockdep_assert_held(&node->ggtt->lock);
547 
548 	xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
549 
550 	drm_mm_remove_node(&node->base);
551 }
552 
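/*
 * Illustrative ballooning sketch (hypothetical VF caller; vf_ggtt_base is an
 * assumed name for the start of the VF's provisioned range), run with
 * ggtt->lock already held:
 *
 *	node = xe_ggtt_node_init(ggtt);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	err = xe_ggtt_node_insert_balloon_locked(node, xe_wopcm_size(xe),
 *						 vf_ggtt_base);
 *	if (err)
 *		xe_ggtt_node_fini(node);
 *	...
 *	xe_ggtt_node_remove_balloon_locked(node);
 *	xe_ggtt_node_fini(node);
 *
 * The ballooned region [xe_wopcm_size(xe), vf_ggtt_base) is thereby kept out
 * of the allocator for as long as the balloon is in place.
 */
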
553 static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
554 {
555 	struct xe_tile *tile = ggtt->tile;
556 	struct xe_device *xe = tile_to_xe(tile);
557 	u64 __maybe_unused wopcm = xe_wopcm_size(xe);
558 
559 	xe_tile_assert(tile, start >= wopcm);
560 	xe_tile_assert(tile, start + size < ggtt->size - wopcm);
561 }
562 
563 /**
564  * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
565  * @ggtt: the &xe_ggtt struct instance
566  * @shift: change to the location of area provisioned for current VF
567  *
568  * This function moves all nodes from the GGTT VM to a temporary list. These nodes are expected
569  * to represent allocations in the range formerly assigned to the current VF, before the range changed.
570  * When the GGTT VM is completely clear of any nodes, they are re-added with shifted offsets.
571  *
572  * This function cannot fail, because it only shifts existing nodes without any
573  * additional processing. If the nodes could exist at the old addresses, they will
574  * fit at the new ones as well. A failure inside this function would indicate that
575  * the list of nodes was either already damaged, or that the shift moves the address range
576  * outside of valid bounds. Both cases justify an assert rather than an error code.
577  */
578 void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
579 {
580 	struct xe_tile *tile __maybe_unused = ggtt->tile;
581 	struct drm_mm_node *node, *tmpn;
582 	LIST_HEAD(temp_list_head);
583 
584 	lockdep_assert_held(&ggtt->lock);
585 
586 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
587 		drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
588 			xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);
589 
590 	drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
591 		drm_mm_remove_node(node);
592 		list_add(&node->node_list, &temp_list_head);
593 	}
594 
595 	list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
596 		list_del(&node->node_list);
597 		node->start += shift;
598 		drm_mm_reserve_node(&ggtt->mm, node);
599 		xe_tile_assert(tile, drm_mm_node_allocated(node));
600 	}
601 }
602 
603 /**
604  * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
605  * @node: the &xe_ggtt_node to be inserted
606  * @size: size of the node
607  * @align: alignment constraint of the node
608  * @mm_flags: flags to control the node behavior
609  *
610  * It cannot be called without first having called xe_ggtt_init() once.
611  * To be used in cases where ggtt->lock is already taken.
612  *
613  * Return: 0 on success or a negative error code on failure.
614  */
615 int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
616 			       u32 size, u32 align, u32 mm_flags)
617 {
618 	return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
619 					  mm_flags);
620 }
621 
622 /**
623  * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
624  * @node: the &xe_ggtt_node to be inserted
625  * @size: size of the node
626  * @align: alignment constraint of the node
627  *
628  * It cannot be called without first having called xe_ggtt_init() once.
629  *
630  * Return: 0 on success or a negative error code on failure.
631  */
632 int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
633 {
634 	int ret;
635 
636 	if (!node || !node->ggtt)
637 		return -ENOENT;
638 
639 	mutex_lock(&node->ggtt->lock);
640 	ret = xe_ggtt_node_insert_locked(node, size, align,
641 					 DRM_MM_INSERT_HIGH);
642 	mutex_unlock(&node->ggtt->lock);
643 
644 	return ret;
645 }
646 
647 /**
648  * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
649  * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
650  *
651  * This function will allocate the struct %xe_ggtt_node and return its pointer.
652  * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
653  * or xe_ggtt_node_remove_balloon_locked().
654  * Having %xe_ggtt_node struct allocated doesn't mean that the node is already allocated
655  * in GGTT. Only the xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
656  * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or reserved in GGTT.
657  *
658  * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
659  **/
660 struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
661 {
662 	struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);
663 
664 	if (!node)
665 		return ERR_PTR(-ENOMEM);
666 
667 	INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
668 	node->ggtt = ggtt;
669 
670 	return node;
671 }
672 
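/*
 * Illustrative lifecycle sketch (hypothetical caller):
 *
 *	struct xe_ggtt_node *node;
 *	int err;
 *
 *	node = xe_ggtt_node_init(ggtt);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	err = xe_ggtt_node_insert(node, SZ_64K, SZ_64K);
 *	if (err) {
 *		xe_ggtt_node_fini(node);
 *		return err;
 *	}
 *	...
 *	xe_ggtt_node_remove(node, true);
 *
 * xe_ggtt_node_remove() also frees the struct (possibly via a delayed worker),
 * so xe_ggtt_node_fini() is only needed when the insertion never succeeded.
 */
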
673 /**
674  * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
675  * @node: the &xe_ggtt_node to be freed
676  *
677  * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
678  * or xe_ggtt_node_insert_balloon_locked(), and this @node is not going to be reused,
679  * then this function needs to be called to free the %xe_ggtt_node struct.
680  **/
681 void xe_ggtt_node_fini(struct xe_ggtt_node *node)
682 {
683 	kfree(node);
684 }
685 
686 /**
687  * xe_ggtt_node_allocated - Check if node is allocated in GGTT
688  * @node: the &xe_ggtt_node to be inspected
689  *
690  * Return: True if allocated, False otherwise.
691  */
692 bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
693 {
694 	if (!node || !node->ggtt)
695 		return false;
696 
697 	return drm_mm_node_allocated(&node->base);
698 }
699 
700 /**
701  * xe_ggtt_map_bo - Map the BO into GGTT
702  * @ggtt: the &xe_ggtt where node will be mapped
703  * @node: the &xe_ggtt_node where this BO is mapped
704  * @bo: the &xe_bo to be mapped
705  * @pat_index: Which pat_index to use.
706  */
707 void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
708 		    struct xe_bo *bo, u16 pat_index)
709 {
710 
711 	u64 start, pte, end;
712 	struct xe_res_cursor cur;
713 
714 	if (XE_WARN_ON(!node))
715 		return;
716 
717 	start = node->base.start;
718 	end = start + xe_bo_size(bo);
719 
720 	pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
721 	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
722 		xe_assert(xe_bo_device(bo), bo->ttm.ttm);
723 
724 		for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
725 		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
726 			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
727 						   pte | xe_res_dma(&cur));
728 	} else {
729 		/* Prepend GPU offset */
730 		pte |= vram_region_gpu_offset(bo->ttm.resource);
731 
732 		for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
733 		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
734 			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
735 						   pte + cur.start);
736 	}
737 }
738 
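/*
 * Worked example (illustrative): an 8 KiB system-memory BO bound at GGTT
 * offset "start" results in two PTE writes:
 *
 *	ggtt_set_pte(ggtt, start, pte | dma_addr_of_page0);
 *	ggtt_set_pte(ggtt, start + XE_PAGE_SIZE, pte | dma_addr_of_page1);
 *
 * where "pte" holds only the flags from pt_ops->pte_encode_flags() and the
 * DMA addresses come from the xe_res_cursor walk above. For VRAM/stolen BOs
 * the region's GPU offset plus the cursor position is used instead.
 */
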
739 /**
740  * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
741  * @ggtt: the &xe_ggtt where node will be mapped
742  * @bo: the &xe_bo to be mapped
743  *
744  * This is used to restore a GGTT mapping after suspend.
745  */
746 void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
747 {
748 	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
749 	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
750 
751 	mutex_lock(&ggtt->lock);
752 	xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
753 	mutex_unlock(&ggtt->lock);
754 }
755 
756 static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
757 				  u64 start, u64 end, struct drm_exec *exec)
758 {
759 	u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
760 	u8 tile_id = ggtt->tile->id;
761 	int err;
762 
763 	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
764 		alignment = SZ_64K;
765 
766 	if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
767 		/* Someone's already inserted this BO in the GGTT */
768 		xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
769 		return 0;
770 	}
771 
772 	err = xe_bo_validate(bo, NULL, false, exec);
773 	if (err)
774 		return err;
775 
776 	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
777 
778 	bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
779 	if (IS_ERR(bo->ggtt_node[tile_id])) {
780 		err = PTR_ERR(bo->ggtt_node[tile_id]);
781 		bo->ggtt_node[tile_id] = NULL;
782 		goto out;
783 	}
784 
785 	mutex_lock(&ggtt->lock);
786 	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
787 					  xe_bo_size(bo), alignment, 0, start, end, 0);
788 	if (err) {
789 		xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
790 		bo->ggtt_node[tile_id] = NULL;
791 	} else {
792 		u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
793 		u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
794 
795 		xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
796 	}
797 	mutex_unlock(&ggtt->lock);
798 
799 	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
800 		xe_ggtt_invalidate(ggtt);
801 
802 out:
803 	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
804 
805 	return err;
806 }
807 
808 /**
809  * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
810  * @ggtt: the &xe_ggtt where bo will be inserted
811  * @bo: the &xe_bo to be inserted
812  * @start: address where it will be inserted
813  * @end: end of the range where it will be inserted
814  * @exec: The drm_exec transaction to use for exhaustive eviction.
815  *
816  * Return: 0 on success or a negative error code on failure.
817  */
818 int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
819 			 u64 start, u64 end, struct drm_exec *exec)
820 {
821 	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end, exec);
822 }
823 
824 /**
825  * xe_ggtt_insert_bo - Insert BO into GGTT
826  * @ggtt: the &xe_ggtt where bo will be inserted
827  * @bo: the &xe_bo to be inserted
828  * @exec: The drm_exec transaction to use for exhaustive eviction.
829  *
830  * Return: 0 on success or a negative error code on failure.
831  */
832 int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo,
833 		      struct drm_exec *exec)
834 {
835 	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, exec);
836 }
837 
838 /**
839  * xe_ggtt_remove_bo - Remove a BO from the GGTT
840  * @ggtt: the &xe_ggtt where node will be removed
841  * @bo: the &xe_bo to be removed
842  */
843 void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
844 {
845 	u8 tile_id = ggtt->tile->id;
846 
847 	/* This BO is not currently in the GGTT */
848 	if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
849 		return;
850 
851 	xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
852 
853 	xe_ggtt_node_remove(bo->ggtt_node[tile_id],
854 			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
855 }
856 
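/*
 * Illustrative usage sketch (hypothetical caller; "exec" is the caller's
 * drm_exec transaction):
 *
 *	err = xe_ggtt_insert_bo(ggtt, bo, exec);
 *	if (err)
 *		return err;
 *	...
 *	xe_ggtt_remove_bo(ggtt, bo);
 *
 * The underlying node is tracked in bo->ggtt_node[tile_id], so only the BO
 * itself is needed to undo the mapping; invalidation on removal follows the
 * BO's XE_BO_FLAG_GGTT_INVALIDATE flag.
 */
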
857 /**
858  * xe_ggtt_largest_hole - Largest GGTT hole
859  * @ggtt: the &xe_ggtt that will be inspected
860  * @alignment: minimum alignment
861  * @spare: if not NULL, in: desired memory size to be spared, out: adjusted possible spare
862  *
863  * Return: size of the largest contiguous GGTT region
864  */
865 u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
866 {
867 	const struct drm_mm *mm = &ggtt->mm;
868 	const struct drm_mm_node *entry;
869 	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
870 	u64 hole_start, hole_end, hole_size;
871 	u64 max_hole = 0;
872 
873 	mutex_lock(&ggtt->lock);
874 
875 	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
876 		hole_start = max(hole_start, hole_min_start);
877 		hole_start = ALIGN(hole_start, alignment);
878 		hole_end = ALIGN_DOWN(hole_end, alignment);
879 		if (hole_start >= hole_end)
880 			continue;
881 		hole_size = hole_end - hole_start;
882 		if (spare)
883 			*spare -= min3(*spare, hole_size, max_hole);
884 		max_hole = max(max_hole, hole_size);
885 	}
886 
887 	mutex_unlock(&ggtt->lock);
888 
889 	return max_hole;
890 }
891 
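/*
 * Worked example (illustrative): with *spare == SZ_256M and two holes visited
 * in the order 128 MiB then 512 MiB, the first iteration subtracts
 * min3(256M, 128M, 0) == 0 and the second subtracts min3(256M, 512M, 128M) ==
 * 128 MiB, so the function returns 512 MiB with *spare adjusted down to
 * 128 MiB.
 */
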
892 #ifdef CONFIG_PCI_IOV
893 static u64 xe_encode_vfid_pte(u16 vfid)
894 {
895 	return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
896 }
897 
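/*
 * Worked example (illustrative): with the VFID field at PTE bits 11:2 (see the
 * xe_ggtt_assign() kernel-doc below), xe_encode_vfid_pte(5) yields
 * (5 << 2) | XE_PAGE_PRESENT == 0x15: a present PTE carrying only the VF
 * identity and no address.
 */
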
898 static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
899 {
900 	u64 start = node->start;
901 	u64 size = node->size;
902 	u64 end = start + size - 1;
903 	u64 pte = xe_encode_vfid_pte(vfid);
904 
905 	lockdep_assert_held(&ggtt->lock);
906 
907 	if (!drm_mm_node_allocated(node))
908 		return;
909 
910 	while (start < end) {
911 		ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
912 		start += XE_PAGE_SIZE;
913 	}
914 
915 	xe_ggtt_invalidate(ggtt);
916 }
917 
918 /**
919  * xe_ggtt_assign - assign a GGTT region to the VF
920  * @node: the &xe_ggtt_node to update
921  * @vfid: the VF identifier
922  *
923  * This function is used by the PF driver to assign a GGTT region to the VF.
924  * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as on
925  * some platforms VFs can't modify that either.
926  */
927 void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
928 {
929 	mutex_lock(&node->ggtt->lock);
930 	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
931 	mutex_unlock(&node->ggtt->lock);
932 }
933 #endif
934 
935 /**
936  * xe_ggtt_dump - Dump GGTT for debug
937  * @ggtt: the &xe_ggtt to be dumped
938  * @p: the &drm_printer helper handle to be used to dump the information
939  *
940  * Return: 0 on success or a negative error code on failure.
941  */
942 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
943 {
944 	int err;
945 
946 	err = mutex_lock_interruptible(&ggtt->lock);
947 	if (err)
948 		return err;
949 
950 	drm_mm_print(&ggtt->mm, p);
951 	mutex_unlock(&ggtt->lock);
952 	return err;
953 }
954 
955 /**
956  * xe_ggtt_print_holes - Print holes
957  * @ggtt: the &xe_ggtt to be inspected
958  * @alignment: min alignment
959  * @p: the &drm_printer
960  *
961  * Print GGTT ranges that are available and return total size available.
962  *
963  * Return: Total available size.
964  */
965 u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
966 {
967 	const struct drm_mm *mm = &ggtt->mm;
968 	const struct drm_mm_node *entry;
969 	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
970 	u64 hole_start, hole_end, hole_size;
971 	u64 total = 0;
972 	char buf[10];
973 
974 	mutex_lock(&ggtt->lock);
975 
976 	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
977 		hole_start = max(hole_start, hole_min_start);
978 		hole_start = ALIGN(hole_start, alignment);
979 		hole_end = ALIGN_DOWN(hole_end, alignment);
980 		if (hole_start >= hole_end)
981 			continue;
982 		hole_size = hole_end - hole_start;
983 		total += hole_size;
984 
985 		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
986 		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
987 			   hole_start, hole_end - 1, buf);
988 	}
989 
990 	mutex_unlock(&ggtt->lock);
991 
992 	return total;
993 }
994 
995 /**
996  * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
997  * @ggtt: &xe_ggtt
998  * @bo: &xe_bo
999  * @pat_index: The pat_index for the PTE.
1000  *
1001  * This function returns the pte_flags for a given BO, without the address.
1002  * It's used for DPT to fill a GGTT mapped BO with a linear lookup table.
1003  */
1004 u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
1005 			     struct xe_bo *bo, u16 pat_index)
1006 {
1007 	return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
1008 }
1009 
1010 /**
1011  * xe_ggtt_read_pte - Read a PTE from the GGTT
1012  * @ggtt: &xe_ggtt
1013  * @offset: the offset for which the mapping should be read.
1014  *
1015  * Used by testcases and by display code reading out an inherited BIOS framebuffer.
1016  */
1017 u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
1018 {
1019 	return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
1020 }
1021