// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_ggtt.h"

#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
#include "xe_tile_sriov_vf.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/**
 * DOC: Global Graphics Translation Table (GGTT)
 *
 * Xe GGTT implements support for a Global Virtual Address space that is used
 * for resources that are accessible to privileged (i.e. kernel-mode) processes,
 * and not tied to a specific user-level process. For example, the Graphics
 * micro-Controller (GuC) and Display Engine (if present) utilize this Global
 * address space.
 *
 * The Global GTT (GGTT) translates from the Global virtual address to a physical
 * address that can be accessed by HW. The GGTT is a flat, single-level table.
 *
 * Xe implements a simplified version of the GGTT, specifically managing only the
 * range that goes from the Write Once Protected Content Memory (WOPCM) layout to
 * a predefined GUC_GGTT_TOP. This approach avoids complications related to the
 * GuC (Graphics Microcontroller) hardware limitations. The GuC address space
 * is limited on both ends of the GGTT, because the GuC shim HW redirects
 * accesses to those addresses to other HW areas instead of going through the
 * GGTT. On the bottom end, the GuC can't access offsets below the WOPCM size,
 * while on the top side the limit is fixed at GUC_GGTT_TOP. To keep things
 * simple, instead of checking each object to see whether it is accessed by the
 * GuC or not, we just exclude those areas from the allocator. Additionally, to
 * simplify the driver load, we use the maximum WOPCM size in this logic instead
 * of the programmed one, so we don't need to wait until the actual size to be
 * programmed is determined (which requires FW fetch) before initializing the
 * GGTT. These simplifications might waste space in the GGTT (about 20-25 MBs
 * depending on the platform) but we can live with this. Another benefit is that
 * the GuC bootrom can't access anything below the WOPCM max size, so anything
 * the bootrom needs to access (e.g. a RSA key) needs to be placed in the GGTT
 * above the WOPCM max size. Starting the GGTT allocations above the WOPCM max
 * size gives us the correct placement for free.
 */
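
/*
 * Illustrative sketch (not part of the driver logic): with the simplifications
 * above, the allocator only ever hands out addresses in the range
 * [WOPCM max size, GUC_GGTT_TOP), e.g.:
 *
 *	u64 base = xe_wopcm_size(xe);			 // maximum WOPCM size
 *	u64 top  = min_t(u64, ggtt->size, GUC_GGTT_TOP);
 *	// usable GGTT span for all allocations: [base, top)
 */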

static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
	u64 pte = XE_PAGE_PRESENT;

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_GGTT_PTE_DM;

	return pte;
}

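/*
 * On Xe_LPG the 2-bit pat_index is encoded directly into the PTE's two PAT
 * bits. For example, pat_index 2 (0b10) sets XELPG_GGTT_PTE_PAT1 only, while
 * pat_index 3 sets both PAT bits.
 */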
static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xelp_ggtt_pte_flags(bo, pat_index);

	xe_assert(xe, pat_index <= 3);

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;

	return pte;
}

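/*
 * The GGMS field in the GMCH control word encodes the GSM (GGTT page table)
 * size as 1 MB << GGMS. For example, GGMS == 3 reports an 8 MB GSM, i.e. 1M
 * PTEs of 8 bytes each, enough to map a 4 GiB GGTT.
 */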
static unsigned int probe_gsm_size(struct pci_dev *pdev)
{
	u16 gmch_ctl, ggms;

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
	ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
	return ggms ? SZ_1M << ggms : 0;
}

static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
	struct xe_tile *tile = ggtt->tile;
	struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
		tile->primary_gt : tile->media_gt;
	struct xe_mmio *mmio = &affected_gt->mmio;
	u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
	/*
	 * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
	 * to wait for completion of prior GTT writes before letting this through.
	 * This needs to be done for all GGTT writes originating from the CPU.
	 */
	lockdep_assert_held(&ggtt->lock);

	if ((++ggtt->access_count % max_gtt_writes) == 0) {
		xe_mmio_write32(mmio, GMD_ID, 0x0);
		ggtt->access_count = 0;
	}
}

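/*
 * A GGTT address selects a 64-bit PTE by simple shift: the entry index is
 * addr >> XE_PTE_SHIFT. With 4 KiB GGTT pages, e.g., addr 0x10000 lands in
 * ggtt->gsm[16].
 */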
static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->size);

	writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}

static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_ggtt_set_pte(ggtt, addr, pte);
	ggtt_update_access_counter(ggtt);
}

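/*
 * Cleared ranges point at the tile's scratch page (once it exists) rather
 * than at nothing, so stray translations hit a harmless page; before the
 * scratch BO is set up, the PTEs are simply zeroed.
 */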
static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
	u64 end = start + size - 1;
	u64 scratch_pte;

	xe_tile_assert(ggtt->tile, start < end);

	if (ggtt->scratch)
		scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
			      ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
							     pat_index);
	else
		scratch_pte = 0;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
		start += XE_PAGE_SIZE;
	}
}

/**
 * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
 * @tile: &xe_tile
 *
 * Allocates a &xe_ggtt for a given tile.
 *
 * Return: &xe_ggtt on success, or NULL when out of memory.
 */
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
{
	struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);

	if (ggtt)
		ggtt->tile = tile;

	return ggtt;
}

static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
	struct xe_ggtt *ggtt = arg;

	destroy_workqueue(ggtt->wq);
	mutex_destroy(&ggtt->lock);
	drm_mm_takedown(&ggtt->mm);
}

static void ggtt_fini(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	ggtt->scratch = NULL;
}

#ifdef CONFIG_LOCKDEP
void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
{
	might_lock(&ggtt->lock);
}
#endif

static void primelockdep(struct xe_ggtt *ggtt)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ggtt->lock);
	fs_reclaim_release(GFP_KERNEL);
}

static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_flags = xelp_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};

static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
{
	drm_mm_init(&ggtt->mm, reserved,
		    ggtt->size - reserved);
	mutex_init(&ggtt->lock);
	primelockdep(ggtt);
}

int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
{
	ggtt->size = size;
	__xe_ggtt_init_early(ggtt, reserved);
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);

/**
 * xe_ggtt_init_early - Early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * It allows the creation of new mappings usable by the GuC.
 * Mappings are not usable by the HW engines, as the GGTT doesn't have a
 * scratch page nor an initial clear done to it yet. That will happen in the
 * regular, non-early GGTT initialization.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int gsm_size;
	int err;

	if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
		gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
	else
		gsm_size = probe_gsm_size(pdev);

	if (gsm_size == 0) {
		drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
		return -ENOMEM;
	}

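	/*
	 * The GSM (the GGTT's PTE array) sits 8 MB into the tile's register
	 * space. Each 8-byte PTE maps one XE_PAGE_SIZE page, so the mappable
	 * GGTT size is (gsm_size / 8) PTEs times the page size; an 8 MB GSM
	 * yields 4 GiB.
	 */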
	ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
	ggtt->size = (gsm_size / 8) * (u64) XE_PAGE_SIZE;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		ggtt->flags |= XE_GGTT_FLAGS_64K;

	if (ggtt->size > GUC_GGTT_TOP)
		ggtt->size = GUC_GGTT_TOP;

	if (GRAPHICS_VERx100(xe) >= 1270)
		ggtt->pt_ops = (ggtt->tile->media_gt &&
			       XE_WA(ggtt->tile->media_gt, 22019338487)) ||
			       XE_WA(ggtt->tile->primary_gt, 22019338487) ?
			       &xelpg_pt_wa_ops : &xelpg_pt_ops;
	else
		ggtt->pt_ops = &xelp_pt_ops;

	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
	__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));

	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
	if (err)
		return err;

	if (IS_SRIOV_VF(xe)) {
		err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
		if (err)
			return err;
	}

	return 0;
}
ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);

static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
	struct drm_mm_node *hole;
	u64 start, end;

	/* Display may have allocated inside ggtt, so be careful with clearing here */
	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
		xe_ggtt_clear(ggtt, start, end - start);

	xe_ggtt_invalidate(ggtt);
	mutex_unlock(&ggtt->lock);
}

static void ggtt_node_remove(struct xe_ggtt_node *node)
{
	struct xe_ggtt *ggtt = node->ggtt;
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	bool bound;
	int idx;

	bound = drm_dev_enter(&xe->drm, &idx);

	mutex_lock(&ggtt->lock);
	if (bound)
		xe_ggtt_clear(ggtt, node->base.start, node->base.size);
	drm_mm_remove_node(&node->base);
	node->base.size = 0;
	mutex_unlock(&ggtt->lock);

	if (!bound)
		goto free_node;

	if (node->invalidate_on_remove)
		xe_ggtt_invalidate(ggtt);

	drm_dev_exit(idx);

free_node:
	xe_ggtt_node_fini(node);
}

static void ggtt_node_remove_work_func(struct work_struct *work)
{
	struct xe_ggtt_node *node = container_of(work, typeof(*node),
						 delayed_removal_work);
	struct xe_device *xe = tile_to_xe(node->ggtt->tile);

	xe_pm_runtime_get(xe);
	ggtt_node_remove(node);
	xe_pm_runtime_put(xe);
}

/**
 * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
 * @node: the &xe_ggtt_node to be removed
 * @invalidate: if node needs invalidation upon removal
 */
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
{
	struct xe_ggtt *ggtt;
	struct xe_device *xe;

	if (!node || !node->ggtt)
		return;

	ggtt = node->ggtt;
	xe = tile_to_xe(ggtt->tile);

	node->invalidate_on_remove = invalidate;

	if (xe_pm_runtime_get_if_active(xe)) {
		ggtt_node_remove(node);
		xe_pm_runtime_put(xe);
	} else {
		queue_work(ggtt->wq, &node->delayed_removal_work);
	}
}

/**
 * xe_ggtt_init - Regular non-early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	unsigned int flags;
	int err;

	/*
	 * So we don't need to worry about 64K GGTT layout when dealing with
	 * scratch entries, rather keep the scratch page in system memory on
	 * platforms where 64K pages are needed for VRAM.
	 */
	flags = 0;
	if (ggtt->flags & XE_GGTT_FLAGS_64K)
		flags |= XE_BO_FLAG_SYSTEM;
	else
		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);

	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
	if (IS_ERR(ggtt->scratch)) {
		err = PTR_ERR(ggtt->scratch);
		goto err;
	}

	xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);

	xe_ggtt_initial_clear(ggtt);

	return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
	ggtt->scratch = NULL;
	return err;
}

static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
	int err;

	if (!gt)
		return;

	err = xe_gt_tlb_invalidation_ggtt(gt);
	if (err)
		drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);

	/*
	 * XXX: Barrier for GGTT pages. Unsure exactly why this is required,
	 * but without it LNL has issues with the GuC reading the scratch page
	 * vs. the correct GGTT page. Not a particularly hot code path, so
	 * blindly do an mmio read here, which results in the GuC reading the
	 * correct GGTT page.
	 */
	xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);

	/* Each GT in a tile has its own TLB to cache GGTT lookups */
	ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
	ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}

static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
			      const struct drm_mm_node *node, const char *description)
{
	char buf[10];

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
		xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
			  node->start, node->start + node->size, buf, description);
	}
}

/**
 * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
 * @node: the &xe_ggtt_node to hold reserved GGTT node
 * @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
 *
 * To be used in cases where ggtt->lock is already taken.
 * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
	struct xe_ggtt *ggtt = node->ggtt;
	int err;

	xe_tile_assert(ggtt->tile, start < end);
	xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
	lockdep_assert_held(&ggtt->lock);

	node->base.color = 0;
	node->base.start = start;
	node->base.size = end - start;

	err = drm_mm_reserve_node(&ggtt->mm, &node->base);

	if (xe_gt_WARN(ggtt->tile->primary_gt, err,
		       "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
		       node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
		return err;

	xe_ggtt_dump_node(ggtt, &node->base, "balloon");
	return 0;
}

/**
 * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
 * @node: the &xe_ggtt_node with reserved GGTT region
 *
 * To be used in cases where ggtt->lock is already taken.
 * See xe_ggtt_node_insert_balloon_locked() for details.
 */
void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
	if (!xe_ggtt_node_allocated(node))
		return;

	lockdep_assert_held(&node->ggtt->lock);

	xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");

	drm_mm_remove_node(&node->base);
}

static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	struct xe_tile *tile = ggtt->tile;
	struct xe_device *xe = tile_to_xe(tile);
	u64 __maybe_unused wopcm = xe_wopcm_size(xe);

	xe_tile_assert(tile, start >= wopcm);
	xe_tile_assert(tile, start + size < ggtt->size - wopcm);
}

/**
 * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
 * @ggtt: the &xe_ggtt struct instance
 * @shift: change to the location of area provisioned for current VF
 *
 * This function moves all nodes from the GGTT VM to a temporary list. These
 * nodes are expected to represent allocations in the range formerly assigned
 * to the current VF, before the range changed. Once the GGTT VM is completely
 * clear of any nodes, they are re-added with shifted offsets.
 *
 * The function has no way of failing, because it shifts existing nodes without
 * any additional processing. If the nodes existed successfully at the old
 * address, they will do the same at the new one. A failure inside this
 * function would indicate that the list of nodes was either already damaged,
 * or that the shift moves the address range outside of valid bounds. Both
 * cases justify an assert rather than an error code.
 */
void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
{
	struct xe_tile *tile __maybe_unused = ggtt->tile;
	struct drm_mm_node *node, *tmpn;
	LIST_HEAD(temp_list_head);

	lockdep_assert_held(&ggtt->lock);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
			xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);

	drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
		drm_mm_remove_node(node);
		list_add(&node->node_list, &temp_list_head);
	}

	list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
		list_del(&node->node_list);
		node->start += shift;
		drm_mm_reserve_node(&ggtt->mm, node);
		xe_tile_assert(tile, drm_mm_node_allocated(node));
	}
}

/**
 * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 * @mm_flags: flags to control the node behavior
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 * To be used in cases where ggtt->lock is already taken.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
			       u32 size, u32 align, u32 mm_flags)
{
	return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
					  mm_flags);
}

/**
 * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
	int ret;

	if (!node || !node->ggtt)
		return -ENOENT;

	mutex_lock(&node->ggtt->lock);
	ret = xe_ggtt_node_insert_locked(node, size, align,
					 DRM_MM_INSERT_HIGH);
	mutex_unlock(&node->ggtt->lock);

	return ret;
}

/**
 * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
 * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
 *
 * This function will allocate the struct %xe_ggtt_node and return its pointer.
 * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
 * or xe_ggtt_node_remove_balloon_locked().
 * Having the %xe_ggtt_node struct allocated doesn't mean that the node is already
 * allocated in GGTT. Only xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(), or
 * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or
 * reserved in GGTT.
 *
 * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
 **/
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
{
	struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);

	if (!node)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
	node->ggtt = ggtt;

	return node;
}

/**
 * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
 * @node: the &xe_ggtt_node to be freed
 *
 * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
 * or xe_ggtt_node_insert_balloon_locked(), and this @node is not going to be reused, then
 * this function needs to be called to free the %xe_ggtt_node struct.
 **/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
{
	kfree(node);
}

/**
 * xe_ggtt_node_allocated - Check if node is allocated in GGTT
 * @node: the &xe_ggtt_node to be inspected
 *
 * Return: True if allocated, False otherwise.
 */
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return false;

	return drm_mm_node_allocated(&node->base);
}
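
/*
 * Typical node lifecycle (illustrative sketch, not driver code; error
 * handling trimmed):
 *
 *	struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
 *
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	err = xe_ggtt_node_insert(node, size, XE_PAGE_SIZE);
 *	if (err) {
 *		xe_ggtt_node_fini(node);	// struct won't be reused
 *		return err;
 *	}
 *	...
 *	xe_ggtt_node_remove(node, true);	// clears PTEs, frees the struct
 */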

/**
 * xe_ggtt_map_bo - Map the BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @node: the &xe_ggtt_node where this BO is mapped
 * @bo: the &xe_bo to be mapped
 * @pat_index: Which pat_index to use.
 */
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
		    struct xe_bo *bo, u16 pat_index)
{
	u64 start, pte, end;
	struct xe_res_cursor cur;

	if (XE_WARN_ON(!node))
		return;

	start = node->base.start;
	end = start + bo->size;

	pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
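	/*
	 * The cursor below consumes one page per iteration, so
	 * end - cur.remaining walks the GGTT address upward from the node
	 * start: it equals start on the first write and end - XE_PAGE_SIZE
	 * on the last.
	 */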
	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe_bo_device(bo), bo->ttm.ttm);

		for (xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &cur);
		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
						   pte | xe_res_dma(&cur));
	} else {
		/* Prepend GPU offset */
		pte |= vram_region_gpu_offset(bo->ttm.resource);

		for (xe_res_first(bo->ttm.resource, 0, bo->size, &cur);
		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
						   pte + cur.start);
	}
}

/**
 * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @bo: the &xe_bo to be mapped
 *
 * This is used to restore a GGTT mapping after suspend.
 */
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];

	mutex_lock(&ggtt->lock);
	xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
	mutex_unlock(&ggtt->lock);
}

static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
				  u64 start, u64 end)
{
	u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
	u8 tile_id = ggtt->tile->id;
	int err;

	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
		/* Someone's already inserted this BO in the GGTT */
		xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
		return 0;
	}

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));

	bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
	if (IS_ERR(bo->ggtt_node[tile_id])) {
		err = PTR_ERR(bo->ggtt_node[tile_id]);
		bo->ggtt_node[tile_id] = NULL;
		goto out;
	}

	mutex_lock(&ggtt->lock);
	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
					  bo->size, alignment, 0, start, end, 0);
	if (err) {
		xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
		bo->ggtt_node[tile_id] = NULL;
	} else {
		u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
		u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];

		xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
	}
	mutex_unlock(&ggtt->lock);

	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
		xe_ggtt_invalidate(ggtt);

out:
	xe_pm_runtime_put(tile_to_xe(ggtt->tile));

	return err;
}

/**
 * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 * @start: address where it will be inserted
 * @end: end of the range where it will be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}

/**
 * xe_ggtt_insert_bo - Insert BO into GGTT
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}

/**
 * xe_ggtt_remove_bo - Remove a BO from the GGTT
 * @ggtt: the &xe_ggtt where node will be removed
 * @bo: the &xe_bo to be removed
 */
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u8 tile_id = ggtt->tile->id;

	/* This BO is not currently in the GGTT */
	if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
		return;

	xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);

	xe_ggtt_node_remove(bo->ggtt_node[tile_id],
			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}

/**
 * xe_ggtt_largest_hole - Largest GGTT hole
 * @ggtt: the &xe_ggtt that will be inspected
 * @alignment: minimum alignment
 * @spare: If not NULL; in: desired memory size to be spared / out: adjusted possible spare
 *
 * Return: size of the largest continuous GGTT region
 */
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		if (spare)
			*spare -= min3(*spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	return max_hole;
}

#ifdef CONFIG_PCI_IOV
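/*
 * The VFID occupies PTE bits 11:2 (GGTT_PTE_VFID), as noted in the
 * xe_ggtt_assign() doc below. For example, assuming that field layout,
 * vfid 3 encodes to (3 << 2) | XE_PAGE_PRESENT == 0xD.
 */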
static u64 xe_encode_vfid_pte(u16 vfid)
{
	return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
}

static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
{
	u64 start = node->start;
	u64 size = node->size;
	u64 end = start + size - 1;
	u64 pte = xe_encode_vfid_pte(vfid);

	lockdep_assert_held(&ggtt->lock);

	if (!drm_mm_node_allocated(node))
		return;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
		start += XE_PAGE_SIZE;
	}

	xe_ggtt_invalidate(ggtt);
}

/**
 * xe_ggtt_assign - assign a GGTT region to the VF
 * @node: the &xe_ggtt_node to update
 * @vfid: the VF identifier
 *
 * This function is used by the PF driver to assign a GGTT region to the VF.
 * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
 * on some platforms VFs can't modify that either.
 */
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
	mutex_lock(&node->ggtt->lock);
	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
	mutex_unlock(&node->ggtt->lock);
}
#endif

/**
 * xe_ggtt_dump - Dump GGTT for debug
 * @ggtt: the &xe_ggtt to be dumped
 * @p: the &drm_printer helper handle to be used to dump the information
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->lock);
	if (err)
		return err;

	drm_mm_print(&ggtt->mm, p);
	mutex_unlock(&ggtt->lock);
	return err;
}

/**
 * xe_ggtt_print_holes - Print holes
 * @ggtt: the &xe_ggtt to be inspected
 * @alignment: min alignment
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available and return total size available.
 *
 * Return: Total available size.
 */
u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 total = 0;
	char buf[10];

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);

	return total;
}

/**
 * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
 * @ggtt: &xe_ggtt
 * @bo: &xe_bo
 * @pat_index: The pat_index for the PTE.
 *
 * This function returns the pte_flags for a given BO, without the address.
 * It's used for DPT to fill a GGTT mapped BO with a linear lookup table.
 */
u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
			     struct xe_bo *bo, u16 pat_index)
{
	return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
}

/**
 * xe_ggtt_read_pte - Read a PTE from the GGTT
 * @ggtt: &xe_ggtt
 * @offset: the offset for which the mapping should be read.
 *
 * Used by testcases, and by display reading out an inherited bios FB.
 */
u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
{
	return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
}
985