// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_ggtt.h"

#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
#include "xe_tile_sriov_vf.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/**
 * DOC: Global Graphics Translation Table (GGTT)
 *
 * Xe GGTT implements the support for a Global Virtual Address space that is used
 * for resources that are accessible to privileged (i.e. kernel-mode) processes,
 * and not tied to a specific user-level process. For example, the Graphics
 * micro-Controller (GuC) and Display Engine (if present) utilize this Global
 * address space.
 *
 * The Global GTT (GGTT) translates from the Global virtual address to a physical
 * address that can be accessed by HW. The GGTT is a flat, single-level table.
 *
 * Xe implements a simplified version of the GGTT, specifically managing only the
 * range that goes from the Write Once Protected Content Memory (WOPCM) layout to
 * a predefined GUC_GGTT_TOP. This approach avoids complications related to the
 * GuC (Graphics Microcontroller) hardware limitations. The GuC address space
 * is limited on both ends of the GGTT, because the GuC shim HW redirects
 * accesses to those addresses to other HW areas instead of going through the
 * GGTT. On the bottom end, the GuC can't access offsets below the WOPCM size,
 * while on the top side the limit is fixed at GUC_GGTT_TOP. To keep things
 * simple, instead of checking each object to see if it is accessed by the GuC
 * or not, we just exclude those areas from the allocator. Additionally, to
 * simplify the driver load, we use the maximum WOPCM size in this logic instead
 * of the programmed one, so we don't need to wait until the actual size to be
 * programmed is determined (which requires FW fetch) before initializing the
 * GGTT. These simplifications might waste space in the GGTT (about 20-25 MBs
 * depending on the platform), but we can live with this. Another benefit is
 * that the GuC bootrom can't access anything below the WOPCM max size, so
 * anything the bootrom needs to access (e.g. an RSA key) needs to be placed in
 * the GGTT above the WOPCM max size. Starting the GGTT allocations above the
 * WOPCM max size gives us the correct placement for free.
 */

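/*
 * Illustrative sketch only (not consumed by the driver): the resulting GGTT
 * address layout, with the drm_mm allocator managing just the middle range.
 *
 *	0 ............... WOPCM max size	excluded (GuC shim redirects accesses)
 *	WOPCM max size .. GUC_GGTT_TOP		managed by the GGTT allocator
 *	GUC_GGTT_TOP .... end of the GGTT	excluded (not accessible by the GuC)
 */
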
static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
	u64 pte = XE_PAGE_PRESENT;

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_GGTT_PTE_DM;

	return pte;
}

static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xelp_ggtt_pte_flags(bo, pat_index);

	xe_assert(xe, pat_index <= 3);

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;

	return pte;
}

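/*
 * The GGMS field in the GMCH control word encodes the GTT Stolen Memory (GSM)
 * size as a power-of-two multiple of 1 MiB. As a worked example (illustrative
 * only): a raw field value of 3 yields SZ_1M << 3 = 8 MiB of GSM, while a
 * value of 0 means no GSM was preallocated.
 */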
static unsigned int probe_gsm_size(struct pci_dev *pdev)
{
	u16 gmch_ctl, ggms;

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
	ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
	return ggms ? SZ_1M << ggms : 0;
}

static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
	struct xe_tile *tile = ggtt->tile;
	struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
		tile->primary_gt : tile->media_gt;
	struct xe_mmio *mmio = &affected_gt->mmio;
	u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
	/*
	 * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
	 * to wait for completion of prior GTT writes before letting this through.
	 * This needs to be done for all GGTT writes originating from the CPU.
	 */
	lockdep_assert_held(&ggtt->lock);

	if ((++ggtt->access_count % max_gtt_writes) == 0) {
		xe_mmio_write32(mmio, GMD_ID, 0x0);
		ggtt->access_count = 0;
	}
}

static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->size);

	writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}

static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_ggtt_set_pte(ggtt, addr, pte);
	ggtt_update_access_counter(ggtt);
}

static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
	u64 end = start + size - 1;
	u64 scratch_pte;

	xe_tile_assert(ggtt->tile, start < end);

	if (ggtt->scratch)
		scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
			ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
						       pat_index);
	else
		scratch_pte = 0;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
		start += XE_PAGE_SIZE;
	}
}

/**
 * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
 * @tile: &xe_tile
 *
 * Allocates a &xe_ggtt for a given tile.
 *
 * Return: &xe_ggtt on success, or NULL when out of memory.
 */
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
{
	struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);

	if (ggtt)
		ggtt->tile = tile;

	return ggtt;
}

static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
	struct xe_ggtt *ggtt = arg;

	destroy_workqueue(ggtt->wq);
	mutex_destroy(&ggtt->lock);
	drm_mm_takedown(&ggtt->mm);
}

static void ggtt_fini(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	ggtt->scratch = NULL;
}

#ifdef CONFIG_LOCKDEP
void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
{
	might_lock(&ggtt->lock);
}
#endif

static void primelockdep(struct xe_ggtt *ggtt)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ggtt->lock);
	fs_reclaim_release(GFP_KERNEL);
}

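/*
 * Three PTE-op flavors, selected in xe_ggtt_init_early(): plain XeLP PTEs,
 * XeLPG PTEs carrying two PAT bits, and the same XeLPG encoding with a flush
 * after every write for platforms affected by Wa_22019338487.
 */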
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_flags = xelp_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
	.pte_encode_flags = xelpg_ggtt_pte_flags,
	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};

static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
{
	drm_mm_init(&ggtt->mm, reserved,
		    ggtt->size - reserved);
	mutex_init(&ggtt->lock);
	primelockdep(ggtt);
}

int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
{
	ggtt->size = size;
	__xe_ggtt_init_early(ggtt, reserved);
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);

static void dev_fini_ggtt(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	drain_workqueue(ggtt->wq);
}

/**
 * xe_ggtt_init_early - Early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * It allows the creation of new mappings usable by the GuC.
 * Mappings are not yet usable by the HW engines, as the GGTT doesn't have
 * scratch entries nor its initial clear done yet. That will happen in the
 * regular, non-early GGTT initialization.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int gsm_size;
	int err;

	if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
		gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
	else
		gsm_size = probe_gsm_size(pdev);

	if (gsm_size == 0) {
		drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
		return -ENOMEM;
	}

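	/*
	 * Each 64-bit PTE maps one XE_PAGE_SIZE (4 KiB) page, so the GGTT
	 * covers gsm_size / 8 pages; e.g. an 8 MiB GSM yields a 4 GiB GGTT.
	 */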
	ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
	ggtt->size = (gsm_size / 8) * (u64) XE_PAGE_SIZE;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		ggtt->flags |= XE_GGTT_FLAGS_64K;

	if (ggtt->size > GUC_GGTT_TOP)
		ggtt->size = GUC_GGTT_TOP;

	if (GRAPHICS_VERx100(xe) >= 1270)
		ggtt->pt_ops = (ggtt->tile->media_gt &&
				XE_WA(ggtt->tile->media_gt, 22019338487)) ||
				XE_WA(ggtt->tile->primary_gt, 22019338487) ?
				&xelpg_pt_wa_ops : &xelpg_pt_ops;
	else
		ggtt->pt_ops = &xelp_pt_ops;

	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
	__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));

	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
	if (err)
		return err;

	err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
	if (err)
		return err;

	if (IS_SRIOV_VF(xe)) {
		err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
		if (err)
			return err;
	}

	return 0;
}
ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);

static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
	struct drm_mm_node *hole;
	u64 start, end;

	/* Display may have allocated inside ggtt, so be careful with clearing here */
	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
		xe_ggtt_clear(ggtt, start, end - start);

	xe_ggtt_invalidate(ggtt);
	mutex_unlock(&ggtt->lock);
}

static void ggtt_node_remove(struct xe_ggtt_node *node)
{
	struct xe_ggtt *ggtt = node->ggtt;
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	bool bound;
	int idx;

	bound = drm_dev_enter(&xe->drm, &idx);

	mutex_lock(&ggtt->lock);
	if (bound)
		xe_ggtt_clear(ggtt, node->base.start, node->base.size);
	drm_mm_remove_node(&node->base);
	node->base.size = 0;
	mutex_unlock(&ggtt->lock);

	if (!bound)
		goto free_node;

	if (node->invalidate_on_remove)
		xe_ggtt_invalidate(ggtt);

	drm_dev_exit(idx);

free_node:
	xe_ggtt_node_fini(node);
}

static void ggtt_node_remove_work_func(struct work_struct *work)
{
	struct xe_ggtt_node *node = container_of(work, typeof(*node),
						 delayed_removal_work);
	struct xe_device *xe = tile_to_xe(node->ggtt->tile);

	xe_pm_runtime_get(xe);
	ggtt_node_remove(node);
	xe_pm_runtime_put(xe);
}

/**
 * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
 * @node: the &xe_ggtt_node to be removed
 * @invalidate: if node needs invalidation upon removal
 */
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
{
	struct xe_ggtt *ggtt;
	struct xe_device *xe;

	if (!node || !node->ggtt)
		return;

	ggtt = node->ggtt;
	xe = tile_to_xe(ggtt->tile);

	node->invalidate_on_remove = invalidate;

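	/*
	 * If the device is already awake, remove the node right away;
	 * otherwise defer to the GGTT workqueue, whose work func takes a
	 * runtime PM reference before doing the actual removal.
	 */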
	if (xe_pm_runtime_get_if_active(xe)) {
		ggtt_node_remove(node);
		xe_pm_runtime_put(xe);
	} else {
		queue_work(ggtt->wq, &node->delayed_removal_work);
	}
}

/**
 * xe_ggtt_init - Regular non-early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	unsigned int flags;
	int err;

	/*
	 * So we don't need to worry about 64K GGTT layout when dealing with
	 * scratch entries, rather keep the scratch page in system memory on
	 * platforms where 64K pages are needed for VRAM.
	 */
	flags = 0;
	if (ggtt->flags & XE_GGTT_FLAGS_64K)
		flags |= XE_BO_FLAG_SYSTEM;
	else
		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);

	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
	if (IS_ERR(ggtt->scratch)) {
		err = PTR_ERR(ggtt->scratch);
		goto err;
	}

	xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch));

	xe_ggtt_initial_clear(ggtt);

	return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
	ggtt->scratch = NULL;
	return err;
}

static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
	int err;

	if (!gt)
		return;

	err = xe_gt_tlb_invalidation_ggtt(gt);
	if (err)
		drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);

	/*
	 * XXX: Barrier for GGTT pages. Unsure exactly why this is required,
	 * but without it LNL is having issues with the GuC reading the scratch
	 * page instead of the correct GGTT page. Not particularly a hot code
	 * path, so blindly do an MMIO read here, which results in the GuC
	 * reading the correct GGTT page.
	 */
	xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);

	/* Each GT in a tile has its own TLB to cache GGTT lookups */
	ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
	ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}

static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
			      const struct drm_mm_node *node, const char *description)
{
	char buf[10];

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
		xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
			  node->start, node->start + node->size, buf, description);
	}
}

/**
 * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
 * @node: the &xe_ggtt_node to hold reserved GGTT node
 * @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
 *
 * To be used in cases where ggtt->lock is already taken.
 * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
	struct xe_ggtt *ggtt = node->ggtt;
	int err;

	xe_tile_assert(ggtt->tile, start < end);
	xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
	lockdep_assert_held(&ggtt->lock);

	node->base.color = 0;
	node->base.start = start;
	node->base.size = end - start;

	err = drm_mm_reserve_node(&ggtt->mm, &node->base);

	if (xe_gt_WARN(ggtt->tile->primary_gt, err,
		       "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
		       node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
		return err;

	xe_ggtt_dump_node(ggtt, &node->base, "balloon");
	return 0;
}

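/*
 * A minimal usage sketch (illustrative only, assuming a node obtained from
 * xe_ggtt_node_init() and a caller that owns ggtt->lock):
 *
 *	mutex_lock(&ggtt->lock);
 *	err = xe_ggtt_node_insert_balloon_locked(node, start, end);
 *	...
 *	xe_ggtt_node_remove_balloon_locked(node);
 *	mutex_unlock(&ggtt->lock);
 */
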
/**
 * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
 * @node: the &xe_ggtt_node with reserved GGTT region
 *
 * To be used in cases where ggtt->lock is already taken.
 * See xe_ggtt_node_insert_balloon_locked() for details.
 */
void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
	if (!xe_ggtt_node_allocated(node))
		return;

	lockdep_assert_held(&node->ggtt->lock);

	xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");

	drm_mm_remove_node(&node->base);
}

static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	struct xe_tile *tile = ggtt->tile;
	struct xe_device *xe = tile_to_xe(tile);
	u64 __maybe_unused wopcm = xe_wopcm_size(xe);

	xe_tile_assert(tile, start >= wopcm);
	xe_tile_assert(tile, start + size < ggtt->size - wopcm);
}

/**
 * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
 * @ggtt: the &xe_ggtt struct instance
 * @shift: change to the location of area provisioned for current VF
 *
 * This function moves all nodes from the GGTT VM to a temporary list. These
 * nodes are expected to represent allocations in the range formerly assigned
 * to the current VF, before the range changed. When the GGTT VM is completely
 * clear of any nodes, they are re-added with shifted offsets.
 *
 * This function cannot fail, because it shifts existing nodes without any
 * additional processing. If the nodes could exist at the old addresses, they
 * will do the same at the new ones. A failure inside this function would
 * indicate that the list of nodes was either already damaged, or that the
 * shift brings the address range outside of valid bounds. Both cases justify
 * an assert rather than an error code.
 */
void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
{
	struct xe_tile *tile __maybe_unused = ggtt->tile;
	struct drm_mm_node *node, *tmpn;
	LIST_HEAD(temp_list_head);

	lockdep_assert_held(&ggtt->lock);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
			xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);

	drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
		drm_mm_remove_node(node);
		list_add(&node->node_list, &temp_list_head);
	}

	list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
		list_del(&node->node_list);
		node->start += shift;
		drm_mm_reserve_node(&ggtt->mm, node);
		xe_tile_assert(tile, drm_mm_node_allocated(node));
	}
}

/**
 * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 * @mm_flags: flags to control the node behavior
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 * To be used in cases where ggtt->lock is already taken.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
			       u32 size, u32 align, u32 mm_flags)
{
	return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
					  mm_flags);
}

/**
 * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
	int ret;

	if (!node || !node->ggtt)
		return -ENOENT;

	mutex_lock(&node->ggtt->lock);
	ret = xe_ggtt_node_insert_locked(node, size, align,
					 DRM_MM_INSERT_HIGH);
	mutex_unlock(&node->ggtt->lock);

	return ret;
}

/**
 * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
 * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
 *
 * This function will allocate the struct %xe_ggtt_node and return its pointer.
 * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
 * or xe_ggtt_node_remove_balloon_locked().
 * Having the %xe_ggtt_node struct allocated doesn't mean that the node is already
 * allocated in GGTT. Only xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(), and
 * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or
 * reserved in GGTT.
 *
 * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
 **/
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
{
	struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);

	if (!node)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
	node->ggtt = ggtt;

	return node;
}

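/*
 * A minimal lifecycle sketch (illustrative only; the size and alignment
 * values are assumptions of this example):
 *
 *	struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
 *
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	err = xe_ggtt_node_insert(node, SZ_4K, SZ_4K);
 *	if (err) {
 *		xe_ggtt_node_fini(node);
 *		return err;
 *	}
 *
 *	... use node->base.start as the GGTT address ...
 *
 *	xe_ggtt_node_remove(node, true);	(also frees the struct)
 */
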
/**
 * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
 * @node: the &xe_ggtt_node to be freed
 *
 * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
 * or xe_ggtt_node_insert_balloon_locked(), and this @node is not going to be reused, then
 * this function needs to be called to free the %xe_ggtt_node struct.
 **/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
{
	kfree(node);
}

/**
 * xe_ggtt_node_allocated - Check if node is allocated in GGTT
 * @node: the &xe_ggtt_node to be inspected
 *
 * Return: True if allocated, False otherwise.
 */
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return false;

	return drm_mm_node_allocated(&node->base);
}

/**
 * xe_ggtt_map_bo - Map the BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @node: the &xe_ggtt_node where this BO is mapped
 * @bo: the &xe_bo to be mapped
 * @pat_index: Which pat_index to use.
 */
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
		    struct xe_bo *bo, u16 pat_index)
{
	u64 start, pte, end;
	struct xe_res_cursor cur;

	if (XE_WARN_ON(!node))
		return;

	start = node->base.start;
	end = start + xe_bo_size(bo);

	pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe_bo_device(bo), bo->ttm.ttm);

		for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
						   pte | xe_res_dma(&cur));
	} else {
		/* Prepend GPU offset */
		pte |= vram_region_gpu_offset(bo->ttm.resource);

		for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
		     cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
			ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
						   pte + cur.start);
	}
}


/**
 * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @bo: the &xe_bo to be mapped
 *
 * This is used to restore a GGTT mapping after suspend.
 */
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];

	mutex_lock(&ggtt->lock);
	xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
	mutex_unlock(&ggtt->lock);
}

static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
				  u64 start, u64 end)
{
	u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
	u8 tile_id = ggtt->tile->id;
	int err;

	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
		/* Someone's already inserted this BO in the GGTT */
		xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));
		return 0;
	}

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));

	bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
	if (IS_ERR(bo->ggtt_node[tile_id])) {
		err = PTR_ERR(bo->ggtt_node[tile_id]);
		bo->ggtt_node[tile_id] = NULL;
		goto out;
	}

	mutex_lock(&ggtt->lock);
	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
					  xe_bo_size(bo), alignment, 0, start, end, 0);
	if (err) {
		xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
		bo->ggtt_node[tile_id] = NULL;
	} else {
		u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
		u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];

		xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
	}
	mutex_unlock(&ggtt->lock);

	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
		xe_ggtt_invalidate(ggtt);

out:
	xe_pm_runtime_put(tile_to_xe(ggtt->tile));

	return err;
}

/**
 * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 * @start: address where it will be inserted
 * @end: end of the range where it will be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}

/**
 * xe_ggtt_insert_bo - Insert BO into GGTT
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}

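/*
 * A minimal usage sketch (illustrative only, assuming an already created and
 * validated BO):
 *
 *	err = xe_ggtt_insert_bo(ggtt, bo);
 *	if (err)
 *		return err;
 *
 *	... access the BO via bo->ggtt_node[tile_id]->base.start ...
 *
 *	xe_ggtt_remove_bo(ggtt, bo);
 */
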
/**
 * xe_ggtt_remove_bo - Remove a BO from the GGTT
 * @ggtt: the &xe_ggtt where node will be removed
 * @bo: the &xe_bo to be removed
 */
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u8 tile_id = ggtt->tile->id;

	/* This BO is not currently in the GGTT */
	if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
		return;

	xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo));

	xe_ggtt_node_remove(bo->ggtt_node[tile_id],
			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}

/**
 * xe_ggtt_largest_hole - Largest GGTT hole
 * @ggtt: the &xe_ggtt that will be inspected
 * @alignment: minimum alignment
 * @spare: if not NULL, in: desired memory size to be spared / out: adjusted possible spare
 *
 * Return: size of the largest continuous GGTT region
 */
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		if (spare)
			*spare -= min3(*spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	return max_hole;
}

#ifdef CONFIG_PCI_IOV
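/*
 * Encode a PTE that carries only the VF identifier: the VFID bits plus the
 * PRESENT bit, which some platforms don't allow VFs to modify themselves.
 */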
static u64 xe_encode_vfid_pte(u16 vfid)
{
	return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
}

static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
{
	u64 start = node->start;
	u64 size = node->size;
	u64 end = start + size - 1;
	u64 pte = xe_encode_vfid_pte(vfid);

	lockdep_assert_held(&ggtt->lock);

	if (!drm_mm_node_allocated(node))
		return;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
		start += XE_PAGE_SIZE;
	}

	xe_ggtt_invalidate(ggtt);
}

/**
 * xe_ggtt_assign - assign a GGTT region to the VF
 * @node: the &xe_ggtt_node to update
 * @vfid: the VF identifier
 *
 * This function is used by the PF driver to assign a GGTT region to the VF.
 * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
 * on some platforms VFs can't modify that either.
 */
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
	mutex_lock(&node->ggtt->lock);
	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
	mutex_unlock(&node->ggtt->lock);
}
#endif


/**
 * xe_ggtt_dump - Dump GGTT for debug
 * @ggtt: the &xe_ggtt to be dumped
 * @p: the &drm_printer helper handle to be used to dump the information
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->lock);
	if (err)
		return err;

	drm_mm_print(&ggtt->mm, p);
	mutex_unlock(&ggtt->lock);
	return err;
}

/**
 * xe_ggtt_print_holes - Print holes
 * @ggtt: the &xe_ggtt to be inspected
 * @alignment: min alignment
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available and return total size available.
 *
 * Return: Total available size.
 */
u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 total = 0;
	char buf[10];

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);

	return total;
}


/**
 * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
 * @ggtt: &xe_ggtt
 * @bo: &xe_bo
 * @pat_index: The pat_index for the PTE.
 *
 * This function returns the pte_flags for a given BO, without the address.
 * It's used for DPT to fill a GGTT mapped BO with a linear lookup table.
 */
u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
			     struct xe_bo *bo, u16 pat_index)
{
	return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
}

/**
 * xe_ggtt_read_pte - Read a PTE from the GGTT
 * @ggtt: &xe_ggtt
 * @offset: the offset for which the mapping should be read.
 *
 * Used by testcases, and by display reading out an inherited bios FB.
 */
u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
{
	return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
}