// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_ggtt.h"

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/**
 * DOC: Global Graphics Translation Table (GGTT)
 *
 * Xe GGTT implements support for a Global Virtual Address space that is used
 * for resources that are accessible to privileged (i.e. kernel-mode) processes,
 * and not tied to a specific user-level process. For example, the Graphics
 * micro-Controller (GuC) and Display Engine (if present) utilize this Global
 * address space.
 *
 * The Global GTT (GGTT) translates from the Global virtual address to a physical
 * address that can be accessed by HW. The GGTT is a flat, single-level table.
 *
 * Xe implements a simplified version of the GGTT, specifically managing only the
 * range that goes from the end of the Write Once Protected Content Memory (WOPCM)
 * layout to a predefined GUC_GGTT_TOP. This approach avoids complications related
 * to the GuC (Graphics Microcontroller) hardware limitations. The GuC address space
 * is limited on both ends of the GGTT, because the GuC shim HW redirects
 * accesses to those addresses to other HW areas instead of going through the
 * GGTT. On the bottom end, the GuC can't access offsets below the WOPCM size,
 * while on the top side the limit is fixed at GUC_GGTT_TOP. To keep things
 * simple, instead of checking each object to see if it is accessed by GuC or
 * not, we just exclude those areas from the allocator. Additionally, to simplify
 * the driver load, we use the maximum WOPCM size in this logic instead of the
 * programmed one, so we don't need to wait until the actual size to be
 * programmed is determined (which requires FW fetch) before initializing the
 * GGTT. These simplifications might waste space in the GGTT (about 20-25 MBs
 * depending on the platform) but we can live with this. Another benefit is that
 * the GuC bootrom can't access anything below the WOPCM max size, so anything
 * the bootrom needs to access (e.g. a RSA key) needs to be placed in the GGTT
 * above the WOPCM max size. Starting the GGTT allocations above the WOPCM max
 * size gives us the correct placement for free.
 */
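
/*
 * A rough sketch of the layout described above (not to scale; exact
 * boundaries are platform dependent):
 *
 *	0 ........ WOPCM max size ..................... GUC_GGTT_TOP ...... end
 *	| excluded |      drm_mm-managed GGTT range      |    excluded      |
 */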

static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
				   u16 pat_index)
{
	u64 pte;

	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
	pte |= XE_PAGE_PRESENT;

	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
		pte |= XE_GGTT_PTE_DM;

	return pte;
}

static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
				    u16 pat_index)
{
	struct xe_device *xe = xe_bo_device(bo);
	u64 pte;

	pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);

	xe_assert(xe, pat_index <= 3);

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;

	return pte;
}
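
/*
 * For instance, a pat_index of 3 (0b11) above sets both XELPG_GGTT_PTE_PAT0
 * and XELPG_GGTT_PTE_PAT1 in the PTE, while a pat_index of 0 sets neither.
 */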

static unsigned int probe_gsm_size(struct pci_dev *pdev)
{
	u16 gmch_ctl, ggms;

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
	ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
	return ggms ? SZ_1M << ggms : 0;
}
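
/*
 * Worked example for the decoding above: a GGMS field of 3 yields a GSM of
 * SZ_1M << 3 = 8 MiB; at 8 bytes per PTE that is 1M entries, mapping
 * 1M * 4 KiB = 4 GiB of GGTT (see the sizing math in xe_ggtt_init_early()).
 */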

static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
	struct xe_gt *gt = XE_WA(ggtt->tile->primary_gt, 22019338487) ? ggtt->tile->primary_gt :
			   ggtt->tile->media_gt;
	u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
	/*
	 * Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
	 * to wait for completion of prior GTT writes before letting this through.
	 * This needs to be done for all GGTT writes originating from the CPU.
	 */
	lockdep_assert_held(&ggtt->lock);

	if ((++ggtt->access_count % max_gtt_writes) == 0) {
		xe_mmio_write32(gt, GMD_ID, 0x0);
		ggtt->access_count = 0;
	}
}

static void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
	xe_tile_assert(ggtt->tile, addr < ggtt->size);

	writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}
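
/*
 * The GSM is indexed per GGTT page: e.g. assuming 4 KiB GGTT pages
 * (XE_PTE_SHIFT == 12), a GGTT address of 0x10000 is written to qword
 * slot 0x10000 >> 12 = 16 of the table.
 */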

static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
	xe_ggtt_set_pte(ggtt, addr, pte);
	ggtt_update_access_counter(ggtt);
}

static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
	u64 end = start + size - 1;
	u64 scratch_pte;

	xe_tile_assert(ggtt->tile, start < end);

	if (ggtt->scratch)
		scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
							  pat_index);
	else
		scratch_pte = 0;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, scratch_pte);
		start += XE_PAGE_SIZE;
	}
}

static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
	struct xe_ggtt *ggtt = arg;

	destroy_workqueue(ggtt->wq);
	mutex_destroy(&ggtt->lock);
	drm_mm_takedown(&ggtt->mm);
}

static void ggtt_fini(void *arg)
{
	struct xe_ggtt *ggtt = arg;

	ggtt->scratch = NULL;
}

static void primelockdep(struct xe_ggtt *ggtt)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ggtt->lock);
	fs_reclaim_release(GFP_KERNEL);
}

static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_ggtt_pte_encode_bo,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
	.ggtt_set_pte = xe_ggtt_set_pte,
};

static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
	.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
	.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};

/**
 * xe_ggtt_init_early - Early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * It allows the creation of new mappings usable by the GuC.
 * Mappings are not usable by the HW engines, as the GGTT doesn't have a
 * scratch page nor an initial clear done to it yet. That will happen in the
 * regular, non-early GGTT initialization.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int gsm_size;
	int err;

	if (IS_SRIOV_VF(xe))
		gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
	else
		gsm_size = probe_gsm_size(pdev);

	if (gsm_size == 0) {
		drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
		return -ENOMEM;
	}

	ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
	/* Each 8-byte PTE in the GSM maps one XE_PAGE_SIZE page of GGTT */
	ggtt->size = (gsm_size / 8) * (u64) XE_PAGE_SIZE;

	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		ggtt->flags |= XE_GGTT_FLAGS_64K;

	if (ggtt->size > GUC_GGTT_TOP)
		ggtt->size = GUC_GGTT_TOP;

	if (GRAPHICS_VERx100(xe) >= 1270)
		ggtt->pt_ops = (ggtt->tile->media_gt &&
			       XE_WA(ggtt->tile->media_gt, 22019338487)) ||
			       XE_WA(ggtt->tile->primary_gt, 22019338487) ?
			       &xelpg_pt_wa_ops : &xelpg_pt_ops;
	else
		ggtt->pt_ops = &xelp_pt_ops;

	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0);

	drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
		    ggtt->size - xe_wopcm_size(xe));
	mutex_init(&ggtt->lock);
	primelockdep(ggtt);

	err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
	if (err)
		return err;

	if (IS_SRIOV_VF(xe)) {
		err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
		if (err)
			return err;
	}

	return 0;
}
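
/*
 * A minimal sketch of the intended initialization order, assuming a probe
 * flow that has a tile's &xe_ggtt at hand:
 *
 *	err = xe_ggtt_init_early(ggtt);		// GuC-usable mappings only
 *	...					// e.g. FW fetch, GuC setup
 *	err = xe_ggtt_init(ggtt);		// adds scratch page + initial clear
 */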

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);

static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
{
	struct drm_mm_node *hole;
	u64 start, end;

	/* Display may have allocated inside ggtt, so be careful with clearing here */
	mutex_lock(&ggtt->lock);
	drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
		xe_ggtt_clear(ggtt, start, end - start);

	xe_ggtt_invalidate(ggtt);
	mutex_unlock(&ggtt->lock);
}

static void ggtt_node_remove(struct xe_ggtt_node *node)
{
	struct xe_ggtt *ggtt = node->ggtt;
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	bool bound;
	int idx;

	bound = drm_dev_enter(&xe->drm, &idx);

	mutex_lock(&ggtt->lock);
	if (bound)
		xe_ggtt_clear(ggtt, node->base.start, node->base.size);
	drm_mm_remove_node(&node->base);
	node->base.size = 0;
	mutex_unlock(&ggtt->lock);

	if (!bound)
		goto free_node;

	if (node->invalidate_on_remove)
		xe_ggtt_invalidate(ggtt);

	drm_dev_exit(idx);

free_node:
	xe_ggtt_node_fini(node);
}

static void ggtt_node_remove_work_func(struct work_struct *work)
{
	struct xe_ggtt_node *node = container_of(work, typeof(*node),
						 delayed_removal_work);
	struct xe_device *xe = tile_to_xe(node->ggtt->tile);

	xe_pm_runtime_get(xe);
	ggtt_node_remove(node);
	xe_pm_runtime_put(xe);
}

/**
 * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
 * @node: the &xe_ggtt_node to be removed
 * @invalidate: if node needs invalidation upon removal
 */
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
{
	struct xe_ggtt *ggtt;
	struct xe_device *xe;

	if (!node || !node->ggtt)
		return;

	ggtt = node->ggtt;
	xe = tile_to_xe(ggtt->tile);

	node->invalidate_on_remove = invalidate;

	if (xe_pm_runtime_get_if_active(xe)) {
		ggtt_node_remove(node);
		xe_pm_runtime_put(xe);
	} else {
		queue_work(ggtt->wq, &node->delayed_removal_work);
	}
}

/**
 * xe_ggtt_init - Regular non-early GGTT initialization
 * @ggtt: the &xe_ggtt to be initialized
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);
	unsigned int flags;
	int err;

	/*
	 * So we don't need to worry about 64K GGTT layout when dealing with
	 * scratch entries, rather keep the scratch page in system memory on
	 * platforms where 64K pages are needed for VRAM.
	 */
	flags = XE_BO_FLAG_PINNED;
	if (ggtt->flags & XE_GGTT_FLAGS_64K)
		flags |= XE_BO_FLAG_SYSTEM;
	else
		flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);

	ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
	if (IS_ERR(ggtt->scratch)) {
		err = PTR_ERR(ggtt->scratch);
		goto err;
	}

	xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);

	xe_ggtt_initial_clear(ggtt);

	return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
	ggtt->scratch = NULL;
	return err;
}

static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
{
	int err;

	if (!gt)
		return;

	err = xe_gt_tlb_invalidation_ggtt(gt);
	if (err)
		drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
}

static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
	struct xe_device *xe = tile_to_xe(ggtt->tile);

	/*
	 * XXX: Barrier for GGTT pages. Unsure exactly why this is required but
	 * without it LNL is having issues with the GuC reading the scratch page
	 * vs. the correct GGTT page. Not particularly a hot code path, so
	 * blindly do a mmio read here, which results in the GuC reading the
	 * correct GGTT page.
	 */
	xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);

	/* Each GT in a tile has its own TLB to cache GGTT lookups */
	ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
	ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}

static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
			      const struct drm_mm_node *node, const char *description)
{
	char buf[10];

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
		xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
			  node->start, node->start + node->size, buf, description);
	}
}

/**
 * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
 * @node: the &xe_ggtt_node to hold reserved GGTT node
 * @start: the starting GGTT address of the reserved region
 * @end: the end GGTT address of the reserved region
 *
 * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
{
	struct xe_ggtt *ggtt = node->ggtt;
	int err;

	xe_tile_assert(ggtt->tile, start < end);
	xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
	xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));

	node->base.color = 0;
	node->base.start = start;
	node->base.size = end - start;

	mutex_lock(&ggtt->lock);
	err = drm_mm_reserve_node(&ggtt->mm, &node->base);
	mutex_unlock(&ggtt->lock);

	if (xe_gt_WARN(ggtt->tile->primary_gt, err,
		       "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
		       node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
		return err;

	xe_ggtt_dump_node(ggtt, &node->base, "balloon");
	return 0;
}
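
/*
 * A hedged usage sketch for ballooning: reserving a range that must stay
 * unavailable to the allocator (the start/end values are illustrative only):
 *
 *	struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
 *
 *	if (!IS_ERR(node) && !xe_ggtt_node_insert_balloon(node, SZ_64M, SZ_128M))
 *		...			// [64M, 128M) is now unavailable
 *	...
 *	xe_ggtt_node_remove_balloon(node);	// also frees the node struct
 */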

/**
 * xe_ggtt_node_remove_balloon - release a reserved GGTT region
 * @node: the &xe_ggtt_node with reserved GGTT region
 *
 * See xe_ggtt_node_insert_balloon() for details.
 */
void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return;

	if (!drm_mm_node_allocated(&node->base))
		goto free_node;

	xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");

	mutex_lock(&node->ggtt->lock);
	drm_mm_remove_node(&node->base);
	mutex_unlock(&node->ggtt->lock);

free_node:
	xe_ggtt_node_fini(node);
}

/**
 * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 * @mm_flags: flags to control the node behavior
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 * To be used in cases where ggtt->lock is already taken.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
			       u32 size, u32 align, u32 mm_flags)
{
	return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
					  mm_flags);
}

/**
 * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
 * @node: the &xe_ggtt_node to be inserted
 * @size: size of the node
 * @align: alignment constraint of the node
 *
 * It cannot be called without first having called xe_ggtt_init() once.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
	int ret;

	if (!node || !node->ggtt)
		return -ENOENT;

	mutex_lock(&node->ggtt->lock);
	ret = xe_ggtt_node_insert_locked(node, size, align,
					 DRM_MM_INSERT_HIGH);
	mutex_unlock(&node->ggtt->lock);

	return ret;
}

/**
 * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
 * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
 *
 * This function will allocate the struct %xe_ggtt_node and return its pointer.
 * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
 * or xe_ggtt_node_remove_balloon().
 * Having the %xe_ggtt_node struct allocated doesn't mean that the node is already
 * allocated in GGTT. Only xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
 * and xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved
 * in GGTT.
 *
 * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
 **/
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
{
	struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);

	if (!node)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
	node->ggtt = ggtt;

	return node;
}
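
/*
 * A minimal lifecycle sketch for a plain (non-balloon) node, with error
 * handling elided and illustrative sizes:
 *
 *	struct xe_ggtt_node *node = xe_ggtt_node_init(ggtt);
 *
 *	if (!IS_ERR(node) && !xe_ggtt_node_insert(node, SZ_2M, SZ_64K))
 *		...			// node->base.start holds the GGTT offset
 *	...
 *	xe_ggtt_node_remove(node, false);	// also frees the node struct
 */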

/**
 * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
 * @node: the &xe_ggtt_node to be freed
 *
 * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
 * or xe_ggtt_node_insert_balloon(), and this @node is not going to be reused, then
 * this function needs to be called to free the %xe_ggtt_node struct.
 **/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
{
	kfree(node);
}

/**
 * xe_ggtt_node_allocated - Check if node is allocated in GGTT
 * @node: the &xe_ggtt_node to be inspected
 *
 * Return: True if allocated, False otherwise.
 */
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
{
	if (!node || !node->ggtt)
		return false;

	return drm_mm_node_allocated(&node->base);
}

/**
 * xe_ggtt_map_bo - Map the BO into GGTT
 * @ggtt: the &xe_ggtt where node will be mapped
 * @bo: the &xe_bo to be mapped
 */
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
	u64 start;
	u64 offset, pte;

	if (XE_WARN_ON(!bo->ggtt_node))
		return;

	start = bo->ggtt_node->base.start;

	for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
		pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
		ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
	}
}

static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
				  u64 start, u64 end)
{
	int err;
	u64 alignment = XE_PAGE_SIZE;

	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		alignment = SZ_64K;

	if (XE_WARN_ON(bo->ggtt_node)) {
		/* Someone's already inserted this BO in the GGTT */
		xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
		return 0;
	}

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));

	bo->ggtt_node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(bo->ggtt_node)) {
		err = PTR_ERR(bo->ggtt_node);
		bo->ggtt_node = NULL;
		goto out;
	}

	mutex_lock(&ggtt->lock);
	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
					  alignment, 0, start, end, 0);
	if (err) {
		xe_ggtt_node_fini(bo->ggtt_node);
		bo->ggtt_node = NULL;
	} else {
		xe_ggtt_map_bo(ggtt, bo);
	}
	mutex_unlock(&ggtt->lock);

	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
		xe_ggtt_invalidate(ggtt);

out:
	xe_pm_runtime_put(tile_to_xe(ggtt->tile));

	return err;
}

/**
 * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 * @start: address where it will be inserted
 * @end: end of the range where it will be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
			 u64 start, u64 end)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}

/**
 * xe_ggtt_insert_bo - Insert BO into GGTT
 * @ggtt: the &xe_ggtt where bo will be inserted
 * @bo: the &xe_bo to be inserted
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}
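
/*
 * A hedged usage sketch pairing xe_ggtt_insert_bo() with xe_ggtt_remove_bo()
 * below (bo is illustrative):
 *
 *	if (!xe_ggtt_insert_bo(ggtt, bo))	// placed anywhere in the GGTT
 *		...
 *	xe_ggtt_remove_bo(ggtt, bo);
 *
 * xe_ggtt_insert_bo_at() behaves the same but constrains placement to the
 * caller's [start, end) range.
 */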

/**
 * xe_ggtt_remove_bo - Remove a BO from the GGTT
 * @ggtt: the &xe_ggtt where node will be removed
 * @bo: the &xe_bo to be removed
 */
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
	if (XE_WARN_ON(!bo->ggtt_node))
		return;

	/* This BO is not currently in the GGTT */
	xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);

	xe_ggtt_node_remove(bo->ggtt_node,
			    bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}

/**
 * xe_ggtt_largest_hole - Largest GGTT hole
 * @ggtt: the &xe_ggtt that will be inspected
 * @alignment: minimum alignment
 * @spare: If not NULL: in: desired memory size to be spared / out: Adjusted possible spare
 *
 * Return: size of the largest contiguous GGTT region
 */
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 max_hole = 0;

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		if (spare)
			*spare -= min3(*spare, hole_size, max_hole);
		max_hole = max(max_hole, hole_size);
	}

	mutex_unlock(&ggtt->lock);

	return max_hole;
}
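
/*
 * Worked example for the @spare adjustment above, assuming *spare = 3M and
 * aligned holes of 1M, 2M and 4M encountered in that order: the 1M hole
 * subtracts nothing (max_hole is still 0), the 2M hole subtracts
 * min3(3M, 2M, 1M) = 1M, and the 4M hole subtracts min3(2M, 4M, 2M) = 2M,
 * leaving *spare = 0 while max_hole ends up as 4M.
 */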

#ifdef CONFIG_PCI_IOV
static u64 xe_encode_vfid_pte(u16 vfid)
{
	return FIELD_PREP(GGTT_PTE_VFID, vfid) | XE_PAGE_PRESENT;
}
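
/*
 * For example, assuming GGTT_PTE_VFID covers PTE bits 11:2 as noted in
 * xe_ggtt_assign() below, vfid 5 encodes to (5 << 2) | 1 = 0x15.
 */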

static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
{
	u64 start = node->start;
	u64 size = node->size;
	u64 end = start + size - 1;
	u64 pte = xe_encode_vfid_pte(vfid);

	lockdep_assert_held(&ggtt->lock);

	if (!drm_mm_node_allocated(node))
		return;

	while (start < end) {
		ggtt->pt_ops->ggtt_set_pte(ggtt, start, pte);
		start += XE_PAGE_SIZE;
	}

	xe_ggtt_invalidate(ggtt);
}

/**
 * xe_ggtt_assign - assign a GGTT region to the VF
 * @node: the &xe_ggtt_node to update
 * @vfid: the VF identifier
 *
 * This function is used by the PF driver to assign a GGTT region to the VF.
 * In addition to the PTE's VFID bits 11:2, the PRESENT bit 0 is also set, as
 * on some platforms VFs can't modify that either.
 */
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
	mutex_lock(&node->ggtt->lock);
	xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
	mutex_unlock(&node->ggtt->lock);
}
#endif

/**
 * xe_ggtt_dump - Dump GGTT for debug
 * @ggtt: the &xe_ggtt to be dumped
 * @p: the &drm_printer helper handle to be used to dump the information
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->lock);
	if (err)
		return err;

	drm_mm_print(&ggtt->mm, p);
	mutex_unlock(&ggtt->lock);
	return err;
}

/**
 * xe_ggtt_print_holes - Print holes
 * @ggtt: the &xe_ggtt to be inspected
 * @alignment: min alignment
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available and return total size available.
 *
 * Return: Total available size.
 */
u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
{
	const struct drm_mm *mm = &ggtt->mm;
	const struct drm_mm_node *entry;
	u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
	u64 hole_start, hole_end, hole_size;
	u64 total = 0;
	char buf[10];

	mutex_lock(&ggtt->lock);

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		hole_start = max(hole_start, hole_min_start);
		hole_start = ALIGN(hole_start, alignment);
		hole_end = ALIGN_DOWN(hole_end, alignment);
		if (hole_start >= hole_end)
			continue;
		hole_size = hole_end - hole_start;
		total += hole_size;

		string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
			   hole_start, hole_end - 1, buf);
	}

	mutex_unlock(&ggtt->lock);

	return total;
}