// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/align.h>

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_mert_regs.h"

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mert.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_tile_sriov_printk.h"
#include "xe_tlb_inval.h"

/**
 * DOC: Local Memory Translation Table
 *
 * The Local Memory Translation Table (LMTT) provides an additional layer of
 * abstraction when a Virtual Function (VF) accesses the device Local Memory
 * (VRAM).
 *
 * The Root LMTT Page Directory contains one entry for each VF. Entries are
 * indexed by the function number (1-based, index 0 is unused).
 *
 * See `Two-Level LMTT Structure`_ and `Multi-Level LMTT Structure`_.
 */
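
/*
 * Simplified translation sketch (the exact bit layout depends on the
 * variant implemented by the selected ops): a VF access to a LMEM offset
 * selects the VF's entry in the Root PD by function number, then each
 * lower-level page table is indexed with the offset bits covered by that
 * level, until a level-0 (leaf) PTE supplies the backing VRAM address.
 */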

#define lmtt_assert(lmtt, condition)	xe_tile_assert(lmtt_to_tile(lmtt), condition)
#define lmtt_debug(lmtt, msg...)	xe_tile_sriov_dbg_verbose(lmtt_to_tile(lmtt), "LMTT: " msg)

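/*
 * Platforms with graphics IP version 12.60 or newer use the multi-level
 * LMTT layout; older LMTT-capable platforms use the two-level layout.
 */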
static bool xe_has_multi_level_lmtt(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1260;
}

static struct xe_tile *lmtt_to_tile(struct xe_lmtt *lmtt)
{
	return container_of(lmtt, struct xe_tile, sriov.pf.lmtt);
}

static struct xe_device *lmtt_to_xe(struct xe_lmtt *lmtt)
{
	return tile_to_xe(lmtt_to_tile(lmtt));
}

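/*
 * The LMTT page size is the address range covered by a single level-0
 * (leaf) PTE, derived from the leaf PTE shift of the active variant.
 */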
static u64 lmtt_page_size(struct xe_lmtt *lmtt)
{
	return BIT_ULL(lmtt->ops->lmtt_pte_shift(0));
}

/**
 * xe_lmtt_page_size() - Get LMTT page size.
 * @lmtt: the &xe_lmtt
 *
 * This function shall be called only by the PF.
 *
 * Return: LMTT page size.
 */
u64 xe_lmtt_page_size(struct xe_lmtt *lmtt)
{
	lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
	lmtt_assert(lmtt, xe_device_has_lmtt(lmtt_to_xe(lmtt)));
	lmtt_assert(lmtt, lmtt->ops);

	return lmtt_page_size(lmtt);
}

static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level)
{
	unsigned int num_entries = level ? lmtt->ops->lmtt_pte_num(level) : 0;
	struct xe_lmtt_pt *pt;
	struct xe_bo *bo;
	int err;

	pt = kzalloc(struct_size(pt, entries, num_entries), GFP_KERNEL);
	if (!pt) {
		err = -ENOMEM;
		goto out;
	}

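	/*
	 * The backing store is allocated from this tile's VRAM on discrete
	 * platforms; XE_BO_FLAG_NEEDS_64K requests 64K granules, matching
	 * the 64K-aligned addresses programmed for LMTT pointers (see
	 * lmtt_setup_dir_ptr()).
	 */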
	bo = xe_bo_create_pin_map_novm(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt),
				       PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
						  lmtt->ops->lmtt_pte_num(level)),
				       ttm_bo_type_kernel,
				       XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
				       XE_BO_FLAG_NEEDS_64K, false);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_free_pt;
	}

	lmtt_assert(lmtt, xe_bo_is_vram(bo));
	lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));

	xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));

	pt->level = level;
	pt->bo = bo;
	return pt;

out_free_pt:
	kfree(pt);
out:
	return ERR_PTR(err);
}

static void lmtt_pt_free(struct xe_lmtt_pt *pt)
{
	lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%#llx\n",
		   pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));

	xe_bo_unpin_map_no_vm(pt->bo);
	kfree(pt);
}

static int lmtt_init_pd(struct xe_lmtt *lmtt)
{
	struct xe_lmtt_pt *pd;

	lmtt_assert(lmtt, !lmtt->pd);
	lmtt_assert(lmtt, lmtt->ops->lmtt_root_pd_level());

	pd = lmtt_pt_alloc(lmtt, lmtt->ops->lmtt_root_pd_level());
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	lmtt->pd = pd;
	return 0;
}

static void lmtt_fini_pd(struct xe_lmtt *lmtt)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	unsigned int num_entries = lmtt->ops->lmtt_pte_num(pd->level);
	unsigned int n = 0;

	/* make sure we don't leak */
	for (n = 0; n < num_entries; n++)
		lmtt_assert(lmtt, !pd->entries[n]);

	lmtt->pd = NULL;
	lmtt_pt_free(pd);
}

static void fini_lmtt(struct drm_device *drm, void *arg)
{
	struct xe_lmtt *lmtt = arg;

	lmtt_assert(lmtt, !(!!lmtt->ops ^ !!lmtt->pd));

	if (!lmtt->pd)
		return;

	lmtt_fini_pd(lmtt);
	lmtt->ops = NULL;
}

/**
 * xe_lmtt_init - LMTT software initialization.
 * @lmtt: the &xe_lmtt to initialize
 *
 * The LMTT initialization requires two steps.
 *
 * The xe_lmtt_init() checks if LMTT is required on the current device and
 * selects and initializes the proper variant of the LMTT Root Directory.
 * Currently supported variants are `Two-Level LMTT Structure`_ and
 * `Multi-Level LMTT Structure`_.
 *
 * In the next step, xe_lmtt_init_hw() will register this directory on the
 * hardware.
 *
 * Notes:
 * The LMTT allocations are managed and will be implicitly released on driver unload.
 * This function shall be called only once and only when running as a PF driver.
 * Any LMTT initialization failure should block enabling VFs.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_init(struct xe_lmtt *lmtt)
{
	struct xe_device *xe = lmtt_to_xe(lmtt);
	int err;

	lmtt_assert(lmtt, IS_SRIOV_PF(xe));
	lmtt_assert(lmtt, !lmtt->ops);

	if (!xe_device_has_lmtt(xe))
		return 0;

	if (xe_has_multi_level_lmtt(xe))
		lmtt->ops = &lmtt_ml_ops;
	else
		lmtt->ops = &lmtt_2l_ops;

	err = lmtt_init_pd(lmtt);
	if (unlikely(err))
		goto fail;

	return drmm_add_action_or_reset(&xe->drm, fini_lmtt, lmtt);

fail:
	lmtt->ops = NULL;
	return err;
}
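
/*
 * Illustrative call order (a sketch only; the real call sites live in the
 * PF probe and reset paths, and the "tile" local below is hypothetical):
 *
 *	err = xe_lmtt_init(&tile->sriov.pf.lmtt);	// once, at probe
 *	...
 *	xe_lmtt_init_hw(&tile->sriov.pf.lmtt);		// after every HW reset
 */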

static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
{
	struct xe_tile *tile = lmtt_to_tile(lmtt);
	struct xe_device *xe = tile_to_xe(tile);
	dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);
	struct xe_gt *gt;
	u32 config;
	u8 id;

	lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
	lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
	lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));

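	/*
	 * The LMEM_CFG registers take the root PD address in 64K granules
	 * together with the LMEM enable bit; the alignment asserted above
	 * makes the division by SZ_64K exact.
	 */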
	config = LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K);

	for_each_gt_on_tile(gt, tile, id)
		xe_mmio_write32(&gt->mmio,
				GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
				config);

	if (xe_device_has_mert(xe) && xe_tile_is_root(tile))
		xe_mmio_write32(&tile->mmio, MERT_LMEM_CFG, config);
}

/**
 * xe_lmtt_init_hw - Perform LMTT hardware initialization.
 * @lmtt: the &xe_lmtt to initialize
 *
 * This function is the second step of the LMTT initialization.
 * It registers the LMTT Root Directory prepared in xe_lmtt_init().
 *
 * This function shall be called after every hardware reset.
 * This function shall be called only when running as a PF driver.
 */
void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
{
	if (!lmtt->pd)
		return;

	lmtt_setup_dir_ptr(lmtt);
}

static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
	struct xe_tlb_inval_fence fences[XE_MAX_GT_PER_TILE];
	struct xe_tlb_inval_fence *fence = fences;
	struct xe_tile *tile = lmtt_to_tile(lmtt);
	struct xe_gt *gt;
	int result = 0;
	int err;
	u8 id;

	for_each_gt_on_tile(gt, tile, id) {
		xe_tlb_inval_fence_init(&gt->tlb_inval, fence, true);
		err = xe_tlb_inval_all(&gt->tlb_inval, fence);
		result = result ?: err;
		fence++;
	}

	lmtt_debug(lmtt, "num_fences=%d err=%d\n", (int)(fence - fences), result);

	/*
	 * It is fine to wait for all fences, even for those which cover an
	 * invalidation request that failed, as such a fence should already
	 * be marked as signaled.
	 */
	fence = fences;
	for_each_gt_on_tile(gt, tile, id)
		xe_tlb_inval_fence_wait(fence++);

	return result;
}

/**
 * xe_lmtt_invalidate_hw - Invalidate LMTT hardware.
 * @lmtt: the &xe_lmtt to invalidate
 *
 * Send requests to all GuCs on this tile to invalidate all TLBs.
 * If the platform has a standalone MERT, also invalidate MERT's TLB.
 *
 * This function should be called only when running as a PF driver.
 */
void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
	struct xe_tile *tile = lmtt_to_tile(lmtt);
	struct xe_device *xe = lmtt_to_xe(lmtt);
	int err;

	lmtt_assert(lmtt, IS_SRIOV_PF(xe));

	err = lmtt_invalidate_hw(lmtt);
	if (err)
		xe_tile_sriov_err(tile, "LMTT invalidation failed (%pe)",
				  ERR_PTR(err));

	if (xe_device_has_mert(xe) && xe_tile_is_root(tile)) {
		err = xe_mert_invalidate_lmtt(xe);
		if (err)
			xe_tile_sriov_err(tile, "MERT LMTT invalidation failed (%pe)",
					  ERR_PTR(err));
	}
}

static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
			   u64 pte, unsigned int idx)
{
	unsigned int level = pt->level;

	lmtt_assert(lmtt, idx <= lmtt->ops->lmtt_pte_num(level));
	lmtt_debug(lmtt, "WRITE level=%u index=%u pte=%#llx\n", level, idx, pte);

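	/*
	 * The asserts below check that a non-null PTE is only ever written
	 * over a clear slot; a live entry must be cleared (and the TLBs
	 * invalidated) before its slot can be reused.
	 */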
	switch (lmtt->ops->lmtt_pte_size(level)) {
	case sizeof(u32):
		lmtt_assert(lmtt, !overflows_type(pte, u32));
		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));

		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
		break;
	case sizeof(u64):
		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));

		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
		break;
	default:
		lmtt_assert(lmtt, !!!"invalid pte size");
	}
}

static void lmtt_destroy_pt(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd)
{
	unsigned int num_entries = pd->level ? lmtt->ops->lmtt_pte_num(pd->level) : 0;
	struct xe_lmtt_pt *pt;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		pt = pd->entries[i];
		pd->entries[i] = NULL;
		if (!pt)
			continue;

		lmtt_destroy_pt(lmtt, pt);
	}

	lmtt_pt_free(pd);
}

static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	struct xe_lmtt_pt *pt;

	pt = pd->entries[vfid];
	pd->entries[vfid] = NULL;
	if (!pt)
		return;

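	/*
	 * Clear the VF's root PDE and flush the TLBs before tearing down
	 * the tables below it, so the hardware never walks freed memory.
	 */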
	lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);
	lmtt_invalidate_hw(lmtt);

	lmtt_assert(lmtt, pd->level > 0);
	lmtt_assert(lmtt, pt->level == pd->level - 1);
	lmtt_destroy_pt(lmtt, pt);
}

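/*
 * Recursively build the page tables covering [start, end) below @pd: each
 * loop iteration allocates one child table, links it in with a PDE, and
 * descends until the level-0 tables are in place.
 */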
static int __lmtt_alloc_range(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd,
			      u64 start, u64 end)
{
	u64 pte_addr_shift = BIT_ULL(lmtt->ops->lmtt_pte_shift(pd->level));
	u64 offset;
	int err;

	lmtt_assert(lmtt, pd->level > 0);

	offset = start;
	while (offset < end) {
		struct xe_lmtt_pt *pt;
		u64 next, pde, pt_addr;
		unsigned int idx;

		pt = lmtt_pt_alloc(lmtt, pd->level - 1);
		if (IS_ERR(pt))
			return PTR_ERR(pt);

		pt_addr = xe_bo_main_addr(pt->bo, XE_PAGE_SIZE);

		idx = lmtt->ops->lmtt_pte_index(offset, pd->level);
		pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);

		lmtt_write_pte(lmtt, pd, pde, idx);

		pd->entries[idx] = pt;

		next = min(end, round_up(offset + 1, pte_addr_shift));

		if (pt->level != 0) {
			err = __lmtt_alloc_range(lmtt, pt, offset, next);
			if (err)
				return err;
		}

		offset = next;
	}

	return 0;
}

static int lmtt_alloc_range(struct xe_lmtt *lmtt, unsigned int vfid, u64 start, u64 end)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	struct xe_lmtt_pt *pt;
	u64 pt_addr;
	u64 pde;
	int err;

	lmtt_assert(lmtt, pd->level > 0);
	lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
	lmtt_assert(lmtt, IS_ALIGNED(start, lmtt_page_size(lmtt)));
	lmtt_assert(lmtt, IS_ALIGNED(end, lmtt_page_size(lmtt)));

	if (pd->entries[vfid])
		return -ENOTEMPTY;

	pt = lmtt_pt_alloc(lmtt, pd->level - 1);
	if (IS_ERR(pt))
		return PTR_ERR(pt);

	pt_addr = xe_bo_main_addr(pt->bo, XE_PAGE_SIZE);

	pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);

	lmtt_write_pte(lmtt, pd, pde, vfid);

	pd->entries[vfid] = pt;

	if (pt->level != 0) {
		err = __lmtt_alloc_range(lmtt, pt, start, end);
		if (err)
			goto out_free_pt;
	}

	return 0;

out_free_pt:
	lmtt_pt_free(pt);
	return err;
}

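/*
 * Walk from the VF's root entry down to the level-0 (leaf) page table
 * covering @addr, consuming the address bits of each level on the way.
 */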
static struct xe_lmtt_pt *lmtt_leaf_pt(struct xe_lmtt *lmtt, unsigned int vfid, u64 addr)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	struct xe_lmtt_pt *pt;

	lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
	pt = pd->entries[vfid];

	while (pt->level) {
		lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
			    lmtt->ops->lmtt_pte_num(pt->level));

		pt = pt->entries[lmtt->ops->lmtt_pte_index(addr, pt->level)];

		addr >>= lmtt->ops->lmtt_pte_shift(pt->level);
	}

	lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
		    lmtt->ops->lmtt_pte_num(pt->level));
	lmtt_assert(lmtt, pt->level != pd->level);
	lmtt_assert(lmtt, pt->level == 0);
	return pt;
}

static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 start)
{
	u64 page_size = lmtt_page_size(lmtt);
	struct xe_res_cursor cur;
	struct xe_lmtt_pt *pt;
	u64 addr, vram_offset;

	lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
	lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size));
	lmtt_assert(lmtt, xe_bo_is_vram(bo));

	vram_offset = vram_region_gpu_offset(bo->ttm.resource);
	xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
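	/*
	 * Walk the BO's VRAM allocation in LMTT-page-sized chunks, writing
	 * one leaf PTE per chunk as the VF offset in @start advances.
	 */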
	while (cur.remaining) {
		addr = xe_res_dma(&cur);
		addr += vram_offset; /* XXX */

		pt = lmtt_leaf_pt(lmtt, vfid, start);

		lmtt_write_pte(lmtt, pt, lmtt->ops->lmtt_pte_encode(addr, 0),
					 lmtt->ops->lmtt_pte_index(start, 0));

		xe_res_next(&cur, page_size);
		start += page_size;
	}
}

/**
 * xe_lmtt_prepare_pages - Create VF's LMTT Page Tables.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 * @range: top range of LMEM offset to be supported
 *
 * This function creates empty LMTT page tables for the given VF to support
 * up to the maximum @range LMEM offset. The LMTT page tables created by this
 * function must be released using the xe_lmtt_drop_pages() function.
 *
 * Notes:
 * This function shall be called only after successful LMTT initialization.
 * See xe_lmtt_init().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	return lmtt_alloc_range(lmtt, vfid, 0, range);
}

/**
 * xe_lmtt_populate_pages - Update VF's LMTT Page Table Entries.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 * @bo: the buffer object with LMEM allocation to be mapped
 * @offset: the offset at which @bo should be mapped
 *
 * This function updates the VF's LMTT entries to use the given buffer object
 * as a backing store.
 *
 * Notes:
 * This function shall be called only after successful preparation of the
 * VF's LMTT Page Tables. See xe_lmtt_prepare_pages().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	lmtt_insert_bo(lmtt, vfid, bo, offset);
	return 0;
}

/**
 * xe_lmtt_drop_pages - Remove VF's LMTT Pages.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 *
 * This function removes all LMTT Page Tables prepared by xe_lmtt_prepare_pages().
 *
 * This function shall be called only after successful LMTT initialization.
 * See xe_lmtt_init().
 */
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	lmtt_drop_pages(lmtt, vfid);
}
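
/*
 * Illustrative per-VF lifecycle (a sketch only; the real call sites are in
 * the PF provisioning code, and the locals below are hypothetical):
 *
 *	err = xe_lmtt_prepare_pages(lmtt, vfid, vf_lmem_size);
 *	if (!err)
 *		err = xe_lmtt_populate_pages(lmtt, vfid, vf_lmem_bo, 0);
 *	...
 *	xe_lmtt_drop_pages(lmtt, vfid);		// on VF teardown
 */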

/**
 * xe_lmtt_estimate_pt_size - Estimate size of LMTT PT allocations.
 * @lmtt: the &xe_lmtt
 * @size: the size of the LMEM to be mapped over LMTT (including any offset)
 *
 * This function shall be called only by the PF.
 *
 * Return: size of the PT allocation(s) needed to support the given LMEM size.
 */
u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
{
	unsigned int level = 0;
	u64 pt_size;

	lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
	lmtt_assert(lmtt, xe_device_has_lmtt(lmtt_to_xe(lmtt)));
	lmtt_assert(lmtt, lmtt->ops);

	pt_size = PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
			     lmtt->ops->lmtt_pte_num(level));

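	/*
	 * Each level below the root PD multiplies the accumulated size by
	 * the number of entries used at that level (highest index plus one)
	 * and adds the size of one table at that level itself.
	 */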
	while (++level < lmtt->ops->lmtt_root_pd_level()) {
		pt_size *= lmtt->ops->lmtt_pte_index(size, level) + 1;
		pt_size += PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
				      lmtt->ops->lmtt_pte_num(level));
	}

	return pt_size;
}

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_lmtt_test.c"
#endif