// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/align.h>

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"

/**
 * DOC: Local Memory Translation Table
 *
 * The Local Memory Translation Table (LMTT) provides an additional layer of
 * abstraction when a Virtual Function (VF) accesses device Local Memory (VRAM).
 *
 * The Root LMTT Page Directory contains one entry for each VF. Entries are
 * indexed by the function number (1-based, index 0 is unused).
 *
 * See `Two-Level LMTT Structure`_ and `Multi-Level LMTT Structure`_.
 */
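
/*
 * For illustration, a rough sketch of how a VF's LMEM offset is resolved
 * through the tables built in this file (helper names are the ones defined
 * below; per-level PTE sizes, counts and shifts come from the selected
 * lmtt_2l_ops/lmtt_ml_ops variant):
 *
 *	root PD --[vfid]--------------------------------> next level PD/PT
 *	PD/PT --[lmtt->ops->lmtt_pte_index(addr, level)]-> lower level PT
 *	level-0 PT --[lmtt->ops->lmtt_pte_index(addr, 0)]-> leaf PTE
 *	leaf PTE == lmtt->ops->lmtt_pte_encode(vram_addr, 0), see lmtt_insert_bo()
 */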

#define lmtt_assert(lmtt, condition)	xe_tile_assert(lmtt_to_tile(lmtt), condition)
#define lmtt_debug(lmtt, msg...)	xe_sriov_dbg_verbose(lmtt_to_xe(lmtt), "LMTT: " msg)

static bool xe_has_multi_level_lmtt(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1260;
}

static struct xe_tile *lmtt_to_tile(struct xe_lmtt *lmtt)
{
	return container_of(lmtt, struct xe_tile, sriov.pf.lmtt);
}

static struct xe_device *lmtt_to_xe(struct xe_lmtt *lmtt)
{
	return tile_to_xe(lmtt_to_tile(lmtt));
}

static u64 lmtt_page_size(struct xe_lmtt *lmtt)
{
	return BIT_ULL(lmtt->ops->lmtt_pte_shift(0));
}

static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level)
{
	unsigned int num_entries = level ? lmtt->ops->lmtt_pte_num(level) : 0;
	struct xe_lmtt_pt *pt;
	struct xe_bo *bo;
	int err;

	pt = kzalloc(struct_size(pt, entries, num_entries), GFP_KERNEL);
	if (!pt) {
		err = -ENOMEM;
		goto out;
	}

	bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
				  PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
					     lmtt->ops->lmtt_pte_num(level)),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
				  XE_BO_FLAG_NEEDS_64K);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_free_pt;
	}

	lmtt_assert(lmtt, xe_bo_is_vram(bo));
	lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));

	xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));

	pt->level = level;
	pt->bo = bo;
	return pt;

out_free_pt:
	kfree(pt);
out:
	return ERR_PTR(err);
}

static void lmtt_pt_free(struct xe_lmtt_pt *pt)
{
	lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n",
		   pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));

	xe_bo_unpin_map_no_vm(pt->bo);
	kfree(pt);
}

static int lmtt_init_pd(struct xe_lmtt *lmtt)
{
	struct xe_lmtt_pt *pd;

	lmtt_assert(lmtt, !lmtt->pd);
	lmtt_assert(lmtt, lmtt->ops->lmtt_root_pd_level());

	pd = lmtt_pt_alloc(lmtt, lmtt->ops->lmtt_root_pd_level());
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	lmtt->pd = pd;
	return 0;
}

static void lmtt_fini_pd(struct xe_lmtt *lmtt)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	unsigned int num_entries = lmtt->ops->lmtt_pte_num(pd->level);
	unsigned int n = 0;

	/* make sure we don't leak */
	for (n = 0; n < num_entries; n++)
		lmtt_assert(lmtt, !pd->entries[n]);

	lmtt->pd = NULL;
	lmtt_pt_free(pd);
}

static void fini_lmtt(struct drm_device *drm, void *arg)
{
	struct xe_lmtt *lmtt = arg;

	lmtt_assert(lmtt, !(!!lmtt->ops ^ !!lmtt->pd));

	if (!lmtt->pd)
		return;

	lmtt_fini_pd(lmtt);
	lmtt->ops = NULL;
}

/**
 * xe_lmtt_init - LMTT software initialization.
 * @lmtt: the &xe_lmtt to initialize
 *
 * The LMTT initialization requires two steps.
 *
 * The xe_lmtt_init() checks whether the LMTT is required on the current device
 * and selects and initializes the proper variant of the LMTT Root Directory.
 * Currently supported variants are `Two-Level LMTT Structure`_ and
 * `Multi-Level LMTT Structure`_.
 *
 * In the next step, xe_lmtt_init_hw() will register this directory on the hardware.
 *
 * Notes:
 * The LMTT allocations are managed and will be implicitly released on driver unload.
 * This function shall be called only once and only when running as a PF driver.
 * Any LMTT initialization failure should block enabling of VFs.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_init(struct xe_lmtt *lmtt)
{
	struct xe_device *xe = lmtt_to_xe(lmtt);
	int err;

	lmtt_assert(lmtt, IS_SRIOV_PF(xe));
	lmtt_assert(lmtt, !lmtt->ops);

	if (!xe_device_has_lmtt(xe))
		return 0;

	if (xe_has_multi_level_lmtt(xe))
		lmtt->ops = &lmtt_ml_ops;
	else
		lmtt->ops = &lmtt_2l_ops;

	err = lmtt_init_pd(lmtt);
	if (unlikely(err))
		goto fail;

	return drmm_add_action_or_reset(&xe->drm, fini_lmtt, lmtt);

fail:
	lmtt->ops = NULL;
	return err;
}
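
/*
 * For illustration only, a minimal sketch of the two-step bring-up from
 * PF-only init code (error handling and the surrounding init framework are
 * elided; the LMTT itself lives at tile->sriov.pf.lmtt):
 *
 *	int err = xe_lmtt_init(&tile->sriov.pf.lmtt);	// once, software setup
 *	if (err)
 *		return err;				// must block VF enabling
 *	...
 *	xe_lmtt_init_hw(&tile->sriov.pf.lmtt);		// after every HW reset
 */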

static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
{
	struct xe_tile *tile = lmtt_to_tile(lmtt);
	struct xe_device *xe = tile_to_xe(tile);
	dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);

	lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
	lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
	lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));

	xe_mmio_write32(&tile->mmio,
			GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
			LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
}
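
/*
 * Example with hypothetical numbers: if the root PD happened to be placed at
 * VRAM offset 0x180000, the value programmed above would be
 * LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, 0x180000 / SZ_64K), i.e. the
 * LMTT_DIR_PTR field would hold 0x18, the 64K-granular address of the
 * directory.
 */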

/**
 * xe_lmtt_init_hw - Perform LMTT hardware initialization.
 * @lmtt: the &xe_lmtt to initialize
 *
 * This function is the second step of the LMTT initialization.
 * It registers the LMTT Root Directory prepared in xe_lmtt_init().
 *
 * This function shall be called after every hardware reset.
 * This function shall be called only when running as a PF driver.
 */
void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
{
	if (!lmtt->pd)
		return;

	lmtt_setup_dir_ptr(lmtt);
}

static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
	struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
	struct xe_gt_tlb_invalidation_fence *fence = fences;
	struct xe_tile *tile = lmtt_to_tile(lmtt);
	struct xe_gt *gt;
	int result = 0;
	int err;
	u8 id;

	for_each_gt_on_tile(gt, tile, id) {
		xe_gt_tlb_invalidation_fence_init(gt, fence, true);
		err = xe_gt_tlb_invalidation_all(gt, fence);
		result = result ?: err;
		fence++;
	}

	lmtt_debug(lmtt, "num_fences=%d err=%d\n", (int)(fence - fences), result);

	/*
	 * It is fine to wait for all fences, even for those which cover an
	 * invalidation request that failed, as such a fence should already be
	 * marked as signaled.
	 */
	fence = fences;
	for_each_gt_on_tile(gt, tile, id)
		xe_gt_tlb_invalidation_fence_wait(fence++);

	return result;
}

/**
 * xe_lmtt_invalidate_hw - Invalidate LMTT hardware.
 * @lmtt: the &xe_lmtt to invalidate
 *
 * Send requests to all GuCs on this tile to invalidate all TLBs.
 *
 * This function should be called only when running as a PF driver.
 */
void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
	struct xe_device *xe = lmtt_to_xe(lmtt);
	int err;

	lmtt_assert(lmtt, IS_SRIOV_PF(xe));

	err = lmtt_invalidate_hw(lmtt);
	if (err)
		xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)",
			      lmtt_to_tile(lmtt)->id, ERR_PTR(err));
}

static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
			   u64 pte, unsigned int idx)
{
	unsigned int level = pt->level;

	lmtt_assert(lmtt, idx <= lmtt->ops->lmtt_pte_num(level));
	lmtt_debug(lmtt, "WRITE level=%u index=%u pte=%#llx\n", level, idx, pte);

	switch (lmtt->ops->lmtt_pte_size(level)) {
	case sizeof(u32):
		lmtt_assert(lmtt, !overflows_type(pte, u32));
		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));

		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
		break;
	case sizeof(u64):
		lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));

		xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
		break;
	default:
		lmtt_assert(lmtt, !!!"invalid pte size");
	}
}

static void lmtt_destroy_pt(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd)
{
	unsigned int num_entries = pd->level ? lmtt->ops->lmtt_pte_num(pd->level) : 0;
	struct xe_lmtt_pt *pt;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		pt = pd->entries[i];
		pd->entries[i] = NULL;
		if (!pt)
			continue;

		lmtt_destroy_pt(lmtt, pt);
	}

	lmtt_pt_free(pd);
}

static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	struct xe_lmtt_pt *pt;

	pt = pd->entries[vfid];
	pd->entries[vfid] = NULL;
	if (!pt)
		return;

	lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);
	lmtt_invalidate_hw(lmtt);

	lmtt_assert(lmtt, pd->level > 0);
	lmtt_assert(lmtt, pt->level == pd->level - 1);
	lmtt_destroy_pt(lmtt, pt);
}

static int __lmtt_alloc_range(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd,
			      u64 start, u64 end)
{
	u64 pte_addr_shift = BIT_ULL(lmtt->ops->lmtt_pte_shift(pd->level));
	u64 offset;
	int err;

	lmtt_assert(lmtt, pd->level > 0);

	offset = start;
	while (offset < end) {
		struct xe_lmtt_pt *pt;
		u64 next, pde, pt_addr;
		unsigned int idx;

		pt = lmtt_pt_alloc(lmtt, pd->level - 1);
		if (IS_ERR(pt))
			return PTR_ERR(pt);

		pt_addr = xe_bo_main_addr(pt->bo, XE_PAGE_SIZE);

		idx = lmtt->ops->lmtt_pte_index(offset, pd->level);
		pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);

		lmtt_write_pte(lmtt, pd, pde, idx);

		pd->entries[idx] = pt;

		next = min(end, round_up(offset + 1, pte_addr_shift));

		if (pt->level != 0) {
			err = __lmtt_alloc_range(lmtt, pt, offset, next);
			if (err)
				return err;
		}

		offset = next;
	}

	return 0;
}

static int lmtt_alloc_range(struct xe_lmtt *lmtt, unsigned int vfid, u64 start, u64 end)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	struct xe_lmtt_pt *pt;
	u64 pt_addr;
	u64 pde;
	int err;

	lmtt_assert(lmtt, pd->level > 0);
	lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
	lmtt_assert(lmtt, IS_ALIGNED(start, lmtt_page_size(lmtt)));
	lmtt_assert(lmtt, IS_ALIGNED(end, lmtt_page_size(lmtt)));

	if (pd->entries[vfid])
		return -ENOTEMPTY;

	pt = lmtt_pt_alloc(lmtt, pd->level - 1);
	if (IS_ERR(pt))
		return PTR_ERR(pt);

	pt_addr = xe_bo_main_addr(pt->bo, XE_PAGE_SIZE);

	pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);

	lmtt_write_pte(lmtt, pd, pde, vfid);

	pd->entries[vfid] = pt;

	if (pt->level != 0) {
		err = __lmtt_alloc_range(lmtt, pt, start, end);
		if (err)
			goto out_free_pt;
	}

	return 0;

out_free_pt:
	lmtt_pt_free(pt);
	return err;
}

static struct xe_lmtt_pt *lmtt_leaf_pt(struct xe_lmtt *lmtt, unsigned int vfid, u64 addr)
{
	struct xe_lmtt_pt *pd = lmtt->pd;
	struct xe_lmtt_pt *pt;

	lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
	pt = pd->entries[vfid];

	while (pt->level) {
		lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
			    lmtt->ops->lmtt_pte_num(pt->level));

		pt = pt->entries[lmtt->ops->lmtt_pte_index(addr, pt->level)];

		addr >>= lmtt->ops->lmtt_pte_shift(pt->level);
	}

	lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
		    lmtt->ops->lmtt_pte_num(pt->level));
	lmtt_assert(lmtt, pt->level != pd->level);
	lmtt_assert(lmtt, pt->level == 0);
	return pt;
}

static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 start)
{
	u64 page_size = lmtt_page_size(lmtt);
	struct xe_res_cursor cur;
	struct xe_lmtt_pt *pt;
	u64 addr, vram_offset;

	lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
	lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size));
	lmtt_assert(lmtt, xe_bo_is_vram(bo));

	vram_offset = vram_region_gpu_offset(bo->ttm.resource);
	xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
	while (cur.remaining) {
		addr = xe_res_dma(&cur);
		addr += vram_offset; /* XXX */

		pt = lmtt_leaf_pt(lmtt, vfid, start);

		lmtt_write_pte(lmtt, pt, lmtt->ops->lmtt_pte_encode(addr, 0),
					 lmtt->ops->lmtt_pte_index(start, 0));

		xe_res_next(&cur, page_size);
		start += page_size;
	}
}

/**
 * xe_lmtt_prepare_pages - Create VF's LMTT Page Tables.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 * @range: top range of LMEM offset to be supported
 *
 * This function creates empty LMTT page tables for the given VF to support
 * LMEM offsets up to the maximum #range. The LMTT page tables created by
 * this function must be released using the xe_lmtt_drop_pages() function.
 *
 * Notes:
 * This function shall be called only after successful LMTT initialization.
 * See xe_lmtt_init().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	return lmtt_alloc_range(lmtt, vfid, 0, range);
}

/**
 * xe_lmtt_populate_pages - Update VF's LMTT Page Table Entries.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 * @bo: the buffer object with LMEM allocation to be mapped
 * @offset: the offset at which #bo should be mapped
 *
 * This function updates the VF's LMTT entries to use the given buffer object
 * as a backing store.
 *
 * Notes:
 * This function shall be called only after successful preparation of the
 * VF's LMTT Page Tables. See xe_lmtt_prepare_pages().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	lmtt_insert_bo(lmtt, vfid, bo, offset);
	return 0;
}

/**
 * xe_lmtt_drop_pages - Remove VF's LMTT Pages.
 * @lmtt: the &xe_lmtt to update
 * @vfid: the VF identifier (1-based)
 *
 * This function removes all LMTT Page Tables prepared by xe_lmtt_prepare_pages().
 *
 * This function shall be called only after successful LMTT initialization.
 * See xe_lmtt_init().
 */
void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
{
	lmtt_assert(lmtt, lmtt->pd);
	lmtt_assert(lmtt, vfid);

	lmtt_drop_pages(lmtt, vfid);
}

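/*
 * For illustration only, a minimal sketch of the expected per-VF lifecycle of
 * the helpers above as seen from PF provisioning code (vfid, lmem_size and bo
 * are placeholders; only the call order is implied by this file):
 *
 *	err = xe_lmtt_prepare_pages(lmtt, vfid, lmem_size);	 // empty PTs
 *	if (!err)
 *		err = xe_lmtt_populate_pages(lmtt, vfid, bo, 0); // map backing BO
 *	...
 *	xe_lmtt_drop_pages(lmtt, vfid);	// tears down the PTs and invalidates
 */
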
/**
 * xe_lmtt_estimate_pt_size - Estimate size of LMTT PT allocations.
 * @lmtt: the &xe_lmtt
 * @size: the size of the LMEM to be mapped over LMTT (including any offset)
 *
 * This function shall be called only by the PF.
 *
 * Return: size of the PT allocation(s) needed to support the given LMEM size.
 */
u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
{
	unsigned int level = 0;
	u64 pt_size;

	lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
	lmtt_assert(lmtt, xe_device_has_lmtt(lmtt_to_xe(lmtt)));
	lmtt_assert(lmtt, lmtt->ops);

	pt_size = PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
			     lmtt->ops->lmtt_pte_num(level));

	while (++level < lmtt->ops->lmtt_root_pd_level()) {
		pt_size *= lmtt->ops->lmtt_pte_index(size, level) + 1;
		pt_size += PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
				      lmtt->ops->lmtt_pte_num(level));
	}

	return pt_size;
}
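
/*
 * Worked example with hypothetical ops values: assume a two-level variant
 * where lmtt_root_pd_level() == 2, a level-0 PT and a level-1 PT each need
 * PAGE_ALIGN(pte_size * pte_num) == SZ_4K, and lmtt_pte_index(size, 1) == 2
 * (i.e. three level-0 PTs are required to cover @size). Then:
 *
 *	pt_size = SZ_4K;	// one level-0 PT
 *	pt_size *= 2 + 1;	// three level-0 PTs -> 12K
 *	pt_size += SZ_4K;	// plus one level-1 PT -> 16K
 *
 * Note that the root PD itself is not counted here; it is allocated once in
 * lmtt_init_pd(), not per VF.
 */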

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_lmtt_test.c"
#endif