// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

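/*
 * VPU virtual addresses are translated through a four-level page table
 * built from 4 KB tables of 512 64-bit entries each:
 *   PGD index: VA bits 47:39
 *   PUD index: VA bits 38:30
 *   PMD index: VA bits 29:21
 *   PTE index: VA bits 20:12
 * Bits 11:0 are the offset within a 4 KB page.
 */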
#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

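/*
 * Allocate a single zeroed page for use as a page table: switch it to
 * write-combined caching, DMA-map it for device access and vmap() it
 * write-combined for CPU access. Returns the CPU pointer and stores the
 * DMA address in @dma, or returns NULL on failure.
 */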
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *cpu;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	set_pages_array_wc(&page, 1);

	dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(vdev->drm.dev, dma_addr))
		goto err_free_page;

	cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!cpu)
		goto err_dma_unmap_page;

	*dma = dma_addr;
	return cpu;

err_dma_unmap_page:
	dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
	put_page(page);
	return NULL;
}

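/*
 * Reverse ivpu_pgtable_alloc_page(): drop the CPU mapping, DMA-unmap the
 * page, restore write-back caching and release it. The DMA address passed
 * in may still carry page table entry flag bits, so they are masked off.
 */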
static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
	struct page *page;

	if (cpu_addr) {
		page = vmalloc_to_page(cpu_addr);
		vunmap(cpu_addr);
		dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		set_pages_array_wb(&page, 1);
		put_page(page);
	}
}

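/* Allocate the top-level (PGD) table for a context's page table. */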
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	dma_addr_t pgd_dma;

	pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
	if (!pgtable->pgd_dma_ptr)
		return -ENOMEM;

	pgtable->pgd_dma = pgd_dma;

	return 0;
}

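/*
 * Walk every allocated level of the page table and free each table page
 * together with the host-side pointer arrays that track them, finishing
 * with the PGD itself.
 */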
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;
	dma_addr_t pud_dma, pmd_dma, pte_dma;
	u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
		pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

		if (!pud_dma_ptr)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
			pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
			pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

			if (!pmd_dma_ptr)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
				pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

				ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
			}

			kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
			ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
		}

		kfree(pgtable->pmd_ptrs[pgd_idx]);
		kfree(pgtable->pte_ptrs[pgd_idx]);
		ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	}

	ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

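/*
 * The ivpu_mmu_ensure_*() helpers lazily allocate the next page table level
 * on first use: they allocate the DMA-visible table page, record its CPU
 * pointer in the host-side tracking arrays and link it into the parent
 * table with a VALID entry. If the level already exists, the existing
 * table is returned.
 */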
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
	dma_addr_t pud_dma;

	if (pud_dma_ptr)
		return pud_dma_ptr;

	pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
	if (!pud_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
	pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pmd_ptrs[pgd_idx])
		goto err_free_pud_dma_ptr;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
	pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx])
		goto err_free_pmd_ptrs;

	pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
	pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

	return pud_dma_ptr;

err_free_pmd_ptrs:
	kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
	ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
		    int pud_idx)
{
	u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
	dma_addr_t pmd_dma;

	if (pmd_dma_ptr)
		return pmd_dma_ptr;

	pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
	if (!pmd_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
	pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
		goto err_free_pmd_dma_ptr;

	pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
	pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

	return pmd_dma_ptr;

err_free_pmd_dma_ptr:
	ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
	dma_addr_t pte_dma;

	if (pte_dma_ptr)
		return pte_dma_ptr;

	pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
	if (!pte_dma_ptr)
		return NULL;

	pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
	pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

	return pte_dma_ptr;
}

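/*
 * Map a single 4 KB page: walk (allocating as needed) the PUD, PMD and PTE
 * levels for @vpu_addr, then write the leaf entry as dma_addr | prot.
 */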
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

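/*
 * Map a naturally aligned 64 KB range as 16 consecutive 4 KB entries with
 * the contiguous hint flag set, so the MMU can treat the whole range as a
 * single translation.
 */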
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      dma_addr_t dma_addr, u64 prot)
{
	size_t size = IVPU_MMU_CONT_PAGES_SIZE;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;

	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

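/*
 * Map an arbitrary range, using 64 KB contiguous mappings whenever both
 * addresses are 64 KB aligned, at least 64 KB of the range remains and
 * contiguous pages are not disabled via ivpu_disable_mmu_cont_pages;
 * otherwise fall back to individual 4 KB pages.
 */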
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int map_size;
	int ret;

	while (size) {
		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_CONT_PAGES_SIZE;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}

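/*
 * Map a DMA-mapped scatter-gather table into the context starting at
 * @vpu_addr. Each segment is mapped from the start of its page (the sg
 * offset is folded into the DMA address and length), so intra-page offsets
 * stay consistent between DMA and VPU addresses. Page table updates are
 * flushed from write-combine buffers before the context's TLB entries are
 * invalidated.
 */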
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
	struct scatterlist *sg;
	int ret;
	u64 prot;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return -EINVAL;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;

	if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			mutex_unlock(&ctx->lock);
			return ret;
		}
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
	return ret;
}

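/*
 * Tear down the mappings created by ivpu_mmu_context_map_sgt(): mark the
 * covered PTEs invalid, flush write-combine buffers and invalidate the
 * context's TLB entries.
 */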
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

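/*
 * Reserve a VPU address range of @size bytes within @range in the context's
 * drm_mm allocator. Allocations of at least 64 KB are first attempted with
 * 64 KB alignment so they can later use contiguous mappings, falling back
 * to 4 KB alignment if that fails.
 */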
int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
			     u64 size, struct drm_mm_node *node)
{
	int ret;

	WARN_ON(!range);

	mutex_lock(&ctx->lock);
	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
		ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
						  range->start, range->end, DRM_MM_INSERT_BEST);
		if (!ret)
			goto unlock;
	}

	ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
					  range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	mutex_lock(&ctx->lock);
	drm_mm_remove_node(node);
	mutex_unlock(&ctx->lock);
}

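/*
 * Initialize a context: allocate its PGD and set up the drm_mm address
 * space allocator. Context 0 (the global context) manages the hardware
 * global..shave range, while all other contexts manage the user..dma range.
 */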
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;
	int ret;

	mutex_init(&ctx->lock);

	ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
		return ret;
	}

	if (!context_id) {
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
		start = vdev->hw->ranges.user.start;
		end = vdev->hw->ranges.dma.end;
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;

	return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
		return;

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);

	ctx->pgtable.pgd_dma_ptr = NULL;
	ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
	return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

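/*
 * Look up the file_priv bound to @ssid under the context xarray lock and
 * flag it as having triggered MMU faults.
 */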
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_file_priv *file_priv;

	xa_lock(&vdev->context_xa);

	file_priv = xa_load(&vdev->context_xa, ssid);
	if (file_priv)
		file_priv->has_mmu_faults = true;

	xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
	int ret;

	drm_WARN_ON(&vdev->drm, !ctx_id);

	ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
		return ret;
	}

	ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
		goto err_context_fini;
	}

	return 0;

err_context_fini:
	ivpu_mmu_context_fini(vdev, ctx);
	return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	drm_WARN_ON(&vdev->drm, !ctx->id);

	ivpu_mmu_clear_pgtable(vdev, ctx->id);
	ivpu_mmu_context_fini(vdev, ctx);
}