// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

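/*
 * The VPU virtual address space is translated through a 4-level page table:
 * PGD -> PUD -> PMD -> PTE, with 512 entries (9 address bits) per level and
 * 4KB pages, covering bits 47:12 of the VPU address. Bit 52 of an entry is
 * the contiguous-mapping hint used for 64KB (16 * 4KB) mappings.
 */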
#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

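/*
 * Page table pages are allocated as write-combined: the page is remapped WC
 * on the CPU side (set_pages_array_wc() plus vmap() with a write-combine
 * pgprot) and DMA-mapped so the VPU MMU can walk it directly. Because CPU
 * writes are write-combined, a wmb() is needed before TLB invalidation so
 * that updated entries are visible to the device.
 */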
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *cpu;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	set_pages_array_wc(&page, 1);

	dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(vdev->drm.dev, dma_addr))
		goto err_free_page;

	cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!cpu)
		goto err_dma_unmap_page;

	*dma = dma_addr;
	return cpu;

err_dma_unmap_page:
	dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
	put_page(page);
	return NULL;
}

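/*
 * Note: the dma_addr passed here may have been read back from a page table
 * entry, so it can still carry descriptor flag bits; they are masked off
 * before unmapping.
 */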
static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
	struct page *page;

	if (cpu_addr) {
		page = vmalloc_to_page(cpu_addr);
		vunmap(cpu_addr);
		dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		set_pages_array_wb(&page, 1);
		put_page(page);
	}
}

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	dma_addr_t pgd_dma;

	pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
	if (!pgtable->pgd_dma_ptr)
		return -ENOMEM;

	pgtable->pgd_dma = pgd_dma;

	return 0;
}

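/*
 * Walk the whole PGD -> PUD -> PMD -> PTE hierarchy and release every page
 * table page together with the kzalloc'ed shadow arrays that track the CPU
 * pointers for the lower levels.
 */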
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;
	dma_addr_t pud_dma, pmd_dma, pte_dma;
	u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
		pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

		if (!pud_dma_ptr)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
			pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
			pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

			if (!pmd_dma_ptr)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
				pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

				ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
			}

			kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
			ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
		}

		kfree(pgtable->pmd_ptrs[pgd_idx]);
		kfree(pgtable->pte_ptrs[pgd_idx]);
		ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	}

	ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

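/*
 * The ivpu_mmu_ensure_*() helpers allocate a page table level on first use:
 * they allocate the hardware table page, the shadow pointer arrays used to
 * track the lower levels from the CPU side, and link the new table into the
 * parent level by writing its DMA address with IVPU_MMU_ENTRY_VALID set.
 */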
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
	dma_addr_t pud_dma;

	if (pud_dma_ptr)
		return pud_dma_ptr;

	pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
	if (!pud_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
	pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pmd_ptrs[pgd_idx])
		goto err_free_pud_dma_ptr;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
	pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx])
		goto err_free_pmd_ptrs;

	pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
	pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

	return pud_dma_ptr;

err_free_pmd_ptrs:
	kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
	ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
		    int pud_idx)
{
	u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
	dma_addr_t pmd_dma;

	if (pmd_dma_ptr)
		return pmd_dma_ptr;

	pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
	if (!pmd_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
	pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
		goto err_free_pmd_dma_ptr;

	pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
	pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

	return pmd_dma_ptr;

err_free_pmd_dma_ptr:
	ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
	dma_addr_t pte_dma;

	if (pte_dma_ptr)
		return pte_dma_ptr;

	pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
	if (!pte_dma_ptr)
		return NULL;

	pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
	pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

	return pte_dma_ptr;
}

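/*
 * Map a single 4KB page: walk (and allocate on demand) the PUD, PMD and PTE
 * levels for @vpu_addr, then write the final PTE with @dma_addr and the
 * protection bits in @prot.
 */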
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

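/*
 * Map 64KB (16 contiguous 4KB pages) with the contiguous hint bit set in each
 * PTE, allowing the MMU to cache the whole range more efficiently. Both
 * @vpu_addr and @dma_addr must be 64KB aligned.
 */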
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      dma_addr_t dma_addr, u64 prot)
{
	size_t size = IVPU_MMU_CONT_PAGES_SIZE;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;

	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

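/*
 * Map an arbitrary, page-aligned range. 64KB contiguous mappings are used
 * whenever both addresses are 64KB aligned, at least 64KB remains and
 * contiguous pages are not disabled via ivpu_disable_mmu_cont_pages;
 * otherwise the range is mapped with individual 4KB pages.
 */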
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int map_size;
	int ret;

	while (size) {
		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_CONT_PAGES_SIZE;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}

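/*
 * Map a DMA-mapped scatter-gather table at @vpu_addr. The DMA address and
 * length of each segment are adjusted by the segment offset so the mapping
 * starts at the underlying page boundary. Page table updates go through
 * write-combined memory, so a wmb() is issued before the TLB invalidation
 * that makes the new mappings visible to the VPU. Callers (e.g. the buffer
 * object code) are expected to pair this with ivpu_mmu_context_unmap_sgt()
 * and to reserve the VPU address range beforehand, typically with
 * ivpu_mmu_context_insert_node().
 */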
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
	struct scatterlist *sg;
	int ret;
	u64 prot;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return -EINVAL;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;

	if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %zu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			mutex_unlock(&ctx->lock);
			return ret;
		}
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
	return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %zu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

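/*
 * Reserve a VPU virtual address range of @size bytes from the context's
 * drm_mm allocator. When contiguous pages are enabled and the allocation is
 * at least 64KB, a 64KB-aligned range is tried first so that the mapping can
 * later use the contiguous PTE hint; otherwise 4KB alignment is used.
 */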
int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
			     u64 size, struct drm_mm_node *node)
{
	int ret;

	WARN_ON(!range);

	mutex_lock(&ctx->lock);
	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
		ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
						  range->start, range->end, DRM_MM_INSERT_BEST);
		if (!ret)
			goto unlock;
	}

	ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
					  range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	mutex_lock(&ctx->lock);
	drm_mm_remove_node(node);
	mutex_unlock(&ctx->lock);
}

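/*
 * Context 0 is the global context and spans the global..shave address
 * ranges; all other (user) contexts allocate from the user..dma ranges.
 */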
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;
	int ret;

	mutex_init(&ctx->lock);

	ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
		return ret;
	}

	if (!context_id) {
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
		start = vdev->hw->ranges.user.start;
		end = vdev->hw->ranges.dma.end;
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;

	return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
		return;

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);

	ctx->pgtable.pgd_dma_ptr = NULL;
	ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
	ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_file_priv *file_priv;

	xa_lock(&vdev->context_xa);

	file_priv = xa_load(&vdev->context_xa, ssid);
	if (file_priv)
		file_priv->has_mmu_faults = true;

	xa_unlock(&vdev->context_xa);
}

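/*
 * Initialize a user (or reserved) context and bind its page table to the
 * given SSID in the MMU, so that device accesses tagged with that SSID are
 * translated through this context's page tables.
 */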
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
	int ret;

	drm_WARN_ON(&vdev->drm, !ctx_id);

	ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
		return ret;
	}

	ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
		goto err_context_fini;
	}

	return 0;

err_context_fini:
	ivpu_mmu_context_fini(vdev, ctx);
	return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	drm_WARN_ON(&vdev->drm, !ctx->id);

	ivpu_mmu_clear_pgtable(vdev, ctx->id);
	ivpu_mmu_context_fini(vdev, ctx);
}
553