// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO           BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

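/*
 * Allocate a zeroed page for the page table, make it write-combined (both in
 * the linear map via set_pages_array_wc() and in the vmap() mapping below)
 * and map it for DMA. Page table updates then bypass the CPU cache, so a
 * wmb() is enough to flush them to memory before a TLB invalidation.
 */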
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *cpu;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page)
		return NULL;

	set_pages_array_wc(&page, 1);

	dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(vdev->drm.dev, dma_addr))
		goto err_free_page;

	cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!cpu)
		goto err_dma_unmap_page;

	*dma = dma_addr;
	return cpu;

err_dma_unmap_page:
	dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
	put_page(page);
	return NULL;
}

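/*
 * Reverse of ivpu_pgtable_alloc_page(): drop the vmap() mapping, unmap the
 * page from DMA (masking off any entry flags stored alongside the DMA
 * address), restore the write-back attribute and release the page.
 */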
static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
	struct page *page;

	if (cpu_addr) {
		page = vmalloc_to_page(cpu_addr);
		vunmap(cpu_addr);
		dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		set_pages_array_wb(&page, 1);
		put_page(page);
	}
}

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	dma_addr_t pgd_dma;

	pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
	if (!pgtable->pgd_dma_ptr)
		return -ENOMEM;

	pgtable->pgd_dma = pgd_dma;

	return 0;
}

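/*
 * Walk the whole four-level table (PGD -> PUD -> PMD -> PTE), freeing every
 * DMA-mapped table page plus the kernel-side shadow arrays (pud_ptrs,
 * pmd_ptrs, pte_ptrs) that cache the CPU addresses of lower-level tables.
 */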
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;
	dma_addr_t pud_dma, pmd_dma, pte_dma;
	u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
		pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

		if (!pud_dma_ptr)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
			pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
			pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

			if (!pmd_dma_ptr)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
				pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

				ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
			}

			kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
			ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
		}

		kfree(pgtable->pmd_ptrs[pgd_idx]);
		kfree(pgtable->pte_ptrs[pgd_idx]);
		ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	}

	ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

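/*
 * The ivpu_mmu_ensure_*() helpers below return the CPU pointer to the
 * next-level table for the given index, allocating it on first use: a new
 * table page is allocated, its DMA address is written into the parent entry
 * with IVPU_MMU_ENTRY_VALID, and its CPU address is stored in the shadow
 * arrays so the table can be walked and freed later.
 */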
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
	dma_addr_t pud_dma;

	if (pud_dma_ptr)
		return pud_dma_ptr;

	pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
	if (!pud_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
	pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pmd_ptrs[pgd_idx])
		goto err_free_pud_dma_ptr;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
	pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx])
		goto err_free_pmd_ptrs;

	pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
	pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

	return pud_dma_ptr;

err_free_pmd_ptrs:
	kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
	ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
		    int pud_idx)
{
	u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
	dma_addr_t pmd_dma;

	if (pmd_dma_ptr)
		return pmd_dma_ptr;

	pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
	if (!pmd_dma_ptr)
		return NULL;

	drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
	pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
		goto err_free_pmd_dma_ptr;

	pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
	pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

	return pmd_dma_ptr;

err_free_pmd_dma_ptr:
	ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
	dma_addr_t pte_dma;

	if (pte_dma_ptr)
		return pte_dma_ptr;

	pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
	if (!pte_dma_ptr)
		return NULL;

	pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
	pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

	return pte_dma_ptr;
}

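/*
 * Map a single 4KB page: walk (allocating as needed) the PUD, PMD and PTE
 * levels for vpu_addr, then write the PTE as dma_addr | prot. The caller is
 * responsible for flushing WC buffers and invalidating the TLB afterwards.
 */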
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

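/*
 * Map a 64KB range as 16 PTEs that all carry the contiguous-hint bit
 * (IVPU_MMU_ENTRY_FLAG_CONT), which hints to the MMU that the whole range
 * can be cached with a single TLB entry. Both addresses must be
 * 64KB-aligned.
 */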
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      dma_addr_t dma_addr, u64 prot)
{
	size_t size = IVPU_MMU_CONT_PAGES_SIZE;

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

	prot |= IVPU_MMU_ENTRY_FLAG_CONT;

	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

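/*
 * Map an arbitrary page-aligned range. Whenever at least 64KB remains and
 * both addresses are 64KB-aligned (and contiguous mappings are not disabled
 * via ivpu_disable_mmu_cont_pages), take the 64KB contiguous path; otherwise
 * fall back to single 4KB pages.
 */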
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
	int map_size;
	int ret;

	while (size) {
		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_CONT_PAGES_SIZE;
		} else {
			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
			map_size = IVPU_MMU_PAGE_SIZE;
		}

		if (ret)
			return ret;

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}

	return 0;
}

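/*
 * The helpers below patch live PTEs in place: set_page_ro() ORs in the
 * read-only bit, split_page() clears the contiguous hint from a single PTE,
 * and split_64k_page() clears it from all 16 PTEs of the surrounding 64KB
 * block so that per-4KB permissions can differ within it.
 */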
static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					 u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}

static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}

static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
					    u64 vpu_addr)
{
	u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
	u64 offset = 0;

	ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);

	while (start + offset < end) {
		ivpu_mmu_context_split_page(vdev, ctx, start + offset);
		offset += IVPU_MMU_PAGE_SIZE;
	}
}

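/*
 * Mark a page-aligned range read-only for the NPU. If a range boundary falls
 * inside a 64KB contiguous mapping, that mapping is split first so the RO
 * bit does not leak beyond the requested range; then every PTE in the range
 * gets IVPU_MMU_ENTRY_FLAG_RO and the context's TLB is invalidated.
 */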
int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
			      size_t size)
{
	u64 end = vpu_addr + size;
	size_t size_left = size;
	int ret;

	if (size == 0)
		return 0;

	if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
		return -EINVAL;

	mutex_lock(&ctx->lock);

	ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %zu\n",
		 ctx->id, vpu_addr, size);

	if (!ivpu_disable_mmu_cont_pages) {
		/* Split 64K contiguous page at the beginning if needed */
		if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);

		/* Split 64K contiguous page at the end if needed */
		if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
			ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
	}

	while (size_left) {
		if (vpu_addr < end)
			ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);

		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size_left -= IVPU_MMU_PAGE_SIZE;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);
	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}

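/*
 * Map a whole scatter-gather table at vpu_addr. Each entry is mapped from
 * the page-aligned start of its DMA segment (the segment offset is folded
 * into the length), so vpu_addr must be page-aligned and within the 48-bit
 * VPU address space. On success, WC buffers are flushed and the context's
 * TLB is invalidated so the NPU observes the new mappings.
 */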
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
	struct scatterlist *sg;
	int ret;
	u64 prot;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return -EINVAL;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;

	if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %zu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			mutex_unlock(&ctx->lock);
			return ret;
		}
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
	return ret;
}

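/*
 * Tear down the mappings created by ivpu_mmu_context_map_sgt(): each PTE in
 * the range is rewritten to the invalid dummy entry, WC buffers are flushed
 * and the TLB is invalidated. The table pages themselves are kept until the
 * context is destroyed.
 */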
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %zu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	/* Ensure page table modifications are flushed from wc buffers to memory */
	wmb();

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

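/*
 * Carve a free VPU address range of the given size out of the context's
 * drm_mm allocator. Allocation is first attempted with 64KB alignment to
 * make contiguous mappings possible; if that fails, or contiguous pages are
 * disabled, or the size is too small, plain 4KB alignment is used instead.
 */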
int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
			     u64 size, struct drm_mm_node *node)
{
	int ret;

	WARN_ON(!range);

	mutex_lock(&ctx->lock);
	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
		ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
						  range->start, range->end, DRM_MM_INSERT_BEST);
		if (!ret)
			goto unlock;
	}

	ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
					  range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	mutex_lock(&ctx->lock);
	drm_mm_remove_node(node);
	mutex_unlock(&ctx->lock);
}

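/*
 * Initialize a context: allocate its PGD and set up the drm_mm allocator
 * over the address ranges the context may use. The global context (ID 0)
 * spans the global..shave ranges; user contexts span the user..dma ranges.
 */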
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;
	int ret;

	mutex_init(&ctx->lock);

	ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
		return ret;
	}

	if (!context_id) {
		start = vdev->hw->ranges.global.start;
		end = vdev->hw->ranges.shave.end;
	} else {
		start = vdev->hw->ranges.user.start;
		end = vdev->hw->ranges.dma.end;
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;

	return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
		return;

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);

	ctx->pgtable.pgd_dma_ptr = NULL;
	ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
	return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_file_priv *file_priv;

	xa_lock(&vdev->context_xa);

	file_priv = xa_load(&vdev->context_xa, ssid);
	if (file_priv)
		file_priv->has_mmu_faults = true;

	xa_unlock(&vdev->context_xa);
}

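/*
 * Create a user context: set up its page tables, then bind the new PGD to
 * the given SSID in the MMU via ivpu_mmu_set_pgtable(). On failure the
 * half-initialized context is torn down again.
 */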
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
	int ret;

	drm_WARN_ON(&vdev->drm, !ctx_id);

	ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
		return ret;
	}

	ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
		goto err_context_fini;
	}

	return 0;

err_context_fini:
	ivpu_mmu_context_fini(vdev, ctx);
	return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	drm_WARN_ON(&vdev->drm, !ctx->id);

	ivpu_mmu_clear_pgtable(vdev, ctx->id);
	ivpu_mmu_context_fini(vdev, ctx);
}