// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
        dma_addr_t dma_addr;
        struct page *page;
        void *cpu;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
                return NULL;

        set_pages_array_wc(&page, 1);

        dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(vdev->drm.dev, dma_addr))
                goto err_free_page;

        cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!cpu)
                goto err_dma_unmap_page;

        *dma = dma_addr;
        return cpu;

err_dma_unmap_page:
        dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
        put_page(page);
        return NULL;
}

static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        struct page *page;

        if (cpu_addr) {
                page = vmalloc_to_page(cpu_addr);
                vunmap(cpu_addr);
                dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                set_pages_array_wb(&page, 1);
                put_page(page);
        }
}

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
        if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;

        return 0;
}

static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return -EINVAL;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;
        /*
         * VPU is only 32 bit, but DMA engine is 38 bit
         * Ranges < 2 GB are reserved for VPU internal registers
         * Limit range to 8 GB
         */
        if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        mutex_unlock(&ctx->lock);
                        return ret;
                }
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
        return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
                             u64 size, struct drm_mm_node *node)
{
        int ret;

        WARN_ON(!range);

        mutex_lock(&ctx->lock);
        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                  range->start, range->end, DRM_MM_INSERT_BEST);
                if (!ret)
                        goto unlock;
        }

        ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                          range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        mutex_lock(&ctx->lock);
        drm_mm_remove_node(node);
        mutex_unlock(&ctx->lock);
}

static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
                return ret;
        }

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = vdev->hw->ranges.user.start;
                end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;

        return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return;

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
                file_priv->has_mmu_faults = true;

        xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
                return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
                goto err_context_fini;
        }

        return 0;

err_context_fini:
        ivpu_mmu_context_fini(vdev, ctx);
        return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
}