// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>

#include <drm/drm_cache.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT         BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE       SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE    (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
                                IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

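/*
 * Page table memory is allocated one 4K page at a time: each page is switched
 * to write-combined caching, DMA-mapped so the NPU can walk it, and vmapped
 * write-combined for CPU updates. Callers that modify entries must drain the
 * WC buffers with wmb() before invalidating the TLB (see the map/unmap paths).
 */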
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{
        dma_addr_t dma_addr;
        struct page *page;
        void *cpu;

        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
                return NULL;

        set_pages_array_wc(&page, 1);

        dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(vdev->drm.dev, dma_addr))
                goto err_free_page;

        cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!cpu)
                goto err_dma_unmap_page;

        *dma = dma_addr;
        return cpu;

err_dma_unmap_page:
        dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

err_free_page:
        put_page(page);
        return NULL;
}

static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        struct page *page;

        if (cpu_addr) {
                page = vmalloc_to_page(cpu_addr);
                vunmap(cpu_addr);
                dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                set_pages_array_wb(&page, 1);
                put_page(page);
        }
}

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
        if (!pgtable->pgd_dma_ptr)
                return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;

        return 0;
}

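/*
 * Tear down the whole page table hierarchy: every PTE, PMD and PUD level page
 * handed to the device, the kzalloc'ed arrays of CPU-side pointers that shadow
 * them, and finally the PGD page itself.
 */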
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
        int pgd_idx, pud_idx, pmd_idx;
        dma_addr_t pud_dma, pmd_dma, pte_dma;
        u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;

        for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
                pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
                pud_dma = pgtable->pgd_dma_ptr[pgd_idx];

                if (!pud_dma_ptr)
                        continue;

                for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
                        pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
                        pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];

                        if (!pmd_dma_ptr)
                                continue;

                        for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
                                pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
                                pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];

                                ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
                        }

                        kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
                        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
                }

                kfree(pgtable->pmd_ptrs[pgd_idx]);
                kfree(pgtable->pte_ptrs[pgd_idx]);
                ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        }

        ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)
                return pud_dma_ptr;

        pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
        if (!pud_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud_dma_ptr;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);

err_free_pud_dma_ptr:
        ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
                    int pud_idx)
{
        u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
        dma_addr_t pmd_dma;

        if (pmd_dma_ptr)
                return pmd_dma_ptr;

        pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
        if (!pmd_dma_ptr)
                return NULL;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
        pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
                goto err_free_pmd_dma_ptr;

        pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
        pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;

        return pmd_dma_ptr;

err_free_pmd_dma_ptr:
        ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
        return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
        if (!pte_dma_ptr)
                return NULL;

        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}

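/*
 * Map a single 4K page at @vpu_addr: walk the PGD -> PUD -> PMD -> PTE chain,
 * allocating intermediate tables on demand, then write the leaf entry.
 */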
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                          u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        u64 *pte;
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Allocate PUD - second level page table if needed */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;

        /* Allocate PMD - third level page table if needed */
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;

        /* Allocate PTE - fourth level page table if needed */
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        /* Update PTE */
        pte[pte_idx] = dma_addr | prot;

        return 0;
}

static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
                              dma_addr_t dma_addr, u64 prot)
{
        size_t size = IVPU_MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        prot |= IVPU_MMU_ENTRY_FLAG_CONT;

        while (size) {
                int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

                if (ret)
                        return ret;

                size -= IVPU_MMU_PAGE_SIZE;
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                dma_addr += IVPU_MMU_PAGE_SIZE;
        }

        return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

        /* Update PTE with dummy physical address and clear flags */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{
        int map_size;
        int ret;

        while (size) {
                if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
                    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
                        ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_CONT_PAGES_SIZE;
                } else {
                        ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                        map_size = IVPU_MMU_PAGE_SIZE;
                }

                if (ret)
                        return ret;

                vpu_addr += map_size;
                dma_addr += map_size;
                size -= map_size;
        }

        return 0;
}

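/*
 * Unmapping only rewrites the leaf PTEs with the dummy "invalid" pattern;
 * intermediate page table pages stay allocated until the context is freed.
 */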
static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
        while (size) {
                ivpu_mmu_context_unmap_page(ctx, vpu_addr);
                vpu_addr += IVPU_MMU_PAGE_SIZE;
                size -= IVPU_MMU_PAGE_SIZE;
        }
}

int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                         u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
        struct scatterlist *sg;
        int ret;
        u64 prot;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return -EINVAL;

        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;

        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;

        prot = IVPU_MMU_ENTRY_MAPPED;
        if (llc_coherent)
                prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
                        mutex_unlock(&ctx->lock);
                        return ret;
                }
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
        return ret;
}

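/*
 * Reverse of ivpu_mmu_context_map_sgt(): invalidate the PTEs covering each
 * scatterlist segment, flush the WC write buffers, then invalidate the TLB.
 */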
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                           u64 vpu_addr, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int ret;
        u64 i;

        if (drm_WARN_ON(&vdev->drm, !ctx))
                return;

        mutex_lock(&ctx->lock);

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;

                ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
                         ctx->id, dma_addr, vpu_addr, size);

                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }

        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();

        mutex_unlock(&ctx->lock);

        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
                ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
                             u64 size, struct drm_mm_node *node)
{
        int ret;

        WARN_ON(!range);

        mutex_lock(&ctx->lock);
        if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
                ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
                                                  range->start, range->end, DRM_MM_INSERT_BEST);
                if (!ret)
                        goto unlock;
        }

        ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
                                          range->start, range->end, DRM_MM_INSERT_BEST);
unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}

void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
        mutex_lock(&ctx->lock);
        drm_mm_remove_node(node);
        mutex_unlock(&ctx->lock);
}

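/*
 * Context 0 is the global context and allocates from the global..shave address
 * range; all other contexts (user and reserved) use the user..dma range.
 */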
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
                return ret;
        }

        if (!context_id) {
                start = vdev->hw->ranges.global.start;
                end = vdev->hw->ranges.shave.end;
        } else {
                start = vdev->hw->ranges.user.start;
                end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);
        ctx->id = context_id;

        return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return;

        mutex_destroy(&ctx->lock);
        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
        drm_mm_takedown(&ctx->mm);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
}

void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
        return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
}

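/*
 * Mark the context bound to @ssid as having taken an MMU fault; the owning
 * file_priv is looked up and flagged under the context_xa lock.
 */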
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
                file_priv->has_mmu_faults = true;

        xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
                ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
                return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
                ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
                goto err_context_fini;
        }

        return 0;

err_context_fini:
        ivpu_mmu_context_fini(vdev, ctx);
        return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
}