// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/types.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include "pi.h"

/**
 * map_range - Map a contiguous range of physical pages into virtual memory
 *
 * @pte:		Address of physical pointer to array of pages to
 *			allocate page tables from
 * @start:		Virtual address of the start of the range
 * @end:		Virtual address of the end of the range (exclusive)
 * @pa:			Physical address of the start of the range
 * @prot:		Access permissions of the range
 * @level:		Translation level for the mapping
 * @tbl:		The level @level page table to create the mappings in
 * @may_use_cont:	Whether the use of the contiguous attribute is allowed
 * @va_offset:		Offset between a physical page and its current mapping
 *			in the VA space
 *
 * Walks @tbl one entry at a time, emitting block/page descriptors where the
 * chunk is suitably aligned and recursing into a next-level table otherwise.
 * Next-level tables are carved sequentially out of the page array that @pte
 * points into; *@pte is advanced past each table consumed.  A @prot whose
 * value is 0 clears (unmaps) the range instead of mapping it.
 */
void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
		      int level, pte_t *tbl, bool may_use_cont, u64 va_offset)
{
	/*
	 * Alignment mask for a contiguous run.  Only level 3 (page) mappings
	 * use the contiguous attribute here; at other levels U64_MAX makes the
	 * "suitably aligned" test below unsatisfiable so PTE_CONT is never set.
	 */
	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
	u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
	/*
	 * Each level resolves PAGE_SHIFT - 3 VA bits (8-byte descriptors per
	 * page), so lshift is the number of VA bits, beyond PAGE_SHIFT,
	 * covered by a single entry at @level; lmask is the byte span of one
	 * entry, minus one.
	 */
	int lshift = (3 - level) * (PAGE_SHIFT - 3);
	u64 lmask = (PAGE_SIZE << lshift) - 1;

	start &= PAGE_MASK;
	pa &= PAGE_MASK;

	/* Advance tbl to the entry that covers start */
	tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;

	/*
	 * Set the right block/page bits for this level unless we are
	 * clearing the mapping
	 */
	if (protval)
		protval |= (level < 3) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;

	while (start < end) {
		/* End of this entry's span, clamped to the (page-aligned) end */
		u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));

		if (level < 3 && (start | next | pa) & lmask) {
			/*
			 * This chunk needs a finer grained mapping. Create a
			 * table mapping if necessary and recurse.
			 */
			if (pte_none(*tbl)) {
				/* Allocate the next-level table from *pte */
				*tbl = __pte(__phys_to_pte_val(*pte) |
					     PMD_TYPE_TABLE | PMD_TABLE_UXN);
				*pte += PTRS_PER_PTE * sizeof(pte_t);
			}
			/*
			 * The table descriptor holds a physical address;
			 * va_offset translates it to where the table is
			 * currently addressable from this code.
			 */
			map_range(pte, start, next, pa, prot, level + 1,
				  (pte_t *)(__pte_to_phys(*tbl) + va_offset),
				  may_use_cont, va_offset);
		} else {
			/*
			 * Start a contiguous range if start and pa are
			 * suitably aligned
			 */
			if (((start | pa) & cmask) == 0 && may_use_cont)
				protval |= PTE_CONT;

			/*
			 * Clear the contiguous attribute if the remaining
			 * range does not cover a contiguous block
			 */
			if ((end & ~cmask) <= start)
				protval &= ~PTE_CONT;

			/* Put down a block or page mapping */
			*tbl = __pte(__phys_to_pte_val(pa) | protval);
		}
		pa += next - start;
		start = next;
		tbl++;
	}
}

/**
 * create_init_idmap - Build the initial identity map of the kernel image
 *
 * @pg_dir:	Root (level IDMAP_ROOT_LEVEL) page table to populate
 *
 * Identity-maps (VA == PA: note each map_range() call passes the same value
 * for @start and @pa) the kernel image in two pieces: _stext..__initdata_begin
 * as read-only executable, and __initdata_begin.._end as writable
 * non-executable.  Page tables are allocated from the pages immediately
 * following @pg_dir.
 *
 * Returns the address just past the last page-table page consumed, i.e. the
 * end of the memory used for translation tables.
 */
asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir)
{
	/* Table allocations start one page past the root table */
	u64 ptep = (u64)pg_dir + PAGE_SIZE;

	map_range(&ptep, (u64)_stext, (u64)__initdata_begin, (u64)_stext,
		  PAGE_KERNEL_ROX, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
	map_range(&ptep, (u64)__initdata_begin, (u64)_end, (u64)__initdata_begin,
		  PAGE_KERNEL, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);

	return ptep;
}