// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_READONLY,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	if (lpa2_is_enabled())
		for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
			pgprot_val(protection_map[i]) &= ~PTE_SHARED;

	return 0;
}
arch_initcall(adjust_protection_map);
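
/*
 * For illustration, a minimal userspace sketch (not built here): on a CPU
 * with EPAN, once the table has been adjusted, an execute-only request
 * such as
 *
 *	void *p = mmap(NULL, len, PROT_EXEC,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * is installed as PAGE_EXECONLY: EL0 instruction fetches succeed while
 * EL0 data accesses fault. Without EPAN the same request falls back to
 * PAGE_READONLY_EXEC, i.e. the pages remain readable.
 */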

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot;

	/* Short circuit GCS to avoid bloating the table. */
	if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
		prot = _PAGE_GCS_RO;
	} else {
		prot = pgprot_val(protection_map[vm_flags &
				  (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
	}

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

#ifdef CONFIG_ARCH_HAS_PKEYS
	if (system_supports_poe()) {
		if (vm_flags & VM_PKEY_BIT0)
			prot |= PTE_PO_IDX_0;
		if (vm_flags & VM_PKEY_BIT1)
			prot |= PTE_PO_IDX_1;
		if (vm_flags & VM_PKEY_BIT2)
			prot |= PTE_PO_IDX_2;
	}
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
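
/*
 * For illustration, a userspace sketch (not built here): the MTE path
 * above is exercised by a mapping such as
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * Anonymous mappings get VM_MTE_ALLOWED and PROT_MTE sets VM_MTE, so
 * vm_get_page_prot() selects the Normal Tagged memory type and accesses
 * through the mapping are tag checked according to the mode the task
 * configured via prctl(PR_SET_TAGGED_ADDR_CTRL, ...).
 */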