xref: /linux/arch/arm64/mm/mmap.c (revision ebc733e54a1a79ea2dde2ba5121ae73a188e20d4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_READONLY,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};
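
/*
 * Editorial sketch, not part of the original file: how a lookup into the
 * table above resolves for a private PROT_READ|PROT_WRITE mapping. The
 * function name is hypothetical; the mask mirrors the one used by
 * vm_get_page_prot() further down.
 */
static inline pgprot_t __maybe_unused example_private_rw_prot(void)
{
	unsigned long vm_flags = VM_READ | VM_WRITE;	/* no VM_SHARED */

	/*
	 * Indexing by the low four permission bits yields PAGE_READONLY:
	 * private writable mappings start out read-only and gain write
	 * access through copy-on-write faults.
	 */
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}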

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}
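
/*
 * Editorial usage sketch, not part of the original file: the /dev/mem
 * read path (read_mem() in drivers/char/mem.c) rejects an access up front
 * when the check above fails, roughly as below. The function name is
 * hypothetical.
 */
static ssize_t __maybe_unused example_read_mem_check(phys_addr_t p, size_t count)
{
	if (!valid_phys_addr_range(p, count))
		return -EFAULT;		/* range not entirely in mapped RAM */

	/* ... otherwise proceed to copy the data out ... */
	return count;
}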

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}
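
/*
 * Editorial worked example, assuming a 48-bit physical address space
 * (PHYS_MASK == (1UL << 48) - 1) and 4K pages (PAGE_SHIFT == 12):
 * pfn 0x800000000 with size 0x1000 gives an end address of 2^47 + 4K,
 * which has no bits set outside PHYS_MASK, so the mapping is allowed;
 * pfn 0x1000000000 starts at 2^48, sets bit 48 in the sum, and is
 * rejected.
 */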

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	return 0;
}
arch_initcall(adjust_protection_map);
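
/*
 * Editorial sketch, not part of the original file: the observable effect
 * of the initcall above. The function name is hypothetical.
 */
static pgprot_t __maybe_unused example_execonly_prot(void)
{
	/* PAGE_EXECONLY on EPAN-capable CPUs, PAGE_READONLY_EXEC otherwise */
	return protection_map[VM_EXEC];
}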

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
104