xref: /linux/arch/arm64/include/asm/mman.h (revision ec8a42e7343234802b9054874fe01810880289ce)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#include <linux/compiler.h>
#include <linux/types.h>
#include <uapi/asm/mman.h>

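/*
 * Map arch-specific mmap()/mprotect() protection bits (PROT_BTI, PROT_MTE)
 * onto the corresponding VM_* flags. Consumed by calc_vm_prot_bits() in
 * <linux/mman.h>.
 */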
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
	unsigned long pkey __always_unused)
{
	unsigned long ret = 0;

	if (system_supports_bti() && (prot & PROT_BTI))
		ret |= VM_ARM64_BTI;

	if (system_supports_mte() && (prot & PROT_MTE))
		ret |= VM_MTE;

	return ret;
}
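/*
 * Defining a macro with the same name tells <linux/mman.h> that the
 * architecture overrides the generic (no-op) hook; the same idiom is used
 * for the other arch_* hooks below.
 */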
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

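/*
 * Map MAP_* flags onto arch-specific VM_* flags. Consumed by
 * calc_vm_flag_bits() in <linux/mman.h>.
 */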
static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
{
	/*
	 * Only allow MTE on anonymous mappings as these are guaranteed to be
	 * backed by tag-capable memory. The vm_flags may still be overridden
	 * later by a filesystem supporting MTE (RAM-based).
	 */
	if (system_supports_mte() && (flags & MAP_ANONYMOUS))
		return VM_MTE_ALLOWED;

	return 0;
}
#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)

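/*
 * Translate the arch-specific vm_flags into additional page table
 * attributes: PTE_GP for BTI guarded pages and the Normal Tagged memory
 * attribute index for MTE mappings.
 */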
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = 0;

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit VM_MTE without VM_MTE_ALLOWED.
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)

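/*
 * Reject prot values containing bits this system does not support:
 * PROT_BTI and PROT_MTE are only accepted when the corresponding CPU
 * feature is present.
 */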
static inline bool arch_validate_prot(unsigned long prot,
	unsigned long addr __always_unused)
{
	unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;

	if (system_supports_bti())
		supported |= PROT_BTI;

	if (system_supports_mte())
		supported |= PROT_MTE;

	return (prot & ~supported) == 0;
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)

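/*
 * Sanity-check the final vm_flags of a mapping: VM_MTE is only valid in
 * combination with VM_MTE_ALLOWED.
 */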
static inline bool arch_validate_flags(unsigned long vm_flags)
{
	if (!system_supports_mte())
		return true;

	/* only allow VM_MTE if VM_MTE_ALLOWED has been set previously */
	return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
}
#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)

#endif /* ! __ASM_MMAN_H__ */