xref: /linux/arch/arm64/include/asm/mmu.h (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
15  */
16 #ifndef __ASM_MMU_H
17 #define __ASM_MMU_H
18 
#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */

/*
 * NOTE(review): USER_ASID_BIT appears to be the position, within the
 * TTBR, of the ASID bit that tags user page-table mappings (ASID bit 0,
 * since the ASID field starts at TTBR bit 48) — confirm against the
 * ASID allocator and the entry.S users of USER_ASID_FLAG.
 */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
/*
 * Full 16-bit ASID field of TTBRx_EL1, bits [63:48]. The literal 48 here
 * is the field offset; it coincides with USER_ASID_BIT only incidentally,
 * so the two are deliberately kept independent.
 */
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

/* Number of EL2 vector slots reserved for branch-predictor hardening. */
#define BP_HARDEN_EL2_SLOTS 4
25 
26 #ifndef __ASSEMBLY__
27 
/*
 * arm64 per-mm architecture context, embedded in struct mm_struct.
 */
typedef struct {
	atomic64_t	id;	/* ASID in the low 16 bits (see ASID() below) */
	void		*vdso;	/* presumably the user base of the vDSO mapping — verify against vdso setup code */
	unsigned long	flags;	/* MMCF_* flags, e.g. MMCF_AARCH32 */
} mm_context_t;
33 
/*
 * Extract the 16-bit ASID proper from mm->context.id.
 *
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)
40 
41 static inline bool arm64_kernel_unmapped_at_el0(void)
42 {
43 	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
44 	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
45 }
46 
/* Signature of a CPU-specific branch-predictor invalidation sequence. */
typedef void (*bp_hardening_cb_t)(void);

/* Per-CPU branch-predictor hardening state. */
struct bp_hardening_data {
	int			hyp_vectors_slot;	/* which hyp vector slot this CPU uses */
	bp_hardening_cb_t	fn;			/* hardening callback; may be NULL (checked before call) */
};
53 
#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
     defined(CONFIG_HARDEN_EL2_VECTORS))
/* Bounds of the replacement hyp vectors used for BP/EL2 hardening. */
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
/* Most recently handed-out hyp vector slot (0..BP_HARDEN_EL2_SLOTS-1) —
 * allocation policy lives elsewhere; verify against cpu_errata.c. */
extern atomic_t arm64_el2_vector_last_slot;
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
59 
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/* Per-CPU branch-predictor hardening state; read-mostly after boot. */
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

/*
 * Return the current CPU's hardening data. Uses this_cpu_ptr(), so the
 * caller is expected to be in a non-preemptible context.
 */
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}
67 
68 static inline void arm64_apply_bp_hardening(void)
69 {
70 	struct bp_hardening_data *d;
71 
72 	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
73 		return;
74 
75 	d = arm64_get_bp_hardening_data();
76 	if (d->fn)
77 		d->fn();
78 }
#else
/* Hardening compiled out: there is no per-CPU data to hand back. */
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

/* Hardening compiled out: nothing to apply. */
static inline void arm64_apply_bp_hardening(void)	{ }
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
87 
/*
 * MMU/memory bring-up entry points; implemented elsewhere (presumably
 * arch/arm64/mm/ — not visible from this header).
 */
extern void paging_init(void);
extern void bootmem_init(void);
/* Map @phys at @virt for early IO access; returns the mapped address. */
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
/* Install a mapping of [@phys, @phys+@size) at @virt in @mm with @prot;
 * @page_mappings_only forbids block/section mappings. */
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
/* Map the device tree blob at @dt_phys via the fixmap; returns its VA. */
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);
97 
98 #endif	/* !__ASSEMBLY__ */
99 #endif
100