xref: /linux/arch/arm64/include/asm/mmu.h (revision 9d106c6dd81bb26ad7fc3ee89cb1d62557c8e2c9)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  */
5 #ifndef __ASM_MMU_H
6 #define __ASM_MMU_H
7 
8 #include <asm/cputype.h>
9 
#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */

/*
 * Userspace ASIDs live in the upper 16 bits (63:48) of TTBRx_EL1.
 * Derive the flag and the mask from USER_ASID_BIT so the three
 * definitions cannot drift apart.
 */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << USER_ASID_BIT)

/* Number of per-CPU EL2 vector slots reserved for BP hardening */
#define BP_HARDEN_EL2_SLOTS 4
16 
17 #ifndef __ASSEMBLY__
18 
/*
 * Per-mm architecture-specific context.
 */
typedef struct {
	atomic64_t	id;	/* context ID; low 16 bits hold the ASID (see ASID() below) */
	void		*vdso;	/* this mm's vDSO mapping — set up outside this file, presumably at exec/mmap time */
	unsigned long	flags;	/* MMCF_* flags, e.g. MMCF_AARCH32 for compat tasks */
} mm_context_t;
24 
/*
 * Extract the 16-bit hardware ASID from a mm's context ID.
 *
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read — the raw counter field is read directly instead.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)
31 
32 static inline bool arm64_kernel_unmapped_at_el0(void)
33 {
34 	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
35 }
36 
/* A branch-predictor invalidation callback; invoked via arm64_apply_bp_hardening(). */
typedef void (*bp_hardening_cb_t)(void);

/* Per-CPU branch-predictor hardening state. */
struct bp_hardening_data {
	int			hyp_vectors_slot;	/* EL2 vector slot index — presumably < BP_HARDEN_EL2_SLOTS; confirm at assignment sites */
	bp_hardening_cb_t	fn;			/* hardening callback; may be NULL (checked before call) */
};
43 
44 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
45      defined(CONFIG_HARDEN_EL2_VECTORS))
46 extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
47 extern atomic_t arm64_el2_vector_last_slot;
48 #endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
49 
50 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
51 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
52 
/*
 * Return the current CPU's bp_hardening_data.
 * NOTE(review): uses this_cpu_ptr(), so callers presumably need preemption
 * disabled to keep the pointer meaningful — confirm at call sites.
 */
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}
57 
58 static inline void arm64_apply_bp_hardening(void)
59 {
60 	struct bp_hardening_data *d;
61 
62 	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
63 		return;
64 
65 	d = arm64_get_bp_hardening_data();
66 	if (d->fn)
67 		d->fn();
68 }
69 #else
/* !CONFIG_HARDEN_BRANCH_PREDICTOR stub: no per-CPU hardening data exists. */
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

/* Hardening disabled at build time: nothing to apply. */
static inline void arm64_apply_bp_hardening(void)	{ }
76 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
77 
/* Boot-time memory and page-table setup entry points (defined elsewhere). */
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

/* Designated-initializer fragment for init_mm's context: use init_pg_dir as pgd. */
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,
92 
93 #endif	/* !__ASSEMBLY__ */
94 #endif
95