/* arch/arm/mm/idmap.c (xref revision cc04a46f11ea046ed53e2c832ae29e4790f7e35f) */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>

/*
 * Note: accesses outside of the kernel image and the identity map area
 * are not supported on any CPU using the idmap tables as its current
 * page tables.
 */
pgd_t *idmap_pgd;
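/*
 * Platforms whose RAM is aliased in physical address space (e.g. TI
 * Keystone) can override this hook; virt_to_idmap() then translates
 * through it instead of using a plain virt_to_phys().
 */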
phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

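	/*
	 * If the pud is empty, or still points at the initial tables set up
	 * via swapper_pg_dir (flagged with L_PGD_SWAPPER), install a private
	 * pmd for the idmap rather than editing the kernel's tables in place.
	 */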
	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warn("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

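	/* Write 2MiB section mappings covering [addr, end). */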
	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

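	/*
	 * With the classic 2-level tables, each Linux pmd entry spans 2MiB
	 * and is backed by two 1MiB hardware sections, so both halves must
	 * be written.
	 */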
	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
#endif	/* CONFIG_ARM_LPAE */

static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}

static void identity_mapping_add(pgd_t *pgd, const char *text_start,
				 const char *text_end, unsigned long prot)
{
	unsigned long addr, end;
	unsigned long next;

	addr = virt_to_idmap(text_start);
	end = virt_to_idmap(text_end);
	pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);

	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

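	/*
	 * Pre-ARMv6 CPUs need bit 4 set in section descriptors
	 * (XScale parts excepted).
	 */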
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}

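/* Linker-provided bounds of the .idmap.text section. */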
extern char __idmap_text_start[], __idmap_text_end[];

static int __init init_static_idmap(void)
{
	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	identity_mapping_add(idmap_pgd, __idmap_text_start,
			     __idmap_text_end, 0);

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return 0;
}
early_initcall(init_static_idmap);

/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This ensures that we get predictable results
 * when the MMU is turned off.
 */
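/*
 * Typically called from the soft_restart() path (used by kexec among
 * others), on a single CPU with interrupts disabled.
 */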
void setup_mm_for_reboot(void)
{
	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);
	local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore still be live in the TLB.
	 */
	local_flush_tlb_all();
#endif
}