// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

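/*
 * True when an address falls in the kernel's vmalloc/kmap window,
 * which is mapped via init_mm rather than a user process mm.
 */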
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

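/*
 * ASID allocation state, modelled on the old arch/ppc code: a bitmap
 * of contexts in use, the next context number to try handing out, and
 * a reverse map from context number to the mm that owns it.
 */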
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	int i;

	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

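	/*
	 * Allocate one physically contiguous block of page tables to map
	 * all of RAM, rounded up to whole pages.  (The round-up adds a
	 * spare page when the size is already page aligned.)
	 */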
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc_or_panic(size, PAGE_SIZE);

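	/* Advance to the pgd entry that maps the start of kernel RAM. */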
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* fill the page table with mappings for the kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn((void *)address),
					    PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;
	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);
}

int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int ret = -1;
	int asid;

	local_irq_save(flags);

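	/*
	 * Data faults latch the faulting address in the MMUAR register;
	 * for instruction faults it is reconstructed from the PC plus
	 * the extension-word offset passed in by the exception handler.
	 */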
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm)
		goto out;

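	/*
	 * Walk the page tables by hand.  If any level is missing, bail
	 * out and leave ret at -1 so the caller takes the normal fault
	 * path.
	 */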
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))
		goto out;

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d))
		goto out;

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud))
		goto out;

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd))
		goto out;

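	/*
	 * Kernel addresses map through pte_offset_kernel(); user
	 * addresses use pte_offset_map(), which may fail and must be
	 * paired with the pte_unmap() at the out: label.
	 */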
	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (!pte || pte_none(*pte) || !pte_present(*pte))
		goto out;

	if (write) {
		if (!pte_write(*pte))
			goto out;
		set_pte(pte, pte_mkdirty(*pte));
	}

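	/*
	 * Software dirty/young tracking: mark the page young, and keep
	 * clean user pages write-protected so that the first write traps
	 * back here to set the dirty bit.
	 */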
	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

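	/*
	 * Load the TLB: MMUTR holds the virtual page, ASID and valid
	 * bit, MMUDR the physical page and protection bits; writing
	 * MMUOR then updates the instruction or data TLB.
	 */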
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
	ret = 0;
out:
	if (pte && !KMAPAREA(mmuar))
		pte_unmap(pte);
	local_irq_restore(flags);
	return ret;
}

void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

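	/*
	 * Shift used by the virt-to-node lookup tables; the "- 6"
	 * divides the RAM address range into 64 chunks.
	 */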
	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}

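/*
 * Map vm_flags protection combinations to ColdFire page descriptor
 * bits; used by the generic vm_get_page_prot() instantiated by
 * DECLARE_VM_GET_PAGE_PROT below.
 */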
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE),
	[VM_WRITE]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_WRITABLE),
	[VM_WRITE | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_WRITABLE),
	[VM_EXEC]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_WRITABLE |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_WRITABLE |
								   CF_PAGE_EXEC),
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE),
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_SHARED),
	[VM_SHARED | VM_EXEC]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_SHARED |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_SHARED |
								   CF_PAGE_EXEC)
};
DECLARE_VM_GET_PAGE_PROT