/* xref: /linux/arch/um/kernel/tlb.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6) */
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

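/*
 * Queue a host mmap of [virt, virt + len) backed by the physical range at
 * phys.  If the op at *index is an MMAP that this one extends contiguously
 * (same fd and protections, adjoining address and file offset), it is
 * merged by growing that op's length instead.  A full ops array is first
 * flushed out through do_ops.
 */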
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}

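/*
 * Queue a host munmap of [addr, addr + len), merging with an immediately
 * preceding MUNMAP op when the two ranges adjoin, and flushing a full ops
 * array through do_ops before queueing a new op.
 */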
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MUNMAP,
					       .u = { .munmap = {
						       .addr	= addr,
						       .len	= len } } });
	return ret;
}

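/*
 * Queue a host mprotect of [addr, addr + len), merging with an adjoining
 * MPROTECT op that requests the same r/w/x protections, and flushing a
 * full ops array through do_ops before queueing a new op.
 */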
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x } } });
	return ret;
}

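/*
 * Round n up to the next inc boundary, where inc must be a power of two.
 * Note that an already-aligned n advances to the following boundary, e.g.
 * ADD_ROUND(0x2000, 0x1000) == 0x3000; the walkers below rely on this to
 * always make forward progress.
 */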
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

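/*
 * Walk the page tables covering [start_addr, end_addr) and queue whatever
 * host mmap/munmap/mprotect operations are needed to bring the host
 * address space back in sync with them.  Ranges whose page table entries
 * are absent are unmapped (when forced or freshly changed), present ptes
 * are remapped, and protection-only changes become mprotects.  Read
 * access is withheld while a pte is not young, and write access while it
 * is not dirty, presumably so that accessed/dirty state keeps being
 * refreshed through faults.  The queued ops are pushed out through the
 * mode-specific do_ops callback; if any of this fails, the current
 * process is killed, since its address space is now inconsistent.
 */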
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[1];
	void *flush = NULL;
	int op_index = -1, last_op = ARRAY_SIZE(ops) - 1;
	int ret = 0;

	if(mm == NULL)
		return;

	ops[0].type = NONE;
	for(addr = start_addr; addr < end_addr && !ret;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		if(!pte_young(*npte)){
			r = 0;
			w = 0;
		} else if(!pte_dirty(*npte)){
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				ret = add_mmap(addr,
					       pte_val(*npte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       &op_index, last_op, mmu,
					       &flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops,
					      &op_index, last_op, mmu,
					      &flush, do_ops);
		}
		else if(pte_newprot(*npte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   &op_index, last_op, mmu,
					   &flush, do_ops);

		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}
	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret){
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}

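/*
 * The same walk as fix_range_common(), but over init_mm's kernel mappings
 * and applied immediately through os_unmap_memory()/map_memory() rather
 * than through a queued-op callback.  Returns nonzero if any host mapping
 * was changed; a failed unmap of a kernel range is a panic.
 */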
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n", -err);
			if(pte_present(*pte))
				map_memory(addr, pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}

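/*
 * Out-of-line wrappers around the page table accessors, plus addr_pte(),
 * which walks a task's page tables down to the pte mapping addr.
 */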
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

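/*
 * The remaining entry points either dispatch between the tt and skas
 * implementations via CHOOSE_MODE or funnel into an entry point that
 * does; the kernel-range and kernel-vm flushes share
 * flush_tlb_kernel_range_common() above.
 */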
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}