xref: /linux/arch/um/kernel/tlb.c (revision ba199dc909a20fe62270ae4e93f263987bb9d119)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

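/*
 * Callbacks used to apply recorded page table changes to an address
 * space: init_mm goes through the kern_* helpers below, which call the
 * os_* routines directly, while all other address spaces use the skas
 * map/unmap/protect helpers.
 */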
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
	int (*mprotect)(struct mm_id *mm_idp,
			unsigned long virt, unsigned long len,
			unsigned int prot);
};

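/*
 * vm_ops implementation for init_mm: kernel mappings are changed by
 * calling the os_* memory helpers directly.
 */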
static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: Why does executable always need to be set in the kernel? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}

static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}

static int kern_mprotect(struct mm_id *mm_idp,
			 unsigned long virt, unsigned long len,
			 unsigned int prot)
{
	return os_protect_memory((void *)virt, len,
				 prot & UM_PROT_READ, prot & UM_PROT_WRITE,
				 1);
}

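/* Report that a mapping operation failed on the host with -ENOMEM. */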
void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}

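/*
 * Walk the PTEs covered by one PMD and push every pending change out
 * through the given vm_ops: a PTE marked as a new page is either mapped
 * (if present) or unmapped, and a PTE with only a protection change gets
 * an mprotect call.  The protection used on the host side drops read and
 * write access for PTEs that are not young, and write access for PTEs
 * that are not dirty.  Each PTE is marked up to date once handled.
 */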
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (pte_newpage(*pte)) {
			if (pte_present(*pte)) {
				if (pte_newpage(*pte)) {
					__u64 offset;
					unsigned long phys =
						pte_val(*pte) & PAGE_MASK;
					int fd = phys_mapping(phys, &offset);

					ret = ops->mmap(ops->mm_idp, addr,
							PAGE_SIZE, prot, fd,
							offset);
				}
			} else
				ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
		} else if (pte_newprot(*pte))
			ret = ops->mprotect(ops->mm_idp, addr, PAGE_SIZE, prot);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

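/*
 * The remaining update_*_range() helpers walk one page table level each
 * using the usual addr/next pattern: a non-present entry that is marked
 * as a new page has its whole range unmapped, otherwise the walk
 * descends one level further.
 */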
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_newpage(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

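/* Same walk as update_pmd_range(), one page table level up. */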
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_newpage(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

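/* Same walk again at the P4D level. */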
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_newpage(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}

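/*
 * Apply all page table changes recorded for this mm since the last sync.
 * The address range to process is kept in
 * mm->context.sync_tlb_range_{from,to}, which is filled in by
 * um_tlb_mark_sync() (see flush_tlb_mm() below).  A caller is expected
 * to look roughly like this sketch (not code from this file):
 *
 *	um_tlb_mark_sync(mm, start, end);
 *	...
 *	um_tlb_sync(mm);
 */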
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
		ops.mprotect = kern_mprotect;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
		ops.mprotect = protect;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_newpage(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}

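/*
 * Flush everything by flushing the current task's address space; kernel
 * mappings are synced separately through the init_mm path of
 * um_tlb_sync() above.
 */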
void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}

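/*
 * Mark every VMA of the mm for synchronization; the actual host-side
 * update is performed later by um_tlb_sync().
 */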
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}