// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

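/*
 * Host mapping operations used while walking the page tables: one set is
 * installed for the kernel's own address space (init_mm) and one for
 * userspace address spaces (the map/unmap/protect helpers).
 */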
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
	int (*mprotect)(struct mm_id *mm_idp,
			unsigned long virt, unsigned long len,
			unsigned int prot);
};

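/*
 * Mapping primitives for init_mm: these act directly on the kernel's own
 * mappings in the host address space via the os_* helpers.
 */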
static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: Why must the executable bit always be set for kernel mappings? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}

static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}

static int kern_mprotect(struct mm_id *mm_idp,
			 unsigned long virt, unsigned long len,
			 unsigned int prot)
{
	return os_protect_memory((void *)virt, len,
				 prot & UM_PROT_READ, prot & UM_PROT_WRITE,
				 1);
}

void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}

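/*
 * Walk the PTEs in [addr, end), derive the host protection from the
 * accessed/dirty bits and replay any pending change (new page, removed
 * page or changed protection) through the given vm_ops. Each PTE is
 * marked up to date once it has been handled.
 */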
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (pte_newpage(*pte)) {
			if (pte_present(*pte)) {
				__u64 offset;
				unsigned long phys = pte_val(*pte) & PAGE_MASK;
				int fd = phys_mapping(phys, &offset);

				ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
						prot, fd, offset);
			} else
				ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
		} else if (pte_newprot(*pte))
			ret = ops->mprotect(ops->mm_idp, addr, PAGE_SIZE, prot);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

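/*
 * The PMD, PUD, P4D and PGD walkers below follow the same pattern: a range
 * whose entry is not present but flagged as a new page is unmapped on the
 * host as a whole, otherwise the walk descends to the next level.
 */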
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_newpage(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_newpage(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_newpage(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}

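/*
 * Replay all pending page table changes for @mm on the host, covering the
 * range previously recorded with um_tlb_mark_sync() (see flush_tlb_mm()
 * below). init_mm is synced through the kern_* helpers above, userspace
 * address spaces through the map()/unmap()/protect() operations. The
 * pending range is cleared once the walk has finished.
 */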
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
		ops.mprotect = kern_mprotect;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
		ops.mprotect = protect;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_newpage(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}

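/* Flush the entire address space of the current task. */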
void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}

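/*
 * Flushing an mm in UML only marks every VMA of the address space as
 * needing a sync; the actual host updates are performed later by
 * um_tlb_sync().
 */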
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}
233