xref: /linux/arch/um/kernel/tlb.c (revision 065c4e67cc2c40e6dd94649e8e720096fbabd4ee)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

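/*
 * A struct vm_ops selects how page table state is pushed out to the
 * host address space identified by mm_idp: mmap() installs a mapping
 * backed by a host file descriptor, unmap() removes one.
 */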
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
};

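/*
 * The flavour used for init_mm: kernel mappings are applied directly
 * to UML's own host address space via os_map_memory() and
 * os_unmap_memory().
 */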
static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     prot & UM_PROT_EXEC);
}

static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}

void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}

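/*
 * Walk the PTEs covering [addr, end) and replay every out-of-sync
 * entry on the host: a present page is (re)mapped with protections
 * derived from the PTE, anything else is unmapped. The walk stops at
 * the first error.
 */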
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_needsync(*pte))
			continue;

		if (pte_present(*pte)) {
			__u64 offset;
			unsigned long phys = pte_val(*pte) & PAGE_MASK;
			/* Find the host fd/offset backing this page */
			int fd = phys_mapping(phys, &offset);
			int r, w, x, prot;

			r = pte_read(*pte);
			w = pte_write(*pte);
			x = pte_exec(*pte);
			/*
			 * Drop permissions the host must fault on so
			 * that accessed/dirty state can be tracked: a
			 * page that is not young is mapped inaccessible,
			 * a clean page is mapped read-only.
			 */
			if (!pte_young(*pte)) {
				r = 0;
				w = 0;
			} else if (!pte_dirty(*pte))
				w = 0;

			prot = (r ? UM_PROT_READ : 0) |
			       (w ? UM_PROT_WRITE : 0) |
			       (x ? UM_PROT_EXEC : 0);

			ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
					prot, fd, offset);
		} else
			ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);

		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

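/*
 * Walk the PMDs under one PUD. A non-present entry that still needs a
 * sync gets its whole range unmapped on the host in one call;
 * otherwise descend to the PTE level.
 */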
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_needsync(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

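/* The PUD and P4D levels below follow the same pattern one step up. */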
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_needsync(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_needsync(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}

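/*
 * Replay the pending range [sync_tlb_range_from, sync_tlb_range_to)
 * of an mm on the host and reset it. init_mm is synced into UML's own
 * address space; any other mm goes through the skas map()/unmap()
 * helpers for its host address space.
 */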
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr, next;
	int ret = 0;

	guard(spinlock_irqsave)(&mm->page_table_lock);
	guard(spinlock_irqsave)(&mm->context.sync_tlb_lock);

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
	}

	addr = mm->context.sync_tlb_range_from;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_needsync(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}

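/* A global flush in UML boils down to flushing the current mm. */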
void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}

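/*
 * Flushing an mm only marks its VMAs as needing a sync; the actual
 * host update is deferred to um_tlb_sync().
 */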
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}