/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

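/*
 * User-space access routines for i386.  Two paths are provided:
 *
 *  - Fast path: the *_fast routines declared below live in the
 *    trampoline area; copyout_init_tramp() relocates their entry
 *    points by setidt_disp so they are always called through the
 *    trampoline mapping, with the kernel %cr3 value passed in
 *    explicitly.  The machdep.fast_copyout tunable (default 1)
 *    controls whether this path is attempted first.
 *
 *  - Slow path: cp_slow0() wires the user pages backing the request
 *    and maps them into a per-CPU kernel window, so the copy itself
 *    operates only on kernel addresses.
 *
 * This split presumably exists because, with the kernel and user
 * portions of the address space separated on i386, user addresses
 * cannot simply be dereferenced from kernel context.
 */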
int copyin_fast(const void *udaddr, void *kaddr, size_t len, u_int);
static int (*copyin_fast_tramp)(const void *, void *, size_t, u_int);
int copyout_fast(const void *kaddr, void *udaddr, size_t len, u_int);
static int (*copyout_fast_tramp)(const void *, void *, size_t, u_int);
int fubyte_fast(volatile const void *base, u_int kcr3);
static int (*fubyte_fast_tramp)(volatile const void *, u_int);
int fuword16_fast(volatile const void *base, u_int kcr3);
static int (*fuword16_fast_tramp)(volatile const void *, u_int);
int fueword_fast(volatile const void *base, long *val, u_int kcr3);
static int (*fueword_fast_tramp)(volatile const void *, long *, u_int);
int subyte_fast(volatile void *base, int val, u_int kcr3);
static int (*subyte_fast_tramp)(volatile void *, int, u_int);
int suword16_fast(volatile void *base, int val, u_int kcr3);
static int (*suword16_fast_tramp)(volatile void *, int, u_int);
int suword_fast(volatile void *base, long val, u_int kcr3);
static int (*suword_fast_tramp)(volatile void *, long, u_int);

static int fast_copyout = 1;
SYSCTL_INT(_machdep, OID_AUTO, fast_copyout, CTLFLAG_RWTUN,
    &fast_copyout, 0,
    "");

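/*
 * Convert the linked addresses of the *_fast routines into their
 * trampoline addresses by adding setidt_disp, so that every fast-path
 * call goes through the trampoline mapping.
 */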
void
copyout_init_tramp(void)
{

	copyin_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
	    (uintptr_t)copyin_fast + setidt_disp);
	copyout_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
	    (uintptr_t)copyout_fast + setidt_disp);
	fubyte_fast_tramp = (int (*)(volatile const void *, u_int))(
	    (uintptr_t)fubyte_fast + setidt_disp);
	fuword16_fast_tramp = (int (*)(volatile const void *, u_int))(
	    (uintptr_t)fuword16_fast + setidt_disp);
	fueword_fast_tramp = (int (*)(volatile const void *, long *, u_int))(
	    (uintptr_t)fueword_fast + setidt_disp);
	subyte_fast_tramp = (int (*)(volatile void *, int, u_int))(
	    (uintptr_t)subyte_fast + setidt_disp);
	suword16_fast_tramp = (int (*)(volatile void *, int, u_int))(
	    (uintptr_t)suword16_fast + setidt_disp);
	suword_fast_tramp = (int (*)(volatile void *, long, u_int))(
	    (uintptr_t)suword_fast + setidt_disp);
}

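/*
 * Slow path: wire the user pages covering [uva, uva + len), map them
 * into this CPU's copyout window (the sx-protected window when the
 * thread is allowed to sleep, the mutex-protected one otherwise), and
 * run the callback f() on the kernel alias of the user range.  Callers
 * must split requests so that no call spans more than two pages
 * (nitems(m)).
 */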
int
cp_slow0(vm_offset_t uva, size_t len, bool write,
    void (*f)(vm_offset_t, void *), void *arg)
{
	struct pcpu *pc;
	vm_page_t m[2];
	vm_offset_t kaddr;
	int error, i, plen;
	bool sleepable;

	plen = howmany(uva - trunc_page(uva) + len, PAGE_SIZE);
	MPASS(plen <= nitems(m));
	error = 0;
	i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uva, len,
	    (write ? VM_PROT_WRITE : VM_PROT_READ) | VM_PROT_QUICK_NOFAULT,
	    m, nitems(m));
	if (i != plen)
		return (EFAULT);
	sched_pin();
	pc = get_pcpu();
	if (!THREAD_CAN_SLEEP() || curthread->td_vslock_sz > 0 ||
	    (curthread->td_pflags & TDP_NOFAULTING) != 0) {
		sleepable = false;
		mtx_lock(&pc->pc_copyout_mlock);
		kaddr = pc->pc_copyout_maddr;
	} else {
		sleepable = true;
		sx_xlock(&pc->pc_copyout_slock);
		kaddr = pc->pc_copyout_saddr;
	}
	pmap_cp_slow0_map(kaddr, plen, m);
	kaddr += uva - trunc_page(uva);
	f(kaddr, arg);
	sched_unpin();
	if (sleepable)
		sx_xunlock(&pc->pc_copyout_slock);
	else
		mtx_unlock(&pc->pc_copyout_mlock);
	vm_page_unhold_pages(m, plen);
	return (error);
}

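/*
 * copyinstr() copies a NUL-terminated string from user space in chunks
 * that never cross a page boundary, so each cp_slow0() call touches a
 * single page.  Copying stops at the terminating NUL; if maxlen bytes
 * are copied without finding one, ENAMETOOLONG is returned.
 */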
struct copyinstr_arg0 {
	vm_offset_t kc;
	size_t len;
	size_t alen;
	bool end;
};

static void
copyinstr_slow0(vm_offset_t kva, void *arg)
{
	struct copyinstr_arg0 *ca;
	char c;

	ca = arg;
	MPASS(ca->alen == 0 && ca->len > 0 && !ca->end);
	while (ca->alen < ca->len && !ca->end) {
		c = *(char *)(kva + ca->alen);
		*(char *)ca->kc = c;
		ca->alen++;
		ca->kc++;
		if (c == '\0')
			ca->end = true;
	}
}

int
copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied)
{
	struct copyinstr_arg0 ca;
	vm_offset_t uc;
	size_t plen;
	int error;

	error = 0;
	ca.end = false;
	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
	    plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) {
		ca.len = round_page(uc) - uc;
		if (ca.len == 0)
			ca.len = PAGE_SIZE;
		if (plen + ca.len > maxlen)
			ca.len = maxlen - plen;
		ca.alen = 0;
		if (cp_slow0(uc, ca.len, false, copyinstr_slow0, &ca) != 0) {
			error = EFAULT;
			break;
		}
	}
	if (!ca.end && plen == maxlen && error == 0)
		error = ENAMETOOLONG;
	if (lencopied != NULL)
		*lencopied = plen;
	return (error);
}

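/*
 * copyin() rejects ranges that wrap or extend beyond
 * VM_MAXUSER_ADDRESS, then tries the trampoline fast path for requests
 * of at most TRAMP_COPYOUT_SZ bytes, and finally falls back to
 * page-by-page cp_slow0() copies via copyin_slow0().
 *
 * Illustrative call from a hypothetical syscall handler (sketch only;
 * struct my_args and uap->argp are invented for the example):
 *
 *	struct my_args a;
 *	int error;
 *
 *	error = copyin(uap->argp, &a, sizeof(a));
 *	if (error != 0)
 *		return (error);
 */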
struct copyin_arg0 {
	vm_offset_t kc;
	size_t len;
};

static void
copyin_slow0(vm_offset_t kva, void *arg)
{
	struct copyin_arg0 *ca;

	ca = arg;
	bcopy((void *)kva, (void *)ca->kc, ca->len);
}

int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct copyin_arg0 ca;
	vm_offset_t uc;
	size_t plen;

	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
	    (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);
	if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
	    copyin_fast_tramp(udaddr, kaddr, len, pmap_get_kcr3()) == 0))
		return (0);
	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
	    plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
		ca.len = round_page(uc) - uc;
		if (ca.len == 0)
			ca.len = PAGE_SIZE;
		if (plen + ca.len > len)
			ca.len = len - plen;
		if (cp_slow0(uc, ca.len, false, copyin_slow0, &ca) != 0)
			return (EFAULT);
	}
	return (0);
}

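/*
 * copyout() mirrors copyin(): the same bounds check and fast/slow path
 * selection, but the data flows from the kernel buffer to user space,
 * so cp_slow0() is called with write set to true.
 */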
static void
copyout_slow0(vm_offset_t kva, void *arg)
{
	struct copyin_arg0 *ca;

	ca = arg;
	bcopy((void *)ca->kc, (void *)kva, ca->len);
}

int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct copyin_arg0 ca;
	vm_offset_t uc;
	size_t plen;

	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
	    (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);
	if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
	    copyout_fast_tramp(kaddr, udaddr, len, pmap_get_kcr3()) == 0))
		return (0);
	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
	    plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
		ca.len = round_page(uc) - uc;
		if (ca.len == 0)
			ca.len = PAGE_SIZE;
		if (plen + ca.len > len)
			ca.len = len - plen;
		if (cp_slow0(uc, ca.len, true, copyout_slow0, &ca) != 0)
			return (EFAULT);
	}
	return (0);
}

/*
 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
 * memory.
 */

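/*
 * fubyte() and fuword16() return the fetched value, or -1 on a fault;
 * fueword() instead returns 0 or -1 and hands the value back through
 * *val, so a legitimately stored -1 can be told apart from an error.
 */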
static void
fubyte_slow0(vm_offset_t kva, void *arg)
{

	*(int *)arg = *(u_char *)kva;
}

int
fubyte(volatile const void *base)
{
	int res;

	if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	if (fast_copyout) {
		res = fubyte_fast_tramp(base, pmap_get_kcr3());
		if (res != -1)
			return (res);
	}
	if (cp_slow0((vm_offset_t)base, sizeof(char), false, fubyte_slow0,
	    &res) != 0)
		return (-1);
	return (res);
}

static void
fuword16_slow0(vm_offset_t kva, void *arg)
{

	*(int *)arg = *(uint16_t *)kva;
}

int
fuword16(volatile const void *base)
{
	int res;

	if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	if (fast_copyout) {
		res = fuword16_fast_tramp(base, pmap_get_kcr3());
		if (res != -1)
			return (res);
	}
	if (cp_slow0((vm_offset_t)base, sizeof(uint16_t), false,
	    fuword16_slow0, &res) != 0)
		return (-1);
	return (res);
}

static void
fueword_slow0(vm_offset_t kva, void *arg)
{

	*(uint32_t *)arg = *(uint32_t *)kva;
}

int
fueword(volatile const void *base, long *val)
{
	uint32_t res;

	if ((uintptr_t)base + sizeof(*val) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(*val) > VM_MAXUSER_ADDRESS)
		return (-1);
	if (fast_copyout) {
		if (fueword_fast_tramp(base, val, pmap_get_kcr3()) == 0)
			return (0);
	}
	if (cp_slow0((vm_offset_t)base, sizeof(long), false, fueword_slow0,
	    &res) != 0)
		return (-1);
	*val = res;
	return (0);
}

int
fueword32(volatile const void *base, int32_t *val)
{

	return (fueword(base, (long *)val));
}

/*
 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
 */

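/*
 * The su*() stores return 0 on success and -1 on a fault.  Since long
 * is 32 bits wide on i386, suword() and suword32(), like fueword() and
 * fueword32() above, share a single implementation.
 */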
static void
subyte_slow0(vm_offset_t kva, void *arg)
{

	*(u_char *)kva = *(int *)arg;
}

int
subyte(volatile void *base, int byte)
{

	if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	if (fast_copyout && subyte_fast_tramp(base, byte, pmap_get_kcr3()) == 0)
		return (0);
	return (cp_slow0((vm_offset_t)base, sizeof(u_char), true, subyte_slow0,
	    &byte) != 0 ? -1 : 0);
}

static void
suword16_slow0(vm_offset_t kva, void *arg)
{

	*(int *)kva = *(uint16_t *)arg;
}

int
suword16(volatile void *base, int word)
{

	if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	if (fast_copyout && suword16_fast_tramp(base, word, pmap_get_kcr3())
	    == 0)
		return (0);
	return (cp_slow0((vm_offset_t)base, sizeof(int16_t), true,
	    suword16_slow0, &word) != 0 ? -1 : 0);
}

static void
suword_slow0(vm_offset_t kva, void *arg)
{

	*(int *)kva = *(uint32_t *)arg;
}

int
suword(volatile void *base, long word)
{

	if ((uintptr_t)base + sizeof(word) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(word) > VM_MAXUSER_ADDRESS)
		return (-1);
	if (fast_copyout && suword_fast_tramp(base, word, pmap_get_kcr3()) == 0)
		return (0);
	return (cp_slow0((vm_offset_t)base, sizeof(long), true,
	    suword_slow0, &word) != 0 ? -1 : 0);
}

int
suword32(volatile void *base, int32_t word)
{

	return (suword(base, word));
}

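/*
 * casueword32() and casueword() perform a compare-and-swap on a word
 * of user memory through the slow path: -1 is returned on a fault, 0
 * when the swap succeeded, and 1 when the comparison failed, with the
 * value found in user memory passed back through *oldvalp.
 */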
struct casueword_arg0 {
	uint32_t oldval;
	uint32_t newval;
	int res;
};

static void
casueword_slow0(vm_offset_t kva, void *arg)
{
	struct casueword_arg0 *ca;

	ca = arg;
	ca->res = 1 - atomic_fcmpset_int((u_int *)kva, &ca->oldval,
	    ca->newval);
}

int
casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
    uint32_t newval)
{
	struct casueword_arg0 ca;
	int res;

	ca.oldval = oldval;
	ca.newval = newval;
	res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
	    casueword_slow0, &ca);
	if (res == 0) {
		*oldvalp = ca.oldval;
		return (ca.res);
	}
	return (-1);
}

int
casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval)
{
	struct casueword_arg0 ca;
	int res;

	ca.oldval = oldval;
	ca.newval = newval;
	res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
	    casueword_slow0, &ca);
	if (res == 0) {
		*oldvalp = ca.oldval;
		return (ca.res);
	}
	return (-1);
}