xref: /freebsd/sys/kern/subr_uio.c (revision 145992504973bd16cf3518af9ba5ce185fefa82a)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);

#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
			   VM_PROT_WRITE, &entry, &uobject,
			   &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return (EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock(user_pg);
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
		vm_page_unlock(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

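/*
 * Variants of copyin() and copyout() that fail with EFAULT instead of
 * sleeping when servicing the copy would require handling a page fault.
 */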
int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(udaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyout(kaddr, udaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}

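/*
 * Transfer up to n bytes between the kernel buffer cp and the user or
 * kernel addresses described by uio, in the direction given by
 * uio->uio_rw, advancing the uio as the copy proceeds.
 */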
int
uiomove(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 0));
}

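/*
 * Same as uiomove(), but fails with EFAULT instead of sleeping if the
 * transfer would require a page fault to be serviced.
 */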
int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

	return (uiomove_faultflag(cp, n, uio, 1));
}

static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
	struct thread *td;
	struct iovec *iov;
	size_t cnt;
	int error, newflags, save;

	td = curthread;
	error = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
	    ("uiomove proc"));
	if (!nofault)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "Calling uiomove()");
	/* XXX does it make sense to set TDP_DEADLKTREAT for UIO_SYSSPACE? */
	newflags = TDP_DEADLKTREAT;
	if (uio->uio_segflg == UIO_USERSPACE && nofault) {
		/*
		 * Fail if a non-spurious page fault occurs.
		 */
		newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
	}
	save = curthread_pflags_set(newflags);

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	curthread_pflags_restore(save);
	return (error);
}

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost certainly a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	size_t offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > IOSIZE_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}
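
/*
 * A typical caller (hypothetical names) is a read or sysctl handler that
 * already has the complete result in one kernel buffer and lets
 * uio_offset/uio_resid select the slice to transfer:
 *
 *	error = uiomove_frombuf(sc->sc_buf, sc->sc_buflen, uio);
 */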

#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
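/*
 * Copy a single iovec's worth of data between cp and user space.  On
 * read, when every address involved is page aligned, the count is a
 * multiple of the page size, and the page is disposable, trade pages
 * via vm_pgmoveco() instead of copying, falling back to copyout() on
 * failure.
 */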
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;
	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (disposable != 0)) {
			/* SOCKET: use page-trading */
			/*
			 * We only want to call vm_pgmoveco() on
			 * disposable pages, since it gives the
			 * kernel page to the userland process.
			 */
			error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

			/*
			 * If we get an error back, attempt
			 * to use copyout() instead.  The
			 * disposable page should be freed
			 * automatically if we weren't able to move
			 * it into userland.
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
	return (error);
}

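/*
 * Zero-copy variant of uiomove(): userspace reads go through
 * userspaceco(), which may donate kernel pages to the user process
 * instead of copying when disposable is nonzero.
 */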
int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			maybe_yield();
			error = userspaceco(cp, cnt, uio, disposable);
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
#endif /* ZERO_COPY_SOCKETS */

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling ureadc()");

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		iov->iov_base = iov_base;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

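/*
 * Copy len bytes into the kernel buffer dst from src, which lives in
 * the address space named by seg (UIO_USERSPACE or UIO_SYSSPACE).
 */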
int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyin(src, dst, len);
		break;
	case UIO_SYSSPACE:
		bcopy(src, dst, len);
		break;
	default:
		panic("copyinfrom: bad seg %d\n", seg);
	}
	return (error);
}

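/*
 * Copy a NUL-terminated string of at most len bytes into dst from src
 * in the address space named by seg, reporting the number of bytes
 * actually copied, including the NUL, via *copied.
 */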
int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyinstr(src, dst, len, copied);
		break;
	case UIO_SYSSPACE:
		error = copystr(src, dst, len, copied);
		break;
	default:
		panic("copyinstrfrom: bad seg %d\n", seg);
	}
	return (error);
}

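/*
 * Copy an array of iovcnt iovec structures in from user space, storing
 * a pointer to the malloc(9)ed copy in *iov.  The caller supplies the
 * error to return when iovcnt exceeds UIO_MAXIOV, and must release the
 * array with free(*iov, M_IOV) on success.
 */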
int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof (struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}

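/*
 * Copy an iovec array in from user space and wrap it, together with a
 * freshly initialized struct uio, in a single allocation stored in
 * *uiop.  Fails with EINVAL if iovcnt exceeds UIO_MAXIOV or the total
 * length would overflow IOSIZE_MAX.  The caller releases the result
 * with free(uio, M_IOV).
 */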
int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}

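/*
 * Duplicate the given uio and its iovec array in a single malloc(9)
 * allocation; the caller releases the copy with free(uio, M_IOV).
 */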
struct uio *
cloneuio(struct uio *uiop)
{
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	*uio = *uiop;
	uio->uio_iov = (struct iovec *)(uio + 1);
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}

/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
	struct vmspace *vms;
	int error;
	vm_size_t size;

	vms = td->td_proc->p_vmspace;

	/*
	 * Map somewhere after heap in process memory.
	 */
	PROC_LOCK(td->td_proc);
	*addr = round_page((vm_offset_t)vms->vm_daddr +
	    lim_max(td->td_proc, RLIMIT_DATA));
	PROC_UNLOCK(td->td_proc);

	/* round size up to page boundary */
	size = (vm_size_t)round_page(sz);

	error = vm_mmap(&vms->vm_map, addr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, OBJT_DEFAULT, NULL, 0);

	return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
	vm_map_t map;
	vm_size_t size;

	if (sz == 0)
		return (0);

	map = &td->td_proc->p_vmspace->vm_map;
	size = (vm_size_t)round_page(sz);

	if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
		return (EINVAL);

	return (0);
}
561