xref: /freebsd/sys/kern/uipc_syscalls.c (revision 282a3889ebf826db9839be296ff1dd903f6d6d6e)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_sctp.h"
39 #include "opt_compat.h"
40 #include "opt_ktrace.h"
41 #include "opt_mac.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/sysproto.h>
49 #include <sys/malloc.h>
50 #include <sys/filedesc.h>
51 #include <sys/event.h>
52 #include <sys/proc.h>
53 #include <sys/fcntl.h>
54 #include <sys/file.h>
55 #include <sys/filio.h>
56 #include <sys/mount.h>
57 #include <sys/mbuf.h>
58 #include <sys/protosw.h>
59 #include <sys/sf_buf.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/signalvar.h>
63 #include <sys/syscallsubr.h>
64 #include <sys/sysctl.h>
65 #include <sys/uio.h>
66 #include <sys/vnode.h>
67 #ifdef KTRACE
68 #include <sys/ktrace.h>
69 #endif
70 
71 #include <security/mac/mac_framework.h>
72 
73 #include <vm/vm.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_pageout.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_extern.h>
79 
80 #ifdef SCTP
81 #include <netinet/sctp.h>
82 #include <netinet/sctp_peeloff.h>
83 #endif /* SCTP */
84 
85 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
86 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
87 
88 static int accept1(struct thread *td, struct accept_args *uap, int compat);
89 static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat);
90 static int getsockname1(struct thread *td, struct getsockname_args *uap,
91 			int compat);
92 static int getpeername1(struct thread *td, struct getpeername_args *uap,
93 			int compat);
94 
95 /*
96  * NSFBUFS-related variables and associated sysctls
97  */
98 int nsfbufs;
99 int nsfbufspeak;
100 int nsfbufsused;
101 
102 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
103     "Maximum number of sendfile(2) sf_bufs available");
104 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
105     "Number of sendfile(2) sf_bufs at peak usage");
106 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
107     "Number of sendfile(2) sf_bufs in use");
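
/*
 * Editor's note (not part of the original source): kern.ipc.nsfbufs is a
 * read-only tunable (CTLFLAG_RDTUN), so it is normally raised at boot time,
 * for example with a line such as
 *
 *	kern.ipc.nsfbufs="16384"
 *
 * in /boot/loader.conf (the value is only an illustration).  The
 * kern.ipc.nsfbufspeak and kern.ipc.nsfbufsused counters can then be watched
 * with sysctl(8) to judge whether the limit is adequate.
 */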
108 
109 /*
110  * Convert a user file descriptor to a kernel file entry.  A reference on the
111  * file entry is held upon returning.  This is lighter weight than
112  * fgetsock(), which bumps the socket reference count and drops the
113  * file reference count instead; this avoids several additional mutex
114  * operations associated with the extra reference.  If requested, return the
115  * open file flags.
116  */
117 static int
118 getsock(struct filedesc *fdp, int fd, struct file **fpp, u_int *fflagp)
119 {
120 	struct file *fp;
121 	int error;
122 
123 	fp = NULL;
124 	if (fdp == NULL)
125 		error = EBADF;
126 	else {
127 		FILEDESC_SLOCK(fdp);
128 		fp = fget_locked(fdp, fd);
129 		if (fp == NULL)
130 			error = EBADF;
131 		else if (fp->f_type != DTYPE_SOCKET) {
132 			fp = NULL;
133 			error = ENOTSOCK;
134 		} else {
135 			fhold(fp);
136 			if (fflagp != NULL)
137 				*fflagp = fp->f_flag;
138 			error = 0;
139 		}
140 		FILEDESC_SUNLOCK(fdp);
141 	}
142 	*fpp = fp;
143 	return (error);
144 }
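
/*
 * Editor's sketch (not part of the original source): the getsock()/fdrop()
 * pattern the syscalls below follow, shown for a hypothetical descriptor
 * 'fd'; callers in this file additionally bracket the body with
 * NET_LOCK_GIANT()/NET_UNLOCK_GIANT().
 *
 *	struct file *fp;
 *	struct socket *so;
 *	int error;
 *
 *	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
 *	if (error)
 *		return (error);
 *	so = fp->f_data;
 *	... operate on 'so' ...
 *	fdrop(fp, td);
 */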
145 
146 /*
147  * System call interface to the socket abstraction.
148  */
149 #if defined(COMPAT_43)
150 #define COMPAT_OLDSOCK
151 #endif
152 
153 int
154 socket(td, uap)
155 	struct thread *td;
156 	struct socket_args /* {
157 		int	domain;
158 		int	type;
159 		int	protocol;
160 	} */ *uap;
161 {
162 	struct filedesc *fdp;
163 	struct socket *so;
164 	struct file *fp;
165 	int fd, error;
166 
167 #ifdef MAC
168 	error = mac_check_socket_create(td->td_ucred, uap->domain, uap->type,
169 	    uap->protocol);
170 	if (error)
171 		return (error);
172 #endif
173 	fdp = td->td_proc->p_fd;
174 	error = falloc(td, &fp, &fd);
175 	if (error)
176 		return (error);
177 	/* An extra reference on `fp' has been held for us by falloc(). */
178 	NET_LOCK_GIANT();
179 	error = socreate(uap->domain, &so, uap->type, uap->protocol,
180 	    td->td_ucred, td);
181 	NET_UNLOCK_GIANT();
182 	if (error) {
183 		fdclose(fdp, fp, fd, td);
184 	} else {
185 		FILE_LOCK(fp);
186 		fp->f_data = so;	/* already has ref count */
187 		fp->f_flag = FREAD|FWRITE;
188 		fp->f_type = DTYPE_SOCKET;
189 		fp->f_ops = &socketops;
190 		FILE_UNLOCK(fp);
191 		td->td_retval[0] = fd;
192 	}
193 	fdrop(fp, td);
194 	return (error);
195 }
196 
197 /* ARGSUSED */
198 int
199 bind(td, uap)
200 	struct thread *td;
201 	struct bind_args /* {
202 		int	s;
203 		caddr_t	name;
204 		int	namelen;
205 	} */ *uap;
206 {
207 	struct sockaddr *sa;
208 	int error;
209 
210 	if ((error = getsockaddr(&sa, uap->name, uap->namelen)) != 0)
211 		return (error);
212 
213 	error = kern_bind(td, uap->s, sa);
214 	free(sa, M_SONAME);
215 	return (error);
216 }
217 
218 int
219 kern_bind(td, fd, sa)
220 	struct thread *td;
221 	int fd;
222 	struct sockaddr *sa;
223 {
224 	struct socket *so;
225 	struct file *fp;
226 	int error;
227 
228 	NET_LOCK_GIANT();
229 	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
230 	if (error)
231 		goto done2;
232 	so = fp->f_data;
233 #ifdef MAC
234 	SOCK_LOCK(so);
235 	error = mac_check_socket_bind(td->td_ucred, so, sa);
236 	SOCK_UNLOCK(so);
237 	if (error)
238 		goto done1;
239 #endif
240 	error = sobind(so, sa, td);
241 #ifdef MAC
242 done1:
243 #endif
244 	fdrop(fp, td);
245 done2:
246 	NET_UNLOCK_GIANT();
247 	return (error);
248 }
249 
250 /* ARGSUSED */
251 int
252 listen(td, uap)
253 	struct thread *td;
254 	struct listen_args /* {
255 		int	s;
256 		int	backlog;
257 	} */ *uap;
258 {
259 	struct socket *so;
260 	struct file *fp;
261 	int error;
262 
263 	NET_LOCK_GIANT();
264 	error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
265 	if (error == 0) {
266 		so = fp->f_data;
267 #ifdef MAC
268 		SOCK_LOCK(so);
269 		error = mac_check_socket_listen(td->td_ucred, so);
270 		SOCK_UNLOCK(so);
271 		if (error)
272 			goto done;
273 #endif
274 		error = solisten(so, uap->backlog, td);
275 #ifdef MAC
276 done:
277 #endif
278 		fdrop(fp, td);
279 	}
280 	NET_UNLOCK_GIANT();
281 	return(error);
282 }
283 
284 /*
285  * accept1()
286  */
287 static int
288 accept1(td, uap, compat)
289 	struct thread *td;
290 	struct accept_args /* {
291 		int	s;
292 		struct sockaddr	* __restrict name;
293 		socklen_t	* __restrict anamelen;
294 	} */ *uap;
295 	int compat;
296 {
297 	struct sockaddr *name;
298 	socklen_t namelen;
299 	struct file *fp;
300 	int error;
301 
302 	if (uap->name == NULL)
303 		return (kern_accept(td, uap->s, NULL, NULL, NULL));
304 
305 	error = copyin(uap->anamelen, &namelen, sizeof (namelen));
306 	if (error)
307 		return (error);
308 
309 	error = kern_accept(td, uap->s, &name, &namelen, &fp);
310 
311 	/*
312 	 * return a namelen of zero for older code which might
313 	 * ignore the return value from accept.
314 	 */
315 	if (error) {
316 		(void) copyout(&namelen,
317 		    uap->anamelen, sizeof(*uap->anamelen));
318 		return (error);
319 	}
320 
321 	if (error == 0 && name != NULL) {
322 #ifdef COMPAT_OLDSOCK
323 		if (compat)
324 			((struct osockaddr *)name)->sa_family =
325 			    name->sa_family;
326 #endif
327 		error = copyout(name, uap->name, namelen);
328 	}
329 	if (error == 0)
330 		error = copyout(&namelen, uap->anamelen,
331 		    sizeof(namelen));
332 	if (error)
333 		fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
334 	fdrop(fp, td);
335 	free(name, M_SONAME);
336 	return (error);
337 }
338 
339 int
340 kern_accept(struct thread *td, int s, struct sockaddr **name,
341     socklen_t *namelen, struct file **fp)
342 {
343 	struct filedesc *fdp;
344 	struct file *headfp, *nfp = NULL;
345 	struct sockaddr *sa = NULL;
346 	int error;
347 	struct socket *head, *so;
348 	int fd;
349 	u_int fflag;
350 	pid_t pgid;
351 	int tmp;
352 
353 	if (name) {
354 		*name = NULL;
355 		if (*namelen < 0)
356 			return (EINVAL);
357 	}
358 
359 	fdp = td->td_proc->p_fd;
360 	NET_LOCK_GIANT();
361 	error = getsock(fdp, s, &headfp, &fflag);
362 	if (error)
363 		goto done2;
364 	head = headfp->f_data;
365 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
366 		error = EINVAL;
367 		goto done;
368 	}
369 #ifdef MAC
370 	SOCK_LOCK(head);
371 	error = mac_check_socket_accept(td->td_ucred, head);
372 	SOCK_UNLOCK(head);
373 	if (error != 0)
374 		goto done;
375 #endif
376 	error = falloc(td, &nfp, &fd);
377 	if (error)
378 		goto done;
379 	ACCEPT_LOCK();
380 	if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
381 		ACCEPT_UNLOCK();
382 		error = EWOULDBLOCK;
383 		goto noconnection;
384 	}
385 	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
386 		if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
387 			head->so_error = ECONNABORTED;
388 			break;
389 		}
390 		error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
391 		    "accept", 0);
392 		if (error) {
393 			ACCEPT_UNLOCK();
394 			goto noconnection;
395 		}
396 	}
397 	if (head->so_error) {
398 		error = head->so_error;
399 		head->so_error = 0;
400 		ACCEPT_UNLOCK();
401 		goto noconnection;
402 	}
403 	so = TAILQ_FIRST(&head->so_comp);
404 	KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
405 	KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
406 
407 	/*
408 	 * Before changing the flags on the socket, we have to bump the
409 	 * reference count.  Otherwise, if the protocol calls sofree(),
410 	 * the socket will be released due to a zero refcount.
411 	 */
412 	SOCK_LOCK(so);			/* soref() and so_state update */
413 	soref(so);			/* file descriptor reference */
414 
415 	TAILQ_REMOVE(&head->so_comp, so, so_list);
416 	head->so_qlen--;
417 	so->so_state |= (head->so_state & SS_NBIO);
418 	so->so_qstate &= ~SQ_COMP;
419 	so->so_head = NULL;
420 
421 	SOCK_UNLOCK(so);
422 	ACCEPT_UNLOCK();
423 
424 	/* An extra reference on `nfp' has been held for us by falloc(). */
425 	td->td_retval[0] = fd;
426 
427 	/* connection has been removed from the listen queue */
428 	KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
429 
430 	pgid = fgetown(&head->so_sigio);
431 	if (pgid != 0)
432 		fsetown(pgid, &so->so_sigio);
433 
434 	FILE_LOCK(nfp);
435 	nfp->f_data = so;	/* nfp has ref count from falloc */
436 	nfp->f_flag = fflag;
437 	nfp->f_type = DTYPE_SOCKET;
438 	nfp->f_ops = &socketops;
439 	FILE_UNLOCK(nfp);
440 	/* Sync socket nonblocking/async state with file flags */
441 	tmp = fflag & FNONBLOCK;
442 	(void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
443 	tmp = fflag & FASYNC;
444 	(void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
445 	sa = 0;
446 	error = soaccept(so, &sa);
447 	if (error) {
448 		/*
449 		 * return a namelen of zero for older code which might
450 		 * ignore the return value from accept.
451 		 */
452 		if (name)
453 			*namelen = 0;
454 		goto noconnection;
455 	}
456 	if (sa == NULL) {
457 		if (name)
458 			*namelen = 0;
459 		goto done;
460 	}
461 	if (name) {
462 		/* check sa_len before it is destroyed */
463 		if (*namelen > sa->sa_len)
464 			*namelen = sa->sa_len;
465 		*name = sa;
466 		sa = NULL;
467 	}
468 noconnection:
469 	if (sa)
470 		FREE(sa, M_SONAME);
471 
472 	/*
473 	 * close the new descriptor, assuming someone hasn't ripped it
474 	 * out from under us.
475 	 */
476 	if (error)
477 		fdclose(fdp, nfp, fd, td);
478 
479 	/*
480 	 * Release explicitly held references before returning.  We return
481 	 * a reference on nfp to the caller on success if they request it.
482 	 */
483 done:
484 	if (fp != NULL) {
485 		if (error == 0) {
486 			*fp = nfp;
487 			nfp = NULL;
488 		} else
489 			*fp = NULL;
490 	}
491 	if (nfp != NULL)
492 		fdrop(nfp, td);
493 	fdrop(headfp, td);
494 done2:
495 	NET_UNLOCK_GIANT();
496 	return (error);
497 }
498 
499 int
500 accept(td, uap)
501 	struct thread *td;
502 	struct accept_args *uap;
503 {
504 
505 	return (accept1(td, uap, 0));
506 }
507 
508 #ifdef COMPAT_OLDSOCK
509 int
510 oaccept(td, uap)
511 	struct thread *td;
512 	struct accept_args *uap;
513 {
514 
515 	return (accept1(td, uap, 1));
516 }
517 #endif /* COMPAT_OLDSOCK */
518 
519 /* ARGSUSED */
520 int
521 connect(td, uap)
522 	struct thread *td;
523 	struct connect_args /* {
524 		int	s;
525 		caddr_t	name;
526 		int	namelen;
527 	} */ *uap;
528 {
529 	struct sockaddr *sa;
530 	int error;
531 
532 	error = getsockaddr(&sa, uap->name, uap->namelen);
533 	if (error)
534 		return (error);
535 
536 	error = kern_connect(td, uap->s, sa);
537 	free(sa, M_SONAME);
538 	return (error);
539 }
540 
541 
542 int
543 kern_connect(td, fd, sa)
544 	struct thread *td;
545 	int fd;
546 	struct sockaddr *sa;
547 {
548 	struct socket *so;
549 	struct file *fp;
550 	int error;
551 	int interrupted = 0;
552 
553 	NET_LOCK_GIANT();
554 	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
555 	if (error)
556 		goto done2;
557 	so = fp->f_data;
558 	if (so->so_state & SS_ISCONNECTING) {
559 		error = EALREADY;
560 		goto done1;
561 	}
562 #ifdef MAC
563 	SOCK_LOCK(so);
564 	error = mac_check_socket_connect(td->td_ucred, so, sa);
565 	SOCK_UNLOCK(so);
566 	if (error)
567 		goto bad;
568 #endif
569 	error = soconnect(so, sa, td);
570 	if (error)
571 		goto bad;
572 	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
573 		error = EINPROGRESS;
574 		goto done1;
575 	}
576 	SOCK_LOCK(so);
577 	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
578 		error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
579 		    "connec", 0);
580 		if (error) {
581 			if (error == EINTR || error == ERESTART)
582 				interrupted = 1;
583 			break;
584 		}
585 	}
586 	if (error == 0) {
587 		error = so->so_error;
588 		so->so_error = 0;
589 	}
590 	SOCK_UNLOCK(so);
591 bad:
592 	if (!interrupted)
593 		so->so_state &= ~SS_ISCONNECTING;
594 	if (error == ERESTART)
595 		error = EINTR;
596 done1:
597 	fdrop(fp, td);
598 done2:
599 	NET_UNLOCK_GIANT();
600 	return (error);
601 }
602 
603 int
604 socketpair(td, uap)
605 	struct thread *td;
606 	struct socketpair_args /* {
607 		int	domain;
608 		int	type;
609 		int	protocol;
610 		int	*rsv;
611 	} */ *uap;
612 {
613 	struct filedesc *fdp = td->td_proc->p_fd;
614 	struct file *fp1, *fp2;
615 	struct socket *so1, *so2;
616 	int fd, error, sv[2];
617 
618 #ifdef MAC
619 	/* We might want to have a separate check for socket pairs. */
620 	error = mac_check_socket_create(td->td_ucred, uap->domain, uap->type,
621 	    uap->protocol);
622 	if (error)
623 		return (error);
624 #endif
625 
626 	NET_LOCK_GIANT();
627 	error = socreate(uap->domain, &so1, uap->type, uap->protocol,
628 	    td->td_ucred, td);
629 	if (error)
630 		goto done2;
631 	error = socreate(uap->domain, &so2, uap->type, uap->protocol,
632 	    td->td_ucred, td);
633 	if (error)
634 		goto free1;
635 	/* On success an extra reference to `fp1' and `fp2' is held by falloc(). */
636 	error = falloc(td, &fp1, &fd);
637 	if (error)
638 		goto free2;
639 	sv[0] = fd;
640 	fp1->f_data = so1;	/* so1 already has ref count */
641 	error = falloc(td, &fp2, &fd);
642 	if (error)
643 		goto free3;
644 	fp2->f_data = so2;	/* so2 already has ref count */
645 	sv[1] = fd;
646 	error = soconnect2(so1, so2);
647 	if (error)
648 		goto free4;
649 	if (uap->type == SOCK_DGRAM) {
650 		/*
651 		 * Datagram socket connection is asymmetric.
652 		 */
653 		 error = soconnect2(so2, so1);
654 		 if (error)
655 			goto free4;
656 	}
657 	FILE_LOCK(fp1);
658 	fp1->f_flag = FREAD|FWRITE;
659 	fp1->f_type = DTYPE_SOCKET;
660 	fp1->f_ops = &socketops;
661 	FILE_UNLOCK(fp1);
662 	FILE_LOCK(fp2);
663 	fp2->f_flag = FREAD|FWRITE;
664 	fp2->f_type = DTYPE_SOCKET;
665 	fp2->f_ops = &socketops;
666 	FILE_UNLOCK(fp2);
667 	so1 = so2 = NULL;
668 	error = copyout(sv, uap->rsv, 2 * sizeof (int));
669 	if (error)
670 		goto free4;
671 	fdrop(fp1, td);
672 	fdrop(fp2, td);
673 	goto done2;
674 free4:
675 	fdclose(fdp, fp2, sv[1], td);
676 	fdrop(fp2, td);
677 free3:
678 	fdclose(fdp, fp1, sv[0], td);
679 	fdrop(fp1, td);
680 free2:
681 	if (so2 != NULL)
682 		(void)soclose(so2);
683 free1:
684 	if (so1 != NULL)
685 		(void)soclose(so1);
686 done2:
687 	NET_UNLOCK_GIANT();
688 	return (error);
689 }
690 
691 static int
692 sendit(td, s, mp, flags)
693 	struct thread *td;
694 	int s;
695 	struct msghdr *mp;
696 	int flags;
697 {
698 	struct mbuf *control;
699 	struct sockaddr *to;
700 	int error;
701 
702 	if (mp->msg_name != NULL) {
703 		error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
704 		if (error) {
705 			to = NULL;
706 			goto bad;
707 		}
708 		mp->msg_name = to;
709 	} else {
710 		to = NULL;
711 	}
712 
713 	if (mp->msg_control) {
714 		if (mp->msg_controllen < sizeof(struct cmsghdr)
715 #ifdef COMPAT_OLDSOCK
716 		    && mp->msg_flags != MSG_COMPAT
717 #endif
718 		) {
719 			error = EINVAL;
720 			goto bad;
721 		}
722 		error = sockargs(&control, mp->msg_control,
723 		    mp->msg_controllen, MT_CONTROL);
724 		if (error)
725 			goto bad;
726 #ifdef COMPAT_OLDSOCK
727 		if (mp->msg_flags == MSG_COMPAT) {
728 			struct cmsghdr *cm;
729 
730 			M_PREPEND(control, sizeof(*cm), M_TRYWAIT);
731 			if (control == 0) {
732 				error = ENOBUFS;
733 				goto bad;
734 			} else {
735 				cm = mtod(control, struct cmsghdr *);
736 				cm->cmsg_len = control->m_len;
737 				cm->cmsg_level = SOL_SOCKET;
738 				cm->cmsg_type = SCM_RIGHTS;
739 			}
740 		}
741 #endif
742 	} else {
743 		control = NULL;
744 	}
745 
746 	error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
747 
748 bad:
749 	if (to)
750 		FREE(to, M_SONAME);
751 	return (error);
752 }
753 
754 int
755 kern_sendit(td, s, mp, flags, control, segflg)
756 	struct thread *td;
757 	int s;
758 	struct msghdr *mp;
759 	int flags;
760 	struct mbuf *control;
761 	enum uio_seg segflg;
762 {
763 	struct file *fp;
764 	struct uio auio;
765 	struct iovec *iov;
766 	struct socket *so;
767 	int i;
768 	int len, error;
769 #ifdef KTRACE
770 	struct uio *ktruio = NULL;
771 #endif
772 
773 	NET_LOCK_GIANT();
774 	error = getsock(td->td_proc->p_fd, s, &fp, NULL);
775 	if (error)
776 		goto bad2;
777 	so = (struct socket *)fp->f_data;
778 
779 #ifdef MAC
780 	SOCK_LOCK(so);
781 	error = mac_check_socket_send(td->td_ucred, so);
782 	SOCK_UNLOCK(so);
783 	if (error)
784 		goto bad;
785 #endif
786 
787 	auio.uio_iov = mp->msg_iov;
788 	auio.uio_iovcnt = mp->msg_iovlen;
789 	auio.uio_segflg = segflg;
790 	auio.uio_rw = UIO_WRITE;
791 	auio.uio_td = td;
792 	auio.uio_offset = 0;			/* XXX */
793 	auio.uio_resid = 0;
794 	iov = mp->msg_iov;
795 	for (i = 0; i < mp->msg_iovlen; i++, iov++) {
796 		if ((auio.uio_resid += iov->iov_len) < 0) {
797 			error = EINVAL;
798 			goto bad;
799 		}
800 	}
801 #ifdef KTRACE
802 	if (KTRPOINT(td, KTR_GENIO))
803 		ktruio = cloneuio(&auio);
804 #endif
805 	len = auio.uio_resid;
806 	error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
807 	if (error) {
808 		if (auio.uio_resid != len && (error == ERESTART ||
809 		    error == EINTR || error == EWOULDBLOCK))
810 			error = 0;
811 		/* Generation of SIGPIPE can be controlled per socket */
812 		if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
813 		    !(flags & MSG_NOSIGNAL)) {
814 			PROC_LOCK(td->td_proc);
815 			psignal(td->td_proc, SIGPIPE);
816 			PROC_UNLOCK(td->td_proc);
817 		}
818 	}
819 	if (error == 0)
820 		td->td_retval[0] = len - auio.uio_resid;
821 #ifdef KTRACE
822 	if (ktruio != NULL) {
823 		ktruio->uio_resid = td->td_retval[0];
824 		ktrgenio(s, UIO_WRITE, ktruio, error);
825 	}
826 #endif
827 bad:
828 	fdrop(fp, td);
829 bad2:
830 	NET_UNLOCK_GIANT();
831 	return (error);
832 }
833 
834 int
835 sendto(td, uap)
836 	struct thread *td;
837 	struct sendto_args /* {
838 		int	s;
839 		caddr_t	buf;
840 		size_t	len;
841 		int	flags;
842 		caddr_t	to;
843 		int	tolen;
844 	} */ *uap;
845 {
846 	struct msghdr msg;
847 	struct iovec aiov;
848 	int error;
849 
850 	msg.msg_name = uap->to;
851 	msg.msg_namelen = uap->tolen;
852 	msg.msg_iov = &aiov;
853 	msg.msg_iovlen = 1;
854 	msg.msg_control = 0;
855 #ifdef COMPAT_OLDSOCK
856 	msg.msg_flags = 0;
857 #endif
858 	aiov.iov_base = uap->buf;
859 	aiov.iov_len = uap->len;
860 	error = sendit(td, uap->s, &msg, uap->flags);
861 	return (error);
862 }
863 
864 #ifdef COMPAT_OLDSOCK
865 int
866 osend(td, uap)
867 	struct thread *td;
868 	struct osend_args /* {
869 		int	s;
870 		caddr_t	buf;
871 		int	len;
872 		int	flags;
873 	} */ *uap;
874 {
875 	struct msghdr msg;
876 	struct iovec aiov;
877 	int error;
878 
879 	msg.msg_name = 0;
880 	msg.msg_namelen = 0;
881 	msg.msg_iov = &aiov;
882 	msg.msg_iovlen = 1;
883 	aiov.iov_base = uap->buf;
884 	aiov.iov_len = uap->len;
885 	msg.msg_control = 0;
886 	msg.msg_flags = 0;
887 	error = sendit(td, uap->s, &msg, uap->flags);
888 	return (error);
889 }
890 
891 int
892 osendmsg(td, uap)
893 	struct thread *td;
894 	struct osendmsg_args /* {
895 		int	s;
896 		caddr_t	msg;
897 		int	flags;
898 	} */ *uap;
899 {
900 	struct msghdr msg;
901 	struct iovec *iov;
902 	int error;
903 
904 	error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
905 	if (error)
906 		return (error);
907 	error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
908 	if (error)
909 		return (error);
910 	msg.msg_iov = iov;
911 	msg.msg_flags = MSG_COMPAT;
912 	error = sendit(td, uap->s, &msg, uap->flags);
913 	free(iov, M_IOV);
914 	return (error);
915 }
916 #endif
917 
918 int
919 sendmsg(td, uap)
920 	struct thread *td;
921 	struct sendmsg_args /* {
922 		int	s;
923 		caddr_t	msg;
924 		int	flags;
925 	} */ *uap;
926 {
927 	struct msghdr msg;
928 	struct iovec *iov;
929 	int error;
930 
931 	error = copyin(uap->msg, &msg, sizeof (msg));
932 	if (error)
933 		return (error);
934 	error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
935 	if (error)
936 		return (error);
937 	msg.msg_iov = iov;
938 #ifdef COMPAT_OLDSOCK
939 	msg.msg_flags = 0;
940 #endif
941 	error = sendit(td, uap->s, &msg, uap->flags);
942 	free(iov, M_IOV);
943 	return (error);
944 }
945 
946 int
947 kern_recvit(td, s, mp, fromseg, controlp)
948 	struct thread *td;
949 	int s;
950 	struct msghdr *mp;
951 	enum uio_seg fromseg;
952 	struct mbuf **controlp;
953 {
954 	struct uio auio;
955 	struct iovec *iov;
956 	int i;
957 	socklen_t len;
958 	int error;
959 	struct mbuf *m, *control = 0;
960 	caddr_t ctlbuf;
961 	struct file *fp;
962 	struct socket *so;
963 	struct sockaddr *fromsa = 0;
964 #ifdef KTRACE
965 	struct uio *ktruio = NULL;
966 #endif
967 
968 	if (controlp != NULL)
969 		*controlp = 0;
970 
971 	NET_LOCK_GIANT();
972 	error = getsock(td->td_proc->p_fd, s, &fp, NULL);
973 	if (error) {
974 		NET_UNLOCK_GIANT();
975 		return (error);
976 	}
977 	so = fp->f_data;
978 
979 #ifdef MAC
980 	SOCK_LOCK(so);
981 	error = mac_check_socket_receive(td->td_ucred, so);
982 	SOCK_UNLOCK(so);
983 	if (error) {
984 		fdrop(fp, td);
985 		NET_UNLOCK_GIANT();
986 		return (error);
987 	}
988 #endif
989 
990 	auio.uio_iov = mp->msg_iov;
991 	auio.uio_iovcnt = mp->msg_iovlen;
992 	auio.uio_segflg = UIO_USERSPACE;
993 	auio.uio_rw = UIO_READ;
994 	auio.uio_td = td;
995 	auio.uio_offset = 0;			/* XXX */
996 	auio.uio_resid = 0;
997 	iov = mp->msg_iov;
998 	for (i = 0; i < mp->msg_iovlen; i++, iov++) {
999 		if ((auio.uio_resid += iov->iov_len) < 0) {
1000 			fdrop(fp, td);
1001 			NET_UNLOCK_GIANT();
1002 			return (EINVAL);
1003 		}
1004 	}
1005 #ifdef KTRACE
1006 	if (KTRPOINT(td, KTR_GENIO))
1007 		ktruio = cloneuio(&auio);
1008 #endif
1009 	len = auio.uio_resid;
1010 	error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
1011 	    (mp->msg_control || controlp) ? &control : (struct mbuf **)0,
1012 	    &mp->msg_flags);
1013 	if (error) {
1014 		if (auio.uio_resid != (int)len && (error == ERESTART ||
1015 		    error == EINTR || error == EWOULDBLOCK))
1016 			error = 0;
1017 	}
1018 #ifdef KTRACE
1019 	if (ktruio != NULL) {
1020 		ktruio->uio_resid = (int)len - auio.uio_resid;
1021 		ktrgenio(s, UIO_READ, ktruio, error);
1022 	}
1023 #endif
1024 	if (error)
1025 		goto out;
1026 	td->td_retval[0] = (int)len - auio.uio_resid;
1027 	if (mp->msg_name) {
1028 		len = mp->msg_namelen;
1029 		if (len <= 0 || fromsa == 0)
1030 			len = 0;
1031 		else {
1032 			/* save sa_len before it is destroyed by MSG_COMPAT */
1033 			len = MIN(len, fromsa->sa_len);
1034 #ifdef COMPAT_OLDSOCK
1035 			if (mp->msg_flags & MSG_COMPAT)
1036 				((struct osockaddr *)fromsa)->sa_family =
1037 				    fromsa->sa_family;
1038 #endif
1039 			if (fromseg == UIO_USERSPACE) {
1040 				error = copyout(fromsa, mp->msg_name,
1041 				    (unsigned)len);
1042 				if (error)
1043 					goto out;
1044 			} else
1045 				bcopy(fromsa, mp->msg_name, len);
1046 		}
1047 		mp->msg_namelen = len;
1048 	}
1049 	if (mp->msg_control && controlp == NULL) {
1050 #ifdef COMPAT_OLDSOCK
1051 		/*
1052 		 * We assume that old recvmsg calls won't receive access
1053 		 * rights and other control info, esp. as control info
1054 		 * is always optional and those options didn't exist in 4.3.
1055 		 * If we receive rights, trim the cmsghdr; anything else
1056 		 * is tossed.
1057 		 */
1058 		if (control && mp->msg_flags & MSG_COMPAT) {
1059 			if (mtod(control, struct cmsghdr *)->cmsg_level !=
1060 			    SOL_SOCKET ||
1061 			    mtod(control, struct cmsghdr *)->cmsg_type !=
1062 			    SCM_RIGHTS) {
1063 				mp->msg_controllen = 0;
1064 				goto out;
1065 			}
1066 			control->m_len -= sizeof (struct cmsghdr);
1067 			control->m_data += sizeof (struct cmsghdr);
1068 		}
1069 #endif
1070 		len = mp->msg_controllen;
1071 		m = control;
1072 		mp->msg_controllen = 0;
1073 		ctlbuf = mp->msg_control;
1074 
1075 		while (m && len > 0) {
1076 			unsigned int tocopy;
1077 
1078 			if (len >= m->m_len)
1079 				tocopy = m->m_len;
1080 			else {
1081 				mp->msg_flags |= MSG_CTRUNC;
1082 				tocopy = len;
1083 			}
1084 
1085 			if ((error = copyout(mtod(m, caddr_t),
1086 					ctlbuf, tocopy)) != 0)
1087 				goto out;
1088 
1089 			ctlbuf += tocopy;
1090 			len -= tocopy;
1091 			m = m->m_next;
1092 		}
1093 		mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
1094 	}
1095 out:
1096 	fdrop(fp, td);
1097 	NET_UNLOCK_GIANT();
1098 	if (fromsa)
1099 		FREE(fromsa, M_SONAME);
1100 
1101 	if (error == 0 && controlp != NULL)
1102 		*controlp = control;
1103 	else  if (control)
1104 		m_freem(control);
1105 
1106 	return (error);
1107 }
1108 
1109 static int
1110 recvit(td, s, mp, namelenp)
1111 	struct thread *td;
1112 	int s;
1113 	struct msghdr *mp;
1114 	void *namelenp;
1115 {
1116 	int error;
1117 
1118 	error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
1119 	if (error)
1120 		return (error);
1121 	if (namelenp) {
1122 		error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
1123 #ifdef COMPAT_OLDSOCK
1124 		if (mp->msg_flags & MSG_COMPAT)
1125 			error = 0;	/* old recvfrom didn't check */
1126 #endif
1127 	}
1128 	return (error);
1129 }
1130 
1131 int
1132 recvfrom(td, uap)
1133 	struct thread *td;
1134 	struct recvfrom_args /* {
1135 		int	s;
1136 		caddr_t	buf;
1137 		size_t	len;
1138 		int	flags;
1139 		struct sockaddr * __restrict	from;
1140 		socklen_t * __restrict fromlenaddr;
1141 	} */ *uap;
1142 {
1143 	struct msghdr msg;
1144 	struct iovec aiov;
1145 	int error;
1146 
1147 	if (uap->fromlenaddr) {
1148 		error = copyin(uap->fromlenaddr,
1149 		    &msg.msg_namelen, sizeof (msg.msg_namelen));
1150 		if (error)
1151 			goto done2;
1152 	} else {
1153 		msg.msg_namelen = 0;
1154 	}
1155 	msg.msg_name = uap->from;
1156 	msg.msg_iov = &aiov;
1157 	msg.msg_iovlen = 1;
1158 	aiov.iov_base = uap->buf;
1159 	aiov.iov_len = uap->len;
1160 	msg.msg_control = 0;
1161 	msg.msg_flags = uap->flags;
1162 	error = recvit(td, uap->s, &msg, uap->fromlenaddr);
1163 done2:
1164 	return(error);
1165 }
1166 
1167 #ifdef COMPAT_OLDSOCK
1168 int
1169 orecvfrom(td, uap)
1170 	struct thread *td;
1171 	struct recvfrom_args *uap;
1172 {
1173 
1174 	uap->flags |= MSG_COMPAT;
1175 	return (recvfrom(td, uap));
1176 }
1177 #endif
1178 
1179 #ifdef COMPAT_OLDSOCK
1180 int
1181 orecv(td, uap)
1182 	struct thread *td;
1183 	struct orecv_args /* {
1184 		int	s;
1185 		caddr_t	buf;
1186 		int	len;
1187 		int	flags;
1188 	} */ *uap;
1189 {
1190 	struct msghdr msg;
1191 	struct iovec aiov;
1192 	int error;
1193 
1194 	msg.msg_name = 0;
1195 	msg.msg_namelen = 0;
1196 	msg.msg_iov = &aiov;
1197 	msg.msg_iovlen = 1;
1198 	aiov.iov_base = uap->buf;
1199 	aiov.iov_len = uap->len;
1200 	msg.msg_control = 0;
1201 	msg.msg_flags = uap->flags;
1202 	error = recvit(td, uap->s, &msg, NULL);
1203 	return (error);
1204 }
1205 
1206 /*
1207  * Old recvmsg.  This code takes advantage of the fact that the old msghdr
1208  * overlays the new one, missing only the flags, and with the (old) access
1209  * rights where the control fields are now.
1210  */
1211 int
1212 orecvmsg(td, uap)
1213 	struct thread *td;
1214 	struct orecvmsg_args /* {
1215 		int	s;
1216 		struct	omsghdr *msg;
1217 		int	flags;
1218 	} */ *uap;
1219 {
1220 	struct msghdr msg;
1221 	struct iovec *iov;
1222 	int error;
1223 
1224 	error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
1225 	if (error)
1226 		return (error);
1227 	error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1228 	if (error)
1229 		return (error);
1230 	msg.msg_flags = uap->flags | MSG_COMPAT;
1231 	msg.msg_iov = iov;
1232 	error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
1233 	if (msg.msg_controllen && error == 0)
1234 		error = copyout(&msg.msg_controllen,
1235 		    &uap->msg->msg_accrightslen, sizeof (int));
1236 	free(iov, M_IOV);
1237 	return (error);
1238 }
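
/*
 * Editor's illustration (not part of the original source): the historical
 * 4.3BSD omsghdr referred to above.  Its first four members line up with
 * struct msghdr; msg_accrights/msg_accrightslen occupy the slots now used
 * for msg_control/msg_controllen, and there is no msg_flags member, which
 * is why copying in sizeof(struct omsghdr) bytes and then setting
 * msg_flags to include MSG_COMPAT works.
 *
 *	struct omsghdr {
 *		caddr_t		msg_name;		(optional address)
 *		int		msg_namelen;
 *		struct iovec	*msg_iov;		(scatter/gather array)
 *		int		msg_iovlen;
 *		caddr_t		msg_accrights;		(access rights sent/received)
 *		int		msg_accrightslen;
 *	};
 */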
1239 #endif
1240 
1241 int
1242 recvmsg(td, uap)
1243 	struct thread *td;
1244 	struct recvmsg_args /* {
1245 		int	s;
1246 		struct	msghdr *msg;
1247 		int	flags;
1248 	} */ *uap;
1249 {
1250 	struct msghdr msg;
1251 	struct iovec *uiov, *iov;
1252 	int error;
1253 
1254 	error = copyin(uap->msg, &msg, sizeof (msg));
1255 	if (error)
1256 		return (error);
1257 	error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
1258 	if (error)
1259 		return (error);
1260 	msg.msg_flags = uap->flags;
1261 #ifdef COMPAT_OLDSOCK
1262 	msg.msg_flags &= ~MSG_COMPAT;
1263 #endif
1264 	uiov = msg.msg_iov;
1265 	msg.msg_iov = iov;
1266 	error = recvit(td, uap->s, &msg, NULL);
1267 	if (error == 0) {
1268 		msg.msg_iov = uiov;
1269 		error = copyout(&msg, uap->msg, sizeof(msg));
1270 	}
1271 	free(iov, M_IOV);
1272 	return (error);
1273 }
1274 
1275 /* ARGSUSED */
1276 int
1277 shutdown(td, uap)
1278 	struct thread *td;
1279 	struct shutdown_args /* {
1280 		int	s;
1281 		int	how;
1282 	} */ *uap;
1283 {
1284 	struct socket *so;
1285 	struct file *fp;
1286 	int error;
1287 
1288 	NET_LOCK_GIANT();
1289 	error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
1290 	if (error == 0) {
1291 		so = fp->f_data;
1292 		error = soshutdown(so, uap->how);
1293 		fdrop(fp, td);
1294 	}
1295 	NET_UNLOCK_GIANT();
1296 	return (error);
1297 }
1298 
1299 /* ARGSUSED */
1300 int
1301 setsockopt(td, uap)
1302 	struct thread *td;
1303 	struct setsockopt_args /* {
1304 		int	s;
1305 		int	level;
1306 		int	name;
1307 		caddr_t	val;
1308 		int	valsize;
1309 	} */ *uap;
1310 {
1311 
1312 	return (kern_setsockopt(td, uap->s, uap->level, uap->name,
1313 	    uap->val, UIO_USERSPACE, uap->valsize));
1314 }
1315 
1316 int
1317 kern_setsockopt(td, s, level, name, val, valseg, valsize)
1318 	struct thread *td;
1319 	int s;
1320 	int level;
1321 	int name;
1322 	void *val;
1323 	enum uio_seg valseg;
1324 	socklen_t valsize;
1325 {
1326 	int error;
1327 	struct socket *so;
1328 	struct file *fp;
1329 	struct sockopt sopt;
1330 
1331 	if (val == NULL && valsize != 0)
1332 		return (EFAULT);
1333 	if ((int)valsize < 0)
1334 		return (EINVAL);
1335 
1336 	sopt.sopt_dir = SOPT_SET;
1337 	sopt.sopt_level = level;
1338 	sopt.sopt_name = name;
1339 	sopt.sopt_val = val;
1340 	sopt.sopt_valsize = valsize;
1341 	switch (valseg) {
1342 	case UIO_USERSPACE:
1343 		sopt.sopt_td = td;
1344 		break;
1345 	case UIO_SYSSPACE:
1346 		sopt.sopt_td = NULL;
1347 		break;
1348 	default:
1349 		panic("kern_setsockopt called with bad valseg");
1350 	}
1351 
1352 	NET_LOCK_GIANT();
1353 	error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1354 	if (error == 0) {
1355 		so = fp->f_data;
1356 		error = sosetopt(so, &sopt);
1357 		fdrop(fp, td);
1358 	}
1359 	NET_UNLOCK_GIANT();
1360 	return(error);
1361 }
1362 
1363 /* ARGSUSED */
1364 int
1365 getsockopt(td, uap)
1366 	struct thread *td;
1367 	struct getsockopt_args /* {
1368 		int	s;
1369 		int	level;
1370 		int	name;
1371 		void * __restrict	val;
1372 		socklen_t * __restrict avalsize;
1373 	} */ *uap;
1374 {
1375 	socklen_t valsize;
1376 	int	error;
1377 
1378 	if (uap->val) {
1379 		error = copyin(uap->avalsize, &valsize, sizeof (valsize));
1380 		if (error)
1381 			return (error);
1382 	}
1383 
1384 	error = kern_getsockopt(td, uap->s, uap->level, uap->name,
1385 	    uap->val, UIO_USERSPACE, &valsize);
1386 
1387 	if (error == 0)
1388 		error = copyout(&valsize, uap->avalsize, sizeof (valsize));
1389 	return (error);
1390 }
1391 
1392 /*
1393  * Kernel version of getsockopt.
1394  * optval can be a userland or kernel address.  optlen is always a kernel pointer.
1395  */
1396 int
1397 kern_getsockopt(td, s, level, name, val, valseg, valsize)
1398 	struct thread *td;
1399 	int s;
1400 	int level;
1401 	int name;
1402 	void *val;
1403 	enum uio_seg valseg;
1404 	socklen_t *valsize;
1405 {
1406 	int error;
1407 	struct  socket *so;
1408 	struct file *fp;
1409 	struct	sockopt sopt;
1410 
1411 	if (val == NULL)
1412 		*valsize = 0;
1413 	if ((int)*valsize < 0)
1414 		return (EINVAL);
1415 
1416 	sopt.sopt_dir = SOPT_GET;
1417 	sopt.sopt_level = level;
1418 	sopt.sopt_name = name;
1419 	sopt.sopt_val = val;
1420 	sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
1421 	switch (valseg) {
1422 	case UIO_USERSPACE:
1423 		sopt.sopt_td = td;
1424 		break;
1425 	case UIO_SYSSPACE:
1426 		sopt.sopt_td = NULL;
1427 		break;
1428 	default:
1429 		panic("kern_getsockopt called with bad valseg");
1430 	}
1431 
1432 	NET_LOCK_GIANT();
1433 	error = getsock(td->td_proc->p_fd, s, &fp, NULL);
1434 	if (error == 0) {
1435 		so = fp->f_data;
1436 		error = sogetopt(so, &sopt);
1437 		*valsize = sopt.sopt_valsize;
1438 		fdrop(fp, td);
1439 	}
1440 	NET_UNLOCK_GIANT();
1441 	return (error);
1442 }
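
/*
 * Editor's sketch (not part of the original source): an in-kernel caller
 * passes UIO_SYSSPACE so the option buffer is treated as a kernel address
 * (sopt_td is left NULL above).  For example, fetching SO_ERROR from a
 * hypothetical socket descriptor 's':
 *
 *	int val, error;
 *	socklen_t len = sizeof(val);
 *
 *	error = kern_getsockopt(td, s, SOL_SOCKET, SO_ERROR, &val,
 *	    UIO_SYSSPACE, &len);
 */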
1443 
1444 /*
1445  * getsockname1() - Get socket name.
1446  */
1447 /* ARGSUSED */
1448 static int
1449 getsockname1(td, uap, compat)
1450 	struct thread *td;
1451 	struct getsockname_args /* {
1452 		int	fdes;
1453 		struct sockaddr * __restrict asa;
1454 		socklen_t * __restrict alen;
1455 	} */ *uap;
1456 	int compat;
1457 {
1458 	struct sockaddr *sa;
1459 	socklen_t len;
1460 	int error;
1461 
1462 	error = copyin(uap->alen, &len, sizeof(len));
1463 	if (error)
1464 		return (error);
1465 
1466 	error = kern_getsockname(td, uap->fdes, &sa, &len);
1467 	if (error)
1468 		return (error);
1469 
1470 	if (len != 0) {
1471 #ifdef COMPAT_OLDSOCK
1472 		if (compat)
1473 			((struct osockaddr *)sa)->sa_family = sa->sa_family;
1474 #endif
1475 		error = copyout(sa, uap->asa, (u_int)len);
1476 	}
1477 	free(sa, M_SONAME);
1478 	if (error == 0)
1479 		error = copyout(&len, uap->alen, sizeof(len));
1480 	return (error);
1481 }
1482 
1483 int
1484 kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
1485     socklen_t *alen)
1486 {
1487 	struct socket *so;
1488 	struct file *fp;
1489 	socklen_t len;
1490 	int error;
1491 
1492 	if (*alen < 0)
1493 		return (EINVAL);
1494 
1495 	NET_LOCK_GIANT();
1496 	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
1497 	if (error)
1498 		goto done;
1499 	so = fp->f_data;
1500 	*sa = NULL;
1501 	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
1502 	if (error)
1503 		goto bad;
1504 	if (*sa == NULL)
1505 		len = 0;
1506 	else
1507 		len = MIN(*alen, (*sa)->sa_len);
1508 	*alen = len;
1509 bad:
1510 	fdrop(fp, td);
1511 	if (error && *sa) {
1512 		free(*sa, M_SONAME);
1513 		*sa = NULL;
1514 	}
1515 done:
1516 	NET_UNLOCK_GIANT();
1517 	return (error);
1518 }
1519 
1520 int
1521 getsockname(td, uap)
1522 	struct thread *td;
1523 	struct getsockname_args *uap;
1524 {
1525 
1526 	return (getsockname1(td, uap, 0));
1527 }
1528 
1529 #ifdef COMPAT_OLDSOCK
1530 int
1531 ogetsockname(td, uap)
1532 	struct thread *td;
1533 	struct getsockname_args *uap;
1534 {
1535 
1536 	return (getsockname1(td, uap, 1));
1537 }
1538 #endif /* COMPAT_OLDSOCK */
1539 
1540 /*
1541  * getpeername1() - Get name of peer for connected socket.
1542  */
1543 /* ARGSUSED */
1544 static int
1545 getpeername1(td, uap, compat)
1546 	struct thread *td;
1547 	struct getpeername_args /* {
1548 		int	fdes;
1549 		struct sockaddr * __restrict	asa;
1550 		socklen_t * __restrict	alen;
1551 	} */ *uap;
1552 	int compat;
1553 {
1554 	struct sockaddr *sa;
1555 	socklen_t len;
1556 	int error;
1557 
1558 	error = copyin(uap->alen, &len, sizeof (len));
1559 	if (error)
1560 		return (error);
1561 
1562 	error = kern_getpeername(td, uap->fdes, &sa, &len);
1563 	if (error)
1564 		return (error);
1565 
1566 	if (len != 0) {
1567 #ifdef COMPAT_OLDSOCK
1568 		if (compat)
1569 			((struct osockaddr *)sa)->sa_family = sa->sa_family;
1570 #endif
1571 		error = copyout(sa, uap->asa, (u_int)len);
1572 	}
1573 	free(sa, M_SONAME);
1574 	if (error == 0)
1575 		error = copyout(&len, uap->alen, sizeof(len));
1576 	return (error);
1577 }
1578 
1579 int
1580 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
1581     socklen_t *alen)
1582 {
1583 	struct socket *so;
1584 	struct file *fp;
1585 	socklen_t len;
1586 	int error;
1587 
1588 	if (*alen < 0)
1589 		return (EINVAL);
1590 
1591 	NET_LOCK_GIANT();
1592 	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
1593 	if (error)
1594 		goto done2;
1595 	so = fp->f_data;
1596 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1597 		error = ENOTCONN;
1598 		goto done1;
1599 	}
1600 	*sa = NULL;
1601 	error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
1602 	if (error)
1603 		goto bad;
1604 	if (*sa == NULL)
1605 		len = 0;
1606 	else
1607 		len = MIN(*alen, (*sa)->sa_len);
1608 	*alen = len;
1609 bad:
1610 	if (error && *sa) {
1611 		free(*sa, M_SONAME);
1612 		*sa = NULL;
1613 	}
1614 done1:
1615 	fdrop(fp, td);
1616 done2:
1617 	NET_UNLOCK_GIANT();
1618 	return (error);
1619 }
1620 
1621 int
1622 getpeername(td, uap)
1623 	struct thread *td;
1624 	struct getpeername_args *uap;
1625 {
1626 
1627 	return (getpeername1(td, uap, 0));
1628 }
1629 
1630 #ifdef COMPAT_OLDSOCK
1631 int
1632 ogetpeername(td, uap)
1633 	struct thread *td;
1634 	struct ogetpeername_args *uap;
1635 {
1636 
1637 	/* XXX uap should have type `getpeername_args *' to begin with. */
1638 	return (getpeername1(td, (struct getpeername_args *)uap, 1));
1639 }
1640 #endif /* COMPAT_OLDSOCK */
1641 
1642 int
1643 sockargs(mp, buf, buflen, type)
1644 	struct mbuf **mp;
1645 	caddr_t buf;
1646 	int buflen, type;
1647 {
1648 	struct sockaddr *sa;
1649 	struct mbuf *m;
1650 	int error;
1651 
1652 	if ((u_int)buflen > MLEN) {
1653 #ifdef COMPAT_OLDSOCK
1654 		if (type == MT_SONAME && (u_int)buflen <= 112)
1655 			buflen = MLEN;		/* unix domain compat. hack */
1656 		else
1657 #endif
1658 			if ((u_int)buflen > MCLBYTES)
1659 				return (EINVAL);
1660 	}
1661 	m = m_get(M_TRYWAIT, type);
1662 	if (m == NULL)
1663 		return (ENOBUFS);
1664 	if ((u_int)buflen > MLEN) {
1665 		MCLGET(m, M_TRYWAIT);
1666 		if ((m->m_flags & M_EXT) == 0) {
1667 			m_free(m);
1668 			return (ENOBUFS);
1669 		}
1670 	}
1671 	m->m_len = buflen;
1672 	error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1673 	if (error)
1674 		(void) m_free(m);
1675 	else {
1676 		*mp = m;
1677 		if (type == MT_SONAME) {
1678 			sa = mtod(m, struct sockaddr *);
1679 
1680 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1681 			if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1682 				sa->sa_family = sa->sa_len;
1683 #endif
1684 			sa->sa_len = buflen;
1685 		}
1686 	}
1687 	return (error);
1688 }
1689 
1690 int
1691 getsockaddr(namp, uaddr, len)
1692 	struct sockaddr **namp;
1693 	caddr_t uaddr;
1694 	size_t len;
1695 {
1696 	struct sockaddr *sa;
1697 	int error;
1698 
1699 	if (len > SOCK_MAXADDRLEN)
1700 		return (ENAMETOOLONG);
1701 	if (len < offsetof(struct sockaddr, sa_data[0]))
1702 		return (EINVAL);
1703 	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1704 	error = copyin(uaddr, sa, len);
1705 	if (error) {
1706 		FREE(sa, M_SONAME);
1707 	} else {
1708 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1709 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1710 			sa->sa_family = sa->sa_len;
1711 #endif
1712 		sa->sa_len = len;
1713 		*namp = sa;
1714 	}
1715 	return (error);
1716 }
1717 
1718 /*
1719  * Detach mapped page and release resources back to the system.
1720  */
1721 void
1722 sf_buf_mext(void *addr, void *args)
1723 {
1724 	vm_page_t m;
1725 
1726 	m = sf_buf_page(args);
1727 	sf_buf_free(args);
1728 	vm_page_lock_queues();
1729 	vm_page_unwire(m, 0);
1730 	/*
1731 	 * Check for the object going away on us. This can
1732 	 * happen since we don't hold a reference to it.
1733 	 * If so, we're responsible for freeing the page.
1734 	 */
1735 	if (m->wire_count == 0 && m->object == NULL)
1736 		vm_page_free(m);
1737 	vm_page_unlock_queues();
1738 }
1739 
1740 /*
1741  * sendfile(2)
1742  *
1743  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1744  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1745  *
1746  * Send a file specified by 'fd' and starting at 'offset' to a socket
1747  * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
1748  * 0.  Optionally add a header and/or trailer to the socket output.  If
1749  * specified, write the total number of bytes sent into *sbytes.
1750  */
1751 int
1752 sendfile(struct thread *td, struct sendfile_args *uap)
1753 {
1754 
1755 	return (do_sendfile(td, uap, 0));
1756 }
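
/*
 * Editor's sketch (not part of the original source): a typical userland
 * invocation of the interface documented above, sending an entire file over
 * a connected stream socket using hypothetical descriptors 'filefd' and
 * 'sockfd' (userland code; needs <sys/types.h>, <sys/socket.h>, <sys/uio.h>
 * and <err.h>):
 *
 *	off_t sbytes;
 *
 *	if (sendfile(filefd, sockfd, 0, 0, NULL, &sbytes, 0) == -1)
 *		err(1, "sendfile");
 */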
1757 
1758 static int
1759 do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
1760 {
1761 	struct sf_hdtr hdtr;
1762 	struct uio *hdr_uio, *trl_uio;
1763 	int error;
1764 
1765 	hdr_uio = trl_uio = NULL;
1766 
1767 	if (uap->hdtr != NULL) {
1768 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1769 		if (error)
1770 			goto out;
1771 		if (hdtr.headers != NULL) {
1772 			error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
1773 			if (error)
1774 				goto out;
1775 		}
1776 		if (hdtr.trailers != NULL) {
1777 			error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
1778 			if (error)
1779 				goto out;
1780 
1781 		}
1782 	}
1783 
1784 	error = kern_sendfile(td, uap, hdr_uio, trl_uio, compat);
1785 out:
1786 	if (hdr_uio)
1787 		free(hdr_uio, M_IOV);
1788 	if (trl_uio)
1789 		free(trl_uio, M_IOV);
1790 	return (error);
1791 }
1792 
1793 #ifdef COMPAT_FREEBSD4
1794 int
1795 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
1796 {
1797 	struct sendfile_args args;
1798 
1799 	args.fd = uap->fd;
1800 	args.s = uap->s;
1801 	args.offset = uap->offset;
1802 	args.nbytes = uap->nbytes;
1803 	args.hdtr = uap->hdtr;
1804 	args.sbytes = uap->sbytes;
1805 	args.flags = uap->flags;
1806 
1807 	return (do_sendfile(td, &args, 1));
1808 }
1809 #endif /* COMPAT_FREEBSD4 */
1810 
1811 int
1812 kern_sendfile(struct thread *td, struct sendfile_args *uap,
1813     struct uio *hdr_uio, struct uio *trl_uio, int compat)
1814 {
1815 	struct file *sock_fp;
1816 	struct vnode *vp;
1817 	struct vm_object *obj = NULL;
1818 	struct socket *so = NULL;
1819 	struct mbuf *m = NULL;
1820 	struct sf_buf *sf;
1821 	struct vm_page *pg;
1822 	off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0;
1823 	int error, hdrlen = 0, mnw = 0;
1824 	int vfslocked;
1825 
1826 	NET_LOCK_GIANT();
1827 
1828 	/*
1829 	 * The file descriptor must be a regular file and have a
1830 	 * backing VM object.
1831 	 * File offset must be positive.  If it goes beyond EOF
1832 	 * The file offset must not be negative.  If it goes beyond EOF
1833 	 */
1834 	if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
1835 		goto out;
1836 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1837 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1838 	obj = vp->v_object;
1839 	if (obj != NULL) {
1840 		/*
1841 		 * Temporarily increase the backing VM object's reference
1842 		 * count so that a forced reclamation of its vnode does not
1843 		 * immediately destroy it.
1844 		 */
1845 		VM_OBJECT_LOCK(obj);
1846 		if ((obj->flags & OBJ_DEAD) == 0) {
1847 			vm_object_reference_locked(obj);
1848 			VM_OBJECT_UNLOCK(obj);
1849 		} else {
1850 			VM_OBJECT_UNLOCK(obj);
1851 			obj = NULL;
1852 		}
1853 	}
1854 	VOP_UNLOCK(vp, 0, td);
1855 	VFS_UNLOCK_GIANT(vfslocked);
1856 	if (obj == NULL) {
1857 		error = EINVAL;
1858 		goto out;
1859 	}
1860 	if (uap->offset < 0) {
1861 		error = EINVAL;
1862 		goto out;
1863 	}
1864 
1865 	/*
1866 	 * The socket must be a stream socket and connected.
1867 	 * Remember whether it is a blocking or non-blocking socket.
1868 	 */
1869 	if ((error = getsock(td->td_proc->p_fd, uap->s, &sock_fp,
1870 	    NULL)) != 0)
1871 		goto out;
1872 	so = sock_fp->f_data;
1873 	if (so->so_type != SOCK_STREAM) {
1874 		error = EINVAL;
1875 		goto out;
1876 	}
1877 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1878 		error = ENOTCONN;
1879 		goto out;
1880 	}
1881 	/*
1882 	 * Do not wait on memory allocations; instead return EAGAIN so the
1883 	 * caller can retry later.
1884 	 * XXX: Experimental.
1885 	 */
1886 	if (uap->flags & SF_MNOWAIT)
1887 		mnw = 1;
1888 
1889 #ifdef MAC
1890 	SOCK_LOCK(so);
1891 	error = mac_check_socket_send(td->td_ucred, so);
1892 	SOCK_UNLOCK(so);
1893 	if (error)
1894 		goto out;
1895 #endif
1896 
1897 	/* If headers are specified copy them into mbufs. */
1898 	if (hdr_uio != NULL) {
1899 		hdr_uio->uio_td = td;
1900 		hdr_uio->uio_rw = UIO_WRITE;
1901 		if (hdr_uio->uio_resid > 0) {
1902 			/*
1903 			 * In FBSD < 5.0 the nbytes to send also included
1904 			 * the header.  If compat is specified subtract the
1905 			 * header size from nbytes.
1906 			 */
1907 			if (compat) {
1908 				if (uap->nbytes > hdr_uio->uio_resid)
1909 					uap->nbytes -= hdr_uio->uio_resid;
1910 				else
1911 					uap->nbytes = 0;
1912 			}
1913 			m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
1914 			    0, 0, 0);
1915 			if (m == NULL) {
1916 				error = mnw ? EAGAIN : ENOBUFS;
1917 				goto out;
1918 			}
1919 			hdrlen = m_length(m, NULL);
1920 		}
1921 	}
1922 
1923 	/* Protect against multiple writers to the socket. */
1924 	(void) sblock(&so->so_snd, M_WAITOK);
1925 
1926 	/*
1927 	 * Loop through the pages of the file, starting with the requested
1928 	 * offset. Get a file page (do I/O if necessary), map the file page
1929 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1930 	 * it on the socket.
1931 	 * This is done in two loops.  The inner loop turns as many pages as
1932 	 * it can into mbufs, up to the available socket buffer space, without
1933 	 * blocking, so they can be bulk delivered into the socket send buffer.
1934 	 * The outer loop checks the state and available space of the socket
1935 	 * and takes care of the overall progress.
1936 	 */
1937 	for (off = uap->offset, rem = uap->nbytes; ; ) {
1938 		int loopbytes = 0;
1939 		int space = 0;
1940 		int done = 0;
1941 
1942 		/*
1943 		 * Check the socket state for ongoing connection,
1944 		 * no errors and space in socket buffer.
1945 		 * If space is low allow for the remainder of the
1946 		 * file to be processed if it fits the socket buffer.
1947 		 * Otherwise block waiting for sufficient space
1948 		 * to proceed, or if the socket is nonblocking, return
1949 		 * to userland with EAGAIN while reporting how far
1950 		 * we've come.
1951 		 * We wait until the socket buffer has significant free
1952 		 * space to do bulk sends.  This makes good use of file
1953 		 * system read ahead and allows packet segmentation
1954 		 * offloading hardware to take over lots of work.  If
1955 		 * we were not careful here we would send off only one
1956 		 * sfbuf at a time.
1957 		 */
1958 		SOCKBUF_LOCK(&so->so_snd);
1959 		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
1960 			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
1961 retry_space:
1962 		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1963 			error = EPIPE;
1964 			SOCKBUF_UNLOCK(&so->so_snd);
1965 			goto done;
1966 		} else if (so->so_error) {
1967 			error = so->so_error;
1968 			so->so_error = 0;
1969 			SOCKBUF_UNLOCK(&so->so_snd);
1970 			goto done;
1971 		}
1972 		space = sbspace(&so->so_snd);
1973 		if (space < rem &&
1974 		    (space <= 0 ||
1975 		     space < so->so_snd.sb_lowat)) {
1976 			if (so->so_state & SS_NBIO) {
1977 				SOCKBUF_UNLOCK(&so->so_snd);
1978 				error = EAGAIN;
1979 				goto done;
1980 			}
1981 			/*
1982 			 * sbwait drops the lock while sleeping.
1983 			 * When we loop back to retry_space the
1984 			 * state may have changed and we retest
1985 			 * for it.
1986 			 */
1987 			error = sbwait(&so->so_snd);
1988 			/*
1989 			 * An error from sbwait usually indicates that we've
1990 			 * been interrupted by a signal. If we've sent anything
1991 			 * then return bytes sent, otherwise return the error.
1992 			 */
1993 			if (error) {
1994 				SOCKBUF_UNLOCK(&so->so_snd);
1995 				goto done;
1996 			}
1997 			goto retry_space;
1998 		}
1999 		SOCKBUF_UNLOCK(&so->so_snd);
2000 
2001 		/*
2002 		 * Reduce space in the socket buffer by the size of
2003 		 * the header mbuf chain.
2004 		 * hdrlen is set to 0 after the first loop.
2005 		 */
2006 		space -= hdrlen;
2007 
2008 		/*
2009 		 * Loop and construct maximum sized mbuf chain to be bulk
2010 		 * dumped into socket buffer.
2011 		 */
2012 		while (space > loopbytes) {
2013 			vm_pindex_t pindex;
2014 			vm_offset_t pgoff;
2015 			struct mbuf *m0;
2016 
2017 			VM_OBJECT_LOCK(obj);
2018 			/*
2019 			 * Calculate the amount to transfer.
2020 			 * Not to exceed a page, the EOF,
2021 			 * or the passed in nbytes.
2022 			 */
2023 			pgoff = (vm_offset_t)(off & PAGE_MASK);
2024 			xfsize = omin(PAGE_SIZE - pgoff,
2025 			    obj->un_pager.vnp.vnp_size - uap->offset -
2026 			    fsbytes - loopbytes);
2027 			if (uap->nbytes)
2028 				rem = (uap->nbytes - fsbytes - loopbytes);
2029 			else
2030 				rem = obj->un_pager.vnp.vnp_size -
2031 				    uap->offset - fsbytes - loopbytes;
2032 			xfsize = omin(rem, xfsize);
2033 			if (xfsize <= 0) {
2034 				VM_OBJECT_UNLOCK(obj);
2035 				done = 1;		/* all data sent */
2036 				break;
2037 			}
2038 			/*
2039 			 * Don't overflow the send buffer.
2040 			 * Stop here and send out what we've
2041 			 * already got.
2042 			 */
2043 			if (space < loopbytes + xfsize) {
2044 				VM_OBJECT_UNLOCK(obj);
2045 				break;
2046 			}
2047 
2048 			/*
2049 			 * Attempt to look up the page.  Allocate
2050 			 * if not found or wait and loop if busy.
2051 			 */
2052 			pindex = OFF_TO_IDX(off);
2053 			pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY |
2054 			    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);
2055 
2056 			/*
2057 			 * Check if page is valid for what we need,
2058 			 * otherwise initiate I/O.
2059 			 * If we already turned some pages into mbufs,
2060 			 * send them off before we come here again and
2061 			 * block.
2062 			 */
2063 			if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
2064 				VM_OBJECT_UNLOCK(obj);
2065 			else if (m != NULL)
2066 				error = EAGAIN;	/* send what we already got */
2067 			else if (uap->flags & SF_NODISKIO)
2068 				error = EBUSY;
2069 			else {
2070 				int bsize, resid;
2071 
2072 				/*
2073 				 * Ensure that our page is still around
2074 				 * when the I/O completes.
2075 				 */
2076 				vm_page_io_start(pg);
2077 				VM_OBJECT_UNLOCK(obj);
2078 
2079 				/*
2080 				 * Get the page from backing store.
2081 				 */
2082 				bsize = vp->v_mount->mnt_stat.f_iosize;
2083 				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2084 				vn_lock(vp, LK_SHARED | LK_RETRY, td);
2085 
2086 				/*
2087 				 * XXXMAC: Because we don't have fp->f_cred
2088 				 * here, we pass in NOCRED.  This is probably
2089 				 * wrong, but is consistent with our original
2090 				 * implementation.
2091 				 */
2092 				error = vn_rdwr(UIO_READ, vp, NULL, MAXBSIZE,
2093 				    trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
2094 				    IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
2095 				    td->td_ucred, NOCRED, &resid, td);
2096 				VOP_UNLOCK(vp, 0, td);
2097 				VFS_UNLOCK_GIANT(vfslocked);
2098 				VM_OBJECT_LOCK(obj);
2099 				vm_page_io_finish(pg);
2100 				if (!error)
2101 					VM_OBJECT_UNLOCK(obj);
2102 				mbstat.sf_iocnt++;
2103 			}
2104 			if (error) {
2105 				vm_page_lock_queues();
2106 				vm_page_unwire(pg, 0);
2107 				/*
2108 				 * See if anyone else might know about
2109 				 * this page.  If not and it is not valid,
2110 				 * then free it.
2111 				 */
2112 				if (pg->wire_count == 0 && pg->valid == 0 &&
2113 				    pg->busy == 0 && !(pg->oflags & VPO_BUSY) &&
2114 				    pg->hold_count == 0) {
2115 					vm_page_free(pg);
2116 				}
2117 				vm_page_unlock_queues();
2118 				VM_OBJECT_UNLOCK(obj);
2119 				if (error == EAGAIN)
2120 					error = 0;	/* not a real error */
2121 				break;
2122 			}
2123 
2124 			/*
2125 			 * Get a sendfile buf.  We usually wait as long
2126 			 * as necessary, but this wait can be interrupted.
2127 			 */
2128 			if ((sf = sf_buf_alloc(pg,
2129 			    (mnw ? SFB_NOWAIT : SFB_CATCH))) == NULL) {
2130 				mbstat.sf_allocfail++;
2131 				vm_page_lock_queues();
2132 				vm_page_unwire(pg, 0);
2133 				/*
2134 				 * XXX: Not same check as above!?
2135 				 */
2136 				if (pg->wire_count == 0 && pg->object == NULL)
2137 					vm_page_free(pg);
2138 				vm_page_unlock_queues();
2139 				error = (mnw ? EAGAIN : EINTR);
2140 				break;
2141 			}
2142 
2143 			/*
2144 			 * Get an mbuf and set it up as having
2145 			 * external storage.
2146 			 */
2147 			m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
2148 			if (m0 == NULL) {
2149 				error = (mnw ? EAGAIN : ENOBUFS);
2150 				sf_buf_mext((void *)sf_buf_kva(sf), sf);
2151 				break;
2152 			}
2153 			MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, sf_buf_mext,
2154 			    sf, M_RDONLY, EXT_SFBUF);
2155 			m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
2156 			m0->m_len = xfsize;
2157 
2158 			/* Append to mbuf chain. */
2159 			if (m != NULL)
2160 				m_cat(m, m0);
2161 			else
2162 				m = m0;
2163 
2164 			/* Keep track of bytes processed. */
2165 			loopbytes += xfsize;
2166 			off += xfsize;
2167 		}
2168 
2169 		/* Add the buffer chain to the socket buffer. */
2170 		if (m != NULL) {
2171 			int mlen, err;
2172 
2173 			mlen = m_length(m, NULL);
2174 			SOCKBUF_LOCK(&so->so_snd);
2175 			if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2176 				error = EPIPE;
2177 				SOCKBUF_UNLOCK(&so->so_snd);
2178 				goto done;
2179 			}
2180 			SOCKBUF_UNLOCK(&so->so_snd);
2181 			/* Avoid error aliasing. */
2182 			err = (*so->so_proto->pr_usrreqs->pru_send)
2183 				    (so, 0, m, NULL, NULL, td);
2184 			if (err == 0) {
2185 				/*
2186 				 * We need two counters to get the
2187 				 * file offset and nbytes to send
2188 				 * right:
2189 				 * - sbytes contains the total amount
2190 				 *   of bytes sent, including headers.
2191 				 * - fsbytes contains the total amount
2192 				 *   of bytes sent from the file.
2193 				 */
2194 				sbytes += mlen;
2195 				fsbytes += mlen;
2196 				if (hdrlen) {
2197 					fsbytes -= hdrlen;
2198 					hdrlen = 0;
2199 				}
2200 			} else if (error == 0)
2201 				error = err;
2202 			m = NULL;	/* pru_send always consumes */
2203 		}
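		/*
		 * Worked example of the accounting above (numbers are
		 * illustrative): with 100 bytes of headers (hdrlen == 100)
		 * and a first chain of mlen == 4196 bytes, sbytes advances
		 * by 4196 (everything handed to the socket) while fsbytes
		 * advances by 4196 - 100 == 4096 (bytes taken from the
		 * file); hdrlen is then zeroed so later chains count in
		 * full.
		 */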
2204 
2205 		/* Quit outer loop on error or when we're done. */
2206 		if (error || done)
2207 			goto done;
2208 	}
2209 
2210 	/*
2211 	 * Send trailers. Wimp out and use writev(2).
2212 	 */
2213 	if (trl_uio != NULL) {
2214 		error = kern_writev(td, uap->s, trl_uio);
2215 		if (error)
2216 			goto done;
2217 		sbytes += td->td_retval[0];
2218 	}
2219 
2220 done:
2221 	sbunlock(&so->so_snd);
2222 out:
2223 	/*
2224 	 * If there was no error, we have to clear td->td_retval[0]
2225 	 * because it may have been set by writev.
2226 	 */
2227 	if (error == 0) {
2228 		td->td_retval[0] = 0;
2229 	}
2230 	if (uap->sbytes != NULL) {
2231 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
2232 	}
2233 	if (obj != NULL)
2234 		vm_object_deallocate(obj);
2235 	if (vp != NULL) {
2236 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2237 		vrele(vp);
2238 		VFS_UNLOCK_GIANT(vfslocked);
2239 	}
2240 	if (so)
2241 		fdrop(sock_fp, td);
2242 	if (m)
2243 		m_freem(m);
2244 
2245 	NET_UNLOCK_GIANT();
2246 
2247 	if (error == ERESTART)
2248 		error = EINTR;
2249 
2250 	return (error);
2251 }
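/*
 * Example of the userland interface served by the sendfile(2) path above
 * (a sketch only; the descriptors and data below are placeholders): sending
 * a file preceded by a header and followed by a trailer in a single call.
 *
 *	struct iovec hdr = { "HDR", 3 };
 *	struct iovec trl = { "TRL", 3 };
 *	struct sf_hdtr hdtr = { &hdr, 1, &trl, 1 };
 *	off_t sbytes;
 *
 *	if (sendfile(filefd, sockfd, 0, 0, &hdtr, &sbytes, 0) == -1)
 *		err(1, "sendfile");
 */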
2252 
2253 /*
2254  * SCTP syscalls.
2255  * This functionality is only compiled in if SCTP is enabled in the kernel
2256  * configuration; otherwise these syscalls all return EOPNOTSUPP.
2257  * XXX: We should make this loadable one day.
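 * (The SCTP option is enabled with "options SCTP" in the kernel
 * configuration file.)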
2258  */
2259 int
2260 sctp_peeloff(td, uap)
2261 	struct thread *td;
2262 	struct sctp_peeloff_args /* {
2263 		int	sd;
2264 		caddr_t	name;
2265 	} */ *uap;
2266 {
2267 #ifdef SCTP
2268 	struct filedesc *fdp;
2269 	struct file *nfp = NULL;
2270 	int error;
2271 	struct socket *head, *so;
2272 	int fd;
2273 	u_int fflag;
2274 
2275 	fdp = td->td_proc->p_fd;
2276 	error = fgetsock(td, uap->sd, &head, &fflag);
2277 	if (error)
2278 		goto done2;
2279 	error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
2280 	if (error)
2281 		goto done2;
2282 	/*
2283 	 * At this point we know we have an association to peel off,
2284 	 * so we proceed to set up the file descriptor.  This may
2285 	 * block, but that is OK.
2286 	 */
2287 
2288 	error = falloc(td, &nfp, &fd);
2289 	if (error)
2290 		goto done;
2291 	td->td_retval[0] = fd;
2292 
2293 	so = sonewconn(head, SS_ISCONNECTED);
2294 	if (so == NULL)
2295 		goto noconnection;
2296 	/*
2297 	 * Before changing the flags on the socket, we have to bump the
2298 	 * reference count.  Otherwise, if the protocol calls sofree(),
2299 	 * the socket will be released due to a zero refcount.
2300 	 */
2301 	SOCK_LOCK(so);
2302 	soref(so);			/* file descriptor reference */
2303 	SOCK_UNLOCK(so);
2304 
2305 	ACCEPT_LOCK();
2306 
2307 	TAILQ_REMOVE(&head->so_comp, so, so_list);
2308 	head->so_qlen--;
2309 	so->so_state |= (head->so_state & SS_NBIO);
2310 	so->so_state &= ~SS_NOFDREF;
2311 	so->so_qstate &= ~SQ_COMP;
2312 	so->so_head = NULL;
2313 
2314 	ACCEPT_UNLOCK();
2315 
2316 	error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
2317 	if (error)
2318 		goto noconnection;
2319 	if (head->so_sigio != NULL)
2320 		fsetown(fgetown(&head->so_sigio), &so->so_sigio);
2321 
2322 	FILE_LOCK(nfp);
2323 	nfp->f_data = so;
2324 	nfp->f_flag = fflag;
2325 	nfp->f_type = DTYPE_SOCKET;
2326 	nfp->f_ops = &socketops;
2327 	FILE_UNLOCK(nfp);
2328 
2329 noconnection:
2330 	/*
2331 	 * Close the new descriptor, assuming someone hasn't ripped it
2332 	 * out from under us.
2333 	 */
2334 	if (error)
2335 		fdclose(fdp, nfp, fd, td);
2336 
2337 	/*
2338 	 * Release explicitly held references before returning.
2339 	 */
2340 done:
2341 	if (nfp != NULL)
2342 		fdrop(nfp, td);
2343 	fputsock(head);
2344 done2:
2345 	return (error);
2346 #else  /* SCTP */
2347 	return (EOPNOTSUPP);
2348 #endif /* SCTP */
2349 }
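/*
 * Example (userland sketch; "sd" and "assoc_id" are placeholders): the
 * sctp_peeloff() wrapper peels an established association off a
 * one-to-many socket and yields a new descriptor.
 *
 *	int newfd = sctp_peeloff(sd, assoc_id);
 *	if (newfd == -1)
 *		err(1, "sctp_peeloff");
 */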
2350 
2351 int
2352 sctp_generic_sendmsg (td, uap)
2353 	struct thread *td;
2354 	struct sctp_generic_sendmsg_args /* {
2355 		int sd,
2356 		caddr_t msg,
2357 		int mlen,
2358 		caddr_t to,
2359 		__socklen_t tolen,
2360 		struct sctp_sndrcvinfo *sinfo,
2361 		int flags
2362 	} */ *uap;
2363 {
2364 #ifdef SCTP
2365 	struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2366 	struct socket *so;
2367 	struct file *fp = NULL;
2368 	int use_rcvinfo = 1;
2369 	int error = 0, len;
2370 	struct sockaddr *to = NULL;
2371 #ifdef KTRACE
2372 	struct uio *ktruio = NULL;
2373 #endif
2374 	struct uio auio;
2375 	struct iovec iov[1];
2376 
2377 	if (uap->sinfo) {
2378 		error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
2379 		if (error)
2380 			return (error);
2381 		u_sinfo = &sinfo;
2382 	}
2383 	if (uap->tolen) {
2384 		error = getsockaddr(&to, uap->to, uap->tolen);
2385 		if (error) {
2386 			to = NULL;
2387 			goto sctp_bad2;
2388 		}
2389 	}
2390 
2391 	error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
2392 	if (error)
2393 		goto sctp_bad;
2394 
2395 	iov[0].iov_base = uap->msg;
2396 	iov[0].iov_len = uap->mlen;
2397 
2398 	so = (struct socket *)fp->f_data;
2399 #ifdef MAC
2400 	SOCK_LOCK(so);
2401 	error = mac_check_socket_send(td->td_ucred, so);
2402 	SOCK_UNLOCK(so);
2403 	if (error)
2404 		goto sctp_bad;
2405 #endif /* MAC */
2406 
2407 	auio.uio_iov = iov;
2408 	auio.uio_iovcnt = 1;
2409 	auio.uio_segflg = UIO_USERSPACE;
2410 	auio.uio_rw = UIO_WRITE;
2411 	auio.uio_td = td;
2412 	auio.uio_offset = 0;			/* XXX */
2413 	auio.uio_resid = 0;
2414 	len = auio.uio_resid = uap->mlen;
2415 	error = sctp_lower_sosend(so, to, &auio,
2416 		    (struct mbuf *)NULL, (struct mbuf *)NULL,
2417 		    uap->flags, use_rcvinfo, u_sinfo, td);
2418 	if (error) {
2419 		if (auio.uio_resid != len && (error == ERESTART ||
2420 		    error == EINTR || error == EWOULDBLOCK))
2421 			error = 0;
2422 		/* Generation of SIGPIPE can be controlled per socket. */
2423 		if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2424 		    !(uap->flags & MSG_NOSIGNAL)) {
2425 			PROC_LOCK(td->td_proc);
2426 			psignal(td->td_proc, SIGPIPE);
2427 			PROC_UNLOCK(td->td_proc);
2428 		}
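		/*
		 * (A caller can suppress the signal either with
		 * setsockopt(sd, SOL_SOCKET, SO_NOSIGPIPE, ...) or by
		 * passing MSG_NOSIGNAL in the flags argument, as checked
		 * above.)
		 */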
2429 	}
2430 	if (error == 0)
2431 		td->td_retval[0] = len - auio.uio_resid;
2432 #ifdef KTRACE
2433 	if (ktruio != NULL) {
2434 		ktruio->uio_resid = td->td_retval[0];
2435 		ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
2436 	}
2437 #endif /* KTRACE */
2438 sctp_bad:
2439 	if (fp)
2440 		fdrop(fp, td);
2441 sctp_bad2:
2442 	if (to)
2443 		free(to, M_SONAME);
2444 	return (error);
2445 #else  /* SCTP */
2446 	return (EOPNOTSUPP);
2447 #endif /* SCTP */
2448 }
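/*
 * Example (userland sketch; buffer, peer address and the numeric values are
 * placeholders): the libc sctp_sendmsg() wrapper is the usual entry point
 * onto this path and builds the sctp_sndrcvinfo from its ppid, flags,
 * stream, TTL and context arguments.
 *
 *	ssize_t n = sctp_sendmsg(sd, buf, buflen,
 *	    (struct sockaddr *)&peer, sizeof(peer),
 *	    htonl(42), 0, 1, 0, 0);
 *	if (n == -1)
 *		err(1, "sctp_sendmsg");
 */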
2449 
2450 int
2451 sctp_generic_sendmsg_iov(td, uap)
2452 	struct thread *td;
2453 	struct sctp_generic_sendmsg_iov_args /* {
2454 		int sd,
2455 		struct iovec *iov,
2456 		int iovlen,
2457 		caddr_t to,
2458 		__socklen_t tolen,
2459 		struct sctp_sndrcvinfo *sinfo,
2460 		int flags
2461 	} */ *uap;
2462 {
2463 #ifdef SCTP
2464 	struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
2465 	struct socket *so;
2466 	struct file *fp = NULL;
2467 	int use_rcvinfo = 1;
2468 	int error = 0, len, i;
2469 	struct sockaddr *to = NULL;
2470 #ifdef KTRACE
2471 	struct uio *ktruio = NULL;
2472 #endif
2473 	struct uio auio;
2474 	struct iovec *iov, *tiov;
2475 
2476 	if (uap->sinfo) {
2477 		error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
2478 		if (error)
2479 			return (error);
2480 		u_sinfo = &sinfo;
2481 	}
2482 	if (uap->tolen) {
2483 		error = getsockaddr(&to, uap->to, uap->tolen);
2484 		if (error) {
2485 			to = NULL;
2486 			goto sctp_bad2;
2487 		}
2488 	}
2489 
2490 	error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
2491 	if (error)
2492 		goto sctp_bad1;
2493 
2494 	error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2495 	if (error)
2496 		goto sctp_bad1;
2497 
2498 	so = (struct socket *)fp->f_data;
2499 #ifdef MAC
2500 	SOCK_LOCK(so);
2501 	error = mac_check_socket_send(td->td_ucred, so);
2502 	SOCK_UNLOCK(so);
2503 	if (error)
2504 		goto sctp_bad;
2505 #endif /* MAC */
2506 
2507 	auio.uio_iov = iov;
2508 	auio.uio_iovcnt = uap->iovlen;
2509 	auio.uio_segflg = UIO_USERSPACE;
2510 	auio.uio_rw = UIO_WRITE;
2511 	auio.uio_td = td;
2512 	auio.uio_offset = 0;			/* XXX */
2513 	auio.uio_resid = 0;
2514 	tiov = iov;
2515 	for (i = 0; i < uap->iovlen; i++, tiov++) {
2516 		if ((auio.uio_resid += tiov->iov_len) < 0) {
2517 			error = EINVAL;
2518 			goto sctp_bad;
2519 		}
2520 	}
2521 	len = auio.uio_resid;
2522 	error = sctp_lower_sosend(so, to, &auio,
2523 		    (struct mbuf *)NULL, (struct mbuf *)NULL,
2524 		    uap->flags, use_rcvinfo, u_sinfo, td);
2525 	if (error) {
2526 		if (auio.uio_resid != len && (error == ERESTART ||
2527 		    error == EINTR || error == EWOULDBLOCK))
2528 			error = 0;
2529 		/* Generation of SIGPIPE can be controlled per socket. */
2530 		if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
2531 		    !(uap->flags & MSG_NOSIGNAL)) {
2532 			PROC_LOCK(td->td_proc);
2533 			psignal(td->td_proc, SIGPIPE);
2534 			PROC_UNLOCK(td->td_proc);
2535 		}
2536 	}
2537 	if (error == 0)
2538 		td->td_retval[0] = len - auio.uio_resid;
2539 #ifdef KTRACE
2540 	if (ktruio != NULL) {
2541 		ktruio->uio_resid = td->td_retval[0];
2542 		ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
2543 	}
2544 #endif /* KTRACE */
2545 sctp_bad:
2546 	free(iov, M_IOV);
2547 sctp_bad1:
2548 	if (fp)
2549 		fdrop(fp, td);
2550 sctp_bad2:
2551 	if (to)
2552 		free(to, M_SONAME);
2553 	return (error);
2554 #else  /* SCTP */
2555 	return (EOPNOTSUPP);
2556 #endif /* SCTP */
2557 }
2558 
2559 int
2560 sctp_generic_recvmsg(td, uap)
2561 	struct thread *td;
2562 	struct sctp_generic_recvmsg_args /* {
2563 		int sd,
2564 		struct iovec *iov,
2565 		int iovlen,
2566 		struct sockaddr *from,
2567 		__socklen_t *fromlenaddr,
2568 		struct sctp_sndrcvinfo *sinfo,
2569 		int *msg_flags
2570 	} */ *uap;
2571 {
2572 #ifdef SCTP
2573 	u_int8_t sockbufstore[256];
2574 	struct uio auio;
2575 	struct iovec *iov, *tiov;
2576 	struct sctp_sndrcvinfo sinfo;
2577 	struct socket *so;
2578 	struct file *fp = NULL;
2579 	struct sockaddr *fromsa;
2580 	int fromlen;
2581 	int len, i, msg_flags;
2582 	int error = 0;
2583 #ifdef KTRACE
2584 	struct uio *ktruio = NULL;
2585 #endif
2586 	error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
2587 	if (error) {
2588 		return (error);
2589 	}
2590 	error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
2591 	if (error) {
2592 		goto out1;
2593 	}
2594 
2595 	so = fp->f_data;
2596 #ifdef MAC
2597 	SOCK_LOCK(so);
2598 	error = mac_check_socket_receive(td->td_ucred, so);
2599 	SOCK_UNLOCK(so);
2600 	if (error) {
2601 		goto out;
2603 	}
2604 #endif /* MAC */
2605 
2606 	if (uap->fromlenaddr) {
2607 		error = copyin(uap->fromlenaddr,
2608 		    &fromlen, sizeof (fromlen));
2609 		if (error) {
2610 			goto out;
2611 		}
2612 	} else {
2613 		fromlen = 0;
2614 	}
2615 	if (uap->msg_flags) {
2616 		error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
2617 		if (error) {
2618 			goto out;
2619 		}
2620 	} else {
2621 		msg_flags = 0;
2622 	}
2623 	auio.uio_iov = iov;
2624 	auio.uio_iovcnt = uap->iovlen;
2625 	auio.uio_segflg = UIO_USERSPACE;
2626 	auio.uio_rw = UIO_READ;
2627 	auio.uio_td = td;
2628 	auio.uio_offset = 0;			/* XXX */
2629 	auio.uio_resid = 0;
2630 	tiov = iov;
2631 	for (i = 0; i < uap->iovlen; i++, tiov++) {
2632 		if ((auio.uio_resid += tiov->iov_len) < 0) {
2633 			error = EINVAL;
2634 			goto out;
2635 		}
2636 	}
2637 	len = auio.uio_resid;
2638 	fromsa = (struct sockaddr *)sockbufstore;
2639 
2640 #ifdef KTRACE
2641 	if (KTRPOINT(td, KTR_GENIO))
2642 		ktruio = cloneuio(&auio);
2643 #endif /* KTRACE */
2644 	error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
2645 		    fromsa, fromlen, &msg_flags,
2646 		    (struct sctp_sndrcvinfo *)&sinfo, 1);
2647 	if (error) {
2648 		if (auio.uio_resid != (int)len && (error == ERESTART ||
2649 		    error == EINTR || error == EWOULDBLOCK))
2650 			error = 0;
2651 	} else {
2652 		if (uap->sinfo)
2653 			error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
2654 	}
2655 #ifdef KTRACE
2656 	if (ktruio != NULL) {
2657 		ktruio->uio_resid = (int)len - auio.uio_resid;
2658 		ktrgenio(uap->sd, UIO_READ, ktruio, error);
2659 	}
2660 #endif /* KTRACE */
2661 	if (error)
2662 		goto out;
2663 	td->td_retval[0] = (int)len - auio.uio_resid;
2664 
2665 	if (fromlen && uap->from) {
2666 		len = fromlen;
2667 		if (len <= 0 || fromsa == NULL)
2668 			len = 0;
2669 		else {
2670 			len = MIN(len, fromsa->sa_len);
2671 			error = copyout(fromsa, uap->from, (unsigned)len);
2672 			if (error)
2673 				goto out;
2674 		}
2675 		error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
2676 		if (error) {
2677 			goto out;
2678 		}
2679 	}
2680 	if (uap->msg_flags) {
2681 		error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
2682 		if (error) {
2683 			goto out;
2684 		}
2685 	}
2686 out:
2687 	free(iov, M_IOV);
2688 out1:
2689 	if (fp)
2690 		fdrop(fp, td);
2691 
2692 	return (error);
2693 #else  /* SCTP */
2694 	return (EOPNOTSUPP);
2695 #endif /* SCTP */
2696 }
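/*
 * Example (userland sketch; "sd" and "buf" are placeholders): the libc
 * sctp_recvmsg() wrapper corresponds to this syscall; the sender address,
 * sinfo and msg_flags outputs are all optional.
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags = 0;
 *	ssize_t n = sctp_recvmsg(sd, buf, sizeof(buf), NULL, NULL,
 *	    &sinfo, &flags);
 *	if (n == -1)
 *		err(1, "sctp_recvmsg");
 */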
2697