xref: /freebsd/sys/kern/sys_socket.c (revision ab00ac327a66a53edaac95b536b209db3ae2cd9f)
/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_socket.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/aio.h>
#include <sys/domain.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/filio.h>			/* XXX */
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/ucred.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/user.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static SYSCTL_NODE(_kern_ipc, OID_AUTO, aio, CTLFLAG_RD, NULL,
    "socket AIO stats");

static int empty_results;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_results, CTLFLAG_RD, &empty_results,
    0, "socket operation returned EAGAIN");

static int empty_retries;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_retries, CTLFLAG_RD, &empty_retries,
    0, "socket operation retries");

static fo_rdwr_t soo_read;
static fo_rdwr_t soo_write;
static fo_ioctl_t soo_ioctl;
static fo_poll_t soo_poll;
extern fo_kqfilter_t soo_kqfilter;
static fo_stat_t soo_stat;
static fo_close_t soo_close;
static fo_fill_kinfo_t soo_fill_kinfo;
static fo_aio_queue_t soo_aio_queue;

static void	soo_aio_cancel(struct kaiocb *job);

struct fileops	socketops = {
	.fo_read = soo_read,
	.fo_write = soo_write,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = soo_ioctl,
	.fo_poll = soo_poll,
	.fo_kqfilter = soo_kqfilter,
	.fo_stat = soo_stat,
	.fo_close = soo_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = soo_fill_kinfo,
	.fo_aio_queue = soo_aio_queue,
	.fo_flags = DFLAG_PASSABLE
};

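/*
 * fo_read handler for sockets: after the optional MAC receive check,
 * hand the uio to soreceive() to copy data out of the receive buffer.
 */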
static int
soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct socket *so = fp->f_data;
	int error;

#ifdef MAC
	error = mac_socket_check_receive(active_cred, so);
	if (error)
		return (error);
#endif
	error = soreceive(so, 0, uio, 0, 0, 0);
	return (error);
}

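/*
 * fo_write handler for sockets: after the optional MAC send check, pass
 * the uio to sosend().  A write that fails with EPIPE raises SIGPIPE in
 * the writing thread unless SO_NOSIGPIPE is set on the socket.
 */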
static int
soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct socket *so = fp->f_data;
	int error;

#ifdef MAC
	error = mac_socket_check_send(active_cred, so);
	if (error)
		return (error);
#endif
	error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td);
	if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
		PROC_LOCK(uio->uio_td->td_proc);
		tdsignal(uio->uio_td, SIGPIPE);
		PROC_UNLOCK(uio->uio_td->td_proc);
	}
	return (error);
}

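/*
 * fo_ioctl handler for sockets: service the generic file and socket
 * ioctls here; pass interface ('i' group) ioctls to ifioctl(), routing
 * ('r' group) ioctls to rtioctl_fib(), and everything else to the
 * protocol via pru_control.
 */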
static int
soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		SOCK_LOCK(so);
		if (*(int *)data)
			so->so_state |= SS_NBIO;
		else
			so->so_state &= ~SS_NBIO;
		SOCK_UNLOCK(so);
		break;

	case FIOASYNC:
		/*
		 * XXXRW: This code separately acquires SOCK_LOCK(so) and
		 * SOCKBUF_LOCK(&so->so_rcv) even though they are the same
		 * mutex to avoid introducing the assumption that they are
		 * the same.
		 */
		if (*(int *)data) {
			SOCK_LOCK(so);
			so->so_state |= SS_ASYNC;
			SOCK_UNLOCK(so);
			SOCKBUF_LOCK(&so->so_rcv);
			so->so_rcv.sb_flags |= SB_ASYNC;
			SOCKBUF_UNLOCK(&so->so_rcv);
			SOCKBUF_LOCK(&so->so_snd);
			so->so_snd.sb_flags |= SB_ASYNC;
			SOCKBUF_UNLOCK(&so->so_snd);
		} else {
			SOCK_LOCK(so);
			so->so_state &= ~SS_ASYNC;
			SOCK_UNLOCK(so);
			SOCKBUF_LOCK(&so->so_rcv);
			so->so_rcv.sb_flags &= ~SB_ASYNC;
			SOCKBUF_UNLOCK(&so->so_rcv);
			SOCKBUF_LOCK(&so->so_snd);
			so->so_snd.sb_flags &= ~SB_ASYNC;
			SOCKBUF_UNLOCK(&so->so_snd);
		}
		break;

	case FIONREAD:
		/* Unlocked read. */
		*(int *)data = sbavail(&so->so_rcv);
		break;

	case FIONWRITE:
		/* Unlocked read. */
		*(int *)data = sbavail(&so->so_snd);
		break;

	case FIONSPACE:
		/* Unlocked read. */
		if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
		    (so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt))
			*(int *)data = 0;
		else
			*(int *)data = sbspace(&so->so_snd);
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)data, &so->so_sigio);
		break;

	case FIOGETOWN:
		*(int *)data = fgetown(&so->so_sigio);
		break;

	case SIOCSPGRP:
		error = fsetown(-(*(int *)data), &so->so_sigio);
		break;

	case SIOCGPGRP:
		*(int *)data = -fgetown(&so->so_sigio);
		break;

	case SIOCATMARK:
		/* Unlocked read. */
		*(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0;
		break;
	default:
		/*
		 * Interface/routing/protocol specific ioctls: interface and
		 * routing ioctls should have a different entry point, since
		 * a socket is unnecessary.
		 */
		if (IOCGROUP(cmd) == 'i')
			error = ifioctl(so, cmd, data, td);
		else if (IOCGROUP(cmd) == 'r') {
			CURVNET_SET(so->so_vnet);
			error = rtioctl_fib(cmd, data, so->so_fibnum);
			CURVNET_RESTORE();
		} else {
			CURVNET_SET(so->so_vnet);
			error = ((*so->so_proto->pr_usrreqs->pru_control)
			    (so, cmd, data, 0, td));
			CURVNET_RESTORE();
		}
		break;
	}
	return (error);
}

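/*
 * fo_poll handler for sockets: defer to sopoll() after the optional MAC
 * poll check.
 */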
static int
soo_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
#ifdef MAC
	int error;

	error = mac_socket_check_poll(active_cred, so);
	if (error)
		return (error);
#endif
	return (sopoll(so, events, fp->f_cred, td));
}

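/*
 * fo_stat handler for sockets: synthesize a struct stat that reflects
 * the socket's readability and writability and the amount of unread
 * data, then let the protocol's pru_sense fill in the rest.
 */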
static int
soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
	struct sockbuf *sb;
#ifdef MAC
	int error;
#endif

	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFSOCK;
#ifdef MAC
	error = mac_socket_check_stat(active_cred, so);
	if (error)
		return (error);
#endif
	/*
	 * If SBS_CANTRCVMORE is set, but there's still data left in the
	 * receive buffer, the socket is still readable.
	 */
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
		ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
	ub->st_size = sbavail(sb) - sb->sb_ctl;
	SOCKBUF_UNLOCK(sb);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
		ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
	SOCKBUF_UNLOCK(sb);
	ub->st_uid = so->so_cred->cr_uid;
	ub->st_gid = so->so_cred->cr_gid;
	return (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
}

/*
 * API socket close on file pointer.  We call soclose() to close the socket
 * (including initiating closing protocols).  soclose() will sorele() the
 * file reference, but the actual socket will not go away until the socket's
 * ref count hits 0.
 */
static int
soo_close(struct file *fp, struct thread *td)
{
	int error = 0;
	struct socket *so;

	so = fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;

	if (so)
		error = soclose(so);
	return (error);
}

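/*
 * Export a description of this socket into a kinfo_file record for
 * consumers of the file-descriptor sysctls such as fstat(1).
 */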
static int
soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct sockaddr *sa;
	struct inpcb *inpcb;
	struct unpcb *unpcb;
	struct socket *so;
	int error;

	kif->kf_type = KF_TYPE_SOCKET;
	so = fp->f_data;
	kif->kf_un.kf_sock.kf_sock_domain0 =
	    so->so_proto->pr_domain->dom_family;
	kif->kf_un.kf_sock.kf_sock_type0 = so->so_type;
	kif->kf_un.kf_sock.kf_sock_protocol0 = so->so_proto->pr_protocol;
	kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
	switch (kif->kf_un.kf_sock.kf_sock_domain0) {
	case AF_INET:
	case AF_INET6:
		if (kif->kf_un.kf_sock.kf_sock_protocol0 == IPPROTO_TCP) {
			if (so->so_pcb != NULL) {
				inpcb = (struct inpcb *)(so->so_pcb);
				kif->kf_un.kf_sock.kf_sock_inpcb =
				    (uintptr_t)inpcb->inp_ppcb;
			}
		}
		break;
	case AF_UNIX:
		if (so->so_pcb != NULL) {
			unpcb = (struct unpcb *)(so->so_pcb);
			if (unpcb->unp_conn) {
				kif->kf_un.kf_sock.kf_sock_unpconn =
				    (uintptr_t)unpcb->unp_conn;
				kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
				    so->so_rcv.sb_state;
				kif->kf_un.kf_sock.kf_sock_snd_sb_state =
				    so->so_snd.sb_state;
			}
		}
		break;
	}
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error == 0) {
		if (sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local))
			bcopy(sa, &kif->kf_un.kf_sock.kf_sa_local, sa->sa_len);
		/* Free the sockaddr even if it was too large to copy out. */
		free(sa, M_SONAME);
	}
	error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
	if (error == 0) {
		if (sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer))
			bcopy(sa, &kif->kf_un.kf_sock.kf_sa_peer, sa->sa_len);
		free(sa, M_SONAME);
	}
	strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
	    sizeof(kif->kf_path));
	return (0);
}

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * completed by the AIO job so far.
 */
#define	aio_done	backend3

static STAILQ_HEAD(, task) soaio_jobs;
static struct mtx soaio_jobs_lock;
static struct task soaio_kproc_task;
static int soaio_starting, soaio_idle, soaio_queued;
static struct unrhdr *soaio_kproc_unr;

static int soaio_max_procs = MAX_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, max_procs, CTLFLAG_RW, &soaio_max_procs, 0,
    "Maximum number of kernel processes to use for async socket IO");

static int soaio_num_procs;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, num_procs, CTLFLAG_RD, &soaio_num_procs, 0,
    "Number of active kernel processes for async socket IO");

static int soaio_target_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, target_procs, CTLFLAG_RD,
    &soaio_target_procs, 0,
    "Preferred number of ready kernel processes for async socket IO");

static int soaio_lifetime;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, lifetime, CTLFLAG_RW, &soaio_lifetime, 0,
    "Maximum lifetime for idle aiod");

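/*
 * Main loop for a "soaiod" kernel process: drain queued AIO tasks, and
 * once the queue is empty, sleep for up to soaio_lifetime ticks waiting
 * for more work before letting a surplus process exit.
 */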
static void
soaio_kproc_loop(void *arg)
{
	struct proc *p;
	struct vmspace *myvm;
	struct task *task;
	int error, id, pending;

	id = (intptr_t)arg;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = curproc;
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&soaio_jobs_lock);
	MPASS(soaio_starting > 0);
	soaio_starting--;
	for (;;) {
		while (!STAILQ_EMPTY(&soaio_jobs)) {
			task = STAILQ_FIRST(&soaio_jobs);
			STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
			soaio_queued--;
			pending = task->ta_pending;
			task->ta_pending = 0;
			mtx_unlock(&soaio_jobs_lock);

			task->ta_func(task->ta_context, pending);

			mtx_lock(&soaio_jobs_lock);
		}
		MPASS(soaio_queued == 0);

		if (p->p_vmspace != myvm) {
			mtx_unlock(&soaio_jobs_lock);
			vmspace_switch_aio(myvm);
			mtx_lock(&soaio_jobs_lock);
			continue;
		}

		soaio_idle++;
		error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
		    soaio_lifetime);
		soaio_idle--;
		if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
		    soaio_num_procs > soaio_target_procs)
			break;
	}
	soaio_num_procs--;
	mtx_unlock(&soaio_jobs_lock);
	free_unr(soaio_kproc_unr, id);
	kproc_exit(0);
}

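/*
 * Task handler that spawns additional soaiod processes.  Keep creating
 * processes until the target count is reached, the hard limit is hit,
 * or the existing idle and starting processes can absorb the queued
 * jobs.
 */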
static void
soaio_kproc_create(void *context, int pending)
{
	struct proc *p;
	int error, id;

	mtx_lock(&soaio_jobs_lock);
	for (;;) {
		if (soaio_num_procs < soaio_target_procs) {
			/* Must create */
		} else if (soaio_num_procs >= soaio_max_procs) {
			/*
			 * Hit the limit on kernel processes, don't
			 * create another one.
			 */
			break;
		} else if (soaio_queued <= soaio_idle + soaio_starting) {
			/*
			 * No more AIO jobs waiting for a process to be
			 * created, so stop.
			 */
			break;
		}
		soaio_starting++;
		mtx_unlock(&soaio_jobs_lock);

		id = alloc_unr(soaio_kproc_unr);
		error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
		    &p, 0, 0, "soaiod%d", id);
		if (error != 0) {
			free_unr(soaio_kproc_unr, id);
			mtx_lock(&soaio_jobs_lock);
			soaio_starting--;
			break;
		}

		mtx_lock(&soaio_jobs_lock);
		soaio_num_procs++;
	}
	mtx_unlock(&soaio_jobs_lock);
}

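/*
 * Queue an AIO task for the soaiod pool.  Wake an idle process if one
 * is available; otherwise ask the thread taskqueue to create more
 * processes if the limit has not yet been reached.
 */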
void
soaio_enqueue(struct task *task)
{

	mtx_lock(&soaio_jobs_lock);
	MPASS(task->ta_pending == 0);
	task->ta_pending++;
	STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
	soaio_queued++;
	if (soaio_queued <= soaio_idle)
		wakeup_one(&soaio_idle);
	else if (soaio_num_procs < soaio_max_procs)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
	mtx_unlock(&soaio_jobs_lock);
}

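/*
 * One-time initialization of the socket AIO machinery, run via SYSINIT:
 * set up the job queue, its lock, and the unit-number allocator for
 * soaiod names, then kick off creation of the initial processes.
 */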
static void
soaio_init(void)
{

	soaio_lifetime = AIOD_LIFETIME_DEFAULT;
	STAILQ_INIT(&soaio_jobs);
	mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
	soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
	TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
	if (soaio_target_procs > 0)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
}
SYSINIT(soaio, SI_SUB_VFS, SI_ORDER_ANY, soaio_init, NULL);

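/*
 * A socket buffer is ready for AIO when its socket is readable (receive
 * buffer) or writable (send buffer).
 */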
static __inline int
soaio_ready(struct socket *so, struct sockbuf *sb)
{
	return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
}

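/*
 * Run a single AIO request against the socket using the job owner's
 * credentials and vmspace.  A request that cannot finish in one pass is
 * retried, requeued at the head of the job queue, or completed with a
 * partial count, depending on whether the socket is non-blocking and
 * whether any progress was made.
 */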
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	struct uio uio;
	struct iovec iov;
	size_t cnt, done;
	long ru_before;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	done = job->aio_done;
	cnt = job->uaiocb.aio_nbytes - done;
	iov.iov_base = (void *)((uintptr_t)job->uaiocb.aio_buf + done);
	iov.iov_len = cnt;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = cnt;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_td = td;
	flags = MSG_NBIO;

	/*
	 * For resource usage accounting, only count a completed request
	 * as a single message to avoid counting multiple calls to
	 * sosend/soreceive on a blocking socket.
	 */

	if (sb == &so->so_rcv) {
		uio.uio_rw = UIO_READ;
		ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
		if (td->td_ru.ru_msgrcv != ru_before)
			job->msgrcv = 1;
	} else {
		if (!TAILQ_EMPTY(&sb->sb_aiojobq))
			flags |= MSG_MORETOCOME;
		uio.uio_rw = UIO_WRITE;
		ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
		if (td->td_ru.ru_msgsnd != ru_before)
			job->msgsnd = 1;
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	done += cnt - uio.uio_resid;
	job->aio_done = done;
	td->td_ucred = td_savedcred;

	if (error == EWOULDBLOCK) {
		/*
		 * The request was either partially completed or not
		 * completed at all due to racing with a read() or
		 * write() on the socket.  If the socket is
		 * non-blocking, return with any partial completion.
		 * If the socket is blocking or if no progress has
		 * been made, requeue this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		MPASS(done != job->uaiocb.aio_nbytes);
		SOCKBUF_LOCK(sb);
		if (done == 0 || !(so->so_state & SS_NBIO)) {
			empty_results++;
			if (soaio_ready(so, sb)) {
				empty_retries++;
				SOCKBUF_UNLOCK(sb);
				goto retry;
			}

			if (!aio_set_cancel_function(job, soo_aio_cancel)) {
				SOCKBUF_UNLOCK(sb);
				if (done != 0)
					aio_complete(job, done, 0);
				else
					aio_cancel(job);
				SOCKBUF_LOCK(sb);
			} else {
				TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
			}
			return;
		}
		SOCKBUF_UNLOCK(sb);
	}
	if (done != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, done, 0);
	SOCKBUF_LOCK(sb);
}

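/*
 * Drain as many queued AIO requests as the socket buffer can satisfy.
 * Called via the sb_aiotask handlers below; drops the socket reference
 * taken by sowakeup_aio() before returning.
 */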
static void
soaio_process_sb(struct socket *so, struct sockbuf *sb)
{
	struct kaiocb *job;

	SOCKBUF_LOCK(sb);
	while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
		job = TAILQ_FIRST(&sb->sb_aiojobq);
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		soaio_process_job(so, sb, job);
	}

	/*
	 * If there are still pending requests, the socket must not be
	 * ready, so set SB_AIO to request a wakeup when the socket
	 * becomes ready.
	 */
	if (!TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags |= SB_AIO;
	sb->sb_flags &= ~SB_AIO_RUNNING;
	SOCKBUF_UNLOCK(sb);

	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sorele(so);
}

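/*
 * Task handlers installed in sb_aiotask for the receive and send socket
 * buffers; each processes the pending AIO jobs for its buffer.
 */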
void
soaio_rcv(void *context, int pending)
{
	struct socket *so;

	so = context;
	soaio_process_sb(so, &so->so_rcv);
}

void
soaio_snd(void *context, int pending)
{
	struct socket *so;

	so = context;
	soaio_process_sb(so, &so->so_snd);
}

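/*
 * Called from the socket wakeup paths when a buffer with SB_AIO set
 * becomes ready: take a reference on the socket and schedule its
 * sb_aiotask unless a soaiod is already processing this buffer.
 */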
void
sowakeup_aio(struct socket *so, struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sb->sb_flags &= ~SB_AIO;
	if (sb->sb_flags & SB_AIO_RUNNING)
		return;
	sb->sb_flags |= SB_AIO_RUNNING;
	if (sb == &so->so_snd)
		SOCK_LOCK(so);
	soref(so);
	if (sb == &so->so_snd)
		SOCK_UNLOCK(so);
	soaio_enqueue(&sb->sb_aiotask);
}

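/*
 * Cancellation hook for queued socket AIO jobs: remove the job from its
 * socket buffer's queue and complete it with any partial count already
 * transferred, or cancel it outright if nothing was transferred.
 */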
static void
soo_aio_cancel(struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	long done;
	int opcode;

	so = job->fd_file->f_data;
	opcode = job->uaiocb.aio_lio_opcode;
	if (opcode == LIO_READ)
		sb = &so->so_rcv;
	else {
		MPASS(opcode == LIO_WRITE);
		sb = &so->so_snd;
	}

	SOCKBUF_LOCK(sb);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
	if (TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags &= ~SB_AIO;
	SOCKBUF_UNLOCK(sb);

	done = job->aio_done;
	if (done != 0)
		aio_complete(job, done, 0);
	else
		aio_cancel(job);
}

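/*
 * fo_aio_queue handler for sockets: give the protocol a chance to queue
 * the job itself via pru_aio_queue; otherwise append it to the matching
 * socket buffer's job queue and kick off processing if the buffer is
 * already ready.
 */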
static int
soo_aio_queue(struct file *fp, struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	int error;

	so = fp->f_data;
	error = (*so->so_proto->pr_usrreqs->pru_aio_queue)(so, job);
	if (error == 0)
		return (0);

	switch (job->uaiocb.aio_lio_opcode) {
	case LIO_READ:
		sb = &so->so_rcv;
		break;
	case LIO_WRITE:
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	if (!aio_set_cancel_function(job, soo_aio_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
	if (!(sb->sb_flags & SB_AIO_RUNNING)) {
		if (soaio_ready(so, sb))
			sowakeup_aio(so, sb);
		else
			sb->sb_flags |= SB_AIO;
	}
	SOCKBUF_UNLOCK(sb);
	return (0);
}
808