xref: /freebsd/sys/kern/uipc_sockbuf.c (revision 87569f75a91f298c52a71823c04d41cf53c88889)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_mac.h"
36 #include "opt_param.h"
37 
38 #include <sys/param.h>
39 #include <sys/aio.h> /* for aio_swake proto */
40 #include <sys/domain.h>
41 #include <sys/event.h>
42 #include <sys/file.h>	/* for maxfiles */
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/mac.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/protosw.h>
51 #include <sys/resourcevar.h>
52 #include <sys/signalvar.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/systm.h>
58 
59 int	maxsockets;
60 
61 void (*aio_swake)(struct socket *, struct sockbuf *);
62 
63 /*
64  * Primitive routines for operating on sockets and socket buffers
65  */
66 
67 u_long	sb_max = SB_MAX;
68 static	u_long sb_max_adj =
69     SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
70 
71 static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */
72 
73 /*
74  * Procedures to manipulate the state flags of a socket
75  * and do appropriate wakeups.  The normal sequence from the
76  * active (originating) side is that soisconnecting() is
77  * called during processing of the connect() call,
78  * resulting in an eventual call to soisconnected() if/when the
79  * connection is established.  When the connection is torn down,
80  * soisdisconnecting() is called during processing of the disconnect() call,
81  * and soisdisconnected() is called when the connection to the peer
82  * is totally severed.  The semantics of these routines are such that
83  * connectionless protocols can call soisconnected() and soisdisconnected()
84  * only, bypassing the in-progress calls when setting up a ``connection''
85  * takes no time.
86  *
87  * From the passive side, a socket is created with
88  * two queues of sockets: so_incomp for connections in progress
89  * and so_comp for connections already made and awaiting user acceptance.
90  * As a protocol is preparing incoming connections, it creates a socket
91  * structure queued on so_incomp by calling sonewconn().  When the connection
92  * is established, soisconnected() is called, and transfers the
93  * socket structure to so_comp, making it available to accept().
94  *
95  * If a socket is closed with sockets on either
96  * so_incomp or so_comp, these sockets are dropped.
97  *
98  * If higher level protocols are implemented in
99  * the kernel, the wakeups done here will sometimes
100  * cause software-interrupt process scheduling.
101  */
102 
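/*
 * Editor's example (hedged sketch, not part of the original file): the
 * active-side sequence described above, as a hypothetical protocol's
 * connect entry point might drive it.  foo_connect() and
 * foo_start_handshake() are made-up names standing in for real
 * protocol code.
 */
#if 0
static int
foo_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = foo_start_handshake(so, nam);	/* hypothetical helper */
	if (error)
		return (error);
	soisconnecting(so);		/* SS_ISCONNECTING is now set */
	/*
	 * Later, from the protocol's input path, once the peer answers,
	 * the protocol calls soisconnected(so).  A connectionless
	 * protocol skips soisconnecting() and calls soisconnected()
	 * directly, as the overview above notes.
	 */
	return (0);
}
#endif
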
103 void
104 soisconnecting(so)
105 	register struct socket *so;
106 {
107 
108 	SOCK_LOCK(so);
109 	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
110 	so->so_state |= SS_ISCONNECTING;
111 	SOCK_UNLOCK(so);
112 }
113 
114 void
115 soisconnected(so)
116 	struct socket *so;
117 {
118 	struct socket *head;
119 
120 	ACCEPT_LOCK();
121 	SOCK_LOCK(so);
122 	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
123 	so->so_state |= SS_ISCONNECTED;
124 	head = so->so_head;
125 	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
126 		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
127 			SOCK_UNLOCK(so);
128 			TAILQ_REMOVE(&head->so_incomp, so, so_list);
129 			head->so_incqlen--;
130 			so->so_qstate &= ~SQ_INCOMP;
131 			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
132 			head->so_qlen++;
133 			so->so_qstate |= SQ_COMP;
134 			ACCEPT_UNLOCK();
135 			sorwakeup(head);
136 			wakeup_one(&head->so_timeo);
137 		} else {
138 			ACCEPT_UNLOCK();
139 			so->so_upcall =
140 			    head->so_accf->so_accept_filter->accf_callback;
141 			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
142 			so->so_rcv.sb_flags |= SB_UPCALL;
143 			so->so_options &= ~SO_ACCEPTFILTER;
144 			SOCK_UNLOCK(so);
145 			so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
146 		}
147 		return;
148 	}
149 	SOCK_UNLOCK(so);
150 	ACCEPT_UNLOCK();
151 	wakeup(&so->so_timeo);
152 	sorwakeup(so);
153 	sowwakeup(so);
154 }
155 
156 void
157 soisdisconnecting(so)
158 	register struct socket *so;
159 {
160 
161 	/*
162 	 * XXXRW: This code assumes that SOCK_LOCK(so) and
163 	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
164 	 */
165 	SOCKBUF_LOCK(&so->so_rcv);
166 	so->so_state &= ~SS_ISCONNECTING;
167 	so->so_state |= SS_ISDISCONNECTING;
168 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
169 	sorwakeup_locked(so);
170 	SOCKBUF_LOCK(&so->so_snd);
171 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
172 	sowwakeup_locked(so);
173 	wakeup(&so->so_timeo);
174 }
175 
176 void
177 soisdisconnected(so)
178 	register struct socket *so;
179 {
180 
181 	/*
182 	 * XXXRW: This code assumes that SOCK_LOCK(so) and
183 	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
184 	 */
185 	SOCKBUF_LOCK(&so->so_rcv);
186 	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
187 	so->so_state |= SS_ISDISCONNECTED;
188 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
189 	sorwakeup_locked(so);
190 	SOCKBUF_LOCK(&so->so_snd);
191 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
192 	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
193 	sowwakeup_locked(so);
194 	wakeup(&so->so_timeo);
195 }
196 
197 /*
198  * When an attempt at a new connection is noted on a socket
199  * which accepts connections, sonewconn is called.  If the
200  * connection is possible (subject to space constraints, etc.)
201  * then we allocate a new structure, properly linked into the
202  * data structure of the original socket, and return this.
203  * Connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
204  *
205  * note: the ref count on the socket is 0 on return
206  */
207 struct socket *
208 sonewconn(head, connstatus)
209 	register struct socket *head;
210 	int connstatus;
211 {
212 	register struct socket *so;
213 	int over;
214 
215 	ACCEPT_LOCK();
216 	over = (head->so_qlen > 3 * head->so_qlimit / 2);
217 	ACCEPT_UNLOCK();
218 	if (over)
219 		return (NULL);
220 	so = soalloc(M_NOWAIT);
221 	if (so == NULL)
222 		return (NULL);
223 	if ((head->so_options & SO_ACCEPTFILTER) != 0)
224 		connstatus = 0;
225 	so->so_head = head;
226 	so->so_type = head->so_type;
227 	so->so_options = head->so_options &~ SO_ACCEPTCONN;
228 	so->so_linger = head->so_linger;
229 	so->so_state = head->so_state | SS_NOFDREF;
230 	so->so_proto = head->so_proto;
231 	so->so_timeo = head->so_timeo;
232 	so->so_cred = crhold(head->so_cred);
233 #ifdef MAC
234 	SOCK_LOCK(head);
235 	mac_create_socket_from_socket(head, so);
236 	SOCK_UNLOCK(head);
237 #endif
238 	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
239 	    NULL, NULL, NULL);
240 	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
241 	    NULL, NULL, NULL);
242 	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
243 	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
244 		sodealloc(so);
245 		return (NULL);
246 	}
247 	so->so_state |= connstatus;
248 	ACCEPT_LOCK();
249 	if (connstatus) {
250 		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
251 		so->so_qstate |= SQ_COMP;
252 		head->so_qlen++;
253 	} else {
254 		/*
255 		 * Keep removing sockets from the head until there's room for
256 		 * us to insert on the tail.  In pre-locking revisions, this
257 		 * was a simple if(), but as we could be racing with other
258 		 * threads and soabort() requires dropping locks, we must
259 		 * loop waiting for the condition to be true.
260 		 */
261 		while (head->so_incqlen > head->so_qlimit) {
262 			struct socket *sp;
263 			sp = TAILQ_FIRST(&head->so_incomp);
264 			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
265 			head->so_incqlen--;
266 			sp->so_qstate &= ~SQ_INCOMP;
267 			sp->so_head = NULL;
268 			ACCEPT_UNLOCK();
269 			soabort(sp);
270 			ACCEPT_LOCK();
271 		}
272 		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
273 		so->so_qstate |= SQ_INCOMP;
274 		head->so_incqlen++;
275 	}
276 	ACCEPT_UNLOCK();
277 	if (connstatus) {
278 		sorwakeup(head);
279 		wakeup_one(&head->so_timeo);
280 	}
281 	return (so);
282 }
283 
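/*
 * Editor's example (hedged sketch, not part of the original file): how a
 * protocol input path typically drives sonewconn() for a listening
 * socket; "head" is the listening socket the request arrived on.
 */
#if 0
	struct socket *so;

	so = sonewconn(head, 0);	/* queued on so_incomp, ref count 0 */
	if (so == NULL)
		return;			/* listen queue full or out of memory */
	/* ... once the protocol-specific handshake completes: */
	soisconnected(so);		/* moves so to so_comp for accept() */
#endif
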
284 /*
285  * Socantsendmore indicates that no more data will be sent on the
286  * socket; it is normally applied to the socket by the protocol code
287  * when the user informs the system that no more data is to be sent
288  * (the PRU_SHUTDOWN case).  Socantrcvmore indicates that no more data
289  * will be received, and is normally applied to the socket by a
290  * protocol when it detects that the peer will send no more data.
291  * Data queued for reading in the socket may yet be read.
292  */
293 void
294 socantsendmore_locked(so)
295 	struct socket *so;
296 {
297 
298 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
299 
300 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
301 	sowwakeup_locked(so);
302 	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
303 }
304 
305 void
306 socantsendmore(so)
307 	struct socket *so;
308 {
309 
310 	SOCKBUF_LOCK(&so->so_snd);
311 	socantsendmore_locked(so);
312 	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
313 }
314 
315 void
316 socantrcvmore_locked(so)
317 	struct socket *so;
318 {
319 
320 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
321 
322 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
323 	sorwakeup_locked(so);
324 	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
325 }
326 
327 void
328 socantrcvmore(so)
329 	struct socket *so;
330 {
331 
332 	SOCKBUF_LOCK(&so->so_rcv);
333 	socantrcvmore_locked(so);
334 	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
335 }
336 
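/*
 * Editor's example (hedged sketch, not part of the original file): a
 * stream protocol that has just seen the peer's end-of-data indication
 * (a TCP FIN, say) would typically call:
 */
#if 0
	socantrcvmore(so);	/* readers drain queued data, then see EOF */
#endif
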
337 /*
338  * Wait for data to arrive at/drain from a socket buffer.
339  */
340 int
341 sbwait(sb)
342 	struct sockbuf *sb;
343 {
344 
345 	SOCKBUF_LOCK_ASSERT(sb);
346 
347 	sb->sb_flags |= SB_WAIT;
348 	return (msleep(&sb->sb_cc, &sb->sb_mtx,
349 	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
350 	    sb->sb_timeo));
351 }
352 
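/*
 * Editor's example (hedged sketch, not part of the original file): the
 * classic receive-side wait loop, roughly as soreceive() uses sbwait().
 * msleep() inside sbwait() drops and reacquires the sockbuf mutex.
 */
#if 0
	int error;

	SOCKBUF_LOCK(&so->so_rcv);
	while (so->so_rcv.sb_cc == 0 &&
	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		error = sbwait(&so->so_rcv);
		if (error)
			break;			/* e.g. EINTR */
	}
	SOCKBUF_UNLOCK(&so->so_rcv);
#endif
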
353 /*
354  * Acquire the I/O lock (SB_LOCK) on a sockbuf whose mutex is already
355  * held; return any error returned from msleep() (e.g. EINTR).
356  */
357 int
358 sb_lock(sb)
359 	register struct sockbuf *sb;
360 {
361 	int error;
362 
363 	SOCKBUF_LOCK_ASSERT(sb);
364 
365 	while (sb->sb_flags & SB_LOCK) {
366 		sb->sb_flags |= SB_WANT;
367 		error = msleep(&sb->sb_flags, &sb->sb_mtx,
368 		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
369 		    "sblock", 0);
370 		if (error)
371 			return (error);
372 	}
373 	sb->sb_flags |= SB_LOCK;
374 	return (0);
375 }
376 
377 /*
378  * Wakeup processes waiting on a socket buffer.  Do asynchronous
379  * notification via SIGIO if the socket has the SS_ASYNC flag set.
380  *
381  * Called with the socket buffer lock held; will release the lock by the end
382  * of the function.  This allows the caller to acquire the socket buffer lock
383  * while testing for the need for various sorts of wakeup and hold it through
384  * to the point where it's no longer required.  We currently hold the lock
385  * through calls out to other subsystems (with the exception of kqueue), and
386  * then release it to avoid lock order issues.  It's not clear that's
387  * correct.
388  */
389 void
390 sowakeup(so, sb)
391 	register struct socket *so;
392 	register struct sockbuf *sb;
393 {
394 
395 	SOCKBUF_LOCK_ASSERT(sb);
396 
397 	selwakeuppri(&sb->sb_sel, PSOCK);
398 	sb->sb_flags &= ~SB_SEL;
399 	if (sb->sb_flags & SB_WAIT) {
400 		sb->sb_flags &= ~SB_WAIT;
401 		wakeup(&sb->sb_cc);
402 	}
403 	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
404 	SOCKBUF_UNLOCK(sb);
405 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
406 		pgsigio(&so->so_sigio, SIGIO, 0);
407 	if (sb->sb_flags & SB_UPCALL)
408 		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
409 	if (sb->sb_flags & SB_AIO)
410 		aio_swake(so, sb);
411 	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
412 }
413 
414 /*
415  * Socket buffer (struct sockbuf) utility routines.
416  *
417  * Each socket contains two socket buffers: one for sending data and
418  * one for receiving data.  Each buffer contains a queue of mbufs,
419  * information about the number of mbufs and amount of data in the
420  * queue, and other fields allowing select() statements and notification
421  * on data availability to be implemented.
422  *
423  * Data stored in a socket buffer is maintained as a list of records.
424  * Each record is a list of mbufs chained together with the m_next
425  * field.  Records are chained together with the m_nextpkt field. The upper
426  * level routine soreceive() expects the following conventions to be
427  * observed when placing information in the receive buffer:
428  *
429  * 1. If the protocol requires each message be preceded by the sender's
430  *    name, then a record containing that name must be present before
431  *    any associated data (mbufs must be of type MT_SONAME).
432  * 2. If the protocol supports the exchange of ``access rights'' (really
433  *    just additional data associated with the message), and there are
434  *    ``rights'' to be received, then a record containing this data
435  *    should be present (mbufs must be of type MT_RIGHTS).
436  * 3. If a name or rights record exists, then it must be followed by
437  *    a data record, perhaps of zero length.
438  *
439  * Before using a new socket structure it is first necessary to reserve
440  * buffer space to the socket, by calling sbreserve().  This should commit
441  * some of the available buffer space in the system buffer pool for the
442  * socket (currently, it does nothing but enforce limits).  The space
443  * should be released by calling sbrelease() when the socket is destroyed.
444  */
445 
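/*
 * Editor's example (hedged sketch, not part of the original file): a
 * protocol attach routine reserving default buffer space; foo_attach()
 * and the space values are made up.
 */
#if 0
static u_long foo_sendspace = 8192;
static u_long foo_recvspace = 8192;

static int
foo_attach(struct socket *so, int proto, struct thread *td)
{
	int error;

	error = soreserve(so, foo_sendspace, foo_recvspace);
	if (error)
		return (error);
	/* ... allocate and link the protocol control block ... */
	return (0);
}
#endif
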
446 int
447 soreserve(so, sndcc, rcvcc)
448 	register struct socket *so;
449 	u_long sndcc, rcvcc;
450 {
451 	struct thread *td = curthread;
452 
453 	SOCKBUF_LOCK(&so->so_snd);
454 	SOCKBUF_LOCK(&so->so_rcv);
455 	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
456 		goto bad;
457 	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
458 		goto bad2;
459 	if (so->so_rcv.sb_lowat == 0)
460 		so->so_rcv.sb_lowat = 1;
461 	if (so->so_snd.sb_lowat == 0)
462 		so->so_snd.sb_lowat = MCLBYTES;
463 	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
464 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
465 	SOCKBUF_UNLOCK(&so->so_rcv);
466 	SOCKBUF_UNLOCK(&so->so_snd);
467 	return (0);
468 bad2:
469 	sbrelease_locked(&so->so_snd, so);
470 bad:
471 	SOCKBUF_UNLOCK(&so->so_rcv);
472 	SOCKBUF_UNLOCK(&so->so_snd);
473 	return (ENOBUFS);
474 }
475 
476 static int
477 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
478 {
479 	int error = 0;
480 	u_long old_sb_max = sb_max;
481 
482 	error = SYSCTL_OUT(req, arg1, sizeof(u_long));
483 	if (error || !req->newptr)
484 		return (error);
485 	error = SYSCTL_IN(req, arg1, sizeof(u_long));
486 	if (error)
487 		return (error);
488 	if (sb_max < MSIZE + MCLBYTES) {
489 		sb_max = old_sb_max;
490 		return (EINVAL);
491 	}
492 	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
493 	return (0);
494 }
495 
496 /*
497  * Allot mbufs to a sockbuf.
498  * Attempt to scale mbmax so that mbcnt doesn't become limiting
499  * if buffering efficiency is near the normal case.
500  */
501 int
502 sbreserve_locked(sb, cc, so, td)
503 	struct sockbuf *sb;
504 	u_long cc;
505 	struct socket *so;
506 	struct thread *td;
507 {
508 	rlim_t sbsize_limit;
509 
510 	SOCKBUF_LOCK_ASSERT(sb);
511 
512 	/*
513 	 * td will only be NULL when we're in an interrupt
514 	 * (e.g. in tcp_input())
515 	 */
516 	if (cc > sb_max_adj)
517 		return (0);
518 	if (td != NULL) {
519 		PROC_LOCK(td->td_proc);
520 		sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
521 		PROC_UNLOCK(td->td_proc);
522 	} else
523 		sbsize_limit = RLIM_INFINITY;
524 	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
525 	    sbsize_limit))
526 		return (0);
527 	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
528 	if (sb->sb_lowat > sb->sb_hiwat)
529 		sb->sb_lowat = sb->sb_hiwat;
530 	return (1);
531 }
532 
533 int
534 sbreserve(sb, cc, so, td)
535 	struct sockbuf *sb;
536 	u_long cc;
537 	struct socket *so;
538 	struct thread *td;
539 {
540 	int error;
541 
542 	SOCKBUF_LOCK(sb);
543 	error = sbreserve_locked(sb, cc, so, td);
544 	SOCKBUF_UNLOCK(sb);
545 	return (error);
546 }
547 
548 /*
549  * Free mbufs held by a socket, and reserved mbuf space.
550  */
551 void
552 sbrelease_locked(sb, so)
553 	struct sockbuf *sb;
554 	struct socket *so;
555 {
556 
557 	SOCKBUF_LOCK_ASSERT(sb);
558 
559 	sbflush_locked(sb);
560 	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
561 	    RLIM_INFINITY);
562 	sb->sb_mbmax = 0;
563 }
564 
565 void
566 sbrelease(sb, so)
567 	struct sockbuf *sb;
568 	struct socket *so;
569 {
570 
571 	SOCKBUF_LOCK(sb);
572 	sbrelease_locked(sb, so);
573 	SOCKBUF_UNLOCK(sb);
574 }
575 /*
576  * Routines to add and remove
577  * data from an mbuf queue.
578  *
579  * The routines sbappend() or sbappendrecord() are normally called to
580  * append new mbufs to a socket buffer, after checking that adequate
581  * space is available, comparing the function sbspace() with the amount
582  * of data to be added.  sbappendrecord() differs from sbappend() in
583  * that data supplied is treated as the beginning of a new record.
584  * To place a sender's address, optional access rights, and data in a
585  * socket receive buffer, sbappendaddr() should be used.  To place
586  * access rights and data in a socket receive buffer, sbappendcontrol()
587  * should be used.  In either case, the new data begins a new record.
588  * Note that unlike sbappend() and sbappendrecord(), these routines check
589  * for the caller that there will be enough space to store the data.
590  * Each fails if there is not enough space, or if it cannot find mbufs
591  * to store additional information in.
592  *
593  * Reliable protocols may use the socket send buffer to hold data
594  * awaiting acknowledgement.  Data is normally copied from a socket
595  * send buffer in a protocol with m_copy for output to a peer,
596  * and then removed from the socket buffer with sbdrop()
597  * or sbdroprecord() when the data is acknowledged by the peer.
598  */
599 
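/*
 * Editor's example (hedged sketch, not part of the original file): the
 * send-buffer cycle of a reliable protocol as described above.
 * foo_output(), "off", "len" and "acked" are hypothetical.
 */
#if 0
	struct mbuf *m;

	/* Transmit a copy; the original stays in so_snd until acknowledged. */
	m = m_copy(so->so_snd.sb_mb, off, len);
	if (m != NULL)
		(void)foo_output(so, m);
	/* ... later, when the peer acknowledges "acked" bytes: */
	sbdrop(&so->so_snd, acked);
#endif
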
600 #ifdef SOCKBUF_DEBUG
601 void
602 sblastrecordchk(struct sockbuf *sb, const char *file, int line)
603 {
604 	struct mbuf *m = sb->sb_mb;
605 
606 	SOCKBUF_LOCK_ASSERT(sb);
607 
608 	while (m && m->m_nextpkt)
609 		m = m->m_nextpkt;
610 
611 	if (m != sb->sb_lastrecord) {
612 		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
613 			__func__, sb->sb_mb, sb->sb_lastrecord, m);
614 		printf("packet chain:\n");
615 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
616 			printf("\t%p\n", m);
617 		panic("%s from %s:%u", __func__, file, line);
618 	}
619 }
620 
621 void
622 sblastmbufchk(struct sockbuf *sb, const char *file, int line)
623 {
624 	struct mbuf *m = sb->sb_mb;
625 	struct mbuf *n;
626 
627 	SOCKBUF_LOCK_ASSERT(sb);
628 
629 	while (m && m->m_nextpkt)
630 		m = m->m_nextpkt;
631 
632 	while (m && m->m_next)
633 		m = m->m_next;
634 
635 	if (m != sb->sb_mbtail) {
636 		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
637 			__func__, sb->sb_mb, sb->sb_mbtail, m);
638 		printf("packet tree:\n");
639 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
640 			printf("\t");
641 			for (n = m; n != NULL; n = n->m_next)
642 				printf("%p ", n);
643 			printf("\n");
644 		}
645 		panic("%s from %s:%u", __func__, file, line);
646 	}
647 }
648 #endif /* SOCKBUF_DEBUG */
649 
650 #define SBLINKRECORD(sb, m0) do {					\
651 	SOCKBUF_LOCK_ASSERT(sb);					\
652 	if ((sb)->sb_lastrecord != NULL)				\
653 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
654 	else								\
655 		(sb)->sb_mb = (m0);					\
656 	(sb)->sb_lastrecord = (m0);					\
657 } while (/*CONSTCOND*/0)
658 
659 /*
660  * Append mbuf chain m to the last record in the
661  * socket buffer sb.  The additional space associated with
662  * the mbuf chain is recorded in sb.  Empty mbufs are
663  * discarded and mbufs are compacted where possible.
664  */
665 void
666 sbappend_locked(sb, m)
667 	struct sockbuf *sb;
668 	struct mbuf *m;
669 {
670 	register struct mbuf *n;
671 
672 	SOCKBUF_LOCK_ASSERT(sb);
673 
674 	if (m == 0)
675 		return;
676 
677 	SBLASTRECORDCHK(sb);
678 	n = sb->sb_mb;
679 	if (n) {
680 		while (n->m_nextpkt)
681 			n = n->m_nextpkt;
682 		do {
683 			if (n->m_flags & M_EOR) {
684 				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
685 				return;
686 			}
687 		} while (n->m_next && (n = n->m_next));
688 	} else {
689 		/*
690 		 * XXX Would like to simply use sb_mbtail here, but
691 		 * XXX I need to verify that I won't miss an EOR that
692 		 * XXX way.
693 		 */
694 		if ((n = sb->sb_lastrecord) != NULL) {
695 			do {
696 				if (n->m_flags & M_EOR) {
697 					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
698 					return;
699 				}
700 			} while (n->m_next && (n = n->m_next));
701 		} else {
702 			/*
703 			 * If this is the first record in the socket buffer,
704 			 * it's also the last record.
705 			 */
706 			sb->sb_lastrecord = m;
707 		}
708 	}
709 	sbcompress(sb, m, n);
710 	SBLASTRECORDCHK(sb);
711 }
712 
713 /*
714  * Append mbuf chain m to the last record in the
715  * socket buffer sb.  The additional space associated with
716  * the mbuf chain is recorded in sb.  Empty mbufs are
717  * discarded and mbufs are compacted where possible.
718  */
719 void
720 sbappend(sb, m)
721 	struct sockbuf *sb;
722 	struct mbuf *m;
723 {
724 
725 	SOCKBUF_LOCK(sb);
726 	sbappend_locked(sb, m);
727 	SOCKBUF_UNLOCK(sb);
728 }
729 
730 /*
731  * This version of sbappend() should only be used when the caller
732  * absolutely knows that there will never be more than one record
733  * in the socket buffer, that is, a stream protocol (such as TCP).
734  */
735 void
736 sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
737 {
738 	SOCKBUF_LOCK_ASSERT(sb);
739 
740 	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
741 	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));
742 
743 	SBLASTMBUFCHK(sb);
744 
745 	sbcompress(sb, m, sb->sb_mbtail);
746 
747 	sb->sb_lastrecord = sb->sb_mb;
748 	SBLASTRECORDCHK(sb);
749 }
750 
751 /*
752  * This version of sbappend() should only be used when the caller
753  * absolutely knows that there will never be more than one record
754  * in the socket buffer, that is, a stream protocol (such as TCP).
755  */
756 void
757 sbappendstream(struct sockbuf *sb, struct mbuf *m)
758 {
759 
760 	SOCKBUF_LOCK(sb);
761 	sbappendstream_locked(sb, m);
762 	SOCKBUF_UNLOCK(sb);
763 }
764 
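/*
 * Editor's example (hedged sketch, not part of the original file):
 * stream-style delivery into the receive buffer, roughly in the style
 * of TCP; "so" and "m" come from the surrounding input routine.
 */
#if 0
	SOCKBUF_LOCK(&so->so_rcv);
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);			/* receiver is gone; drop it */
	} else {
		sbappendstream_locked(&so->so_rcv, m);
		sorwakeup_locked(so);		/* releases the sockbuf lock */
	}
#endif
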
765 #ifdef SOCKBUF_DEBUG
766 void
767 sbcheck(sb)
768 	struct sockbuf *sb;
769 {
770 	struct mbuf *m;
771 	struct mbuf *n = 0;
772 	u_long len = 0, mbcnt = 0;
773 
774 	SOCKBUF_LOCK_ASSERT(sb);
775 
776 	for (m = sb->sb_mb; m; m = n) {
777 	    n = m->m_nextpkt;
778 	    for (; m; m = m->m_next) {
779 		len += m->m_len;
780 		mbcnt += MSIZE;
781 		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
782 			mbcnt += m->m_ext.ext_size;
783 	    }
784 	}
785 	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
786 		printf("cc %ld != %u || mbcnt %ld != %u\n", len, sb->sb_cc,
787 		    mbcnt, sb->sb_mbcnt);
788 		panic("sbcheck");
789 	}
790 }
791 #endif
792 
793 /*
794  * As above, except the mbuf chain
795  * begins a new record.
796  */
797 void
798 sbappendrecord_locked(sb, m0)
799 	register struct sockbuf *sb;
800 	register struct mbuf *m0;
801 {
802 	register struct mbuf *m;
803 
804 	SOCKBUF_LOCK_ASSERT(sb);
805 
806 	if (m0 == 0)
807 		return;
808 	m = sb->sb_mb;
809 	if (m)
810 		while (m->m_nextpkt)
811 			m = m->m_nextpkt;
812 	/*
813 	 * Put the first mbuf on the queue.
814 	 * Note this permits zero length records.
815 	 */
816 	sballoc(sb, m0);
817 	SBLASTRECORDCHK(sb);
818 	SBLINKRECORD(sb, m0);
819 	if (m)
820 		m->m_nextpkt = m0;
821 	else
822 		sb->sb_mb = m0;
823 	m = m0->m_next;
824 	m0->m_next = 0;
825 	if (m && (m0->m_flags & M_EOR)) {
826 		m0->m_flags &= ~M_EOR;
827 		m->m_flags |= M_EOR;
828 	}
829 	sbcompress(sb, m, m0);
830 }
831 
832 /*
833  * As above, except the mbuf chain
834  * begins a new record.
835  */
836 void
837 sbappendrecord(sb, m0)
838 	register struct sockbuf *sb;
839 	register struct mbuf *m0;
840 {
841 
842 	SOCKBUF_LOCK(sb);
843 	sbappendrecord_locked(sb, m0);
844 	SOCKBUF_UNLOCK(sb);
845 }
846 
847 /*
848  * As above except that OOB data
849  * is inserted at the beginning of the sockbuf,
850  * but after any other OOB data.
851  */
852 void
853 sbinsertoob_locked(sb, m0)
854 	register struct sockbuf *sb;
855 	register struct mbuf *m0;
856 {
857 	register struct mbuf *m;
858 	register struct mbuf **mp;
859 
860 	SOCKBUF_LOCK_ASSERT(sb);
861 
862 	if (m0 == 0)
863 		return;
864 	for (mp = &sb->sb_mb; *mp ; mp = &((*mp)->m_nextpkt)) {
865 	    m = *mp;
866 	    again:
867 		switch (m->m_type) {
868 
869 		case MT_OOBDATA:
870 			continue;		/* WANT next train */
871 
872 		case MT_CONTROL:
873 			m = m->m_next;
874 			if (m)
875 				goto again;	/* inspect THIS train further */
876 		}
877 		break;
878 	}
879 	/*
880 	 * Put the first mbuf on the queue.
881 	 * Note this permits zero length records.
882 	 */
883 	sballoc(sb, m0);
884 	m0->m_nextpkt = *mp;
885 	*mp = m0;
886 	m = m0->m_next;
887 	m0->m_next = 0;
888 	if (m && (m0->m_flags & M_EOR)) {
889 		m0->m_flags &= ~M_EOR;
890 		m->m_flags |= M_EOR;
891 	}
892 	sbcompress(sb, m, m0);
893 }
894 
895 /*
896  * As above except that OOB data
897  * is inserted at the beginning of the sockbuf,
898  * but after any other OOB data.
899  */
900 void
901 sbinsertoob(sb, m0)
902 	register struct sockbuf *sb;
903 	register struct mbuf *m0;
904 {
905 
906 	SOCKBUF_LOCK(sb);
907 	sbinsertoob_locked(sb, m0);
908 	SOCKBUF_UNLOCK(sb);
909 }
910 
911 /*
912  * Append address and data, and optionally, control (ancillary) data
913  * to the receive queue of a socket.  If present,
914  * m0 must include a packet header with total length.
915  * Returns 0 if there is no space in the sockbuf or mbufs cannot be
916  * allocated; returns 1 on success.
916  */
917 int
918 sbappendaddr_locked(sb, asa, m0, control)
919 	struct sockbuf *sb;
920 	const struct sockaddr *asa;
921 	struct mbuf *m0, *control;
922 {
923 	struct mbuf *m, *n, *nlast;
924 	int space = asa->sa_len;
925 
926 	SOCKBUF_LOCK_ASSERT(sb);
927 
928 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
929 		panic("sbappendaddr_locked");
930 	if (m0)
931 		space += m0->m_pkthdr.len;
932 	space += m_length(control, &n);
933 
934 	if (space > sbspace(sb))
935 		return (0);
936 #if MSIZE <= 256
937 	if (asa->sa_len > MLEN)
938 		return (0);
939 #endif
940 	MGET(m, M_DONTWAIT, MT_SONAME);
941 	if (m == 0)
942 		return (0);
943 	m->m_len = asa->sa_len;
944 	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
945 	if (n)
946 		n->m_next = m0;		/* concatenate data to control */
947 	else
948 		control = m0;
949 	m->m_next = control;
950 	for (n = m; n->m_next != NULL; n = n->m_next)
951 		sballoc(sb, n);
952 	sballoc(sb, n);
953 	nlast = n;
954 	SBLINKRECORD(sb, m);
955 
956 	sb->sb_mbtail = nlast;
957 	SBLASTMBUFCHK(sb);
958 
959 	SBLASTRECORDCHK(sb);
960 	return (1);
961 }
962 
963 /*
964  * Append address and data, and optionally, control (ancillary) data
965  * to the receive queue of a socket.  If present,
966  * m0 must include a packet header with total length.
967  * Returns 0 if there is no space in the sockbuf or mbufs cannot be
968  * allocated; returns 1 on success.
968  */
969 int
970 sbappendaddr(sb, asa, m0, control)
971 	struct sockbuf *sb;
972 	const struct sockaddr *asa;
973 	struct mbuf *m0, *control;
974 {
975 	int retval;
976 
977 	SOCKBUF_LOCK(sb);
978 	retval = sbappendaddr_locked(sb, asa, m0, control);
979 	SOCKBUF_UNLOCK(sb);
980 	return (retval);
981 }
982 
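/*
 * Editor's example (hedged sketch, not part of the original file):
 * datagram delivery with the sender's address, in the style of
 * udp_input(); "fromsa" (sender address), "m" (data) and "opts"
 * (control, may be NULL) come from the surrounding input routine.
 */
#if 0
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, (struct sockaddr *)&fromsa,
	    m, opts) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);			/* no room: drop the datagram */
	} else
		sorwakeup_locked(so);		/* releases the sockbuf lock */
#endif
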
983 int
984 sbappendcontrol_locked(sb, m0, control)
985 	struct sockbuf *sb;
986 	struct mbuf *control, *m0;
987 {
988 	struct mbuf *m, *n, *mlast;
989 	int space;
990 
991 	SOCKBUF_LOCK_ASSERT(sb);
992 
993 	if (control == 0)
994 		panic("sbappendcontrol_locked");
995 	space = m_length(control, &n) + m_length(m0, NULL);
996 
997 	if (space > sbspace(sb))
998 		return (0);
999 	n->m_next = m0;			/* concatenate data to control */
1000 
1001 	SBLASTRECORDCHK(sb);
1002 
1003 	for (m = control; m->m_next; m = m->m_next)
1004 		sballoc(sb, m);
1005 	sballoc(sb, m);
1006 	mlast = m;
1007 	SBLINKRECORD(sb, control);
1008 
1009 	sb->sb_mbtail = mlast;
1010 	SBLASTMBUFCHK(sb);
1011 
1012 	SBLASTRECORDCHK(sb);
1013 	return (1);
1014 }
1015 
1016 int
1017 sbappendcontrol(sb, m0, control)
1018 	struct sockbuf *sb;
1019 	struct mbuf *control, *m0;
1020 {
1021 	int retval;
1022 
1023 	SOCKBUF_LOCK(sb);
1024 	retval = sbappendcontrol_locked(sb, m0, control);
1025 	SOCKBUF_UNLOCK(sb);
1026 	return (retval);
1027 }
1028 
1029 /*
1030  * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
1031  * (n).  If (n) is NULL, the buffer is presumed empty.
1032  *
1033  * When the data is compressed, mbufs in the chain may be handled in one of
1034  * three ways:
1035  *
1036  * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
1037  *     record boundary, and no change in data type).
1038  *
1039  * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
1040  *     an mbuf already in the socket buffer.  This can occur if an
1041  *     appropriate mbuf exists, there is room, and no merging of data types
1042  *     will occur.
1043  *
1044  * (3) The mbuf may be appended to the end of the existing mbuf chain.
1045  *
1046  * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
1047  * end-of-record.
1048  */
1049 void
1050 sbcompress(sb, m, n)
1051 	register struct sockbuf *sb;
1052 	register struct mbuf *m, *n;
1053 {
1054 	register int eor = 0;
1055 	register struct mbuf *o;
1056 
1057 	SOCKBUF_LOCK_ASSERT(sb);
1058 
1059 	while (m) {
1060 		eor |= m->m_flags & M_EOR;
1061 		if (m->m_len == 0 &&
1062 		    (eor == 0 ||
1063 		     (((o = m->m_next) || (o = n)) &&
1064 		      o->m_type == m->m_type))) {
1065 			if (sb->sb_lastrecord == m)
1066 				sb->sb_lastrecord = m->m_next;
1067 			m = m_free(m);
1068 			continue;
1069 		}
1070 		if (n && (n->m_flags & M_EOR) == 0 &&
1071 		    M_WRITABLE(n) &&
1072 		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
1073 		    m->m_len <= M_TRAILINGSPACE(n) &&
1074 		    n->m_type == m->m_type) {
1075 			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
1076 			    (unsigned)m->m_len);
1077 			n->m_len += m->m_len;
1078 			sb->sb_cc += m->m_len;
1079 			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
1080 				/* XXX: Probably don't need.*/
1081 				sb->sb_ctl += m->m_len;
1082 			m = m_free(m);
1083 			continue;
1084 		}
1085 		if (n)
1086 			n->m_next = m;
1087 		else
1088 			sb->sb_mb = m;
1089 		sb->sb_mbtail = m;
1090 		sballoc(sb, m);
1091 		n = m;
1092 		m->m_flags &= ~M_EOR;
1093 		m = m->m_next;
1094 		n->m_next = 0;
1095 	}
1096 	if (eor) {
1097 		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
1098 		n->m_flags |= eor;
1099 	}
1100 	SBLASTMBUFCHK(sb);
1101 }
1102 
1103 /*
1104  * Free all mbufs in a sockbuf.
1105  * Check that all resources are reclaimed.
1106  */
1107 void
1108 sbflush_locked(sb)
1109 	register struct sockbuf *sb;
1110 {
1111 
1112 	SOCKBUF_LOCK_ASSERT(sb);
1113 
1114 	if (sb->sb_flags & SB_LOCK)
1115 		panic("sbflush_locked: locked");
1116 	while (sb->sb_mbcnt) {
1117 		/*
1118 		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
1119 		 * we would loop forever. Panic instead.
1120 		 */
1121 		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
1122 			break;
1123 		sbdrop_locked(sb, (int)sb->sb_cc);
1124 	}
1125 	if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
1126 		panic("sbflush_locked: cc %u || mb %p || mbcnt %u", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
1127 }
1128 
1129 void
1130 sbflush(sb)
1131 	register struct sockbuf *sb;
1132 {
1133 
1134 	SOCKBUF_LOCK(sb);
1135 	sbflush_locked(sb);
1136 	SOCKBUF_UNLOCK(sb);
1137 }
1138 
1139 /*
1140  * Drop data from (the front of) a sockbuf.
1141  */
1142 void
1143 sbdrop_locked(sb, len)
1144 	register struct sockbuf *sb;
1145 	register int len;
1146 {
1147 	register struct mbuf *m;
1148 	struct mbuf *next;
1149 
1150 	SOCKBUF_LOCK_ASSERT(sb);
1151 
1152 	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
1153 	while (len > 0) {
1154 		if (m == 0) {
1155 			if (next == 0)
1156 				panic("sbdrop");
1157 			m = next;
1158 			next = m->m_nextpkt;
1159 			continue;
1160 		}
1161 		if (m->m_len > len) {
1162 			m->m_len -= len;
1163 			m->m_data += len;
1164 			sb->sb_cc -= len;
1165 			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
1166 				sb->sb_ctl -= len;
1167 			break;
1168 		}
1169 		len -= m->m_len;
1170 		sbfree(sb, m);
1171 		m = m_free(m);
1172 	}
1173 	while (m && m->m_len == 0) {
1174 		sbfree(sb, m);
1175 		m = m_free(m);
1176 	}
1177 	if (m) {
1178 		sb->sb_mb = m;
1179 		m->m_nextpkt = next;
1180 	} else
1181 		sb->sb_mb = next;
1182 	/*
1183 	 * First part is an inline SB_EMPTY_FIXUP().  Second part
1184 	 * makes sure sb_lastrecord is up-to-date if we dropped
1185 	 * part of the last record.
1186 	 */
1187 	m = sb->sb_mb;
1188 	if (m == NULL) {
1189 		sb->sb_mbtail = NULL;
1190 		sb->sb_lastrecord = NULL;
1191 	} else if (m->m_nextpkt == NULL) {
1192 		sb->sb_lastrecord = m;
1193 	}
1194 }
1195 
1196 /*
1197  * Drop data from (the front of) a sockbuf.
1198  */
1199 void
1200 sbdrop(sb, len)
1201 	register struct sockbuf *sb;
1202 	register int len;
1203 {
1204 
1205 	SOCKBUF_LOCK(sb);
1206 	sbdrop_locked(sb, len);
1207 	SOCKBUF_UNLOCK(sb);
1208 }
1209 
1210 /*
1211  * Drop a record off the front of a sockbuf
1212  * and move the next record to the front.
1213  */
1214 void
1215 sbdroprecord_locked(sb)
1216 	register struct sockbuf *sb;
1217 {
1218 	register struct mbuf *m;
1219 
1220 	SOCKBUF_LOCK_ASSERT(sb);
1221 
1222 	m = sb->sb_mb;
1223 	if (m) {
1224 		sb->sb_mb = m->m_nextpkt;
1225 		do {
1226 			sbfree(sb, m);
1227 			m = m_free(m);
1228 		} while (m);
1229 	}
1230 	SB_EMPTY_FIXUP(sb);
1231 }
1232 
1233 /*
1234  * Drop a record off the front of a sockbuf
1235  * and move the next record to the front.
1236  */
1237 void
1238 sbdroprecord(sb)
1239 	register struct sockbuf *sb;
1240 {
1241 
1242 	SOCKBUF_LOCK(sb);
1243 	sbdroprecord_locked(sb);
1244 	SOCKBUF_UNLOCK(sb);
1245 }
1246 
1247 /*
1248  * Create a "control" mbuf containing the specified data
1249  * with the specified type for presentation on a socket buffer.
1250  */
1251 struct mbuf *
1252 sbcreatecontrol(p, size, type, level)
1253 	caddr_t p;
1254 	register int size;
1255 	int type, level;
1256 {
1257 	register struct cmsghdr *cp;
1258 	struct mbuf *m;
1259 
1260 	if (CMSG_SPACE((u_int)size) > MCLBYTES)
1261 		return ((struct mbuf *) NULL);
1262 	if (CMSG_SPACE((u_int)size) > MLEN)
1263 		m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
1264 	else
1265 		m = m_get(M_DONTWAIT, MT_CONTROL);
1266 	if (m == NULL)
1267 		return ((struct mbuf *) NULL);
1268 	cp = mtod(m, struct cmsghdr *);
1269 	m->m_len = 0;
1270 	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
1271 	    ("sbcreatecontrol: short mbuf"));
1272 	if (p != NULL)
1273 		(void)memcpy(CMSG_DATA(cp), p, size);
1274 	m->m_len = CMSG_SPACE(size);
1275 	cp->cmsg_len = CMSG_LEN(size);
1276 	cp->cmsg_level = level;
1277 	cp->cmsg_type = type;
1278 	return (m);
1279 }
1280 
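/*
 * Editor's example (hedged sketch, not part of the original file):
 * building a timestamp control mbuf for a received datagram, as a
 * protocol honouring SO_TIMESTAMP might do.
 */
#if 0
	struct timeval tv;
	struct mbuf *opts;

	microtime(&tv);
	opts = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
	    SCM_TIMESTAMP, SOL_SOCKET);
	/* opts is NULL if no mbuf was available; callers must check. */
#endif
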
1281 /*
1282  * Some routines that return EOPNOTSUPP for entry points that are not
1283  * supported by a protocol.  Fill in as needed.
1284  */
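
/*
 * Editor's example (hedged sketch, not part of the original file): a
 * hypothetical protocol wiring the stubs below into its pr_usrreqs
 * table, using C99 designated initializers for clarity.
 */
#if 0
struct pr_usrreqs foo_usrreqs = {
	.pru_accept =		pru_accept_notsupp,
	.pru_connect2 =		pru_connect2_notsupp,
	.pru_rcvoob =		pru_rcvoob_notsupp,
	.pru_sense =		pru_sense_null,
	.pru_sosetlabel =	pru_sosetlabel_null,
	/* ... protocol-specific entry points for the rest ... */
};
#endif
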
1285 int
1286 pru_abort_notsupp(struct socket *so)
1287 {
1288 	return EOPNOTSUPP;
1289 }
1290 
1291 int
1292 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
1293 {
1294 	return EOPNOTSUPP;
1295 }
1296 
1297 int
1298 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
1299 {
1300 	return EOPNOTSUPP;
1301 }
1302 
1303 int
1304 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
1305 {
1306 	return EOPNOTSUPP;
1307 }
1308 
1309 int
1310 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
1311 {
1312 	return EOPNOTSUPP;
1313 }
1314 
1315 int
1316 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
1317 {
1318 	return EOPNOTSUPP;
1319 }
1320 
1321 int
1322 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
1323 	struct ifnet *ifp, struct thread *td)
1324 {
1325 	return EOPNOTSUPP;
1326 }
1327 
1328 int
1329 pru_detach_notsupp(struct socket *so)
1330 {
1331 	return EOPNOTSUPP;
1332 }
1333 
1334 int
1335 pru_disconnect_notsupp(struct socket *so)
1336 {
1337 	return EOPNOTSUPP;
1338 }
1339 
1340 int
1341 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
1342 {
1343 	return EOPNOTSUPP;
1344 }
1345 
1346 int
1347 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
1348 {
1349 	return EOPNOTSUPP;
1350 }
1351 
1352 int
1353 pru_rcvd_notsupp(struct socket *so, int flags)
1354 {
1355 	return EOPNOTSUPP;
1356 }
1357 
1358 int
1359 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
1360 {
1361 	return EOPNOTSUPP;
1362 }
1363 
1364 int
1365 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
1366 	struct sockaddr *addr, struct mbuf *control, struct thread *td)
1367 {
1368 	return EOPNOTSUPP;
1369 }
1370 
1371 /*
1372  * This isn't really a ``null'' operation, but it's the default one
1373  * and doesn't do anything destructive.
1374  */
1375 int
1376 pru_sense_null(struct socket *so, struct stat *sb)
1377 {
1378 	sb->st_blksize = so->so_snd.sb_hiwat;
1379 	return 0;
1380 }
1381 
1382 int
1383 pru_shutdown_notsupp(struct socket *so)
1384 {
1385 	return EOPNOTSUPP;
1386 }
1387 
1388 int
1389 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
1390 {
1391 	return EOPNOTSUPP;
1392 }
1393 
1394 int
1395 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
1396 	struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1397 {
1398 	return EOPNOTSUPP;
1399 }
1400 
1401 int
1402 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
1403 	struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
1404 	int *flagsp)
1405 {
1406 	return EOPNOTSUPP;
1407 }
1408 
1409 int
1410 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
1411 	struct thread *td)
1412 {
1413 	return EOPNOTSUPP;
1414 }
1415 
1416 /*
1417  * For protocol types that don't keep cached copies of labels in their
1418  * pcbs, provide a null sosetlabel that does a NOOP.
1419  */
1420 void
1421 pru_sosetlabel_null(struct socket *so)
1422 {
1423 
1424 }
1425 
1426 /*
1427  * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
1428  */
1429 struct sockaddr *
1430 sodupsockaddr(const struct sockaddr *sa, int mflags)
1431 {
1432 	struct sockaddr *sa2;
1433 
1434 	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
1435 	if (sa2)
1436 		bcopy(sa, sa2, sa->sa_len);
1437 	return sa2;
1438 }
1439 
1440 /*
1441  * Create an external-format (``xsocket'') structure using the information
1442  * in the kernel-format socket structure pointed to by so.  This is done
1443  * to reduce the spew of irrelevant information over this interface,
1444  * to isolate user code from changes in the kernel structure, and
1445  * potentially to provide information-hiding if we decide that
1446  * some of this information should be hidden from users.
1447  */
1448 void
1449 sotoxsocket(struct socket *so, struct xsocket *xso)
1450 {
1451 	xso->xso_len = sizeof *xso;
1452 	xso->xso_so = so;
1453 	xso->so_type = so->so_type;
1454 	xso->so_options = so->so_options;
1455 	xso->so_linger = so->so_linger;
1456 	xso->so_state = so->so_state;
1457 	xso->so_pcb = so->so_pcb;
1458 	xso->xso_protocol = so->so_proto->pr_protocol;
1459 	xso->xso_family = so->so_proto->pr_domain->dom_family;
1460 	xso->so_qlen = so->so_qlen;
1461 	xso->so_incqlen = so->so_incqlen;
1462 	xso->so_qlimit = so->so_qlimit;
1463 	xso->so_timeo = so->so_timeo;
1464 	xso->so_error = so->so_error;
1465 	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
1466 	xso->so_oobmark = so->so_oobmark;
1467 	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
1468 	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
1469 	xso->so_uid = so->so_cred->cr_uid;
1470 }
1471 
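/*
 * Editor's example (hedged sketch, not part of the original file):
 * exporting a socket to userland from a pcblist-style sysctl handler;
 * "so" and "req" come from the surrounding handler.
 */
#if 0
	struct xsocket xso;
	int error;

	bzero(&xso, sizeof(xso));
	sotoxsocket(so, &xso);
	error = SYSCTL_OUT(req, &xso, sizeof(xso));
#endif
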
1472 /*
1473  * This does the same for sockbufs.  Note that the xsockbuf structure,
1474  * since it is always embedded in a socket, does not include a self
1475  * pointer nor a length.  We make this entry point public in case
1476  * some other mechanism needs it.
1477  */
1478 void
1479 sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
1480 {
1481 	xsb->sb_cc = sb->sb_cc;
1482 	xsb->sb_hiwat = sb->sb_hiwat;
1483 	xsb->sb_mbcnt = sb->sb_mbcnt;
1484 	xsb->sb_mbmax = sb->sb_mbmax;
1485 	xsb->sb_lowat = sb->sb_lowat;
1486 	xsb->sb_flags = sb->sb_flags;
1487 	xsb->sb_timeo = sb->sb_timeo;
1488 }
1489 
1490 /*
1491  * Here is the definition of some of the basic objects in the kern.ipc
1492  * branch of the MIB.
1493  */
1494 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
1495 
1496 /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
1497 static int dummy;
1498 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
1499 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
1500     &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
1501 SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RDTUN,
1502     &maxsockets, 0, "Maximum number of sockets available");
1503 SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
1504     &sb_efficiency, 0, "");
1505 
1506 /*
1507  * Initialise maxsockets
1508  */
1509 static void
init_maxsockets(void *ignored)
1510 {
1511 	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
1512 	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
1513 }
1514 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
1515