xref: /freebsd/sys/kern/uipc_sockbuf.c (revision 7afc53b8dfcc7d5897920ce6cc7e842fbb4ab813)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_mac.h"
36 #include "opt_param.h"
37 
38 #include <sys/param.h>
39 #include <sys/aio.h> /* for aio_swake proto */
40 #include <sys/domain.h>
41 #include <sys/event.h>
42 #include <sys/file.h>	/* for maxfiles */
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/mac.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/protosw.h>
51 #include <sys/resourcevar.h>
52 #include <sys/signalvar.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/stat.h>
56 #include <sys/sysctl.h>
57 #include <sys/systm.h>
58 
59 int	maxsockets;
60 
61 void (*aio_swake)(struct socket *, struct sockbuf *);
62 
63 /*
64  * Primitive routines for operating on sockets and socket buffers
65  */
66 
67 u_long	sb_max = SB_MAX;
68 static	u_long sb_max_adj =
69     SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
70 
71 static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */
72 
73 /*
74  * Procedures to manipulate state flags of socket
75  * and do appropriate wakeups.  Normal sequence from the
76  * active (originating) side is that soisconnecting() is
77  * called during processing of connect() call,
78  * resulting in an eventual call to soisconnected() if/when the
79  * connection is established.  When the connection is torn down
80  * soisdisconnecting() is called during processing of disconnect() call,
81  * and soisdisconnected() is called when the connection to the peer
82  * is totally severed.  The semantics of these routines are such that
83  * connectionless protocols can call soisconnected() and soisdisconnected()
84  * only, bypassing the in-progress calls when setting up a ``connection''
85  * takes no time.
86  *
87  * From the passive side, a socket is created with
88  * two queues of sockets: so_incomp for connections in progress
89  * and so_comp for connections already made and awaiting user acceptance.
90  * As a protocol is preparing incoming connections, it creates a socket
91  * structure queued on so_incomp by calling sonewconn().  When the connection
92  * is established, soisconnected() is called, and transfers the
93  * socket structure to so_comp, making it available to accept().
94  *
95  * If a socket is closed with sockets on either
96  * so_incomp or so_comp, these sockets are dropped.
97  *
98  * If higher level protocols are implemented in
99  * the kernel, the wakeups done here will sometimes
100  * cause software-interrupt process scheduling.
101  */
102 
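/*
 * Illustrative sketch of the connectionless case described above: a
 * datagram-style protocol may skip soisconnecting() entirely and move
 * the socket straight to the connected (and, later, disconnected)
 * state.  The example_* functions are hypothetical and compiled out.
 */
#if 0
static void
example_dgram_connect_done(struct socket *so)
{

	/* Setting up a ``connection'' takes no time: mark it connected. */
	soisconnected(so);
}

static void
example_dgram_disconnect(struct socket *so)
{

	/* Tear-down is likewise immediate. */
	soisdisconnected(so);
}
#endif
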
103 void
104 soisconnecting(so)
105 	register struct socket *so;
106 {
107 
108 	SOCK_LOCK(so);
109 	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
110 	so->so_state |= SS_ISCONNECTING;
111 	SOCK_UNLOCK(so);
112 }
113 
114 void
115 soisconnected(so)
116 	struct socket *so;
117 {
118 	struct socket *head;
119 
120 	ACCEPT_LOCK();
121 	SOCK_LOCK(so);
122 	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
123 	so->so_state |= SS_ISCONNECTED;
124 	head = so->so_head;
125 	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
126 		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
127 			SOCK_UNLOCK(so);
128 			TAILQ_REMOVE(&head->so_incomp, so, so_list);
129 			head->so_incqlen--;
130 			so->so_qstate &= ~SQ_INCOMP;
131 			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
132 			head->so_qlen++;
133 			so->so_qstate |= SQ_COMP;
134 			ACCEPT_UNLOCK();
135 			sorwakeup(head);
136 			wakeup_one(&head->so_timeo);
137 		} else {
138 			ACCEPT_UNLOCK();
139 			so->so_upcall =
140 			    head->so_accf->so_accept_filter->accf_callback;
141 			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
142 			so->so_rcv.sb_flags |= SB_UPCALL;
143 			so->so_options &= ~SO_ACCEPTFILTER;
144 			SOCK_UNLOCK(so);
145 			so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
146 		}
147 		return;
148 	}
149 	SOCK_UNLOCK(so);
150 	ACCEPT_UNLOCK();
151 	wakeup(&so->so_timeo);
152 	sorwakeup(so);
153 	sowwakeup(so);
154 }
155 
156 void
157 soisdisconnecting(so)
158 	register struct socket *so;
159 {
160 
161 	/*
162 	 * XXXRW: This code assumes that SOCK_LOCK(so) and
163 	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
164 	 */
165 	SOCKBUF_LOCK(&so->so_rcv);
166 	so->so_state &= ~SS_ISCONNECTING;
167 	so->so_state |= SS_ISDISCONNECTING;
168 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
169 	sorwakeup_locked(so);
170 	SOCKBUF_LOCK(&so->so_snd);
171 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
172 	sowwakeup_locked(so);
173 	wakeup(&so->so_timeo);
174 }
175 
176 void
177 soisdisconnected(so)
178 	register struct socket *so;
179 {
180 
181 	/*
182 	 * XXXRW: This code assumes that SOCK_LOCK(so) and
183 	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
184 	 */
185 	SOCKBUF_LOCK(&so->so_rcv);
186 	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
187 	so->so_state |= SS_ISDISCONNECTED;
188 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
189 	sorwakeup_locked(so);
190 	SOCKBUF_LOCK(&so->so_snd);
191 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
192 	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
193 	sowwakeup_locked(so);
194 	wakeup(&so->so_timeo);
195 }
196 
197 /*
198  * When an attempt at a new connection is noted on a socket
199  * which accepts connections, sonewconn is called.  If the
200  * connection is possible (subject to space constraints, etc.)
201  * then we allocate a new structure, properly linked into the
202  * data structure of the original socket, and return this.
203  * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
204  *
205  * note: the ref count on the socket is 0 on return
206  */
207 struct socket *
208 sonewconn(head, connstatus)
209 	register struct socket *head;
210 	int connstatus;
211 {
212 	register struct socket *so;
213 	int over;
214 
215 	ACCEPT_LOCK();
216 	over = (head->so_qlen > 3 * head->so_qlimit / 2);
217 	ACCEPT_UNLOCK();
218 	if (over)
219 		return (NULL);
220 	so = soalloc(M_NOWAIT);
221 	if (so == NULL)
222 		return (NULL);
223 	if ((head->so_options & SO_ACCEPTFILTER) != 0)
224 		connstatus = 0;
225 	so->so_head = head;
226 	so->so_type = head->so_type;
227 	so->so_options = head->so_options &~ SO_ACCEPTCONN;
228 	so->so_linger = head->so_linger;
229 	so->so_state = head->so_state | SS_NOFDREF;
230 	so->so_proto = head->so_proto;
231 	so->so_timeo = head->so_timeo;
232 	so->so_cred = crhold(head->so_cred);
233 #ifdef MAC
234 	SOCK_LOCK(head);
235 	mac_create_socket_from_socket(head, so);
236 	SOCK_UNLOCK(head);
237 #endif
238 	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
239 	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
240 	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
241 	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
242 		sodealloc(so);
243 		return (NULL);
244 	}
245 	so->so_state |= connstatus;
246 	ACCEPT_LOCK();
247 	if (connstatus) {
248 		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
249 		so->so_qstate |= SQ_COMP;
250 		head->so_qlen++;
251 	} else {
252 		/*
253 		 * Keep removing sockets from the head until there's room for
254 		 * us to insert on the tail.  In pre-locking revisions, this
255 		 * was a simple if(), but as we could be racing with other
256 		 * threads and soabort() requires dropping locks, we must
257 		 * loop waiting for the condition to be true.
258 		 */
259 		while (head->so_incqlen > head->so_qlimit) {
260 			struct socket *sp;
261 			sp = TAILQ_FIRST(&head->so_incomp);
262 			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
263 			head->so_incqlen--;
264 			sp->so_qstate &= ~SQ_INCOMP;
265 			sp->so_head = NULL;
266 			ACCEPT_UNLOCK();
267 			(void) soabort(sp);
268 			ACCEPT_LOCK();
269 		}
270 		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
271 		so->so_qstate |= SQ_INCOMP;
272 		head->so_incqlen++;
273 	}
274 	ACCEPT_UNLOCK();
275 	if (connstatus) {
276 		sorwakeup(head);
277 		wakeup_one(&head->so_timeo);
278 	}
279 	return (so);
280 }
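
/*
 * Illustrative sketch of the usual two-step arrival path described in
 * the comments above: the protocol queues an embryonic socket with
 * sonewconn() and, once its handshake completes, promotes it with
 * soisconnected() so that accept() can return it.  The example_*
 * function is hypothetical and compiled out.
 */
#if 0
static struct socket *
example_incoming_connection(struct socket *head)
{
	struct socket *so;

	/* Queue a new socket on head->so_incomp (connstatus == 0). */
	so = sonewconn(head, 0);
	if (so == NULL)
		return (NULL);	/* listen queue full or out of resources */
	/*
	 * ...protocol-specific handshake runs here...  When it finishes,
	 * soisconnected() moves the socket to head->so_comp.
	 */
	soisconnected(so);
	return (so);
}
#endif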
281 
282 /*
283  * Socantsendmore indicates that no more data will be sent on the
284  * socket; it is normally applied to a socket by the protocol code
285  * (in the PRU_SHUTDOWN case) when the user informs the system that
286  * no more data is to be sent.  Socantrcvmore indicates that no more data
287  * will be received, and will normally be applied to the socket by a
288  * protocol when it detects that the peer will send no more data.
289  * Data queued for reading in the socket may yet be read.
290  */
291 void
292 socantsendmore_locked(so)
293 	struct socket *so;
294 {
295 
296 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
297 
298 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
299 	sowwakeup_locked(so);
300 	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
301 }
302 
303 void
304 socantsendmore(so)
305 	struct socket *so;
306 {
307 
308 	SOCKBUF_LOCK(&so->so_snd);
309 	socantsendmore_locked(so);
310 	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
311 }
312 
313 void
314 socantrcvmore_locked(so)
315 	struct socket *so;
316 {
317 
318 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
319 
320 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
321 	sorwakeup_locked(so);
322 	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
323 }
324 
325 void
326 socantrcvmore(so)
327 	struct socket *so;
328 {
329 
330 	SOCKBUF_LOCK(&so->so_rcv);
331 	socantrcvmore_locked(so);
332 	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
333 }
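
/*
 * Illustrative sketch: a stream protocol that has seen the peer's
 * end-of-data indication marks the receive side only; data already
 * queued may still be read.  The example_* function is hypothetical
 * and compiled out.
 */
#if 0
static void
example_peer_closed_write(struct socket *so)
{

	socantrcvmore(so);
}
#endif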
334 
335 /*
336  * Wait for data to arrive at/drain from a socket buffer.
337  */
338 int
339 sbwait(sb)
340 	struct sockbuf *sb;
341 {
342 
343 	SOCKBUF_LOCK_ASSERT(sb);
344 
345 	sb->sb_flags |= SB_WAIT;
346 	return (msleep(&sb->sb_cc, &sb->sb_mtx,
347 	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
348 	    sb->sb_timeo));
349 }
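
/*
 * Illustrative sketch of the usual sbwait() pattern (modeled loosely
 * on soreceive()): hold the sockbuf lock and sleep until at least
 * sb_lowat bytes are queued or the receive side has been shut down.
 * The example_* function is hypothetical and compiled out.
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	int error = 0;

	SOCKBUF_LOCK(sb);
	while (sb->sb_cc < (u_int)sb->sb_lowat &&
	    (sb->sb_state & SBS_CANTRCVMORE) == 0) {
		/* msleep() drops the lock while asleep; may return EINTR. */
		error = sbwait(sb);
		if (error)
			break;
	}
	SOCKBUF_UNLOCK(sb);
	return (error);
}
#endif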
350 
351 /*
352  * Lock a sockbuf already known to be locked by another thread; sleep
353  * until the lock is released, returning any error from msleep() (EINTR).
354  */
355 int
356 sb_lock(sb)
357 	register struct sockbuf *sb;
358 {
359 	int error;
360 
361 	SOCKBUF_LOCK_ASSERT(sb);
362 
363 	while (sb->sb_flags & SB_LOCK) {
364 		sb->sb_flags |= SB_WANT;
365 		error = msleep(&sb->sb_flags, &sb->sb_mtx,
366 		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
367 		    "sblock", 0);
368 		if (error)
369 			return (error);
370 	}
371 	sb->sb_flags |= SB_LOCK;
372 	return (0);
373 }
374 
375 /*
376  * Wakeup processes waiting on a socket buffer.  Do asynchronous
377  * notification via SIGIO if the socket has the SS_ASYNC flag set.
378  *
379  * Called with the socket buffer lock held; will release the lock by the end
380  * of the function.  This allows the caller to acquire the socket buffer lock
381  * while testing for the need for various sorts of wakeup and hold it through
382  * to the point where it's no longer required.  We currently hold the lock
383  * through calls out to other subsystems (with the exception of kqueue), and
384  * then release it to avoid lock order issues.  It's not clear that's
385  * correct.
386  */
387 void
388 sowakeup(so, sb)
389 	register struct socket *so;
390 	register struct sockbuf *sb;
391 {
392 
393 	SOCKBUF_LOCK_ASSERT(sb);
394 
395 	selwakeuppri(&sb->sb_sel, PSOCK);
396 	sb->sb_flags &= ~SB_SEL;
397 	if (sb->sb_flags & SB_WAIT) {
398 		sb->sb_flags &= ~SB_WAIT;
399 		wakeup(&sb->sb_cc);
400 	}
401 	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
402 	SOCKBUF_UNLOCK(sb);
403 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
404 		pgsigio(&so->so_sigio, SIGIO, 0);
405 	if (sb->sb_flags & SB_UPCALL)
406 		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
407 	if (sb->sb_flags & SB_AIO)
408 		aio_swake(so, sb);
409 	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
410 }
411 
412 /*
413  * Socket buffer (struct sockbuf) utility routines.
414  *
415  * Each socket contains two socket buffers: one for sending data and
416  * one for receiving data.  Each buffer contains a queue of mbufs,
417  * information about the number of mbufs and amount of data in the
418  * queue, and other fields allowing select() statements and notification
419  * on data availability to be implemented.
420  *
421  * Data stored in a socket buffer is maintained as a list of records.
422  * Each record is a list of mbufs chained together with the m_next
423  * field.  Records are chained together with the m_nextpkt field. The upper
424  * level routine soreceive() expects the following conventions to be
425  * observed when placing information in the receive buffer:
426  *
427  * 1. If the protocol requires each message be preceded by the sender's
428  *    name, then a record containing that name must be present before
429  *    any associated data (mbufs must be of type MT_SONAME).
430  * 2. If the protocol supports the exchange of ``access rights'' (really
431  *    just additional data associated with the message), and there are
432  *    ``rights'' to be received, then a record containing this data
433  *    should be present (mbufs must be of type MT_RIGHTS).
434  * 3. If a name or rights record exists, then it must be followed by
435  *    a data record, perhaps of zero length.
436  *
437  * Before using a new socket structure it is first necessary to reserve
438  * buffer space to the socket, by calling sbreserve().  This should commit
439  * some of the available buffer space in the system buffer pool for the
440  * socket (currently, it does nothing but enforce limits).  The space
441  * should be released by calling sbrelease() when the socket is destroyed.
442  */
443 
444 int
445 soreserve(so, sndcc, rcvcc)
446 	register struct socket *so;
447 	u_long sndcc, rcvcc;
448 {
449 	struct thread *td = curthread;
450 
451 	SOCKBUF_LOCK(&so->so_snd);
452 	SOCKBUF_LOCK(&so->so_rcv);
453 	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
454 		goto bad;
455 	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
456 		goto bad2;
457 	if (so->so_rcv.sb_lowat == 0)
458 		so->so_rcv.sb_lowat = 1;
459 	if (so->so_snd.sb_lowat == 0)
460 		so->so_snd.sb_lowat = MCLBYTES;
461 	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
462 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
463 	SOCKBUF_UNLOCK(&so->so_rcv);
464 	SOCKBUF_UNLOCK(&so->so_snd);
465 	return (0);
466 bad2:
467 	sbrelease_locked(&so->so_snd, so);
468 bad:
469 	SOCKBUF_UNLOCK(&so->so_rcv);
470 	SOCKBUF_UNLOCK(&so->so_snd);
471 	return (ENOBUFS);
472 }
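
/*
 * Illustrative sketch: a protocol's attach routine typically reserves
 * send and receive buffer space before the socket is first used; the
 * space is later given back via sbrelease() when the socket is torn
 * down.  The example_* names and the 8K sizes are hypothetical, and
 * the sketch is compiled out.
 */
#if 0
#define	EXAMPLE_SENDSPACE	(8 * 1024)
#define	EXAMPLE_RECVSPACE	(8 * 1024)

static int
example_pru_attach(struct socket *so, int proto, struct thread *td)
{
	int error;

	error = soreserve(so, EXAMPLE_SENDSPACE, EXAMPLE_RECVSPACE);
	if (error)
		return (error);		/* typically ENOBUFS */
	/* ...allocate and link the protocol control block here... */
	return (0);
}
#endif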
473 
474 static int
475 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
476 {
477 	int error = 0;
478 	u_long old_sb_max = sb_max;
479 
480 	error = SYSCTL_OUT(req, arg1, sizeof(u_long));
481 	if (error || !req->newptr)
482 		return (error);
483 	error = SYSCTL_IN(req, arg1, sizeof(u_long));
484 	if (error)
485 		return (error);
486 	if (sb_max < MSIZE + MCLBYTES) {
487 		sb_max = old_sb_max;
488 		return (EINVAL);
489 	}
490 	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
491 	return (0);
492 }
493 
494 /*
495  * Allot mbufs to a sockbuf.
496  * Attempt to scale mbmax so that mbcnt doesn't become limiting
497  * if buffering efficiency is near the normal case.
498  */
499 int
500 sbreserve_locked(sb, cc, so, td)
501 	struct sockbuf *sb;
502 	u_long cc;
503 	struct socket *so;
504 	struct thread *td;
505 {
506 	rlim_t sbsize_limit;
507 
508 	SOCKBUF_LOCK_ASSERT(sb);
509 
510 	/*
511 	 * td will only be NULL when we're in an interrupt
512 	 * (e.g. in tcp_input())
513 	 */
514 	if (cc > sb_max_adj)
515 		return (0);
516 	if (td != NULL) {
517 		PROC_LOCK(td->td_proc);
518 		sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
519 		PROC_UNLOCK(td->td_proc);
520 	} else
521 		sbsize_limit = RLIM_INFINITY;
522 	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
523 	    sbsize_limit))
524 		return (0);
525 	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
526 	if (sb->sb_lowat > sb->sb_hiwat)
527 		sb->sb_lowat = sb->sb_hiwat;
528 	return (1);
529 }
530 
531 int
532 sbreserve(sb, cc, so, td)
533 	struct sockbuf *sb;
534 	u_long cc;
535 	struct socket *so;
536 	struct thread *td;
537 {
538 	int error;
539 
540 	SOCKBUF_LOCK(sb);
541 	error = sbreserve_locked(sb, cc, so, td);
542 	SOCKBUF_UNLOCK(sb);
543 	return (error);
544 }
545 
546 /*
547  * Free mbufs held by a socket, and reserved mbuf space.
548  */
549 void
550 sbrelease_locked(sb, so)
551 	struct sockbuf *sb;
552 	struct socket *so;
553 {
554 
555 	SOCKBUF_LOCK_ASSERT(sb);
556 
557 	sbflush_locked(sb);
558 	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
559 	    RLIM_INFINITY);
560 	sb->sb_mbmax = 0;
561 }
562 
563 void
564 sbrelease(sb, so)
565 	struct sockbuf *sb;
566 	struct socket *so;
567 {
568 
569 	SOCKBUF_LOCK(sb);
570 	sbrelease_locked(sb, so);
571 	SOCKBUF_UNLOCK(sb);
572 }
573 /*
574  * Routines to add and remove
575  * data from an mbuf queue.
576  *
577  * The routines sbappend() or sbappendrecord() are normally called to
578  * append new mbufs to a socket buffer, after checking that adequate
579  * space is available, comparing the function sbspace() with the amount
580  * of data to be added.  sbappendrecord() differs from sbappend() in
581  * that data supplied is treated as the beginning of a new record.
582  * To place a sender's address, optional access rights, and data in a
583  * socket receive buffer, sbappendaddr() should be used.  To place
584  * access rights and data in a socket receive buffer, sbappendrights()
585  * should be used.  In either case, the new data begins a new record.
586  * Note that unlike sbappend() and sbappendrecord(), these routines check
587  * for the caller that there will be enough space to store the data.
588  * Each fails if there is not enough space, or if it cannot find mbufs
589  * to store additional information in.
590  *
591  * Reliable protocols may use the socket send buffer to hold data
592  * awaiting acknowledgement.  Data is normally copied from a socket
593  * send buffer in a protocol with m_copy for output to a peer,
594  * and is then removed from the socket buffer with sbdrop()
595  * or sbdroprecord() when the data is acknowledged by the peer.
596  */
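
/*
 * Illustrative sketch of the reliable-protocol pattern described
 * above: data stays in the send buffer until acknowledged, copies are
 * made with m_copym() for (re)transmission, and sbdrop() releases the
 * acknowledged prefix.  The example_* function and its "tosend"/
 * "acked" bookkeeping are hypothetical; the sketch is compiled out.
 */
#if 0
static void
example_output_and_ack(struct socket *so, int tosend, int acked)
{
	struct mbuf *m = NULL;

	SOCKBUF_LOCK(&so->so_snd);
	if (tosend > 0 && (u_int)tosend <= so->so_snd.sb_cc)
		/* Copy, do not remove: the original stays for retransmit. */
		m = m_copym(so->so_snd.sb_mb, 0, tosend, M_DONTWAIT);
	SOCKBUF_UNLOCK(&so->so_snd);
	if (m != NULL) {
		/* ...hand the copy to the lower layer here... */
		m_freem(m);
	}

	/*
	 * Later, when the peer acknowledges "acked" bytes (which must
	 * not exceed what is queued), drop them and wake any writers.
	 */
	sbdrop(&so->so_snd, acked);
	sowwakeup(so);
}
#endif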
597 
598 #ifdef SOCKBUF_DEBUG
599 void
600 sblastrecordchk(struct sockbuf *sb, const char *file, int line)
601 {
602 	struct mbuf *m = sb->sb_mb;
603 
604 	SOCKBUF_LOCK_ASSERT(sb);
605 
606 	while (m && m->m_nextpkt)
607 		m = m->m_nextpkt;
608 
609 	if (m != sb->sb_lastrecord) {
610 		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
611 			__func__, sb->sb_mb, sb->sb_lastrecord, m);
612 		printf("packet chain:\n");
613 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
614 			printf("\t%p\n", m);
615 		panic("%s from %s:%u", __func__, file, line);
616 	}
617 }
618 
619 void
620 sblastmbufchk(struct sockbuf *sb, const char *file, int line)
621 {
622 	struct mbuf *m = sb->sb_mb;
623 	struct mbuf *n;
624 
625 	SOCKBUF_LOCK_ASSERT(sb);
626 
627 	while (m && m->m_nextpkt)
628 		m = m->m_nextpkt;
629 
630 	while (m && m->m_next)
631 		m = m->m_next;
632 
633 	if (m != sb->sb_mbtail) {
634 		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
635 			__func__, sb->sb_mb, sb->sb_mbtail, m);
636 		printf("packet tree:\n");
637 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
638 			printf("\t");
639 			for (n = m; n != NULL; n = n->m_next)
640 				printf("%p ", n);
641 			printf("\n");
642 		}
643 		panic("%s from %s:%u", __func__, file, line);
644 	}
645 }
646 #endif /* SOCKBUF_DEBUG */
647 
648 #define SBLINKRECORD(sb, m0) do {					\
649 	SOCKBUF_LOCK_ASSERT(sb);					\
650 	if ((sb)->sb_lastrecord != NULL)				\
651 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
652 	else								\
653 		(sb)->sb_mb = (m0);					\
654 	(sb)->sb_lastrecord = (m0);					\
655 } while (/*CONSTCOND*/0)
656 
657 /*
658  * Append mbuf chain m to the last record in the
659  * socket buffer sb.  The additional space associated with
660  * the mbuf chain is recorded in sb.  Empty mbufs are
661  * discarded and mbufs are compacted where possible.
662  */
663 void
664 sbappend_locked(sb, m)
665 	struct sockbuf *sb;
666 	struct mbuf *m;
667 {
668 	register struct mbuf *n;
669 
670 	SOCKBUF_LOCK_ASSERT(sb);
671 
672 	if (m == 0)
673 		return;
674 
675 	SBLASTRECORDCHK(sb);
676 	n = sb->sb_mb;
677 	if (n) {
678 		while (n->m_nextpkt)
679 			n = n->m_nextpkt;
680 		do {
681 			if (n->m_flags & M_EOR) {
682 				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
683 				return;
684 			}
685 		} while (n->m_next && (n = n->m_next));
686 	} else {
687 		/*
688 		 * XXX Would like to simply use sb_mbtail here, but
689 		 * XXX I need to verify that I won't miss an EOR that
690 		 * XXX way.
691 		 */
692 		if ((n = sb->sb_lastrecord) != NULL) {
693 			do {
694 				if (n->m_flags & M_EOR) {
695 					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
696 					return;
697 				}
698 			} while (n->m_next && (n = n->m_next));
699 		} else {
700 			/*
701 			 * If this is the first record in the socket buffer,
702 			 * it's also the last record.
703 			 */
704 			sb->sb_lastrecord = m;
705 		}
706 	}
707 	sbcompress(sb, m, n);
708 	SBLASTRECORDCHK(sb);
709 }
710 
711 /*
712  * Append mbuf chain m to the last record in the
713  * socket buffer sb.  The additional space associated with
714  * the mbuf chain is recorded in sb.  Empty mbufs are
715  * discarded and mbufs are compacted where possible.
716  */
717 void
718 sbappend(sb, m)
719 	struct sockbuf *sb;
720 	struct mbuf *m;
721 {
722 
723 	SOCKBUF_LOCK(sb);
724 	sbappend_locked(sb, m);
725 	SOCKBUF_UNLOCK(sb);
726 }
727 
728 /*
729  * This version of sbappend() should only be used when the caller
730  * absolutely knows that there will never be more than one record
731  * in the socket buffer, that is, a stream protocol (such as TCP).
732  */
733 void
734 sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
735 {
736 	SOCKBUF_LOCK_ASSERT(sb);
737 
738 	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
739 	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));
740 
741 	SBLASTMBUFCHK(sb);
742 
743 	sbcompress(sb, m, sb->sb_mbtail);
744 
745 	sb->sb_lastrecord = sb->sb_mb;
746 	SBLASTRECORDCHK(sb);
747 }
748 
749 /*
750  * This version of sbappend() should only be used when the caller
751  * absolutely knows that there will never be more than one record
752  * in the socket buffer, that is, a stream protocol (such as TCP).
753  */
754 void
755 sbappendstream(struct sockbuf *sb, struct mbuf *m)
756 {
757 
758 	SOCKBUF_LOCK(sb);
759 	sbappendstream_locked(sb, m);
760 	SOCKBUF_UNLOCK(sb);
761 }
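
/*
 * Illustrative sketch: a stream protocol (TCP-like) delivering
 * in-order data to the single-record receive buffer and then waking
 * any reader.  The example_* function is hypothetical and compiled
 * out.
 */
#if 0
static void
example_stream_deliver(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);
		return;
	}
	sbappendstream_locked(&so->so_rcv, m);
	/* sorwakeup_locked() releases the sockbuf lock for us. */
	sorwakeup_locked(so);
}
#endif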
762 
763 #ifdef SOCKBUF_DEBUG
764 void
765 sbcheck(sb)
766 	struct sockbuf *sb;
767 {
768 	struct mbuf *m;
769 	struct mbuf *n = 0;
770 	u_long len = 0, mbcnt = 0;
771 
772 	SOCKBUF_LOCK_ASSERT(sb);
773 
774 	for (m = sb->sb_mb; m; m = n) {
775 	    n = m->m_nextpkt;
776 	    for (; m; m = m->m_next) {
777 		len += m->m_len;
778 		mbcnt += MSIZE;
779 		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
780 			mbcnt += m->m_ext.ext_size;
781 	    }
782 	}
783 	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
784 		printf("cc %ld != %u || mbcnt %ld != %u\n", len, sb->sb_cc,
785 		    mbcnt, sb->sb_mbcnt);
786 		panic("sbcheck");
787 	}
788 }
789 #endif
790 
791 /*
792  * As above, except the mbuf chain
793  * begins a new record.
794  */
795 void
796 sbappendrecord_locked(sb, m0)
797 	register struct sockbuf *sb;
798 	register struct mbuf *m0;
799 {
800 	register struct mbuf *m;
801 
802 	SOCKBUF_LOCK_ASSERT(sb);
803 
804 	if (m0 == 0)
805 		return;
806 	m = sb->sb_mb;
807 	if (m)
808 		while (m->m_nextpkt)
809 			m = m->m_nextpkt;
810 	/*
811 	 * Put the first mbuf on the queue.
812 	 * Note this permits zero length records.
813 	 */
814 	sballoc(sb, m0);
815 	SBLASTRECORDCHK(sb);
816 	SBLINKRECORD(sb, m0);
817 	if (m)
818 		m->m_nextpkt = m0;
819 	else
820 		sb->sb_mb = m0;
821 	m = m0->m_next;
822 	m0->m_next = 0;
823 	if (m && (m0->m_flags & M_EOR)) {
824 		m0->m_flags &= ~M_EOR;
825 		m->m_flags |= M_EOR;
826 	}
827 	sbcompress(sb, m, m0);
828 }
829 
830 /*
831  * As above, except the mbuf chain
832  * begins a new record.
833  */
834 void
835 sbappendrecord(sb, m0)
836 	register struct sockbuf *sb;
837 	register struct mbuf *m0;
838 {
839 
840 	SOCKBUF_LOCK(sb);
841 	sbappendrecord_locked(sb, m0);
842 	SOCKBUF_UNLOCK(sb);
843 }
844 
845 /*
846  * As above except that OOB data
847  * is inserted at the beginning of the sockbuf,
848  * but after any other OOB data.
849  */
850 void
851 sbinsertoob_locked(sb, m0)
852 	register struct sockbuf *sb;
853 	register struct mbuf *m0;
854 {
855 	register struct mbuf *m;
856 	register struct mbuf **mp;
857 
858 	SOCKBUF_LOCK_ASSERT(sb);
859 
860 	if (m0 == 0)
861 		return;
862 	for (mp = &sb->sb_mb; *mp ; mp = &((*mp)->m_nextpkt)) {
863 	    m = *mp;
864 	    again:
865 		switch (m->m_type) {
866 
867 		case MT_OOBDATA:
868 			continue;		/* WANT next train */
869 
870 		case MT_CONTROL:
871 			m = m->m_next;
872 			if (m)
873 				goto again;	/* inspect THIS train further */
874 		}
875 		break;
876 	}
877 	/*
878 	 * Put the first mbuf on the queue.
879 	 * Note this permits zero length records.
880 	 */
881 	sballoc(sb, m0);
882 	m0->m_nextpkt = *mp;
883 	*mp = m0;
884 	m = m0->m_next;
885 	m0->m_next = 0;
886 	if (m && (m0->m_flags & M_EOR)) {
887 		m0->m_flags &= ~M_EOR;
888 		m->m_flags |= M_EOR;
889 	}
890 	sbcompress(sb, m, m0);
891 }
892 
893 /*
894  * As above except that OOB data
895  * is inserted at the beginning of the sockbuf,
896  * but after any other OOB data.
897  */
898 void
899 sbinsertoob(sb, m0)
900 	register struct sockbuf *sb;
901 	register struct mbuf *m0;
902 {
903 
904 	SOCKBUF_LOCK(sb);
905 	sbinsertoob_locked(sb, m0);
906 	SOCKBUF_UNLOCK(sb);
907 }
908 
909 /*
910  * Append address and data, and optionally, control (ancillary) data
911  * to the receive queue of a socket.  If present,
912  * m0 must include a packet header with total length.
913  * Returns 0 if no space in sockbuf or insufficient mbufs.
914  */
915 int
916 sbappendaddr_locked(sb, asa, m0, control)
917 	struct sockbuf *sb;
918 	const struct sockaddr *asa;
919 	struct mbuf *m0, *control;
920 {
921 	struct mbuf *m, *n, *nlast;
922 	int space = asa->sa_len;
923 
924 	SOCKBUF_LOCK_ASSERT(sb);
925 
926 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
927 		panic("sbappendaddr_locked");
928 	if (m0)
929 		space += m0->m_pkthdr.len;
930 	space += m_length(control, &n);
931 
932 	if (space > sbspace(sb))
933 		return (0);
934 #if MSIZE <= 256
935 	if (asa->sa_len > MLEN)
936 		return (0);
937 #endif
938 	MGET(m, M_DONTWAIT, MT_SONAME);
939 	if (m == 0)
940 		return (0);
941 	m->m_len = asa->sa_len;
942 	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
943 	if (n)
944 		n->m_next = m0;		/* concatenate data to control */
945 	else
946 		control = m0;
947 	m->m_next = control;
948 	for (n = m; n->m_next != NULL; n = n->m_next)
949 		sballoc(sb, n);
950 	sballoc(sb, n);
951 	nlast = n;
952 	SBLINKRECORD(sb, m);
953 
954 	sb->sb_mbtail = nlast;
955 	SBLASTMBUFCHK(sb);
956 
957 	SBLASTRECORDCHK(sb);
958 	return (1);
959 }
960 
961 /*
962  * Append address and data, and optionally, control (ancillary) data
963  * to the receive queue of a socket.  If present,
964  * m0 must include a packet header with total length.
965  * Returns 0 if no space in sockbuf or insufficient mbufs.
966  */
967 int
968 sbappendaddr(sb, asa, m0, control)
969 	struct sockbuf *sb;
970 	const struct sockaddr *asa;
971 	struct mbuf *m0, *control;
972 {
973 	int retval;
974 
975 	SOCKBUF_LOCK(sb);
976 	retval = sbappendaddr_locked(sb, asa, m0, control);
977 	SOCKBUF_UNLOCK(sb);
978 	return (retval);
979 }
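
/*
 * Illustrative sketch: a datagram protocol delivering one packet to
 * the receive buffer as a record headed by the sender's name, per the
 * conventions described earlier in this file.  The example_* function
 * is hypothetical and compiled out.
 */
#if 0
static void
example_dgram_deliver(struct socket *so, struct sockaddr *from,
    struct mbuf *m, struct mbuf *control)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, from, m, control) == 0) {
		/* No room or no mbufs: the datagram is dropped. */
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);
		if (control != NULL)
			m_freem(control);
		return;
	}
	sorwakeup_locked(so);
}
#endif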
980 
981 int
982 sbappendcontrol_locked(sb, m0, control)
983 	struct sockbuf *sb;
984 	struct mbuf *control, *m0;
985 {
986 	struct mbuf *m, *n, *mlast;
987 	int space;
988 
989 	SOCKBUF_LOCK_ASSERT(sb);
990 
991 	if (control == 0)
992 		panic("sbappendcontrol_locked");
993 	space = m_length(control, &n) + m_length(m0, NULL);
994 
995 	if (space > sbspace(sb))
996 		return (0);
997 	n->m_next = m0;			/* concatenate data to control */
998 
999 	SBLASTRECORDCHK(sb);
1000 
1001 	for (m = control; m->m_next; m = m->m_next)
1002 		sballoc(sb, m);
1003 	sballoc(sb, m);
1004 	mlast = m;
1005 	SBLINKRECORD(sb, control);
1006 
1007 	sb->sb_mbtail = mlast;
1008 	SBLASTMBUFCHK(sb);
1009 
1010 	SBLASTRECORDCHK(sb);
1011 	return (1);
1012 }
1013 
1014 int
1015 sbappendcontrol(sb, m0, control)
1016 	struct sockbuf *sb;
1017 	struct mbuf *control, *m0;
1018 {
1019 	int retval;
1020 
1021 	SOCKBUF_LOCK(sb);
1022 	retval = sbappendcontrol_locked(sb, m0, control);
1023 	SOCKBUF_UNLOCK(sb);
1024 	return (retval);
1025 }
1026 
1027 /*
1028  * Compress mbuf chain m into the socket
1029  * buffer sb following mbuf n.  If n
1030  * is null, the buffer is presumed empty.
1031  */
1032 void
1033 sbcompress(sb, m, n)
1034 	register struct sockbuf *sb;
1035 	register struct mbuf *m, *n;
1036 {
1037 	register int eor = 0;
1038 	register struct mbuf *o;
1039 
1040 	SOCKBUF_LOCK_ASSERT(sb);
1041 
1042 	while (m) {
1043 		eor |= m->m_flags & M_EOR;
1044 		if (m->m_len == 0 &&
1045 		    (eor == 0 ||
1046 		     (((o = m->m_next) || (o = n)) &&
1047 		      o->m_type == m->m_type))) {
1048 			if (sb->sb_lastrecord == m)
1049 				sb->sb_lastrecord = m->m_next;
1050 			m = m_free(m);
1051 			continue;
1052 		}
1053 		if (n && (n->m_flags & M_EOR) == 0 &&
1054 		    M_WRITABLE(n) &&
1055 		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
1056 		    m->m_len <= M_TRAILINGSPACE(n) &&
1057 		    n->m_type == m->m_type) {
1058 			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
1059 			    (unsigned)m->m_len);
1060 			n->m_len += m->m_len;
1061 			sb->sb_cc += m->m_len;
1062 			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1063 			    m->m_type != MT_OOBDATA)
1064 				/* XXX: Probably don't need.*/
1065 				sb->sb_ctl += m->m_len;
1066 			m = m_free(m);
1067 			continue;
1068 		}
1069 		if (n)
1070 			n->m_next = m;
1071 		else
1072 			sb->sb_mb = m;
1073 		sb->sb_mbtail = m;
1074 		sballoc(sb, m);
1075 		n = m;
1076 		m->m_flags &= ~M_EOR;
1077 		m = m->m_next;
1078 		n->m_next = 0;
1079 	}
1080 	if (eor) {
1081 		if (n)
1082 			n->m_flags |= eor;
1083 		else
1084 			printf("semi-panic: sbcompress\n");
1085 	}
1086 	SBLASTMBUFCHK(sb);
1087 }
1088 
1089 /*
1090  * Free all mbufs in a sockbuf.
1091  * Check that all resources are reclaimed.
1092  */
1093 void
1094 sbflush_locked(sb)
1095 	register struct sockbuf *sb;
1096 {
1097 
1098 	SOCKBUF_LOCK_ASSERT(sb);
1099 
1100 	if (sb->sb_flags & SB_LOCK)
1101 		panic("sbflush_locked: locked");
1102 	while (sb->sb_mbcnt) {
1103 		/*
1104 		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
1105 		 * we would loop forever. Panic instead.
1106 		 */
1107 		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
1108 			break;
1109 		sbdrop_locked(sb, (int)sb->sb_cc);
1110 	}
1111 	if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
1112 		panic("sbflush_locked: cc %u || mb %p || mbcnt %u", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
1113 }
1114 
1115 void
1116 sbflush(sb)
1117 	register struct sockbuf *sb;
1118 {
1119 
1120 	SOCKBUF_LOCK(sb);
1121 	sbflush_locked(sb);
1122 	SOCKBUF_UNLOCK(sb);
1123 }
1124 
1125 /*
1126  * Drop data from (the front of) a sockbuf.
1127  */
1128 void
1129 sbdrop_locked(sb, len)
1130 	register struct sockbuf *sb;
1131 	register int len;
1132 {
1133 	register struct mbuf *m;
1134 	struct mbuf *next;
1135 
1136 	SOCKBUF_LOCK_ASSERT(sb);
1137 
1138 	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
1139 	while (len > 0) {
1140 		if (m == 0) {
1141 			if (next == 0)
1142 				panic("sbdrop");
1143 			m = next;
1144 			next = m->m_nextpkt;
1145 			continue;
1146 		}
1147 		if (m->m_len > len) {
1148 			m->m_len -= len;
1149 			m->m_data += len;
1150 			sb->sb_cc -= len;
1151 			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
1152 			    m->m_type != MT_OOBDATA)
1153 				sb->sb_ctl -= len;
1154 			break;
1155 		}
1156 		len -= m->m_len;
1157 		sbfree(sb, m);
1158 		m = m_free(m);
1159 	}
1160 	while (m && m->m_len == 0) {
1161 		sbfree(sb, m);
1162 		m = m_free(m);
1163 	}
1164 	if (m) {
1165 		sb->sb_mb = m;
1166 		m->m_nextpkt = next;
1167 	} else
1168 		sb->sb_mb = next;
1169 	/*
1170 	 * First part is an inline SB_EMPTY_FIXUP().  Second part
1171 	 * makes sure sb_lastrecord is up-to-date if we dropped
1172 	 * part of the last record.
1173 	 */
1174 	m = sb->sb_mb;
1175 	if (m == NULL) {
1176 		sb->sb_mbtail = NULL;
1177 		sb->sb_lastrecord = NULL;
1178 	} else if (m->m_nextpkt == NULL) {
1179 		sb->sb_lastrecord = m;
1180 	}
1181 }
1182 
1183 /*
1184  * Drop data from (the front of) a sockbuf.
1185  */
1186 void
1187 sbdrop(sb, len)
1188 	register struct sockbuf *sb;
1189 	register int len;
1190 {
1191 
1192 	SOCKBUF_LOCK(sb);
1193 	sbdrop_locked(sb, len);
1194 	SOCKBUF_UNLOCK(sb);
1195 }
1196 
1197 /*
1198  * Drop a record off the front of a sockbuf
1199  * and move the next record to the front.
1200  */
1201 void
1202 sbdroprecord_locked(sb)
1203 	register struct sockbuf *sb;
1204 {
1205 	register struct mbuf *m;
1206 
1207 	SOCKBUF_LOCK_ASSERT(sb);
1208 
1209 	m = sb->sb_mb;
1210 	if (m) {
1211 		sb->sb_mb = m->m_nextpkt;
1212 		do {
1213 			sbfree(sb, m);
1214 			m = m_free(m);
1215 		} while (m);
1216 	}
1217 	SB_EMPTY_FIXUP(sb);
1218 }
1219 
1220 /*
1221  * Drop a record off the front of a sockbuf
1222  * and move the next record to the front.
1223  */
1224 void
1225 sbdroprecord(sb)
1226 	register struct sockbuf *sb;
1227 {
1228 
1229 	SOCKBUF_LOCK(sb);
1230 	sbdroprecord_locked(sb);
1231 	SOCKBUF_UNLOCK(sb);
1232 }
1233 
1234 /*
1235  * Create a "control" mbuf containing the specified data
1236  * with the specified type for presentation on a socket buffer.
1237  */
1238 struct mbuf *
1239 sbcreatecontrol(p, size, type, level)
1240 	caddr_t p;
1241 	register int size;
1242 	int type, level;
1243 {
1244 	register struct cmsghdr *cp;
1245 	struct mbuf *m;
1246 
1247 	if (CMSG_SPACE((u_int)size) > MCLBYTES)
1248 		return ((struct mbuf *) NULL);
1249 	if (CMSG_SPACE((u_int)size) > MLEN)
1250 		m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
1251 	else
1252 		m = m_get(M_DONTWAIT, MT_CONTROL);
1253 	if (m == NULL)
1254 		return ((struct mbuf *) NULL);
1255 	cp = mtod(m, struct cmsghdr *);
1256 	m->m_len = 0;
1257 	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
1258 	    ("sbcreatecontrol: short mbuf"));
1259 	if (p != NULL)
1260 		(void)memcpy(CMSG_DATA(cp), p, size);
1261 	m->m_len = CMSG_SPACE(size);
1262 	cp->cmsg_len = CMSG_LEN(size);
1263 	cp->cmsg_level = level;
1264 	cp->cmsg_type = type;
1265 	return (m);
1266 }
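
/*
 * Illustrative sketch: building a control mbuf carrying a receive
 * timestamp, roughly what a datagram protocol does when SO_TIMESTAMP
 * is set on the receiving socket, before handing the result to
 * sbappendaddr().  Assumes <sys/time.h> for microtime(); the
 * example_* function is hypothetical and compiled out.
 */
#if 0
static struct mbuf *
example_make_timestamp_control(void)
{
	struct timeval tv;

	microtime(&tv);
	/* Returns NULL if the cmsg would not fit in an mbuf cluster. */
	return (sbcreatecontrol((caddr_t)&tv, sizeof(tv),
	    SCM_TIMESTAMP, SOL_SOCKET));
}
#endif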
1267 
1268 /*
1269  * Some routines that return EOPNOTSUPP for entry points that are not
1270  * supported by a protocol.  Fill in as needed.
1271  */
1272 int
1273 pru_abort_notsupp(struct socket *so)
1274 {
1275 	return EOPNOTSUPP;
1276 }
1277 
1278 int
1279 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
1280 {
1281 	return EOPNOTSUPP;
1282 }
1283 
1284 int
1285 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
1286 {
1287 	return EOPNOTSUPP;
1288 }
1289 
1290 int
1291 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
1292 {
1293 	return EOPNOTSUPP;
1294 }
1295 
1296 int
1297 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
1298 {
1299 	return EOPNOTSUPP;
1300 }
1301 
1302 int
1303 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
1304 {
1305 	return EOPNOTSUPP;
1306 }
1307 
1308 int
1309 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
1310 	struct ifnet *ifp, struct thread *td)
1311 {
1312 	return EOPNOTSUPP;
1313 }
1314 
1315 int
1316 pru_detach_notsupp(struct socket *so)
1317 {
1318 	return EOPNOTSUPP;
1319 }
1320 
1321 int
1322 pru_disconnect_notsupp(struct socket *so)
1323 {
1324 	return EOPNOTSUPP;
1325 }
1326 
1327 int
1328 pru_listen_notsupp(struct socket *so, struct thread *td)
1329 {
1330 	return EOPNOTSUPP;
1331 }
1332 
1333 int
1334 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
1335 {
1336 	return EOPNOTSUPP;
1337 }
1338 
1339 int
1340 pru_rcvd_notsupp(struct socket *so, int flags)
1341 {
1342 	return EOPNOTSUPP;
1343 }
1344 
1345 int
1346 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
1347 {
1348 	return EOPNOTSUPP;
1349 }
1350 
1351 int
1352 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
1353 	struct sockaddr *addr, struct mbuf *control, struct thread *td)
1354 {
1355 	return EOPNOTSUPP;
1356 }
1357 
1358 /*
1359  * This isn't really a ``null'' operation, but it's the default one
1360  * and doesn't do anything destructive.
1361  */
1362 int
1363 pru_sense_null(struct socket *so, struct stat *sb)
1364 {
1365 	sb->st_blksize = so->so_snd.sb_hiwat;
1366 	return 0;
1367 }
1368 
1369 int
1370 pru_shutdown_notsupp(struct socket *so)
1371 {
1372 	return EOPNOTSUPP;
1373 }
1374 
1375 int
1376 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
1377 {
1378 	return EOPNOTSUPP;
1379 }
1380 
1381 int
1382 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
1383 	struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1384 {
1385 	return EOPNOTSUPP;
1386 }
1387 
1388 int
1389 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
1390 	struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
1391 	int *flagsp)
1392 {
1393 	return EOPNOTSUPP;
1394 }
1395 
1396 int
1397 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
1398 	struct thread *td)
1399 {
1400 	return EOPNOTSUPP;
1401 }
1402 
1403 /*
1404  * For protocol types that don't keep cached copies of labels in their
1405  * pcbs, provide a null sosetlabel that does a NOOP.
1406  */
1407 void
1408 pru_sosetlabel_null(struct socket *so)
1409 {
1410 
1411 }
1412 
1413 /*
1414  * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
1415  */
1416 struct sockaddr *
1417 sodupsockaddr(const struct sockaddr *sa, int mflags)
1418 {
1419 	struct sockaddr *sa2;
1420 
1421 	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
1422 	if (sa2)
1423 		bcopy(sa, sa2, sa->sa_len);
1424 	return sa2;
1425 }
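
/*
 * Illustrative sketch: a protocol's peeraddr/sockaddr entry point
 * typically hands back a private copy of a cached address, which the
 * socket layer later frees with free(..., M_SONAME).  The example_*
 * function and its "cached" argument are hypothetical; compiled out.
 */
#if 0
static int
example_pru_peeraddr(struct socket *so, struct sockaddr **nam,
    const struct sockaddr *cached)
{

	*nam = sodupsockaddr(cached, M_NOWAIT);
	return (*nam != NULL ? 0 : ENOBUFS);
}
#endif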
1426 
1427 /*
1428  * Create an external-format (``xsocket'') structure using the information
1429  * in the kernel-format socket structure pointed to by so.  This is done
1430  * to reduce the spew of irrelevant information over this interface,
1431  * to isolate user code from changes in the kernel structure, and
1432  * potentially to provide information-hiding if we decide that
1433  * some of this information should be hidden from users.
1434  */
1435 void
1436 sotoxsocket(struct socket *so, struct xsocket *xso)
1437 {
1438 	xso->xso_len = sizeof *xso;
1439 	xso->xso_so = so;
1440 	xso->so_type = so->so_type;
1441 	xso->so_options = so->so_options;
1442 	xso->so_linger = so->so_linger;
1443 	xso->so_state = so->so_state;
1444 	xso->so_pcb = so->so_pcb;
1445 	xso->xso_protocol = so->so_proto->pr_protocol;
1446 	xso->xso_family = so->so_proto->pr_domain->dom_family;
1447 	xso->so_qlen = so->so_qlen;
1448 	xso->so_incqlen = so->so_incqlen;
1449 	xso->so_qlimit = so->so_qlimit;
1450 	xso->so_timeo = so->so_timeo;
1451 	xso->so_error = so->so_error;
1452 	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
1453 	xso->so_oobmark = so->so_oobmark;
1454 	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
1455 	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
1456 	xso->so_uid = so->so_cred->cr_uid;
1457 }
1458 
1459 /*
1460  * This does the same for sockbufs.  Note that the xsockbuf structure,
1461  * since it is always embedded in a socket, does not include a self
1462  * pointer nor a length.  We make this entry point public in case
1463  * some other mechanism needs it.
1464  */
1465 void
1466 sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
1467 {
1468 	xsb->sb_cc = sb->sb_cc;
1469 	xsb->sb_hiwat = sb->sb_hiwat;
1470 	xsb->sb_mbcnt = sb->sb_mbcnt;
1471 	xsb->sb_mbmax = sb->sb_mbmax;
1472 	xsb->sb_lowat = sb->sb_lowat;
1473 	xsb->sb_flags = sb->sb_flags;
1474 	xsb->sb_timeo = sb->sb_timeo;
1475 }
1476 
1477 /*
1478  * Here is the definition of some of the basic objects in the kern.ipc
1479  * branch of the MIB.
1480  */
1481 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
1482 
1483 /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
1484 static int dummy;
1485 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
1486 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
1487     &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
1488 SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RDTUN,
1489     &maxsockets, 0, "Maximum number of sockets available");
1490 SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
1491     &sb_efficiency, 0, "");
1492 
1493 /*
1494  * Initialise maxsockets
1495  */
1496 static void init_maxsockets(void *ignored)
1497 {
1498 	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
1499 	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
1500 }
1501 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
1502