xref: /freebsd/sys/netinet/in_pcb.h (revision 0957b409a90fd597c1e9124cbaf3edd2b488f4ac)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1990, 1993
5  *	The Regents of the University of California.
6  * Copyright (c) 2010-2011 Juniper Networks, Inc.
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Robert N. M. Watson under
10  * contract to Juniper Networks, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)in_pcb.h	8.1 (Berkeley) 6/10/93
37  * $FreeBSD$
38  */
39 
40 #ifndef _NETINET_IN_PCB_H_
41 #define _NETINET_IN_PCB_H_
42 
43 #include <sys/queue.h>
44 #include <sys/epoch.h>
45 #include <sys/_lock.h>
46 #include <sys/_mutex.h>
47 #include <sys/_rwlock.h>
48 #include <net/route.h>
49 
50 #ifdef _KERNEL
51 #include <sys/lock.h>
52 #include <sys/rwlock.h>
53 #include <net/vnet.h>
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <vm/uma.h>
57 #endif
58 #include <sys/ck.h>
59 
60 #define	in6pcb		inpcb	/* for KAME src sync over BSD*'s */
61 #define	in6p_sp		inp_sp	/* for KAME src sync over BSD*'s */
62 
63 /*
64  * struct inpcb is the common protocol control block structure used in most
65  * IP transport protocols.
66  *
67  * Pointers to local and foreign host table entries, local and foreign socket
68  * numbers, and pointers up (to a socket structure) and down (to a
69  * protocol-specific control block) are stored here.
70  */
71 CK_LIST_HEAD(inpcbhead, inpcb);
72 CK_LIST_HEAD(inpcbporthead, inpcbport);
73 CK_LIST_HEAD(inpcblbgrouphead, inpcblbgroup);
74 typedef	uint64_t	inp_gen_t;
75 
76 /*
77  * A PCB bound to the AF_INET6 null (unspecified) local address can also
78  * receive AF_INET packets.  The AF_INET6 null laddr is therefore also used
79  * as the AF_INET null laddr, by utilizing the following structure.
80  */
81 struct in_addr_4in6 {
82 	u_int32_t	ia46_pad32[3];
83 	struct	in_addr	ia46_addr4;
84 };
85 
86 union in_dependaddr {
87 	struct in_addr_4in6 id46_addr;
88 	struct in6_addr	id6_addr;
89 };
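
/*
 * Layout sketch (illustrative only, not part of the KPI): both union members
 * are 16 bytes, and ia46_addr4 overlays the last 32 bits of the IPv6 address,
 * which is what lets the AF_INET6 null laddr stand in for the AF_INET one.
 * Assuming the kernel-only s6_addr32 accessor from netinet6/in6.h:
 *
 *	union in_dependaddr da;
 *
 *	bzero(&da, sizeof(da));
 *	da.id46_addr.ia46_addr4.s_addr = htonl(INADDR_LOOPBACK);
 *	KASSERT(da.id6_addr.s6_addr32[3] == htonl(INADDR_LOOPBACK),
 *	    ("4in6 overlay mismatch"));
 */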
90 
91 /*
92  * NOTE: ipv6 addrs should be 64-bit aligned, per RFC 2553.  in_conninfo has
93  * some extra padding to accomplish this.
94  * NOTE 2: tcp_syncache.c uses the first 5 32-bit words, which identify fport,
95  * lport and faddr, to generate its hash, so these fields shouldn't be moved.
96  */
97 struct in_endpoints {
98 	u_int16_t	ie_fport;		/* foreign port */
99 	u_int16_t	ie_lport;		/* local port */
100 	/* protocol dependent part, local and foreign addr */
101 	union in_dependaddr ie_dependfaddr;	/* foreign host table entry */
102 	union in_dependaddr ie_dependladdr;	/* local host table entry */
103 #define	ie_faddr	ie_dependfaddr.id46_addr.ia46_addr4
104 #define	ie_laddr	ie_dependladdr.id46_addr.ia46_addr4
105 #define	ie6_faddr	ie_dependfaddr.id6_addr
106 #define	ie6_laddr	ie_dependladdr.id6_addr
107 	u_int32_t	ie6_zoneid;		/* scope zone id */
108 };
109 
110 /*
111  * XXX The defines for inc_* are hacks and should be changed to direct
112  * references.
113  */
114 struct in_conninfo {
115 	u_int8_t	inc_flags;
116 	u_int8_t	inc_len;
117 	u_int16_t	inc_fibnum;	/* XXX was pad, 16 bits is plenty */
118 	/* protocol dependent part */
119 	struct	in_endpoints inc_ie;
120 };
121 
122 /*
123  * Flags for inc_flags.
124  */
125 #define	INC_ISIPV6	0x01
126 #define	INC_IPV6MINMTU	0x02
127 
128 #define	inc_fport	inc_ie.ie_fport
129 #define	inc_lport	inc_ie.ie_lport
130 #define	inc_faddr	inc_ie.ie_faddr
131 #define	inc_laddr	inc_ie.ie_laddr
132 #define	inc6_faddr	inc_ie.ie6_faddr
133 #define	inc6_laddr	inc_ie.ie6_laddr
134 #define	inc6_zoneid	inc_ie.ie6_zoneid
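
/*
 * For illustration only: the inc_* names simply alias fields inside inc_ie,
 * so the two statements below are equivalent (inc is a hypothetical
 * struct in_conninfo pointer); per the note above, new code should prefer
 * the direct reference.
 *
 *	fport = inc->inc_fport;
 *	fport = inc->inc_ie.ie_fport;
 */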
135 
136 #if defined(_KERNEL) || defined(_WANT_INPCB)
137 /*
138  * struct inpcb captures the network layer state for TCP, UDP, and raw IPv4 and
139  * IPv6 sockets.  In the case of TCP and UDP, further per-connection state is
140  * hung off of inp_ppcb most of the time.  Almost all fields of struct inpcb
141  * are static after creation or protected by a per-inpcb rwlock, inp_lock.  A
142  * few fields are protected by multiple locks as indicated in the locking notes
143  * below.  For these fields, all of the listed locks must be write-locked for
144  * any modifications.  However, these fields can be safely read while any one of
145  * the listed locks is read-locked.  This model can permit greater concurrency
146  * for read operations.  For example, connections can be looked up while only
147  * holding a read lock on the global pcblist lock.  This is important for
148  * performance when attempting to find the connection for a packet given its IP
149  * and port tuple.
150  *
151  * One noteworthy exception is that the global pcbinfo lock follows a different
152  * set of rules in relation to the inp_list field.  Rather than being
153  * write-locked for modifications and read-locked for list iterations, it must
154  * be read-locked during modifications and write-locked during list iterations.
155  * This ensures that the relatively rare global list iterations safely walk a
156  * stable snapshot of connections while allowing more common list modifications
157  * to safely grab the pcblist lock just while adding or removing a connection
158  * from the global list.
159  *
160  * Key:
161  * (b) - Protected by the hpts lock.
162  * (c) - Constant after initialization
163  * (e) - Protected by the net_epoch_preempt epoch
164  * (g) - Protected by the pcbgroup lock
165  * (i) - Protected by the inpcb lock
166  * (p) - Protected by the pcbinfo lock for the inpcb
167  * (l) - Protected by the pcblist lock for the inpcb
168  * (h) - Protected by the pcbhash lock for the inpcb
169  * (s) - Protected by another subsystem's locks
170  * (x) - Undefined locking
171  *
172  * Notes on the tcp_hpts:
173  *
174  * First, the hpts lock order is:
175  * 1) INP_WLOCK()
176  * 2) HPTS_LOCK() i.e. hpts->pmtx
177  *
178  * To insert a TCB on the hpts you *must* be holding the INP_WLOCK().
179  * You may check the inp->inp_in_hpts flag without the hpts lock.
180  * The hpts is the only one that will clear this flag, and it does
181  * so holding only the hpts lock.  This means that when your
182  * tcp_output() routine tests the inp_in_hpts flag and sees 1,
183  * the flag may already be transitioning to 0 (by the hpts).
184  * That's ok, since it will just mean an extra call to tcp_output
185  * that will most likely find that the call you executed
186  * (when the mismatch occurred) has already put the TCB back
187  * on the hpts, and the extra call will simply return.  If your
188  * call did not add the inp back to the hpts then you will either
189  * over-send or the cwnd will block you from sending more.
190  *
191  * Note that you should also be holding the INP_WLOCK() when you
192  * call the remove from the hpts as well.  Usually, though,
193  * you are either doing this from a timer, where you need and have
194  * the INP_WLOCK(), or from destroying your TCB, where again
195  * you should already have the INP_WLOCK().
196  *
197  * The inp_hpts_cpu, inp_hpts_cpu_set, inp_input_cpu and
198  * inp_input_cpu_set fields are controlled completely by
199  * the hpts. Do not ever set these. The inp_hpts_cpu_set
200  * and inp_input_cpu_set fields indicate if the hpts has
201  * set up the respective cpu field.  If this field is 0, it
202  * is advised to enqueue the packet with the appropriate
203  * hpts_immediate() call.  If the _set field is 1, then
204  * you may compare the inp_*_cpu field to the curcpu and
205  * may want to again insert onto the hpts if these fields
206  * are not equal (i.e. you are not on the expected CPU).
207  *
208  * A note on inp_hpts_calls and inp_input_calls: these
209  * flags are set when the hpts calls either the output
210  * or do_segment routines, respectively.  If the routine
211  * being called wants to use this, then it needs to
212  * clear the flag before returning. The hpts will not
213  * clear the flag. The flags can be used to tell if
214  * the hpts is the function calling the respective
215  * routine.
216  *
217  * A few other notes:
218  *
219  * When a read lock is held, stability of the field is guaranteed; to write
220  * to a field, a write lock must generally be held.
221  *
222  * netinet/netinet6-layer code should not assume that the inp_socket pointer
223  * is safe to dereference without inp_lock being held, even for protocols
224  * other than TCP (where the inpcb persists during TIMEWAIT even after the
225  * socket has been freed), or there may be close(2)-related races.
226  *
227  * The inp_vflag field is overloaded, and would otherwise ideally be (c).
228  *
229  * TODO:  Currently only the TCP stack takes advantage of read-locking the
230  * global pcbinfo lock during modifications; this model could be applied to
231  * other protocols (especially SCTP).
232  */
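
/*
 * Locking sketch for the hpts notes above (illustrative only; the real
 * insert/remove KPI lives in sys/netinet/tcp_hpts.h, and
 * example_hpts_enqueue() below is a hypothetical placeholder for it that
 * would take the hpts lock internally):
 *
 *	INP_WLOCK(inp);
 *	if (inp->inp_in_hpts == 0)
 *		example_hpts_enqueue(inp);
 *	INP_WUNLOCK(inp);
 *
 * Checking inp_in_hpts without the hpts lock is allowed, as described above;
 * only the hpts itself clears that flag.
 */
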
233 struct icmp6_filter;
234 struct inpcbpolicy;
235 struct m_snd_tag;
236 struct inpcb {
237 	/* Cache line #1 (amd64) */
238 	CK_LIST_ENTRY(inpcb) inp_hash;	/* [w](h/i) [r](e/i)  hash list */
239 	CK_LIST_ENTRY(inpcb) inp_pcbgrouphash;	/* (g/i) hash list */
240 	struct rwlock	inp_lock;
241 	/* Cache line #2 (amd64) */
242 #define	inp_start_zero	inp_hpts
243 #define	inp_zero_size	(sizeof(struct inpcb) - \
244 			    offsetof(struct inpcb, inp_start_zero))
245 	TAILQ_ENTRY(inpcb) inp_hpts;	/* pacing out queue next lock(b) */
246 
247 	uint32_t inp_hpts_request;	/* Current hpts request, zero if
248 					 * fits in the pacing window (i&b). */
249 	/*
250 	 * Note the next fields are protected by a
251 	 * different lock (hpts-lock). This means that
252 	 * they must correspond in size to the smallest
253 	 * protectable bit field (uint8_t on x86, and
254 	 * other platforms potentially uint32_t?). Also,
255 	 * since CPU switches can occur at different times, the two
256 	 * fields can *not* be collapsed into a single bit field.
257 	 */
258 #if defined(__amd64__) || defined(__i386__)
259 	volatile uint8_t inp_in_hpts; /* on output hpts (lock b) */
260 	volatile uint8_t inp_in_input; /* on input hpts (lock b) */
261 #else
262 	volatile uint32_t inp_in_hpts; /* on output hpts (lock b) */
263 	volatile uint32_t inp_in_input; /* on input hpts (lock b) */
264 #endif
265 	volatile uint16_t  inp_hpts_cpu; /* Lock (i) */
266 	u_int	inp_refcount;		/* (i) refcount */
267 	int	inp_flags;		/* (i) generic IP/datagram flags */
268 	int	inp_flags2;		/* (i) generic IP/datagram flags #2*/
269 	volatile uint16_t  inp_input_cpu; /* Lock (i) */
270 	volatile uint8_t inp_hpts_cpu_set :1,  /* on output hpts (i) */
271 			 inp_input_cpu_set : 1,	/* on input hpts (i) */
272 			 inp_hpts_calls :1,	/* (i) from output hpts */
273 			 inp_input_calls :1,	/* (i) from input hpts */
274 			 inp_spare_bits2 : 4;
275 	uint8_t inp_spare_byte;		/* Compiler hole */
276 	void	*inp_ppcb;		/* (i) pointer to per-protocol pcb */
277 	struct	socket *inp_socket;	/* (i) back pointer to socket */
278 	uint32_t 	 inp_hptsslot;	/* Hpts wheel slot this tcb is on Lock(i&b) */
279 	uint32_t         inp_hpts_drop_reas;	/* reason we are dropping the PCB (lock i&b) */
280 	TAILQ_ENTRY(inpcb) inp_input;	/* pacing in  queue next lock(b) */
281 	struct	inpcbinfo *inp_pcbinfo;	/* (c) PCB list info */
282 	struct	inpcbgroup *inp_pcbgroup; /* (g/i) PCB group list */
283 	CK_LIST_ENTRY(inpcb) inp_pcbgroup_wild; /* (g/i/h) group wildcard entry */
284 	struct	ucred	*inp_cred;	/* (c) cache of socket cred */
285 	u_int32_t inp_flow;		/* (i) IPv6 flow information */
286 	u_char	inp_vflag;		/* (i) IP version flag (v4/v6) */
287 	u_char	inp_ip_ttl;		/* (i) time to live proto */
288 	u_char	inp_ip_p;		/* (c) protocol proto */
289 	u_char	inp_ip_minttl;		/* (i) minimum TTL or drop */
290 	uint32_t inp_flowid;		/* (x) flow id / queue id */
291 	struct m_snd_tag *inp_snd_tag;	/* (i) send tag for outgoing mbufs */
292 	uint32_t inp_flowtype;		/* (x) M_HASHTYPE value */
293 	uint32_t inp_rss_listen_bucket;	/* (x) overridden RSS listen bucket */
294 
295 	/* Local and foreign ports, local and foreign addr. */
296 	struct	in_conninfo inp_inc;	/* (i) list for PCB's local port */
297 
298 	/* MAC and IPSEC policy information. */
299 	struct	label *inp_label;	/* (i) MAC label */
300 	struct	inpcbpolicy *inp_sp;    /* (s) for IPSEC */
301 
302 	/* Protocol-dependent part; options. */
303 	struct {
304 		u_char	inp_ip_tos;		/* (i) type of service proto */
305 		struct mbuf		*inp_options;	/* (i) IP options */
306 		struct ip_moptions	*inp_moptions;	/* (i) mcast options */
307 	};
308 	struct {
309 		/* (i) IP options */
310 		struct mbuf		*in6p_options;
311 		/* (i) IP6 options for outgoing packets */
312 		struct ip6_pktopts	*in6p_outputopts;
313 		/* (i) IP multicast options */
314 		struct ip6_moptions	*in6p_moptions;
315 		/* (i) ICMPv6 code type filter */
316 		struct icmp6_filter	*in6p_icmp6filt;
317 		/* (i) IPV6_CHECKSUM setsockopt */
318 		int	in6p_cksum;
319 		short	in6p_hops;
320 	};
321 	CK_LIST_ENTRY(inpcb) inp_portlist;	/* (i/h) */
322 	struct	inpcbport *inp_phd;	/* (i/h) head of this list */
323 	inp_gen_t	inp_gencnt;	/* (c) generation count */
324 	void		*spare_ptr;	/* Spare pointer. */
325 	rt_gen_t	inp_rt_cookie;	/* generation for route entry */
326 	union {				/* cached L3 information */
327 		struct route inp_route;
328 		struct route_in6 inp_route6;
329 	};
330 	CK_LIST_ENTRY(inpcb) inp_list;	/* (p/l) list for all PCBs for proto */
331 	                                /* (e[r]) for list iteration */
332 	                                /* (p[w]/l) for addition/removal */
333 	struct epoch_context inp_epoch_ctx;
334 };
335 #endif	/* _KERNEL || _WANT_INPCB */
336 
337 #define	inp_fport	inp_inc.inc_fport
338 #define	inp_lport	inp_inc.inc_lport
339 #define	inp_faddr	inp_inc.inc_faddr
340 #define	inp_laddr	inp_inc.inc_laddr
341 
342 #define	in6p_faddr	inp_inc.inc6_faddr
343 #define	in6p_laddr	inp_inc.inc6_laddr
344 #define	in6p_zoneid	inp_inc.inc6_zoneid
345 #define	in6p_flowinfo	inp_flow
346 
347 #define	inp_vnet	inp_pcbinfo->ipi_vnet
348 
349 /*
350  * The range of the generation count, as used in this implementation, is 9e19.
351  * We would have to create 300 billion connections per second for this number
352  * to roll over in a year.  This seems sufficiently unlikely that we simply
353  * don't concern ourselves with that possibility.
354  */
355 
356 /*
357  * Interface exported to userland by various protocols which use inpcbs.  Hack
358  * alert -- only define if struct xsocket is in scope.
359  * Fields prefixed with "xi_" are unique to this structure, and the rest
360  * match fields in the struct inpcb, to ease coding and porting.
361  *
362  * Legend:
363  * (s) - used by userland utilities in src
364  * (p) - used by utilities in ports
365  * (3) - is known to be used by third party software not in ports
366  * (n) - no known usage
367  */
368 #ifdef _SYS_SOCKETVAR_H_
369 struct xinpcb {
370 	ksize_t		xi_len;			/* length of this structure */
371 	struct xsocket	xi_socket;		/* (s,p) */
372 	struct in_conninfo inp_inc;		/* (s,p) */
373 	uint64_t	inp_gencnt;		/* (s,p) */
374 	kvaddr_t	inp_ppcb;		/* (s) netstat(1) */
375 	int64_t		inp_spare64[4];
376 	uint32_t	inp_flow;		/* (s) */
377 	uint32_t	inp_flowid;		/* (s) */
378 	uint32_t	inp_flowtype;		/* (s) */
379 	int32_t		inp_flags;		/* (s,p) */
380 	int32_t		inp_flags2;		/* (s) */
381 	int32_t		inp_rss_listen_bucket;	/* (n) */
382 	int32_t		in6p_cksum;		/* (n) */
383 	int32_t		inp_spare32[4];
384 	uint16_t	in6p_hops;		/* (n) */
385 	uint8_t		inp_ip_tos;		/* (n) */
386 	int8_t		pad8;
387 	uint8_t		inp_vflag;		/* (s,p) */
388 	uint8_t		inp_ip_ttl;		/* (n) */
389 	uint8_t		inp_ip_p;		/* (n) */
390 	uint8_t		inp_ip_minttl;		/* (n) */
391 	int8_t		inp_spare8[4];
392 } __aligned(8);
393 
394 struct xinpgen {
395 	ksize_t	xig_len;	/* length of this structure */
396 	u_int		xig_count;	/* number of PCBs at this time */
397 	uint32_t	_xig_spare32;
398 	inp_gen_t	xig_gen;	/* generation count at this time */
399 	so_gen_t	xig_sogen;	/* socket generation count this time */
400 	uint64_t	_xig_spare64[4];
401 } __aligned(8);
402 #ifdef	_KERNEL
403 void	in_pcbtoxinpcb(const struct inpcb *, struct xinpcb *);
404 #endif
405 #endif /* _SYS_SOCKETVAR_H_ */
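
/*
 * Userland consumption sketch (illustrative only): utilities such as
 * netstat(1) fetch these structures through a pcblist sysctl (for example
 * "net.inet.udp.pcblist").  The buffer starts and ends with a struct
 * xinpgen; each record in between begins with its length, so the walk is
 * (assuming buf/len were returned by sysctlbyname(3)):
 *
 *	struct xinpgen *xig, *end;
 *	struct xinpcb *xip;
 *
 *	xig = (struct xinpgen *)buf;
 *	end = (struct xinpgen *)(buf + len);
 *	for (xig = (struct xinpgen *)((char *)xig + xig->xig_len);
 *	    xig < end && xig->xig_len > sizeof(*xig);
 *	    xig = (struct xinpgen *)((char *)xig + xig->xig_len)) {
 *		xip = (struct xinpcb *)xig;
 *		... examine xip->xi_socket, xip->inp_inc, etc. ...
 *	}
 */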
406 
407 struct inpcbport {
408 	struct epoch_context phd_epoch_ctx;
409 	CK_LIST_ENTRY(inpcbport) phd_hash;
410 	struct inpcbhead phd_pcblist;
411 	u_short phd_port;
412 };
413 
414 struct in_pcblist {
415 	int il_count;
416 	struct epoch_context il_epoch_ctx;
417 	struct inpcbinfo *il_pcbinfo;
418 	struct inpcb *il_inp_list[0];
419 };
420 
421 /*-
422  * Global data structure for each high-level protocol (UDP, TCP, ...) in both
423  * IPv4 and IPv6.  Holds inpcb lists and information for managing them.
424  *
425  * Each pcbinfo is protected by three locks: ipi_lock, ipi_hash_lock and
426  * ipi_list_lock:
427  *  - ipi_lock covering stability of the global pcb list during loop iteration,
428  *  - ipi_hash_lock covering the hashed lookup tables,
429  *  - ipi_list_lock covering mutable global fields (such as the global
430  *    pcb list)
431  *
432  * The lock order is:
433  *
434  *    ipi_lock (before)
435  *        inpcb locks (before)
436  *            ipi_list locks (before)
437  *                {ipi_hash_lock, pcbgroup locks}
438  *
439  * Locking key:
440  *
441  * (c) Constant or nearly constant after initialisation
442  * (e) Protected by the net_epoch_preempt epoch
443  * (g) Locked by ipi_lock
444  * (l) Locked by ipi_list_lock
445  * (h) Read using either net_epoch_preempt or inpcb lock; write requires both ipi_hash_lock and inpcb lock
446  * (p) Protected by one or more pcbgroup locks
447  * (x) Synchronisation properties poorly defined
448  */
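
/*
 * Order sketch (illustrative only), using the macros defined later in this
 * file: a writer that must touch the global list, the lookup hashes and an
 * individual connection nests its locks following the order above,
 *
 *	INP_INFO_WLOCK(pcbinfo);
 *	INP_WLOCK(inp);
 *	INP_LIST_WLOCK(pcbinfo);
 *	INP_HASH_WLOCK(pcbinfo);
 *	... manipulate the global list and hashes ...
 *	INP_HASH_WUNLOCK(pcbinfo);
 *	INP_LIST_WUNLOCK(pcbinfo);
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WUNLOCK(pcbinfo);
 *
 * and never acquires ipi_lock while already holding an inpcb lock.
 */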
449 struct inpcbinfo {
450 	/*
451 	 * Global lock protecting inpcb list modification
452 	 */
453 	struct mtx		 ipi_lock;
454 
455 	/*
456 	 * Global list of inpcbs on the protocol.
457 	 */
458 	struct inpcbhead	*ipi_listhead;		/* [r](e) [w](g/l) */
459 	u_int			 ipi_count;		/* (l) */
460 
461 	/*
462 	 * Generation count -- incremented each time a connection is allocated
463 	 * or freed.
464 	 */
465 	u_quad_t		 ipi_gencnt;		/* (l) */
466 
467 	/*
468 	 * Fields associated with port lookup and allocation.
469 	 */
470 	u_short			 ipi_lastport;		/* (x) */
471 	u_short			 ipi_lastlow;		/* (x) */
472 	u_short			 ipi_lasthi;		/* (x) */
473 
474 	/*
475 	 * UMA zone from which inpcbs are allocated for this protocol.
476 	 */
477 	struct	uma_zone	*ipi_zone;		/* (c) */
478 
479 	/*
480 	 * Connection groups associated with this protocol.  These fields are
481 	 * constant, but pcbgroup structures themselves are protected by
482 	 * per-pcbgroup locks.
483 	 */
484 	struct inpcbgroup	*ipi_pcbgroups;		/* (c) */
485 	u_int			 ipi_npcbgroups;	/* (c) */
486 	u_int			 ipi_hashfields;	/* (c) */
487 
488 	/*
489 	 * Global lock protecting modification of non-pcbgroup hash lookup tables.
490 	 */
491 	struct mtx		 ipi_hash_lock;
492 
493 	/*
494 	 * Global hash of inpcbs, hashed by local and foreign addresses and
495 	 * port numbers.
496 	 */
497 	struct inpcbhead	*ipi_hashbase;		/* (h) */
498 	u_long			 ipi_hashmask;		/* (h) */
499 
500 	/*
501 	 * Global hash of inpcbs, hashed by only local port number.
502 	 */
503 	struct inpcbporthead	*ipi_porthashbase;	/* (h) */
504 	u_long			 ipi_porthashmask;	/* (h) */
505 
506 	/*
507 	 * List of wildcard inpcbs for use with pcbgroups.  In the past, was
508 	 * per-pcbgroup but is now global.  All pcbgroup locks must be held
509 	 * to modify the list, so any is sufficient to read it.
510 	 */
511 	struct inpcbhead	*ipi_wildbase;		/* (p) */
512 	u_long			 ipi_wildmask;		/* (p) */
513 
514 	/*
515 	 * Load balance groups used for the SO_REUSEPORT_LB option,
516 	 * hashed by local port.
517 	 */
518 	struct	inpcblbgrouphead *ipi_lbgrouphashbase;	/* (h) */
519 	u_long			 ipi_lbgrouphashmask;	/* (h) */
520 
521 	/*
522 	 * Pointer to network stack instance
523 	 */
524 	struct vnet		*ipi_vnet;		/* (c) */
525 
526 	/*
527 	 * Spare pointers for general use.
528 	 */
529 	void 			*ipi_pspare[2];
530 
531 	/*
532 	 * Global lock protecting global inpcb list, inpcb count, etc.
533 	 */
534 	struct rwlock		 ipi_list_lock;
535 };
536 
537 #ifdef _KERNEL
538 /*
539  * Connection groups hold sets of connections that have similar CPU/thread
540  * affinity.  Each connection belongs to exactly one connection group.
541  */
542 struct inpcbgroup {
543 	/*
544 	 * Per-connection group hash of inpcbs, hashed by local and foreign
545 	 * addresses and port numbers.
546 	 */
547 	struct inpcbhead	*ipg_hashbase;		/* (c) */
548 	u_long			 ipg_hashmask;		/* (c) */
549 
550 	/*
551 	 * Notional affinity of this pcbgroup.
552 	 */
553 	u_int			 ipg_cpu;		/* (p) */
554 
555 	/*
556 	 * Per-connection group lock, not to be confused with ipi_lock.
557 	 * Protects the hash table hung off the group, but also the global
558 	 * wildcard list in inpcbinfo.
559 	 */
560 	struct mtx		 ipg_lock;
561 } __aligned(CACHE_LINE_SIZE);
562 
563 /*
564  * Load balance groups used for the SO_REUSEPORT_LB socket option. Each group
565  * (or unique address:port combination) can be re-used at most
566  * INPCBLBGROUP_SIZMAX (256) times. The inpcbs are stored in il_inp which
567  * is dynamically resized as processes bind/unbind to that specific group.
568  */
569 struct inpcblbgroup {
570 	CK_LIST_ENTRY(inpcblbgroup) il_list;
571 	struct epoch_context il_epoch_ctx;
572 	uint16_t	il_lport;			/* (c) */
573 	u_char		il_vflag;			/* (c) */
574 	u_char		il_pad;
575 	uint32_t	il_pad2;
576 	union in_dependaddr il_dependladdr;		/* (c) */
577 #define	il_laddr	il_dependladdr.id46_addr.ia46_addr4
578 #define	il6_laddr	il_dependladdr.id6_addr
579 	uint32_t	il_inpsiz; /* max count in il_inp[] (h) */
580 	uint32_t	il_inpcnt; /* cur count in il_inp[] (h) */
581 	struct inpcb	*il_inp[];			/* (h) */
582 };
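
/*
 * Usage sketch (illustrative only): a load balance group is created when a
 * process enables SO_REUSEPORT_LB before binding; incoming connections are
 * then distributed across the member PCBs.  Here s and sin are a
 * hypothetical socket and local address:
 *
 *	int one = 1;
 *
 *	setsockopt(s, SOL_SOCKET, SO_REUSEPORT_LB, &one, sizeof(one));
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(s, 128);
 */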
583 
584 #define INP_LOCK_INIT(inp, d, t) \
585 	rw_init_flags(&(inp)->inp_lock, (t), RW_RECURSE |  RW_DUPOK)
586 #define INP_LOCK_DESTROY(inp)	rw_destroy(&(inp)->inp_lock)
587 #define INP_RLOCK(inp)		rw_rlock(&(inp)->inp_lock)
588 #define INP_WLOCK(inp)		rw_wlock(&(inp)->inp_lock)
589 #define INP_TRY_RLOCK(inp)	rw_try_rlock(&(inp)->inp_lock)
590 #define INP_TRY_WLOCK(inp)	rw_try_wlock(&(inp)->inp_lock)
591 #define INP_RUNLOCK(inp)	rw_runlock(&(inp)->inp_lock)
592 #define INP_WUNLOCK(inp)	rw_wunlock(&(inp)->inp_lock)
593 #define	INP_TRY_UPGRADE(inp)	rw_try_upgrade(&(inp)->inp_lock)
594 #define	INP_DOWNGRADE(inp)	rw_downgrade(&(inp)->inp_lock)
595 #define	INP_WLOCKED(inp)	rw_wowned(&(inp)->inp_lock)
596 #define	INP_LOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_LOCKED)
597 #define	INP_RLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_RLOCKED)
598 #define	INP_WLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_WLOCKED)
599 #define	INP_UNLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_UNLOCKED)
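
/*
 * Usage sketch (illustrative only): a reader that occasionally needs to
 * modify the PCB can try to upgrade, falling back to retaking the lock as a
 * writer (and revalidating its state) when the upgrade fails.  Here
 * update_needed is a hypothetical condition:
 *
 *	INP_RLOCK(inp);
 *	if (update_needed && !INP_TRY_UPGRADE(inp)) {
 *		INP_RUNLOCK(inp);
 *		INP_WLOCK(inp);
 *	}
 *	if (update_needed) {
 *		INP_WLOCK_ASSERT(inp);
 *		... modify the PCB ...
 *		INP_WUNLOCK(inp);
 *	} else
 *		INP_RUNLOCK(inp);
 */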
600 
601 /*
602  * These locking functions are for inpcb consumers outside of sys/netinet;
603  * more specifically, they were added for the benefit of TOE drivers. The
604  * macros are reserved for use by the stack.
605  */
606 void inp_wlock(struct inpcb *);
607 void inp_wunlock(struct inpcb *);
608 void inp_rlock(struct inpcb *);
609 void inp_runlock(struct inpcb *);
610 
611 #ifdef INVARIANT_SUPPORT
612 void inp_lock_assert(struct inpcb *);
613 void inp_unlock_assert(struct inpcb *);
614 #else
615 #define	inp_lock_assert(inp)	do {} while (0)
616 #define	inp_unlock_assert(inp)	do {} while (0)
617 #endif
618 
619 void	inp_apply_all(void (*func)(struct inpcb *, void *), void *arg);
620 int 	inp_ip_tos_get(const struct inpcb *inp);
621 void 	inp_ip_tos_set(struct inpcb *inp, int val);
622 struct socket *
623 	inp_inpcbtosocket(struct inpcb *inp);
624 struct tcpcb *
625 	inp_inpcbtotcpcb(struct inpcb *inp);
626 void 	inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
627 		uint32_t *faddr, uint16_t *fp);
628 int	inp_so_options(const struct inpcb *inp);
629 
630 #endif /* _KERNEL */
631 
632 #define INP_INFO_LOCK_INIT(ipi, d) \
633 	mtx_init(&(ipi)->ipi_lock, (d), NULL, MTX_DEF| MTX_RECURSE)
634 #define INP_INFO_LOCK_DESTROY(ipi)  mtx_destroy(&(ipi)->ipi_lock)
635 #define INP_INFO_RLOCK_ET(ipi, et)	NET_EPOCH_ENTER((et))
636 #define INP_INFO_WLOCK(ipi) mtx_lock(&(ipi)->ipi_lock)
637 #define INP_INFO_TRY_WLOCK(ipi)	mtx_trylock(&(ipi)->ipi_lock)
638 #define INP_INFO_WLOCKED(ipi)	mtx_owned(&(ipi)->ipi_lock)
639 #define INP_INFO_RUNLOCK_ET(ipi, et)	NET_EPOCH_EXIT((et))
640 #define INP_INFO_RUNLOCK_TP(ipi, tp)	NET_EPOCH_EXIT(*(tp)->t_inpcb->inp_et)
641 #define INP_INFO_WUNLOCK(ipi)	mtx_unlock(&(ipi)->ipi_lock)
642 #define	INP_INFO_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ipi)->ipi_lock))
643 #define INP_INFO_RLOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt))
644 #define INP_INFO_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_lock, MA_OWNED)
645 #define INP_INFO_WUNLOCK_ASSERT(ipi)	\
646 	mtx_assert(&(ipi)->ipi_lock, MA_NOTOWNED)
647 #define INP_INFO_UNLOCK_ASSERT(ipi)	MPASS(!in_epoch(net_epoch_preempt) && !mtx_owned(&(ipi)->ipi_lock))
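
/*
 * Usage sketch (illustrative only): with the epoch-based scheme above,
 * read-side sections enter the network epoch through a tracker while
 * writers still serialise on ipi_lock:
 *
 *	struct epoch_tracker et;
 *
 *	INP_INFO_RLOCK_ET(pcbinfo, et);
 *	... look up and examine connections ...
 *	INP_INFO_RUNLOCK_ET(pcbinfo, et);
 *
 *	INP_INFO_WLOCK(pcbinfo);
 *	... modify pcbinfo-wide state ...
 *	INP_INFO_WUNLOCK(pcbinfo);
 */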
648 
649 #define INP_LIST_LOCK_INIT(ipi, d) \
650         rw_init_flags(&(ipi)->ipi_list_lock, (d), 0)
651 #define INP_LIST_LOCK_DESTROY(ipi)  rw_destroy(&(ipi)->ipi_list_lock)
652 #define INP_LIST_RLOCK(ipi)     rw_rlock(&(ipi)->ipi_list_lock)
653 #define INP_LIST_WLOCK(ipi)     rw_wlock(&(ipi)->ipi_list_lock)
654 #define INP_LIST_TRY_RLOCK(ipi) rw_try_rlock(&(ipi)->ipi_list_lock)
655 #define INP_LIST_TRY_WLOCK(ipi) rw_try_wlock(&(ipi)->ipi_list_lock)
656 #define INP_LIST_TRY_UPGRADE(ipi)       rw_try_upgrade(&(ipi)->ipi_list_lock)
657 #define INP_LIST_RUNLOCK(ipi)   rw_runlock(&(ipi)->ipi_list_lock)
658 #define INP_LIST_WUNLOCK(ipi)   rw_wunlock(&(ipi)->ipi_list_lock)
659 #define INP_LIST_LOCK_ASSERT(ipi) \
660 	rw_assert(&(ipi)->ipi_list_lock, RA_LOCKED)
661 #define INP_LIST_RLOCK_ASSERT(ipi) \
662 	rw_assert(&(ipi)->ipi_list_lock, RA_RLOCKED)
663 #define INP_LIST_WLOCK_ASSERT(ipi) \
664 	rw_assert(&(ipi)->ipi_list_lock, RA_WLOCKED)
665 #define INP_LIST_UNLOCK_ASSERT(ipi) \
666 	rw_assert(&(ipi)->ipi_list_lock, RA_UNLOCKED)
667 
668 #define	INP_HASH_LOCK_INIT(ipi, d) mtx_init(&(ipi)->ipi_hash_lock, (d), NULL, MTX_DEF)
669 #define	INP_HASH_LOCK_DESTROY(ipi)	mtx_destroy(&(ipi)->ipi_hash_lock)
670 #define	INP_HASH_RLOCK(ipi)		struct epoch_tracker inp_hash_et; epoch_enter_preempt(net_epoch_preempt, &inp_hash_et)
671 #define	INP_HASH_RLOCK_ET(ipi, et)		epoch_enter_preempt(net_epoch_preempt, &(et))
672 #define	INP_HASH_WLOCK(ipi)		mtx_lock(&(ipi)->ipi_hash_lock)
673 #define	INP_HASH_RUNLOCK(ipi)		NET_EPOCH_EXIT(inp_hash_et)
674 #define	INP_HASH_RUNLOCK_ET(ipi, et)	NET_EPOCH_EXIT((et))
675 #define	INP_HASH_WUNLOCK(ipi)		mtx_unlock(&(ipi)->ipi_hash_lock)
676 #define	INP_HASH_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ipi)->ipi_hash_lock))
677 #define	INP_HASH_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_hash_lock, MA_OWNED)
678 
679 #define	INP_GROUP_LOCK_INIT(ipg, d)	mtx_init(&(ipg)->ipg_lock, (d), NULL, \
680 					    MTX_DEF | MTX_DUPOK)
681 #define	INP_GROUP_LOCK_DESTROY(ipg)	mtx_destroy(&(ipg)->ipg_lock)
682 
683 #define	INP_GROUP_LOCK(ipg)		mtx_lock(&(ipg)->ipg_lock)
684 #define	INP_GROUP_LOCK_ASSERT(ipg)	mtx_assert(&(ipg)->ipg_lock, MA_OWNED)
685 #define	INP_GROUP_UNLOCK(ipg)		mtx_unlock(&(ipg)->ipg_lock)
686 
687 #define INP_PCBHASH(faddr, lport, fport, mask) \
688 	(((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))
689 #define INP_PCBPORTHASH(lport, mask) \
690 	(ntohs((lport)) & (mask))
691 #define	INP_PCBLBGROUP_PKTHASH(faddr, lport, fport) \
692 	((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport)))
693 #define	INP6_PCBHASHKEY(faddr)	((faddr)->s6_addr32[3])
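
/*
 * Usage sketch (illustrative only): the hash macros yield a bucket index
 * into the corresponding chain array in struct inpcbinfo, e.g. for an IPv4
 * lookup (faddr, lport, fport and inp being hypothetical locals):
 *
 *	struct inpcbhead *head;
 *
 *	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
 *	    pcbinfo->ipi_hashmask)];
 *	CK_LIST_FOREACH(inp, head, inp_hash) {
 *		... compare addresses and ports ...
 *	}
 */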
694 
695 /*
696  * Flags for inp_vflag -- historically version flags only
697  */
698 #define	INP_IPV4	0x1
699 #define	INP_IPV6	0x2
700 #define	INP_IPV6PROTO	0x4		/* opened under IPv6 protocol */
701 
702 /*
703  * Flags for inp_flags.
704  */
705 #define	INP_RECVOPTS		0x00000001 /* receive incoming IP options */
706 #define	INP_RECVRETOPTS		0x00000002 /* receive IP options for reply */
707 #define	INP_RECVDSTADDR		0x00000004 /* receive IP dst address */
708 #define	INP_HDRINCL		0x00000008 /* user supplies entire IP header */
709 #define	INP_HIGHPORT		0x00000010 /* user wants "high" port binding */
710 #define	INP_LOWPORT		0x00000020 /* user wants "low" port binding */
711 #define	INP_ANONPORT		0x00000040 /* port chosen for user */
712 #define	INP_RECVIF		0x00000080 /* receive incoming interface */
713 #define	INP_MTUDISC		0x00000100 /* user can do MTU discovery */
714 				   	   /* 0x000200 unused: was INP_FAITH */
715 #define	INP_RECVTTL		0x00000400 /* receive incoming IP TTL */
716 #define	INP_DONTFRAG		0x00000800 /* don't fragment packet */
717 #define	INP_BINDANY		0x00001000 /* allow bind to any address */
718 #define	INP_INHASHLIST		0x00002000 /* in_pcbinshash() has been called */
719 #define	INP_RECVTOS		0x00004000 /* receive incoming IP TOS */
720 #define	IN6P_IPV6_V6ONLY	0x00008000 /* restrict AF_INET6 socket for v6 */
721 #define	IN6P_PKTINFO		0x00010000 /* receive IP6 dst and I/F */
722 #define	IN6P_HOPLIMIT		0x00020000 /* receive hoplimit */
723 #define	IN6P_HOPOPTS		0x00040000 /* receive hop-by-hop options */
724 #define	IN6P_DSTOPTS		0x00080000 /* receive dst options after rthdr */
725 #define	IN6P_RTHDR		0x00100000 /* receive routing header */
726 #define	IN6P_RTHDRDSTOPTS	0x00200000 /* receive dstoptions before rthdr */
727 #define	IN6P_TCLASS		0x00400000 /* receive traffic class value */
728 #define	IN6P_AUTOFLOWLABEL	0x00800000 /* attach flowlabel automatically */
729 #define	INP_TIMEWAIT		0x01000000 /* in TIMEWAIT, ppcb is tcptw */
730 #define	INP_ONESBCAST		0x02000000 /* send all-ones broadcast */
731 #define	INP_DROPPED		0x04000000 /* protocol drop flag */
732 #define	INP_SOCKREF		0x08000000 /* strong socket reference */
733 #define	INP_RESERVED_0          0x10000000 /* reserved field */
734 #define	INP_RESERVED_1          0x20000000 /* reserved field */
735 #define	IN6P_RFC2292		0x40000000 /* used RFC2292 API on the socket */
736 #define	IN6P_MTU		0x80000000 /* receive path MTU */
737 
738 #define	INP_CONTROLOPTS		(INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\
739 				 INP_RECVIF|INP_RECVTTL|INP_RECVTOS|\
740 				 IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\
741 				 IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\
742 				 IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\
743 				 IN6P_MTU)
744 
745 /*
746  * Flags for inp_flags2.
747  */
748 #define	INP_2UNUSED1		0x00000001
749 #define	INP_2UNUSED2		0x00000002
750 #define	INP_PCBGROUPWILD	0x00000004 /* in pcbgroup wildcard list */
751 #define	INP_REUSEPORT		0x00000008 /* SO_REUSEPORT option is set */
752 #define	INP_FREED		0x00000010 /* inp itself is not valid */
753 #define	INP_REUSEADDR		0x00000020 /* SO_REUSEADDR option is set */
754 #define	INP_BINDMULTI		0x00000040 /* IP_BINDMULTI option is set */
755 #define	INP_RSS_BUCKET_SET	0x00000080 /* IP_RSS_LISTEN_BUCKET is set */
756 #define	INP_RECVFLOWID		0x00000100 /* populate recv datagram with flow info */
757 #define	INP_RECVRSSBUCKETID	0x00000200 /* populate recv datagram with bucket id */
758 #define	INP_RATE_LIMIT_CHANGED	0x00000400 /* rate limit needs attention */
759 #define	INP_ORIGDSTADDR		0x00000800 /* receive IP dst address/port */
760 #define INP_CANNOT_DO_ECN	0x00001000 /* The stack does not do ECN */
761 #define	INP_REUSEPORT_LB	0x00002000 /* SO_REUSEPORT_LB option is set */
762 
763 /*
764  * Flags passed to in_pcblookup*() functions.
765  */
766 #define	INPLOOKUP_WILDCARD	0x00000001	/* Allow wildcard sockets. */
767 #define	INPLOOKUP_RLOCKPCB	0x00000002	/* Return inpcb read-locked. */
768 #define	INPLOOKUP_WLOCKPCB	0x00000004	/* Return inpcb write-locked. */
769 
770 #define	INPLOOKUP_MASK	(INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB | \
771 			    INPLOOKUP_WLOCKPCB)
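
/*
 * Usage sketch (illustrative only): an input path typically asks for an
 * exact match, falling back to a wildcard (listening) socket, and gets the
 * PCB back already locked in the requested mode.  Here ip, th and m are
 * hypothetical IP/TCP headers and the received mbuf:
 *
 *	inp = in_pcblookup(pcbinfo, ip->ip_src, th->th_sport, ip->ip_dst,
 *	    th->th_dport, INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
 *	    m->m_pkthdr.rcvif);
 *	if (inp != NULL) {
 *		... deliver to the connection ...
 *		INP_WUNLOCK(inp);
 *	}
 */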
772 
773 #define	sotoinpcb(so)	((struct inpcb *)(so)->so_pcb)
774 #define	sotoin6pcb(so)	sotoinpcb(so) /* for KAME src sync over BSD*'s */
775 
776 #define	INP_SOCKAF(so) so->so_proto->pr_domain->dom_family
777 
778 #define	INP_CHECK_SOCKAF(so, af)	(INP_SOCKAF(so) == af)
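
/*
 * Usage sketch (illustrative only): protocol user requests usually recover
 * the PCB from the socket and sanity-check the address family first:
 *
 *	struct inpcb *inp;
 *
 *	KASSERT(INP_CHECK_SOCKAF(so, AF_INET), ("wrong address family"));
 *	inp = sotoinpcb(so);
 *	KASSERT(inp != NULL, ("no inpcb attached"));
 */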
779 
780 /*
781  * Constants for pcbinfo.ipi_hashfields.
782  */
783 #define	IPI_HASHFIELDS_NONE	0
784 #define	IPI_HASHFIELDS_2TUPLE	1
785 #define	IPI_HASHFIELDS_4TUPLE	2
786 
787 #ifdef _KERNEL
788 VNET_DECLARE(int, ipport_reservedhigh);
789 VNET_DECLARE(int, ipport_reservedlow);
790 VNET_DECLARE(int, ipport_lowfirstauto);
791 VNET_DECLARE(int, ipport_lowlastauto);
792 VNET_DECLARE(int, ipport_firstauto);
793 VNET_DECLARE(int, ipport_lastauto);
794 VNET_DECLARE(int, ipport_hifirstauto);
795 VNET_DECLARE(int, ipport_hilastauto);
796 VNET_DECLARE(int, ipport_randomized);
797 VNET_DECLARE(int, ipport_randomcps);
798 VNET_DECLARE(int, ipport_randomtime);
799 VNET_DECLARE(int, ipport_stoprandom);
800 VNET_DECLARE(int, ipport_tcpallocs);
801 
802 #define	V_ipport_reservedhigh	VNET(ipport_reservedhigh)
803 #define	V_ipport_reservedlow	VNET(ipport_reservedlow)
804 #define	V_ipport_lowfirstauto	VNET(ipport_lowfirstauto)
805 #define	V_ipport_lowlastauto	VNET(ipport_lowlastauto)
806 #define	V_ipport_firstauto	VNET(ipport_firstauto)
807 #define	V_ipport_lastauto	VNET(ipport_lastauto)
808 #define	V_ipport_hifirstauto	VNET(ipport_hifirstauto)
809 #define	V_ipport_hilastauto	VNET(ipport_hilastauto)
810 #define	V_ipport_randomized	VNET(ipport_randomized)
811 #define	V_ipport_randomcps	VNET(ipport_randomcps)
812 #define	V_ipport_randomtime	VNET(ipport_randomtime)
813 #define	V_ipport_stoprandom	VNET(ipport_stoprandom)
814 #define	V_ipport_tcpallocs	VNET(ipport_tcpallocs)
815 
816 void	in_pcbinfo_destroy(struct inpcbinfo *);
817 void	in_pcbinfo_init(struct inpcbinfo *, const char *, struct inpcbhead *,
818 	    int, int, char *, uma_init, u_int);
819 
820 int	in_pcbbind_check_bindmulti(const struct inpcb *ni,
821 	    const struct inpcb *oi);
822 
823 struct inpcbgroup *
824 	in_pcbgroup_byhash(struct inpcbinfo *, u_int, uint32_t);
825 struct inpcbgroup *
826 	in_pcbgroup_byinpcb(struct inpcb *);
827 struct inpcbgroup *
828 	in_pcbgroup_bytuple(struct inpcbinfo *, struct in_addr, u_short,
829 	    struct in_addr, u_short);
830 void	in_pcbgroup_destroy(struct inpcbinfo *);
831 int	in_pcbgroup_enabled(struct inpcbinfo *);
832 void	in_pcbgroup_init(struct inpcbinfo *, u_int, int);
833 void	in_pcbgroup_remove(struct inpcb *);
834 void	in_pcbgroup_update(struct inpcb *);
835 void	in_pcbgroup_update_mbuf(struct inpcb *, struct mbuf *);
836 
837 void	in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *);
838 int	in_pcballoc(struct socket *, struct inpcbinfo *);
839 int	in_pcbbind(struct inpcb *, struct sockaddr *, struct ucred *);
840 int	in_pcb_lport(struct inpcb *, struct in_addr *, u_short *,
841 	    struct ucred *, int);
842 int	in_pcbbind_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
843 	    u_short *, struct ucred *);
844 int	in_pcbconnect(struct inpcb *, struct sockaddr *, struct ucred *);
845 int	in_pcbconnect_mbuf(struct inpcb *, struct sockaddr *, struct ucred *,
846 	    struct mbuf *);
847 int	in_pcbconnect_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
848 	    u_short *, in_addr_t *, u_short *, struct inpcb **,
849 	    struct ucred *);
850 void	in_pcbdetach(struct inpcb *);
851 void	in_pcbdisconnect(struct inpcb *);
852 void	in_pcbdrop(struct inpcb *);
853 void	in_pcbfree(struct inpcb *);
854 int	in_pcbinshash(struct inpcb *);
855 int	in_pcbinshash_nopcbgroup(struct inpcb *);
856 int	in_pcbladdr(struct inpcb *, struct in_addr *, struct in_addr *,
857 	    struct ucred *);
858 struct inpcb *
859 	in_pcblookup_local(struct inpcbinfo *,
860 	    struct in_addr, u_short, int, struct ucred *);
861 struct inpcb *
862 	in_pcblookup(struct inpcbinfo *, struct in_addr, u_int,
863 	    struct in_addr, u_int, int, struct ifnet *);
864 struct inpcb *
865 	in_pcblookup_mbuf(struct inpcbinfo *, struct in_addr, u_int,
866 	    struct in_addr, u_int, int, struct ifnet *, struct mbuf *);
867 void	in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr,
868 	    int, struct inpcb *(*)(struct inpcb *, int));
869 void	in_pcbref(struct inpcb *);
870 void	in_pcbrehash(struct inpcb *);
871 void	in_pcbrehash_mbuf(struct inpcb *, struct mbuf *);
872 int	in_pcbrele(struct inpcb *);
873 int	in_pcbrele_rlocked(struct inpcb *);
874 int	in_pcbrele_wlocked(struct inpcb *);
875 void	in_pcblist_rele_rlocked(epoch_context_t ctx);
876 void	in_losing(struct inpcb *);
877 void	in_pcbsetsolabel(struct socket *so);
878 int	in_getpeeraddr(struct socket *so, struct sockaddr **nam);
879 int	in_getsockaddr(struct socket *so, struct sockaddr **nam);
880 struct sockaddr *
881 	in_sockaddr(in_port_t port, struct in_addr *addr);
882 void	in_pcbsosetlabel(struct socket *so);
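
/*
 * Lifecycle sketch (illustrative only, loosely following the UDP attach and
 * bind paths; see the protocol usrreq code for the authoritative locking
 * rules).  in_pcballoc() returns with the new inpcb write-locked:
 *
 *	INP_INFO_WLOCK(pcbinfo);
 *	error = in_pcballoc(so, pcbinfo);
 *	if (error == 0)
 *		INP_WUNLOCK(sotoinpcb(so));
 *	INP_INFO_WUNLOCK(pcbinfo);
 *
 *	INP_WLOCK(inp);
 *	INP_HASH_WLOCK(pcbinfo);
 *	error = in_pcbbind(inp, nam, cred);
 *	INP_HASH_WUNLOCK(pcbinfo);
 *	INP_WUNLOCK(inp);
 */
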
883 #ifdef RATELIMIT
884 int	in_pcbattach_txrtlmt(struct inpcb *, struct ifnet *, uint32_t, uint32_t, uint32_t);
885 void	in_pcbdetach_txrtlmt(struct inpcb *);
886 int	in_pcbmodify_txrtlmt(struct inpcb *, uint32_t);
887 int	in_pcbquery_txrtlmt(struct inpcb *, uint32_t *);
888 int	in_pcbquery_txrlevel(struct inpcb *, uint32_t *);
889 void	in_pcboutput_txrtlmt(struct inpcb *, struct ifnet *, struct mbuf *);
890 void	in_pcboutput_eagain(struct inpcb *);
891 #endif
892 #endif /* _KERNEL */
893 
894 #endif /* !_NETINET_IN_PCB_H_ */
895