/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)in_pcb.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#ifndef _NETINET_IN_PCB_H_
#define _NETINET_IN_PCB_H_

#include <sys/queue.h>
#include <sys/epoch.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_rwlock.h>
#include <net/route.h>

#ifdef _KERNEL
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <vm/uma.h>
#endif
#include <sys/ck.h>

#define	in6pcb		inpcb	/* for KAME src sync over BSD*'s */
#define	in6p_sp		inp_sp	/* for KAME src sync over BSD*'s */

/*
 * struct inpcb is the common protocol control block structure used in most
 * IP transport protocols.
 *
 * Pointers to local and foreign host table entries, local and foreign socket
 * numbers, and pointers up (to a socket structure) and down (to a
 * protocol-specific control block) are stored here.
 */
CK_LIST_HEAD(inpcbhead, inpcb);
CK_LIST_HEAD(inpcbporthead, inpcbport);
typedef	uint64_t	inp_gen_t;

/*
 * A PCB bound to the AF_INET6 null (unspecified) local address can receive
 * AF_INET input packets, so the AF_INET6 null laddr doubles as the AF_INET
 * null laddr, using the following structure.
 */
struct in_addr_4in6 {
	u_int32_t	ia46_pad32[3];
	struct	in_addr	ia46_addr4;
};

union in_dependaddr {
	struct in_addr_4in6 id46_addr;
	struct in6_addr	id6_addr;
};

/*
 * NOTE: IPv6 addresses should be 64-bit aligned, per RFC 2553.  in_conninfo
 * carries some extra padding to accomplish this.
 * NOTE 2: tcp_syncache.c hashes the first five 32-bit words, which cover
 * fport, lport and faddr, so these fields must not be moved.
 */
struct in_endpoints {
	u_int16_t	ie_fport;		/* foreign port */
	u_int16_t	ie_lport;		/* local port */
	/* protocol dependent part, local and foreign addr */
	union in_dependaddr ie_dependfaddr;	/* foreign host table entry */
	union in_dependaddr ie_dependladdr;	/* local host table entry */
#define	ie_faddr	ie_dependfaddr.id46_addr.ia46_addr4
#define	ie_laddr	ie_dependladdr.id46_addr.ia46_addr4
#define	ie6_faddr	ie_dependfaddr.id6_addr
#define	ie6_laddr	ie_dependladdr.id6_addr
	u_int32_t	ie6_zoneid;		/* scope zone id */
};

/*
 * XXX The defines for inc_* are hacks and should be changed to direct
 * references.
 */
struct in_conninfo {
	u_int8_t	inc_flags;
	u_int8_t	inc_len;
	u_int16_t	inc_fibnum;	/* XXX was pad, 16 bits is plenty */
	/* protocol dependent part */
	struct	in_endpoints inc_ie;
};

/*
 * Flags for inc_flags.
 */
#define	INC_ISIPV6	0x01

#define	inc_isipv6	inc_flags	/* temp compatibility */
#define	inc_fport	inc_ie.ie_fport
#define	inc_lport	inc_ie.ie_lport
#define	inc_faddr	inc_ie.ie_faddr
#define	inc_laddr	inc_ie.ie_laddr
#define	inc6_faddr	inc_ie.ie6_faddr
#define	inc6_laddr	inc_ie.ie6_laddr
#define	inc6_zoneid	inc_ie.ie6_zoneid

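/*
 * A minimal sketch of filling an in_conninfo for an IPv4 endpoint via the
 * compatibility macros above.  "inc", "fsin" and "lsin" are hypothetical
 * locals (a struct in_conninfo and two struct sockaddr_in pointers); for
 * IPv6, set INC_ISIPV6 in inc_flags and use inc6_faddr/inc6_laddr instead.
 *
 *	bzero(&inc, sizeof(inc));
 *	inc.inc_fport = fsin->sin_port;
 *	inc.inc_lport = lsin->sin_port;
 *	inc.inc_faddr = fsin->sin_addr;
 *	inc.inc_laddr = lsin->sin_addr;
 */
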
#if defined(_KERNEL) || defined(_WANT_INPCB)
/*
 * struct inpcb captures the network layer state for TCP, UDP, and raw IPv4 and
 * IPv6 sockets.  In the case of TCP and UDP, further per-connection state is
 * hung off of inp_ppcb most of the time.  Almost all fields of struct inpcb
 * are static after creation or protected by a per-inpcb rwlock, inp_lock.  A
 * few fields are protected by multiple locks as indicated in the locking notes
 * below.  For these fields, all of the listed locks must be write-locked for
 * any modifications.  However, these fields can be safely read while any one
 * of the listed locks is read-locked.  This model can permit greater
 * concurrency for read operations.  For example, connections can be looked up
 * while only holding a read lock on the global pcblist lock.  This is
 * important for performance when attempting to find the connection for a
 * packet given its IP and port tuple.
 *
 * One noteworthy exception is that the global pcbinfo lock follows a different
 * set of rules in relation to the inp_list field.  Rather than being
 * write-locked for modifications and read-locked for list iterations, it must
 * be read-locked during modifications and write-locked during list iterations.
 * This ensures that the relatively rare global list iterations safely walk a
 * stable snapshot of connections while allowing more common list modifications
 * to safely grab the pcblist lock just while adding or removing a connection
 * from the global list.
 *
 * Key:
 * (b) - Protected by the hpts lock.
 * (c) - Constant after initialization
 * (e) - Protected by the net_epoch_preempt epoch
 * (g) - Protected by the pcbgroup lock
 * (i) - Protected by the inpcb lock
 * (p) - Protected by the pcbinfo lock for the inpcb
 * (l) - Protected by the pcblist lock for the inpcb
 * (h) - Protected by the pcbhash lock for the inpcb
 * (s) - Protected by another subsystem's locks
 * (x) - Undefined locking
 *
 * Notes on the tcp_hpts:
 *
 * The hpts lock order is:
 * 1) INP_WLOCK()
 * 2) HPTS_LOCK() i.e. hpts->pmtx
 *
 * To insert a TCB on the hpts you *must* be holding the INP_WLOCK().
 * You may check the inp->inp_in_hpts flag without the hpts lock.
 * The hpts is the only party that will clear this flag while holding
 * only the hpts lock.  This means that when your tcp_output() routine
 * sees the inp_in_hpts flag set to 1, the flag may already be
 * transitioning to 0 (cleared by the hpts).  That is fine: it just
 * means an extra call to tcp_output().  Most likely the earlier call
 * that raced with the hpts (when the mismatch occurred) will have put
 * the TCB back on the hpts, and the extra call will simply return.  If
 * your call did not add the inp back to the hpts, then you will either
 * over-send or the cwnd will block you from sending more.
 *
 * Note that you should also be holding the INP_WLOCK() when you remove
 * the inp from the hpts.  Usually you are doing this either from a
 * timer, where you need and already have the INP_WLOCK(), or while
 * destroying your TCB, where again you should already hold the
 * INP_WLOCK().
 *
 * The inp_hpts_cpu, inp_hpts_cpu_set, inp_input_cpu and
 * inp_input_cpu_set fields are controlled entirely by the hpts; never
 * set them yourself.  The inp_hpts_cpu_set and inp_input_cpu_set
 * fields indicate whether the hpts has set up the respective cpu
 * field.  If the _set field is 0, it is advised to enqueue the packet
 * with the appropriate hpts_immediate() call.  If the _set field is 1,
 * then you may compare the inp_*_cpu field to curcpu and may want to
 * insert onto the hpts again if they are not equal (i.e. you are not
 * on the expected CPU).
 *
 * A note on inp_hpts_calls and inp_input_calls: these flags are set
 * when the hpts calls the output or the do_segment routines,
 * respectively.  If a routine wants to use this information, it must
 * clear the flag before returning; the hpts will not clear it.  The
 * flags can thus be used to tell whether the hpts is the caller of the
 * respective routine.
 *
 * A few other notes:
 *
 * When a read lock is held, stability of the field is guaranteed; to write
 * to a field, a write lock must generally be held.
 *
 * netinet/netinet6-layer code should not assume that the inp_socket pointer
 * is safe to dereference without inp_lock being held, even for protocols
 * other than TCP (where the inpcb persists during TIMEWAIT even after the
 * socket has been freed), or there may be close(2)-related races.
 *
 * The inp_vflag field is overloaded, and would otherwise ideally be (c).
 *
 * TODO:  Currently only the TCP stack leverages read-locking of the global
 * pcbinfo lock during modification; this model could be applied to other
 * protocols (especially SCTP).
 */
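/*
 * A minimal sketch of the hpts insertion pattern described above, using the
 * INP_WLOCK()/INP_WUNLOCK() macros defined later in this file.  The
 * tcp_hpts_insert() call and the "slot" argument stand in for the hpts KPI
 * (see tcp_hpts.h) and are assumptions of this sketch, not part of this
 * header:
 *
 *	INP_WLOCK(inp);
 *	if (inp->inp_in_hpts == 0)
 *		tcp_hpts_insert(inp, slot);
 *	INP_WUNLOCK(inp);
 *
 * Because only the hpts clears inp_in_hpts, a value of 1 observed here may
 * already be going to 0; the worst case is one redundant pass through
 * tcp_output(), as the comment above explains.
 */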
struct icmp6_filter;
struct inpcbpolicy;
struct m_snd_tag;
struct inpcb {
	/* Cache line #1 (amd64) */
	CK_LIST_ENTRY(inpcb) inp_hash;	/* [w](h/i) [r](e/i)  hash list */
	CK_LIST_ENTRY(inpcb) inp_pcbgrouphash;	/* (g/i) hash list */
	struct rwlock	inp_lock;
	/* Cache line #2 (amd64) */
#define	inp_start_zero	inp_hpts
#define	inp_zero_size	(sizeof(struct inpcb) - \
			    offsetof(struct inpcb, inp_start_zero))
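	/*
	 * The two defines above let (re)initialization paths clear the bulk
	 * of the structure in a single step, roughly (sketch; "inp" is a
	 * hypothetical pointer to this structure):
	 *
	 *	bzero(&inp->inp_start_zero, inp_zero_size);
	 *
	 * while leaving the hash linkage and inp_lock above intact.
	 */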
	TAILQ_ENTRY(inpcb) inp_hpts;	/* pacing out queue next lock(b) */

	uint32_t inp_hpts_request;	/* Current hpts request, zero if
					 * fits in the pacing window (i&b). */
	/*
	 * Note that the next fields are protected by a
	 * different lock (the hpts lock).  This means that
	 * they must correspond in size to the smallest
	 * protectable bit field (uint8_t on x86, and
	 * potentially uint32_t on other platforms).  Also,
	 * since CPU switches can occur at different times, the two
	 * fields can *not* be collapsed into a single bit field.
	 */
#if defined(__amd64__) || defined(__i386__)
	volatile uint8_t inp_in_hpts; /* on output hpts (lock b) */
	volatile uint8_t inp_in_input; /* on input hpts (lock b) */
#else
	volatile uint32_t inp_in_hpts; /* on output hpts (lock b) */
	volatile uint32_t inp_in_input; /* on input hpts (lock b) */
#endif
	volatile uint16_t  inp_hpts_cpu; /* Lock (i) */
	u_int	inp_refcount;		/* (i) refcount */
	int	inp_flags;		/* (i) generic IP/datagram flags */
	int	inp_flags2;		/* (i) generic IP/datagram flags #2*/
	volatile uint16_t  inp_input_cpu; /* Lock (i) */
	volatile uint8_t inp_hpts_cpu_set :1,  /* on output hpts (i) */
			 inp_input_cpu_set : 1,	/* on input hpts (i) */
			 inp_hpts_calls :1,	/* (i) from output hpts */
			 inp_input_calls :1,	/* (i) from input hpts */
			 inp_spare_bits2 : 4;
	uint8_t inp_spare_byte;		/* Compiler hole */
	void	*inp_ppcb;		/* (i) pointer to per-protocol pcb */
	struct	socket *inp_socket;	/* (i) back pointer to socket */
	uint32_t 	 inp_hptsslot;	/* Hpts wheel slot this tcb is on, lock (i&b) */
	uint32_t         inp_hpts_drop_reas;	/* reason we are dropping the PCB (lock i&b) */
	TAILQ_ENTRY(inpcb) inp_input;	/* pacing in  queue next lock(b) */
	struct	inpcbinfo *inp_pcbinfo;	/* (c) PCB list info */
	struct	inpcbgroup *inp_pcbgroup; /* (g/i) PCB group list */
	CK_LIST_ENTRY(inpcb) inp_pcbgroup_wild; /* (g/i/h) group wildcard entry */
	struct	ucred	*inp_cred;	/* (c) cache of socket cred */
	u_int32_t inp_flow;		/* (i) IPv6 flow information */
	u_char	inp_vflag;		/* (i) IP version flag (v4/v6) */
	u_char	inp_ip_ttl;		/* (i) time to live proto */
	u_char	inp_ip_p;		/* (c) protocol proto */
	u_char	inp_ip_minttl;		/* (i) minimum TTL or drop */
	uint32_t inp_flowid;		/* (x) flow id / queue id */
	struct m_snd_tag *inp_snd_tag;	/* (i) send tag for outgoing mbufs */
	uint32_t inp_flowtype;		/* (x) M_HASHTYPE value */
	uint32_t inp_rss_listen_bucket;	/* (x) overridden RSS listen bucket */

	/* Local and foreign ports, local and foreign addr. */
	struct	in_conninfo inp_inc;	/* (i) list for PCB's local port */

	/* MAC and IPSEC policy information. */
	struct	label *inp_label;	/* (i) MAC label */
	struct	inpcbpolicy *inp_sp;    /* (s) for IPSEC */

	/* Protocol-dependent part; options. */
	struct {
		u_char	inp_ip_tos;		/* (i) type of service proto */
		struct mbuf		*inp_options;	/* (i) IP options */
		struct ip_moptions	*inp_moptions;	/* (i) mcast options */
	};
	struct {
		/* (i) IP options */
		struct mbuf		*in6p_options;
		/* (i) IP6 options for outgoing packets */
		struct ip6_pktopts	*in6p_outputopts;
		/* (i) IP multicast options */
		struct ip6_moptions	*in6p_moptions;
		/* (i) ICMPv6 code type filter */
		struct icmp6_filter	*in6p_icmp6filt;
		/* (i) IPV6_CHECKSUM setsockopt */
		int	in6p_cksum;
		short	in6p_hops;
	};
	CK_LIST_ENTRY(inpcb) inp_portlist;	/* (i/h) */
	struct	inpcbport *inp_phd;	/* (i/h) head of this list */
	inp_gen_t	inp_gencnt;	/* (c) generation count */
	struct llentry	*inp_lle;	/* cached L2 information */
	rt_gen_t	inp_rt_cookie;	/* generation for route entry */
	union {				/* cached L3 information */
		struct route inp_route;
		struct route_in6 inp_route6;
	};
	CK_LIST_ENTRY(inpcb) inp_list;	/* (p/l) list for all PCBs for proto */
	                                /* (e[r]) for list iteration */
	                                /* (p[w]/l) for addition/removal */
	struct epoch_context inp_epoch_ctx;
};
#endif	/* _KERNEL || _WANT_INPCB */

#define	inp_fport	inp_inc.inc_fport
#define	inp_lport	inp_inc.inc_lport
#define	inp_faddr	inp_inc.inc_faddr
#define	inp_laddr	inp_inc.inc_laddr

#define	in6p_faddr	inp_inc.inc6_faddr
#define	in6p_laddr	inp_inc.inc6_laddr
#define	in6p_zoneid	inp_inc.inc6_zoneid
#define	in6p_flowinfo	inp_flow

#define	inp_vnet	inp_pcbinfo->ipi_vnet

/*
 * The range of the generation count, as used in this implementation, is about
 * 1.8e19 (it is a uint64_t).  We would have to create nearly 600 billion
 * connections per second for this number to roll over in a year.  This seems
 * sufficiently unlikely that we simply don't concern ourselves with that
 * possibility.
 */

/*
 * Interface exported to userland by various protocols which use inpcbs.  Hack
 * alert -- only define if struct xsocket is in scope.
 * Fields prefixed with "xi_" are unique to this structure, and the rest
 * match fields in the struct inpcb, to ease coding and porting.
 *
 * Legend:
 * (s) - used by userland utilities in src
 * (p) - used by utilities in ports
 * (3) - is known to be used by third party software not in ports
 * (n) - no known usage
 */
#ifdef _SYS_SOCKETVAR_H_
struct xinpcb {
	ksize_t		xi_len;			/* length of this structure */
	struct xsocket	xi_socket;		/* (s,p) */
	struct in_conninfo inp_inc;		/* (s,p) */
	uint64_t	inp_gencnt;		/* (s,p) */
	kvaddr_t	inp_ppcb;		/* (s) netstat(1) */
	int64_t		inp_spare64[4];
	uint32_t	inp_flow;		/* (s) */
	uint32_t	inp_flowid;		/* (s) */
	uint32_t	inp_flowtype;		/* (s) */
	int32_t		inp_flags;		/* (s,p) */
	int32_t		inp_flags2;		/* (s) */
	int32_t		inp_rss_listen_bucket;	/* (n) */
	int32_t		in6p_cksum;		/* (n) */
	int32_t		inp_spare32[4];
	uint16_t	in6p_hops;		/* (n) */
	uint8_t		inp_ip_tos;		/* (n) */
	int8_t		pad8;
	uint8_t		inp_vflag;		/* (s,p) */
	uint8_t		inp_ip_ttl;		/* (n) */
	uint8_t		inp_ip_p;		/* (n) */
	uint8_t		inp_ip_minttl;		/* (n) */
	int8_t		inp_spare8[4];
} __aligned(8);

struct xinpgen {
	ksize_t		xig_len;	/* length of this structure */
	u_int		xig_count;	/* number of PCBs at this time */
	uint32_t	_xig_spare32;
	inp_gen_t	xig_gen;	/* generation count at this time */
	so_gen_t	xig_sogen;	/* socket generation count this time */
	uint64_t	_xig_spare64[4];
} __aligned(8);
#ifdef	_KERNEL
void	in_pcbtoxinpcb(const struct inpcb *, struct xinpcb *);
#endif
#endif /* _SYS_SOCKETVAR_H_ */
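
/*
 * A minimal sketch of how a userland consumer (in the style of netstat(1))
 * walks a buffer of these records.  The layout assumed here is a leading
 * struct xinpgen, a run of struct xinpcb records, and a trailing struct
 * xinpgen; "buf" is a hypothetical pointer to such a buffer:
 *
 *	struct xinpgen *xig = (struct xinpgen *)buf;
 *	struct xinpcb *xip;
 *
 *	for (xig = (struct xinpgen *)((char *)xig + xig->xig_len);
 *	    xig->xig_len > sizeof(struct xinpgen);
 *	    xig = (struct xinpgen *)((char *)xig + xig->xig_len)) {
 *		xip = (struct xinpcb *)xig;
 *		... inspect xip->inp_inc, xip->inp_vflag, etc. ...
 *	}
 */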

struct inpcbport {
	struct epoch_context phd_epoch_ctx;
	CK_LIST_ENTRY(inpcbport) phd_hash;
	struct inpcbhead phd_pcblist;
	u_short phd_port;
};

struct in_pcblist {
	int il_count;
	struct epoch_context il_epoch_ctx;
	struct inpcbinfo *il_pcbinfo;
	struct inpcb *il_inp_list[0];
};

/*-
 * Global data structure for each high-level protocol (UDP, TCP, ...) in both
 * IPv4 and IPv6.  Holds inpcb lists and information for managing them.
 *
 * Each pcbinfo is protected by three locks: ipi_lock, ipi_hash_lock and
 * ipi_list_lock:
 *  - ipi_lock covering the global pcb list stability during loop iteration,
 *  - ipi_hash_lock covering the hashed lookup tables,
 *  - ipi_list_lock covering mutable global fields (such as the global
 *    pcb list)
 *
 * The lock order is:
 *
 *    ipi_lock (before)
 *        inpcb locks (before)
 *            ipi_list locks (before)
 *                {ipi_hash_lock, pcbgroup locks}
 *
 * Locking key:
 *
 * (c) Constant or nearly constant after initialisation
 * (e) Protected by the net_epoch_preempt epoch
 * (g) Locked by ipi_lock
 * (l) Locked by ipi_list_lock
 * (h) Read using either net_epoch_preempt or inpcb lock; write requires both ipi_hash_lock and inpcb lock
 * (p) Protected by one or more pcbgroup locks
 * (x) Synchronisation properties poorly defined
 */
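/*
 * A minimal sketch of the lock order above, expressed with the macros defined
 * later in this file ("pcbinfo" and "inp" are hypothetical pointers; most
 * code paths take only a subset of these locks):
 *
 *	INP_INFO_WLOCK(pcbinfo);
 *	INP_WLOCK(inp);
 *	INP_LIST_WLOCK(pcbinfo);
 *	INP_HASH_WLOCK(pcbinfo);
 *	...
 *	INP_HASH_WUNLOCK(pcbinfo);
 *	INP_LIST_WUNLOCK(pcbinfo);
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WUNLOCK(pcbinfo);
 */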
struct inpcbinfo {
	/*
	 * Global lock protecting inpcb list modification
	 */
	struct mtx		 ipi_lock;

	/*
	 * Global list of inpcbs on the protocol.
	 */
	struct inpcbhead	*ipi_listhead;		/* [r](e) [w](g/l) */
	u_int			 ipi_count;		/* (l) */

	/*
	 * Generation count -- incremented each time a connection is allocated
	 * or freed.
	 */
	u_quad_t		 ipi_gencnt;		/* (l) */

	/*
	 * Fields associated with port lookup and allocation.
	 */
	u_short			 ipi_lastport;		/* (x) */
	u_short			 ipi_lastlow;		/* (x) */
	u_short			 ipi_lasthi;		/* (x) */

	/*
	 * UMA zone from which inpcbs are allocated for this protocol.
	 */
	struct	uma_zone	*ipi_zone;		/* (c) */

	/*
	 * Connection groups associated with this protocol.  These fields are
	 * constant, but pcbgroup structures themselves are protected by
	 * per-pcbgroup locks.
	 */
	struct inpcbgroup	*ipi_pcbgroups;		/* (c) */
	u_int			 ipi_npcbgroups;	/* (c) */
	u_int			 ipi_hashfields;	/* (c) */

	/*
	 * Global lock protecting modification of the non-pcbgroup hash
	 * lookup tables.
	 */
	struct mtx		 ipi_hash_lock;

	/*
	 * Global hash of inpcbs, hashed by local and foreign addresses and
	 * port numbers.
	 */
	struct inpcbhead	*ipi_hashbase;		/* (h) */
	u_long			 ipi_hashmask;		/* (h) */

	/*
	 * Global hash of inpcbs, hashed by only local port number.
	 */
	struct inpcbporthead	*ipi_porthashbase;	/* (h) */
	u_long			 ipi_porthashmask;	/* (h) */

	/*
	 * List of wildcard inpcbs for use with pcbgroups.  In the past, was
	 * per-pcbgroup but is now global.  All pcbgroup locks must be held
	 * to modify the list, so any is sufficient to read it.
	 */
	struct inpcbhead	*ipi_wildbase;		/* (p) */
	u_long			 ipi_wildmask;		/* (p) */

	/*
	 * Load balance groups used for the SO_REUSEPORT_LB option,
	 * hashed by local port.
	 */
	struct	inpcblbgrouphead *ipi_lbgrouphashbase;	/* (h) */
	u_long			 ipi_lbgrouphashmask;	/* (h) */

	/*
	 * Pointer to network stack instance
	 */
	struct vnet		*ipi_vnet;		/* (c) */

	/*
	 * general use 2
	 */
	void 			*ipi_pspare[2];

	/*
	 * Global lock protecting global inpcb list, inpcb count, etc.
	 */
	struct rwlock		 ipi_list_lock;
};

#ifdef _KERNEL
/*
 * Connection groups hold sets of connections that have similar CPU/thread
 * affinity.  Each connection belongs to exactly one connection group.
 */
struct inpcbgroup {
	/*
	 * Per-connection group hash of inpcbs, hashed by local and foreign
	 * addresses and port numbers.
	 */
	struct inpcbhead	*ipg_hashbase;		/* (c) */
	u_long			 ipg_hashmask;		/* (c) */

	/*
	 * Notional affinity of this pcbgroup.
	 */
	u_int			 ipg_cpu;		/* (p) */

	/*
	 * Per-connection group lock, not to be confused with ipi_lock.
	 * Protects the hash table hung off the group, but also the global
	 * wildcard list in inpcbinfo.
	 */
	struct mtx		 ipg_lock;
} __aligned(CACHE_LINE_SIZE);

/*
 * Load balance groups used for the SO_REUSEPORT_LB socket option. Each group
 * (or unique address:port combination) can be re-used at most
 * INPCBLBGROUP_SIZMAX (256) times. The inpcbs are stored in il_inp[] which
 * is dynamically resized as processes bind/unbind to that specific group.
 */
struct inpcblbgroup {
	LIST_ENTRY(inpcblbgroup) il_list;
	uint16_t	il_lport;			/* (c) */
	u_char		il_vflag;			/* (c) */
	u_char		il_pad;
	uint32_t	il_pad2;
	union in_dependaddr il_dependladdr;		/* (c) */
#define	il_laddr	il_dependladdr.id46_addr.ia46_addr4
#define	il6_laddr	il_dependladdr.id6_addr
	uint32_t	il_inpsiz; /* max count in il_inp[] (h) */
	uint32_t	il_inpcnt; /* cur count in il_inp[] (h) */
	struct inpcb	*il_inp[];			/* (h) */
};
LIST_HEAD(inpcblbgrouphead, inpcblbgroup);
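
/*
 * For reference, the load-balance groups above are populated for sockets
 * that set the SO_REUSEPORT_LB option before binding; a minimal userland
 * sketch ("s" is a hypothetical socket descriptor and "sin" a hypothetical
 * struct sockaddr_in):
 *
 *	int one = 1;
 *
 *	setsockopt(s, SOL_SOCKET, SO_REUSEPORT_LB, &one, sizeof(one));
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *
 * Each such socket's inpcb is then tracked in il_inp[] for its laddr/lport.
 */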

#define INP_LOCK_INIT(inp, d, t) \
	rw_init_flags(&(inp)->inp_lock, (t), RW_RECURSE | RW_DUPOK)
#define INP_LOCK_DESTROY(inp)	rw_destroy(&(inp)->inp_lock)
#define INP_RLOCK(inp)		rw_rlock(&(inp)->inp_lock)
#define INP_WLOCK(inp)		rw_wlock(&(inp)->inp_lock)
#define INP_TRY_RLOCK(inp)	rw_try_rlock(&(inp)->inp_lock)
#define INP_TRY_WLOCK(inp)	rw_try_wlock(&(inp)->inp_lock)
#define INP_RUNLOCK(inp)	rw_runlock(&(inp)->inp_lock)
#define INP_WUNLOCK(inp)	rw_wunlock(&(inp)->inp_lock)
#define	INP_TRY_UPGRADE(inp)	rw_try_upgrade(&(inp)->inp_lock)
#define	INP_DOWNGRADE(inp)	rw_downgrade(&(inp)->inp_lock)
#define	INP_WLOCKED(inp)	rw_wowned(&(inp)->inp_lock)
#define	INP_LOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_LOCKED)
#define	INP_RLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_RLOCKED)
#define	INP_WLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_WLOCKED)
#define	INP_UNLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_UNLOCKED)

/*
 * These locking functions are for inpcb consumers outside of sys/netinet;
 * more specifically, they were added for the benefit of TOE drivers.  The
 * macros are reserved for use by the stack.
 */
void inp_wlock(struct inpcb *);
void inp_wunlock(struct inpcb *);
void inp_rlock(struct inpcb *);
void inp_runlock(struct inpcb *);

#ifdef INVARIANT_SUPPORT
void inp_lock_assert(struct inpcb *);
void inp_unlock_assert(struct inpcb *);
#else
#define	inp_lock_assert(inp)	do {} while (0)
#define	inp_unlock_assert(inp)	do {} while (0)
#endif

void	inp_apply_all(void (*func)(struct inpcb *, void *), void *arg);
int 	inp_ip_tos_get(const struct inpcb *inp);
void 	inp_ip_tos_set(struct inpcb *inp, int val);
struct socket *
	inp_inpcbtosocket(struct inpcb *inp);
struct tcpcb *
	inp_inpcbtotcpcb(struct inpcb *inp);
void 	inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
		uint32_t *faddr, uint16_t *fp);
int	inp_so_options(const struct inpcb *inp);

#endif /* _KERNEL */

#define INP_INFO_LOCK_INIT(ipi, d) \
	mtx_init(&(ipi)->ipi_lock, (d), NULL, MTX_DEF | MTX_RECURSE)
#define INP_INFO_LOCK_DESTROY(ipi)  mtx_destroy(&(ipi)->ipi_lock)
#define INP_INFO_RLOCK_ET(ipi, et)	NET_EPOCH_ENTER_ET((et))
#define INP_INFO_WLOCK(ipi) mtx_lock(&(ipi)->ipi_lock)
#define INP_INFO_TRY_WLOCK(ipi)	mtx_trylock(&(ipi)->ipi_lock)
#define INP_INFO_WLOCKED(ipi)	mtx_owned(&(ipi)->ipi_lock)
#define INP_INFO_RUNLOCK_ET(ipi, et)	NET_EPOCH_EXIT_ET((et))
#define INP_INFO_RUNLOCK_TP(ipi, tp)	NET_EPOCH_EXIT_ET(*(tp)->t_inpcb->inp_et)
#define INP_INFO_WUNLOCK(ipi)	mtx_unlock(&(ipi)->ipi_lock)
#define	INP_INFO_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ipi)->ipi_lock))
#define INP_INFO_RLOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt))
#define INP_INFO_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_lock, MA_OWNED)
#define INP_INFO_UNLOCK_ASSERT(ipi)	MPASS(!in_epoch(net_epoch_preempt) && !mtx_owned(&(ipi)->ipi_lock))

#define INP_LIST_LOCK_INIT(ipi, d) \
        rw_init_flags(&(ipi)->ipi_list_lock, (d), 0)
#define INP_LIST_LOCK_DESTROY(ipi)  rw_destroy(&(ipi)->ipi_list_lock)
#define INP_LIST_RLOCK(ipi)     rw_rlock(&(ipi)->ipi_list_lock)
#define INP_LIST_WLOCK(ipi)     rw_wlock(&(ipi)->ipi_list_lock)
#define INP_LIST_TRY_RLOCK(ipi) rw_try_rlock(&(ipi)->ipi_list_lock)
#define INP_LIST_TRY_WLOCK(ipi) rw_try_wlock(&(ipi)->ipi_list_lock)
#define INP_LIST_TRY_UPGRADE(ipi)       rw_try_upgrade(&(ipi)->ipi_list_lock)
#define INP_LIST_RUNLOCK(ipi)   rw_runlock(&(ipi)->ipi_list_lock)
#define INP_LIST_WUNLOCK(ipi)   rw_wunlock(&(ipi)->ipi_list_lock)
#define INP_LIST_LOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_LOCKED)
#define INP_LIST_RLOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_RLOCKED)
#define INP_LIST_WLOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_WLOCKED)
#define INP_LIST_UNLOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_UNLOCKED)

#define	INP_HASH_LOCK_INIT(ipi, d) mtx_init(&(ipi)->ipi_hash_lock, (d), NULL, MTX_DEF)
#define	INP_HASH_LOCK_DESTROY(ipi)	mtx_destroy(&(ipi)->ipi_hash_lock)
#define	INP_HASH_RLOCK(ipi)		struct epoch_tracker inp_hash_et; epoch_enter_preempt(net_epoch_preempt, &inp_hash_et)
#define	INP_HASH_RLOCK_ET(ipi, et)		epoch_enter_preempt(net_epoch_preempt, &(et))
#define	INP_HASH_WLOCK(ipi)		mtx_lock(&(ipi)->ipi_hash_lock)
#define	INP_HASH_RUNLOCK(ipi)		NET_EPOCH_EXIT_ET(inp_hash_et)
#define	INP_HASH_RUNLOCK_ET(ipi, et)		NET_EPOCH_EXIT_ET((et))
#define	INP_HASH_WUNLOCK(ipi)		mtx_unlock(&(ipi)->ipi_hash_lock)
#define	INP_HASH_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ipi)->ipi_hash_lock))
#define	INP_HASH_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_hash_lock, MA_OWNED)

#define	INP_GROUP_LOCK_INIT(ipg, d)	mtx_init(&(ipg)->ipg_lock, (d), NULL, \
					    MTX_DEF | MTX_DUPOK)
#define	INP_GROUP_LOCK_DESTROY(ipg)	mtx_destroy(&(ipg)->ipg_lock)

#define	INP_GROUP_LOCK(ipg)		mtx_lock(&(ipg)->ipg_lock)
#define	INP_GROUP_LOCK_ASSERT(ipg)	mtx_assert(&(ipg)->ipg_lock, MA_OWNED)
#define	INP_GROUP_UNLOCK(ipg)		mtx_unlock(&(ipg)->ipg_lock)

#define INP_PCBHASH(faddr, lport, fport, mask) \
	(((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))
#define INP_PCBPORTHASH(lport, mask) \
	(ntohs((lport)) & (mask))
#define	INP_PCBLBGROUP_PORTHASH(lport, mask) \
	(ntohs((lport)) & (mask))
#define	INP_PCBLBGROUP_PKTHASH(faddr, lport, fport) \
	((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport)))
#define	INP6_PCBHASHKEY(faddr)	((faddr)->s6_addr32[3])
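
/*
 * A minimal sketch of how the hash macros above are typically combined with
 * the masks in struct inpcbinfo to pick a chain ("pcbinfo", "inp", "faddr",
 * "lport" and "fport" are hypothetical locals; addresses and ports are in
 * network byte order):
 *
 *	struct inpcbhead *head;
 *
 *	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
 *	    pcbinfo->ipi_hashmask)];
 *	CK_LIST_FOREACH(inp, head, inp_hash) {
 *		... compare inp_faddr/inp_fport/inp_laddr/inp_lport ...
 *	}
 */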

/*
 * Flags for inp_vflag -- historically version flags only.
 */
#define	INP_IPV4	0x1
#define	INP_IPV6	0x2
#define	INP_IPV6PROTO	0x4		/* opened under IPv6 protocol */

/*
 * Flags for inp_flags.
 */
#define	INP_RECVOPTS		0x00000001 /* receive incoming IP options */
#define	INP_RECVRETOPTS		0x00000002 /* receive IP options for reply */
#define	INP_RECVDSTADDR		0x00000004 /* receive IP dst address */
#define	INP_HDRINCL		0x00000008 /* user supplies entire IP header */
#define	INP_HIGHPORT		0x00000010 /* user wants "high" port binding */
#define	INP_LOWPORT		0x00000020 /* user wants "low" port binding */
#define	INP_ANONPORT		0x00000040 /* port chosen for user */
#define	INP_RECVIF		0x00000080 /* receive incoming interface */
#define	INP_MTUDISC		0x00000100 /* user can do MTU discovery */
				   	   /* 0x000200 unused: was INP_FAITH */
#define	INP_RECVTTL		0x00000400 /* receive incoming IP TTL */
#define	INP_DONTFRAG		0x00000800 /* don't fragment packet */
#define	INP_BINDANY		0x00001000 /* allow bind to any address */
#define	INP_INHASHLIST		0x00002000 /* in_pcbinshash() has been called */
#define	INP_RECVTOS		0x00004000 /* receive incoming IP TOS */
#define	IN6P_IPV6_V6ONLY	0x00008000 /* restrict AF_INET6 socket for v6 */
#define	IN6P_PKTINFO		0x00010000 /* receive IP6 dst and I/F */
#define	IN6P_HOPLIMIT		0x00020000 /* receive hoplimit */
#define	IN6P_HOPOPTS		0x00040000 /* receive hop-by-hop options */
#define	IN6P_DSTOPTS		0x00080000 /* receive dst options after rthdr */
#define	IN6P_RTHDR		0x00100000 /* receive routing header */
#define	IN6P_RTHDRDSTOPTS	0x00200000 /* receive dstoptions before rthdr */
#define	IN6P_TCLASS		0x00400000 /* receive traffic class value */
#define	IN6P_AUTOFLOWLABEL	0x00800000 /* attach flowlabel automatically */
#define	INP_TIMEWAIT		0x01000000 /* in TIMEWAIT, ppcb is tcptw */
#define	INP_ONESBCAST		0x02000000 /* send all-ones broadcast */
#define	INP_DROPPED		0x04000000 /* protocol drop flag */
#define	INP_SOCKREF		0x08000000 /* strong socket reference */
#define	INP_RESERVED_0          0x10000000 /* reserved field */
#define	INP_RESERVED_1          0x20000000 /* reserved field */
#define	IN6P_RFC2292		0x40000000 /* used RFC2292 API on the socket */
#define	IN6P_MTU		0x80000000 /* receive path MTU */

#define	INP_CONTROLOPTS		(INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\
				 INP_RECVIF|INP_RECVTTL|INP_RECVTOS|\
				 IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\
				 IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\
				 IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\
				 IN6P_MTU)

/*
 * Flags for inp_flags2.
 */
#define	INP_LLE_VALID		0x00000001 /* cached lle is valid */
#define	INP_RT_VALID		0x00000002 /* cached rtentry is valid */
#define	INP_PCBGROUPWILD	0x00000004 /* in pcbgroup wildcard list */
#define	INP_REUSEPORT		0x00000008 /* SO_REUSEPORT option is set */
#define	INP_FREED		0x00000010 /* inp itself is not valid */
#define	INP_REUSEADDR		0x00000020 /* SO_REUSEADDR option is set */
#define	INP_BINDMULTI		0x00000040 /* IP_BINDMULTI option is set */
#define	INP_RSS_BUCKET_SET	0x00000080 /* IP_RSS_LISTEN_BUCKET is set */
#define	INP_RECVFLOWID		0x00000100 /* populate recv datagram with flow info */
#define	INP_RECVRSSBUCKETID	0x00000200 /* populate recv datagram with bucket id */
#define	INP_RATE_LIMIT_CHANGED	0x00000400 /* rate limit needs attention */
#define	INP_ORIGDSTADDR		0x00000800 /* receive IP dst address/port */
#define INP_CANNOT_DO_ECN	0x00001000 /* The stack does not do ECN */
#define	INP_REUSEPORT_LB	0x00002000 /* SO_REUSEPORT_LB option is set */

/*
 * Flags passed to in_pcblookup*() functions.
 */
#define	INPLOOKUP_WILDCARD	0x00000001	/* Allow wildcard sockets. */
#define	INPLOOKUP_RLOCKPCB	0x00000002	/* Return inpcb read-locked. */
#define	INPLOOKUP_WLOCKPCB	0x00000004	/* Return inpcb write-locked. */

#define	INPLOOKUP_MASK	(INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB | \
			    INPLOOKUP_WLOCKPCB)
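
/*
 * A minimal sketch of a locked lookup using the flags above and the
 * in_pcblookup() prototype declared later in this file ("pcbinfo", "faddr",
 * "fport", "laddr", "lport" and "ifp" are hypothetical arguments, with
 * addresses and ports in network byte order):
 *
 *	struct inpcb *inp;
 *
 *	inp = in_pcblookup(pcbinfo, faddr, fport, laddr, lport,
 *	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, ifp);
 *	if (inp != NULL) {
 *		... inspect the connection ...
 *		INP_RUNLOCK(inp);
 *	}
 */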

#define	sotoinpcb(so)	((struct inpcb *)(so)->so_pcb)
#define	sotoin6pcb(so)	sotoinpcb(so) /* for KAME src sync over BSD*'s */

#define	INP_SOCKAF(so) so->so_proto->pr_domain->dom_family

#define	INP_CHECK_SOCKAF(so, af)	(INP_SOCKAF(so) == af)

/*
 * Constants for pcbinfo.ipi_hashfields.
 */
#define	IPI_HASHFIELDS_NONE	0
#define	IPI_HASHFIELDS_2TUPLE	1
#define	IPI_HASHFIELDS_4TUPLE	2

#ifdef _KERNEL
VNET_DECLARE(int, ipport_reservedhigh);
VNET_DECLARE(int, ipport_reservedlow);
VNET_DECLARE(int, ipport_lowfirstauto);
VNET_DECLARE(int, ipport_lowlastauto);
VNET_DECLARE(int, ipport_firstauto);
VNET_DECLARE(int, ipport_lastauto);
VNET_DECLARE(int, ipport_hifirstauto);
VNET_DECLARE(int, ipport_hilastauto);
VNET_DECLARE(int, ipport_randomized);
VNET_DECLARE(int, ipport_randomcps);
VNET_DECLARE(int, ipport_randomtime);
VNET_DECLARE(int, ipport_stoprandom);
VNET_DECLARE(int, ipport_tcpallocs);

#define	V_ipport_reservedhigh	VNET(ipport_reservedhigh)
#define	V_ipport_reservedlow	VNET(ipport_reservedlow)
#define	V_ipport_lowfirstauto	VNET(ipport_lowfirstauto)
#define	V_ipport_lowlastauto	VNET(ipport_lowlastauto)
#define	V_ipport_firstauto	VNET(ipport_firstauto)
#define	V_ipport_lastauto	VNET(ipport_lastauto)
#define	V_ipport_hifirstauto	VNET(ipport_hifirstauto)
#define	V_ipport_hilastauto	VNET(ipport_hilastauto)
#define	V_ipport_randomized	VNET(ipport_randomized)
#define	V_ipport_randomcps	VNET(ipport_randomcps)
#define	V_ipport_randomtime	VNET(ipport_randomtime)
#define	V_ipport_stoprandom	VNET(ipport_stoprandom)
#define	V_ipport_tcpallocs	VNET(ipport_tcpallocs)

void	in_pcbinfo_destroy(struct inpcbinfo *);
void	in_pcbinfo_init(struct inpcbinfo *, const char *, struct inpcbhead *,
	    int, int, char *, uma_init, u_int);

int	in_pcbbind_check_bindmulti(const struct inpcb *ni,
	    const struct inpcb *oi);

struct inpcbgroup *
	in_pcbgroup_byhash(struct inpcbinfo *, u_int, uint32_t);
struct inpcbgroup *
	in_pcbgroup_byinpcb(struct inpcb *);
struct inpcbgroup *
	in_pcbgroup_bytuple(struct inpcbinfo *, struct in_addr, u_short,
	    struct in_addr, u_short);
void	in_pcbgroup_destroy(struct inpcbinfo *);
int	in_pcbgroup_enabled(struct inpcbinfo *);
void	in_pcbgroup_init(struct inpcbinfo *, u_int, int);
void	in_pcbgroup_remove(struct inpcb *);
void	in_pcbgroup_update(struct inpcb *);
void	in_pcbgroup_update_mbuf(struct inpcb *, struct mbuf *);

void	in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *);
int	in_pcballoc(struct socket *, struct inpcbinfo *);
int	in_pcbbind(struct inpcb *, struct sockaddr *, struct ucred *);
int	in_pcb_lport(struct inpcb *, struct in_addr *, u_short *,
	    struct ucred *, int);
int	in_pcbbind_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
	    u_short *, struct ucred *);
int	in_pcbconnect(struct inpcb *, struct sockaddr *, struct ucred *);
int	in_pcbconnect_mbuf(struct inpcb *, struct sockaddr *, struct ucred *,
	    struct mbuf *);
int	in_pcbconnect_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
	    u_short *, in_addr_t *, u_short *, struct inpcb **,
	    struct ucred *);
void	in_pcbdetach(struct inpcb *);
void	in_pcbdisconnect(struct inpcb *);
void	in_pcbdrop(struct inpcb *);
void	in_pcbfree(struct inpcb *);
int	in_pcbinshash(struct inpcb *);
int	in_pcbinshash_nopcbgroup(struct inpcb *);
int	in_pcbladdr(struct inpcb *, struct in_addr *, struct in_addr *,
	    struct ucred *);
struct inpcb *
	in_pcblookup_local(struct inpcbinfo *,
	    struct in_addr, u_short, int, struct ucred *);
struct inpcb *
	in_pcblookup(struct inpcbinfo *, struct in_addr, u_int,
	    struct in_addr, u_int, int, struct ifnet *);
struct inpcb *
	in_pcblookup_mbuf(struct inpcbinfo *, struct in_addr, u_int,
	    struct in_addr, u_int, int, struct ifnet *, struct mbuf *);
void	in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr,
	    int, struct inpcb *(*)(struct inpcb *, int));
void	in_pcbref(struct inpcb *);
void	in_pcbrehash(struct inpcb *);
void	in_pcbrehash_mbuf(struct inpcb *, struct mbuf *);
int	in_pcbrele(struct inpcb *);
int	in_pcbrele_rlocked(struct inpcb *);
int	in_pcbrele_wlocked(struct inpcb *);
void	in_pcblist_rele_rlocked(epoch_context_t ctx);
void	in_losing(struct inpcb *);
void	in_pcbsetsolabel(struct socket *so);
int	in_getpeeraddr(struct socket *so, struct sockaddr **nam);
int	in_getsockaddr(struct socket *so, struct sockaddr **nam);
struct sockaddr *
	in_sockaddr(in_port_t port, struct in_addr *addr);
void	in_pcbsosetlabel(struct socket *so);
#ifdef RATELIMIT
int	in_pcbattach_txrtlmt(struct inpcb *, struct ifnet *, uint32_t, uint32_t, uint32_t);
void	in_pcbdetach_txrtlmt(struct inpcb *);
int	in_pcbmodify_txrtlmt(struct inpcb *, uint32_t);
int	in_pcbquery_txrtlmt(struct inpcb *, uint32_t *);
int	in_pcbquery_txrlevel(struct inpcb *, uint32_t *);
void	in_pcboutput_txrtlmt(struct inpcb *, struct ifnet *, struct mbuf *);
void	in_pcboutput_eagain(struct inpcb *);
#endif
#endif /* _KERNEL */

#endif /* !_NETINET_IN_PCB_H_ */