1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1990, 1993
5  *	The Regents of the University of California.
6  * Copyright (c) 2010-2011 Juniper Networks, Inc.
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Robert N. M. Watson under
10  * contract to Juniper Networks, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)in_pcb.h	8.1 (Berkeley) 6/10/93
37  * $FreeBSD$
38  */
39 
40 #ifndef _NETINET_IN_PCB_H_
41 #define _NETINET_IN_PCB_H_
42 
43 #include <sys/queue.h>
44 #include <sys/epoch.h>
45 #include <sys/_lock.h>
46 #include <sys/_mutex.h>
47 #include <sys/_rwlock.h>
48 #include <net/route.h>
49 
50 #ifdef _KERNEL
51 #include <sys/lock.h>
52 #include <sys/rwlock.h>
53 #include <net/vnet.h>
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <vm/uma.h>
57 #endif
58 #include <sys/ck.h>
59 
60 #define	in6pcb		inpcb	/* for KAME src sync over BSD*'s */
61 #define	in6p_sp		inp_sp	/* for KAME src sync over BSD*'s */
62 
63 /*
64  * struct inpcb is the common protocol control block structure used in most
65  * IP transport protocols.
66  *
67  * Pointers to local and foreign host table entries, local and foreign socket
68  * numbers, and pointers up (to a socket structure) and down (to a
69  * protocol-specific control block) are stored here.
70  */
71 CK_LIST_HEAD(inpcbhead, inpcb);
72 CK_LIST_HEAD(inpcbporthead, inpcbport);
73 typedef	uint64_t	inp_gen_t;
74 
75 /*
76  * A PCB bound to the AF_INET6 null (unspecified) laddr can also receive
77  * AF_INET input packets, so the AF_INET6 null laddr doubles as the AF_INET
78  * null laddr by means of the following structure.
79  */
80 struct in_addr_4in6 {
81 	u_int32_t	ia46_pad32[3];
82 	struct	in_addr	ia46_addr4;
83 };
84 
85 union in_dependaddr {
86 	struct in_addr_4in6 id46_addr;
87 	struct in6_addr	id6_addr;
88 };
89 
90 /*
91  * NOTE: IPv6 addresses should be 64-bit aligned, per RFC 2553.  in_conninfo
92  * has some extra padding to accomplish this.
93  * NOTE 2: tcp_syncache.c uses the first 5 32-bit words, which identify fport,
94  * lport and faddr, to generate its hash, so these fields must not be moved.
95  */
96 struct in_endpoints {
97 	u_int16_t	ie_fport;		/* foreign port */
98 	u_int16_t	ie_lport;		/* local port */
99 	/* protocol dependent part, local and foreign addr */
100 	union in_dependaddr ie_dependfaddr;	/* foreign host table entry */
101 	union in_dependaddr ie_dependladdr;	/* local host table entry */
102 #define	ie_faddr	ie_dependfaddr.id46_addr.ia46_addr4
103 #define	ie_laddr	ie_dependladdr.id46_addr.ia46_addr4
104 #define	ie6_faddr	ie_dependfaddr.id6_addr
105 #define	ie6_laddr	ie_dependladdr.id6_addr
106 	u_int32_t	ie6_zoneid;		/* scope zone id */
107 };
108 
109 /*
110  * XXX The defines for inc_* are hacks and should be changed to direct
111  * references.
112  */
113 struct in_conninfo {
114 	u_int8_t	inc_flags;
115 	u_int8_t	inc_len;
116 	u_int16_t	inc_fibnum;	/* XXX was pad, 16 bits is plenty */
117 	/* protocol dependent part */
118 	struct	in_endpoints inc_ie;
119 };
120 
121 /*
122  * Flags for inc_flags.
123  */
124 #define	INC_ISIPV6	0x01
125 
126 #define	inc_fport	inc_ie.ie_fport
127 #define	inc_lport	inc_ie.ie_lport
128 #define	inc_faddr	inc_ie.ie_faddr
129 #define	inc_laddr	inc_ie.ie_laddr
130 #define	inc6_faddr	inc_ie.ie6_faddr
131 #define	inc6_laddr	inc_ie.ie6_laddr
132 #define	inc6_zoneid	inc_ie.ie6_zoneid
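
/*
 * Illustrative sketch, not part of the original header: how the inc_*
 * accessor macros overlay an IPv4 connection onto the 4-in-6 union above.
 * The helper name and the convention that ports are already in network byte
 * order (as stored in the pcb) are assumptions for the example; the block is
 * compiled out.
 */
#if 0	/* example only */
static void
example_fill_conninfo_v4(struct in_conninfo *inc, struct in_addr faddr,
    uint16_t fport, struct in_addr laddr, uint16_t lport)
{
        bzero(inc, sizeof(*inc));       /* inc_flags == 0 selects the IPv4 view */
        inc->inc_faddr = faddr;         /* expands to ...id46_addr.ia46_addr4 */
        inc->inc_laddr = laddr;
        inc->inc_fport = fport;         /* network byte order */
        inc->inc_lport = lport;
}
#endif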
133 
134 #if defined(_KERNEL) || defined(_WANT_INPCB)
135 /*
136  * struct inpcb captures the network layer state for TCP, UDP, and raw IPv4 and
137  * IPv6 sockets.  In the case of TCP and UDP, further per-connection state is
138  * hung off of inp_ppcb most of the time.  Almost all fields of struct inpcb
139  * are static after creation or protected by a per-inpcb rwlock, inp_lock.  A
140  * few fields are protected by multiple locks as indicated in the locking notes
141  * below.  For these fields, all of the listed locks must be write-locked for
142  * any modifications.  However, these fields can be safely read while any one of
143  * the listed locks is read-locked.  This model permits greater concurrency
144  * for read operations.  For example, connections can be looked up while only
145  * holding a read lock on the global pcblist lock.  This is important for
146  * performance when attempting to find the connection for a packet given its IP
147  * and port tuple.
148  *
149  * One noteworthy exception is that the global pcbinfo lock follows a different
150  * set of rules in relation to the inp_list field.  Rather than being
151  * write-locked for modifications and read-locked for list iterations, it must
152  * be read-locked during modifications and write-locked during list iterations.
153  * This ensures that the relatively rare global list iterations safely walk a
154  * stable snapshot of connections while allowing more common list modifications
155  * to safely grab the pcblist lock just while adding or removing a connection
156  * from the global list.
157  *
158  * Key:
159  * (b) - Protected by the hpts lock.
160  * (c) - Constant after initialization
161  * (e) - Protected by the net_epoch_preempt epoch
162  * (g) - Protected by the pcbgroup lock
163  * (i) - Protected by the inpcb lock
164  * (p) - Protected by the pcbinfo lock for the inpcb
165  * (l) - Protected by the pcblist lock for the inpcb
166  * (h) - Protected by the pcbhash lock for the inpcb
167  * (s) - Protected by another subsystem's locks
168  * (x) - Undefined locking
169  *
170  * Notes on the tcp_hpts:
171  *
172  * First, the hpts lock order is:
173  * 1) INP_WLOCK()
174  * 2) HPTS_LOCK() i.e. hpts->pmtx
175  *
176  * To insert a TCB on the hpts you *must* be holding the INP_WLOCK().
177  * You may check the inp->inp_in_hpts flag without the hpts lock.
178  * The hpts is the only one that will clear this flag holding
179  * only the hpts lock. This means that in your tcp_output()
180  * routine when you test for the inp_in_hpts flag to be 1
181  * it may be transitioning to 0 (by the hpts).
182  * That's ok: it just means an extra call to tcp_output, which will
183  * most likely find that the call you executed (when the mismatch
184  * occurred) already put the TCB back on the hpts and will simply
185  * return.  If your call did not add the inp back to the hpts, then
186  * you will either over-send or the cwnd will block you from
187  * sending more.
188  *
189  * Note that you should also be holding the INP_WLOCK() when you
190  * remove the inp from the hpts.  Usually you are doing this either
191  * from a timer, where you need and already have the INP_WLOCK(), or
192  * while destroying your TCB, where again you should already have
193  * the INP_WLOCK().
194  *
195  * The inp_hpts_cpu, inp_hpts_cpu_set, inp_input_cpu and
196  * inp_input_cpu_set fields are controlled entirely by the hpts;
197  * never set them yourself.  The inp_hpts_cpu_set and
198  * inp_input_cpu_set fields indicate whether the hpts has set up
199  * the respective cpu field.  If the _set field is 0, it is
200  * advised to enqueue the packet with the appropriate
201  * hpts_immediate() call.  If the _set field is 1, you may compare
202  * the inp_*_cpu field to curcpu and may want to insert onto the
203  * hpts again if the two are not equal (i.e. you are not on the
204  * expected CPU).
205  *
206  * A note on inp_hpts_calls and inp_input_calls: these flags are
207  * set when the hpts calls the output or do_segment routines,
208  * respectively.  If the routine being called wants to use this,
209  * it must clear the flag before returning; the hpts will not
210  * clear the flag.  The flags can therefore be used to tell
211  * whether the hpts is the function calling the respective
212  * routine.
214  *
215  * A few other notes:
216  *
217  * When a read lock is held, stability of the field is guaranteed; to write
218  * to a field, a write lock must generally be held.
219  *
220  * netinet/netinet6-layer code should not assume that the inp_socket pointer
221  * is safe to dereference without inp_lock being held, even for protocols
222  * other than TCP (where the inpcb persists during TIMEWAIT even after the
223  * socket has been freed), or there may be close(2)-related races.
224  *
225  * The inp_vflag field is overloaded, and would otherwise ideally be (c).
226  *
227  * TODO: Currently only the TCP stack takes the global pcbinfo lock
228  * read-locked during modification; this model could be applied to other
229  * protocols (especially SCTP) as well.
230  */
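
/*
 * Illustrative sketch, not part of the original header, of the hpts lock
 * ordering described above: the inpcb write lock is taken before the hpts
 * lock.  "struct tcp_hpts_entry" and the HPTS_LOCK()/HPTS_UNLOCK() names are
 * assumed to come from the hpts code (sys/netinet/tcp_hpts.h); the block is
 * compiled out.
 */
#if 0	/* example only */
static void
example_hpts_insert(struct inpcb *inp, struct tcp_hpts_entry *hpts)
{
        INP_WLOCK(inp);                 /* 1) inpcb write lock first */
        HPTS_LOCK(hpts);                /* 2) then the hpts lock (hpts->pmtx) */
        /* ... place the TCB on the pacing wheel ... */
        HPTS_UNLOCK(hpts);
        INP_WUNLOCK(inp);
}
#endif
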
231 struct icmp6_filter;
232 struct inpcbpolicy;
233 struct m_snd_tag;
234 struct inpcb {
235 	/* Cache line #1 (amd64) */
236 	CK_LIST_ENTRY(inpcb) inp_hash;	/* [w](h/i) [r](e/i)  hash list */
237 	CK_LIST_ENTRY(inpcb) inp_pcbgrouphash;	/* (g/i) hash list */
238 	struct rwlock	inp_lock;
239 	/* Cache line #2 (amd64) */
240 #define	inp_start_zero	inp_hpts
241 #define	inp_zero_size	(sizeof(struct inpcb) - \
242 			    offsetof(struct inpcb, inp_start_zero))
243 	TAILQ_ENTRY(inpcb) inp_hpts;	/* pacing out queue next lock(b) */
244 
245 	uint32_t inp_hpts_request;	/* Current hpts request, zero if
246 					 * fits in the pacing window (i&b). */
247 	/*
248 	 * Note the next fields are protected by a
249 	 * different lock (hpts-lock). This means that
250 	 * they must correspond in size to the smallest
251 	 * protectable bit field (uint8_t on x86, and
252 	 * other platforms potentially uint32_t?). Also
253 	 * since CPU switches can occur at different times the two
254 	 * fields can *not* be collapsed into a single bit field.
255 	 */
256 #if defined(__amd64__) || defined(__i386__)
257 	volatile uint8_t inp_in_hpts; /* on output hpts (lock b) */
258 	volatile uint8_t inp_in_input; /* on input hpts (lock b) */
259 #else
260 	volatile uint32_t inp_in_hpts; /* on output hpts (lock b) */
261 	volatile uint32_t inp_in_input; /* on input hpts (lock b) */
262 #endif
263 	volatile uint16_t  inp_hpts_cpu; /* Lock (i) */
264 	u_int	inp_refcount;		/* (i) refcount */
265 	int	inp_flags;		/* (i) generic IP/datagram flags */
266 	int	inp_flags2;		/* (i) generic IP/datagram flags #2*/
267 	volatile uint16_t  inp_input_cpu; /* Lock (i) */
268 	volatile uint8_t inp_hpts_cpu_set :1,  /* on output hpts (i) */
269 			 inp_input_cpu_set : 1,	/* on input hpts (i) */
270 			 inp_hpts_calls :1,	/* (i) from output hpts */
271 			 inp_input_calls :1,	/* (i) from input hpts */
272 			 inp_spare_bits2 : 4;
273 	uint8_t inp_spare_byte;		/* Compiler hole */
274 	void	*inp_ppcb;		/* (i) pointer to per-protocol pcb */
275 	struct	socket *inp_socket;	/* (i) back pointer to socket */
276 	uint32_t 	 inp_hptsslot;	/* Hpts wheel slot this tcb is on, lock (i&b) */
277 	uint32_t         inp_hpts_drop_reas;	/* reason we are dropping the PCB (lock i&b) */
278 	TAILQ_ENTRY(inpcb) inp_input;	/* pacing in  queue next lock(b) */
279 	struct	inpcbinfo *inp_pcbinfo;	/* (c) PCB list info */
280 	struct	inpcbgroup *inp_pcbgroup; /* (g/i) PCB group list */
281 	CK_LIST_ENTRY(inpcb) inp_pcbgroup_wild; /* (g/i/h) group wildcard entry */
282 	struct	ucred	*inp_cred;	/* (c) cache of socket cred */
283 	u_int32_t inp_flow;		/* (i) IPv6 flow information */
284 	u_char	inp_vflag;		/* (i) IP version flag (v4/v6) */
285 	u_char	inp_ip_ttl;		/* (i) time to live proto */
286 	u_char	inp_ip_p;		/* (c) protocol proto */
287 	u_char	inp_ip_minttl;		/* (i) minimum TTL or drop */
288 	uint32_t inp_flowid;		/* (x) flow id / queue id */
289 	struct m_snd_tag *inp_snd_tag;	/* (i) send tag for outgoing mbufs */
290 	uint32_t inp_flowtype;		/* (x) M_HASHTYPE value */
291 	uint32_t inp_rss_listen_bucket;	/* (x) overridden RSS listen bucket */
292 
293 	/* Local and foreign ports, local and foreign addr. */
294 	struct	in_conninfo inp_inc;	/* (i) local and foreign ports and addresses */
295 
296 	/* MAC and IPSEC policy information. */
297 	struct	label *inp_label;	/* (i) MAC label */
298 	struct	inpcbpolicy *inp_sp;    /* (s) for IPSEC */
299 
300 	/* Protocol-dependent part; options. */
301 	struct {
302 		u_char	inp_ip_tos;		/* (i) type of service proto */
303 		struct mbuf		*inp_options;	/* (i) IP options */
304 		struct ip_moptions	*inp_moptions;	/* (i) mcast options */
305 	};
306 	struct {
307 		/* (i) IP options */
308 		struct mbuf		*in6p_options;
309 		/* (i) IP6 options for outgoing packets */
310 		struct ip6_pktopts	*in6p_outputopts;
311 		/* (i) IP multicast options */
312 		struct ip6_moptions	*in6p_moptions;
313 		/* (i) ICMPv6 code type filter */
314 		struct icmp6_filter	*in6p_icmp6filt;
315 		/* (i) IPV6_CHECKSUM setsockopt */
316 		int	in6p_cksum;
317 		short	in6p_hops;
318 	};
319 	CK_LIST_ENTRY(inpcb) inp_portlist;	/* (i/h) */
320 	struct	inpcbport *inp_phd;	/* (i/h) head of this list */
321 	inp_gen_t	inp_gencnt;	/* (c) generation count */
322 	struct llentry	*inp_lle;	/* cached L2 information */
323 	rt_gen_t	inp_rt_cookie;	/* generation for route entry */
324 	union {				/* cached L3 information */
325 		struct route inp_route;
326 		struct route_in6 inp_route6;
327 	};
328 	CK_LIST_ENTRY(inpcb) inp_list;	/* (p/l) list for all PCBs for proto */
329 	                                /* (e[r]) for list iteration */
330 	                                /* (p[w]/l) for addition/removal */
331 	struct epoch_context inp_epoch_ctx;
332 };
333 #endif	/* _KERNEL || _WANT_INPCB */
334 
335 #define	inp_fport	inp_inc.inc_fport
336 #define	inp_lport	inp_inc.inc_lport
337 #define	inp_faddr	inp_inc.inc_faddr
338 #define	inp_laddr	inp_inc.inc_laddr
339 
340 #define	in6p_faddr	inp_inc.inc6_faddr
341 #define	in6p_laddr	inp_inc.inc6_laddr
342 #define	in6p_zoneid	inp_inc.inc6_zoneid
343 #define	in6p_flowinfo	inp_flow
344 
345 #define	inp_vnet	inp_pcbinfo->ipi_vnet
346 
347 /*
348  * The range of the generation count, as used in this implementation, is
349  * about 1.8e19 (2^64).  We would have to create almost 600 billion
350  * connections per second for this number to roll over in a year.  This seems
351  * sufficiently unlikely that we simply don't concern ourselves with that possibility.
352  */
353 
354 /*
355  * Interface exported to userland by various protocols which use inpcbs.  Hack
356  * alert -- only define if struct xsocket is in scope.
357  * Fields prefixed with "xi_" are unique to this structure, and the rest
358  * match fields in the struct inpcb, to ease coding and porting.
359  *
360  * Legend:
361  * (s) - used by userland utilities in src
362  * (p) - used by utilities in ports
363  * (3) - is known to be used by third party software not in ports
364  * (n) - no known usage
365  */
366 #ifdef _SYS_SOCKETVAR_H_
367 struct xinpcb {
368 	ksize_t		xi_len;			/* length of this structure */
369 	struct xsocket	xi_socket;		/* (s,p) */
370 	struct in_conninfo inp_inc;		/* (s,p) */
371 	uint64_t	inp_gencnt;		/* (s,p) */
372 	kvaddr_t	inp_ppcb;		/* (s) netstat(1) */
373 	int64_t		inp_spare64[4];
374 	uint32_t	inp_flow;		/* (s) */
375 	uint32_t	inp_flowid;		/* (s) */
376 	uint32_t	inp_flowtype;		/* (s) */
377 	int32_t		inp_flags;		/* (s,p) */
378 	int32_t		inp_flags2;		/* (s) */
379 	int32_t		inp_rss_listen_bucket;	/* (n) */
380 	int32_t		in6p_cksum;		/* (n) */
381 	int32_t		inp_spare32[4];
382 	uint16_t	in6p_hops;		/* (n) */
383 	uint8_t		inp_ip_tos;		/* (n) */
384 	int8_t		pad8;
385 	uint8_t		inp_vflag;		/* (s,p) */
386 	uint8_t		inp_ip_ttl;		/* (n) */
387 	uint8_t		inp_ip_p;		/* (n) */
388 	uint8_t		inp_ip_minttl;		/* (n) */
389 	int8_t		inp_spare8[4];
390 } __aligned(8);
391 
392 struct xinpgen {
393 	ksize_t	xig_len;	/* length of this structure */
394 	u_int		xig_count;	/* number of PCBs at this time */
395 	uint32_t	_xig_spare32;
396 	inp_gen_t	xig_gen;	/* generation count at this time */
397 	so_gen_t	xig_sogen;	/* socket generation count this time */
398 	uint64_t	_xig_spare64[4];
399 } __aligned(8);
400 #ifdef	_KERNEL
401 void	in_pcbtoxinpcb(const struct inpcb *, struct xinpcb *);
402 #endif
403 #endif /* _SYS_SOCKETVAR_H_ */
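
/*
 * Illustrative userland sketch, not part of the original header: walking the
 * xinpgen/xinpcb records exported via sysctl, in the style of netstat(1).
 * It assumes the "net.inet.udp.pcblist" sysctl, whose buffer is a leading
 * struct xinpgen, a run of struct xinpcb records (each prefixed by its own
 * xi_len), and a trailing struct xinpgen; the block is compiled out.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>

static void
example_dump_udp_pcbs(void)
{
        size_t len = 0;
        char *buf, *p;

        if (sysctlbyname("net.inet.udp.pcblist", NULL, &len, NULL, 0) != 0)
                return;
        if ((buf = malloc(len)) == NULL)
                return;
        if (sysctlbyname("net.inet.udp.pcblist", buf, &len, NULL, 0) != 0) {
                free(buf);
                return;
        }
        /* Skip the leading xinpgen, then walk xi_len-sized records. */
        p = buf + ((struct xinpgen *)buf)->xig_len;
        while (p < buf + len) {
                struct xinpcb *xip = (struct xinpcb *)(void *)p;

                if (xip->xi_len <= sizeof(struct xinpgen))
                        break;          /* trailing xinpgen marks the end */
                printf("lport %u fport %u\n",
                    ntohs(xip->inp_inc.inc_lport),
                    ntohs(xip->inp_inc.inc_fport));
                p += xip->xi_len;
        }
        free(buf);
}
#endif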
404 
405 struct inpcbport {
406 	struct epoch_context phd_epoch_ctx;
407 	CK_LIST_ENTRY(inpcbport) phd_hash;
408 	struct inpcbhead phd_pcblist;
409 	u_short phd_port;
410 };
411 
412 struct in_pcblist {
413 	int il_count;
414 	struct epoch_context il_epoch_ctx;
415 	struct inpcbinfo *il_pcbinfo;
416 	struct inpcb *il_inp_list[0];
417 };
418 
419 /*-
420  * Global data structure for each high-level protocol (UDP, TCP, ...) in both
421  * IPv4 and IPv6.  Holds inpcb lists and information for managing them.
422  *
423  * Each pcbinfo is protected by three locks: ipi_lock, ipi_hash_lock and
424  * ipi_list_lock:
425  *  - ipi_lock keeps the global pcb list stable during loop iteration,
426  *  - ipi_hash_lock covers the hashed lookup tables,
427  *  - ipi_list_lock covers mutable global fields (such as the global
428  *    pcb list).
429  *
430  * The lock order is:
431  *
432  *    ipi_lock (before)
433  *        inpcb locks (before)
434  *            ipi_list locks (before)
435  *                {ipi_hash_lock, pcbgroup locks}
436  *
437  * Locking key:
438  *
439  * (c) Constant or nearly constant after initialisation
440  * (e) - Protected by the net_epoch_preempt epoch
441  * (g) Locked by ipi_lock
442  * (l) Locked by ipi_list_lock
443  * (h) Read using either net_epoch_preempt or inpcb lock; write requires both ipi_hash_lock and inpcb lock
444  * (p) Protected by one or more pcbgroup locks
445  * (x) Synchronisation properties poorly defined
446  */
447 struct inpcbinfo {
448 	/*
449 	 * Global lock protecting inpcb list modification
450 	 */
451 	struct mtx		 ipi_lock;
452 
453 	/*
454 	 * Global list of inpcbs on the protocol.
455 	 */
456 	struct inpcbhead	*ipi_listhead;		/* [r](e) [w](g/l) */
457 	u_int			 ipi_count;		/* (l) */
458 
459 	/*
460 	 * Generation count -- incremented each time a connection is allocated
461 	 * or freed.
462 	 */
463 	u_quad_t		 ipi_gencnt;		/* (l) */
464 
465 	/*
466 	 * Fields associated with port lookup and allocation.
467 	 */
468 	u_short			 ipi_lastport;		/* (x) */
469 	u_short			 ipi_lastlow;		/* (x) */
470 	u_short			 ipi_lasthi;		/* (x) */
471 
472 	/*
473 	 * UMA zone from which inpcbs are allocated for this protocol.
474 	 */
475 	struct	uma_zone	*ipi_zone;		/* (c) */
476 
477 	/*
478 	 * Connection groups associated with this protocol.  These fields are
479 	 * constant, but pcbgroup structures themselves are protected by
480 	 * per-pcbgroup locks.
481 	 */
482 	struct inpcbgroup	*ipi_pcbgroups;		/* (c) */
483 	u_int			 ipi_npcbgroups;	/* (c) */
484 	u_int			 ipi_hashfields;	/* (c) */
485 
486 	/*
487 	 * Global lock protecting modification of non-pcbgroup hash lookup tables.
488 	 */
489 	struct mtx		 ipi_hash_lock;
490 
491 	/*
492 	 * Global hash of inpcbs, hashed by local and foreign addresses and
493 	 * port numbers.
494 	 */
495 	struct inpcbhead	*ipi_hashbase;		/* (h) */
496 	u_long			 ipi_hashmask;		/* (h) */
497 
498 	/*
499 	 * Global hash of inpcbs, hashed by only local port number.
500 	 */
501 	struct inpcbporthead	*ipi_porthashbase;	/* (h) */
502 	u_long			 ipi_porthashmask;	/* (h) */
503 
504 	/*
505 	 * List of wildcard inpcbs for use with pcbgroups.  In the past, this was
506 	 * per-pcbgroup but is now global.  All pcbgroup locks must be held
507 	 * to modify the list, so any is sufficient to read it.
508 	 */
509 	struct inpcbhead	*ipi_wildbase;		/* (p) */
510 	u_long			 ipi_wildmask;		/* (p) */
511 
512 	/*
513 	 * Load balance groups used for the SO_REUSEPORT_LB option,
514 	 * hashed by local port.
515 	 */
516 	struct	inpcblbgrouphead *ipi_lbgrouphashbase;	/* (h) */
517 	u_long			 ipi_lbgrouphashmask;	/* (h) */
518 
519 	/*
520 	 * Pointer to network stack instance
521 	 */
522 	struct vnet		*ipi_vnet;		/* (c) */
523 
524 	/*
525 	 * general use 2
526 	 * Spare pointers for general use.
527 	void 			*ipi_pspare[2];
528 
529 	/*
530 	 * Global lock protecting global inpcb list, inpcb count, etc.
531 	 */
532 	struct rwlock		 ipi_list_lock;
533 };
534 
535 #ifdef _KERNEL
536 /*
537  * Connection groups hold sets of connections that have similar CPU/thread
538  * affinity.  Each connection belongs to exactly one connection group.
539  */
540 struct inpcbgroup {
541 	/*
542 	 * Per-connection group hash of inpcbs, hashed by local and foreign
543 	 * addresses and port numbers.
544 	 */
545 	struct inpcbhead	*ipg_hashbase;		/* (c) */
546 	u_long			 ipg_hashmask;		/* (c) */
547 
548 	/*
549 	 * Notional affinity of this pcbgroup.
550 	 */
551 	u_int			 ipg_cpu;		/* (p) */
552 
553 	/*
554 	 * Per-connection group lock, not to be confused with ipi_lock.
555 	 * Protects the hash table hung off the group, but also the global
556 	 * wildcard list in inpcbinfo.
557 	 */
558 	struct mtx		 ipg_lock;
559 } __aligned(CACHE_LINE_SIZE);
560 
561 /*
562  * Load balance groups used for the SO_REUSEPORT_LB socket option. Each group
563  * (or unique address:port combination) can be re-used at most
564  * INPCBLBGROUP_SIZMAX (256) times. The inpcbs are stored in il_inp which
565  * is dynamically resized as processes bind/unbind to that specific group.
566  */
567 struct inpcblbgroup {
568 	LIST_ENTRY(inpcblbgroup) il_list;
569 	uint16_t	il_lport;			/* (c) */
570 	u_char		il_vflag;			/* (c) */
571 	u_char		il_pad;
572 	uint32_t	il_pad2;
573 	union in_dependaddr il_dependladdr;		/* (c) */
574 #define	il_laddr	il_dependladdr.id46_addr.ia46_addr4
575 #define	il6_laddr	il_dependladdr.id6_addr
576 	uint32_t	il_inpsiz; /* max count in il_inp[] (h) */
577 	uint32_t	il_inpcnt; /* cur count in il_inp[] (h) */
578 	struct inpcb	*il_inp[];			/* (h) */
579 };
580 LIST_HEAD(inpcblbgrouphead, inpcblbgroup);
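
/*
 * Illustrative sketch, not part of the original header: once a lookup has
 * found the matching load balance group, one member is chosen by hashing the
 * packet's foreign address and ports over the current member count, roughly
 * what the SO_REUSEPORT_LB lookup path does.  The helper name is an
 * assumption; the block is compiled out.
 */
#if 0	/* example only */
static struct inpcb *
example_lbgroup_pick(struct inpcblbgroup *grp, struct in_addr faddr,
    u_short lport, u_short fport)
{
        uint32_t idx;

        idx = INP_PCBLBGROUP_PKTHASH(faddr.s_addr, lport, fport) %
            grp->il_inpcnt;
        return (grp->il_inp[idx]);
}
#endif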
581 
582 #define INP_LOCK_INIT(inp, d, t) \
583 	rw_init_flags(&(inp)->inp_lock, (t), RW_RECURSE |  RW_DUPOK)
584 #define INP_LOCK_DESTROY(inp)	rw_destroy(&(inp)->inp_lock)
585 #define INP_RLOCK(inp)		rw_rlock(&(inp)->inp_lock)
586 #define INP_WLOCK(inp)		rw_wlock(&(inp)->inp_lock)
587 #define INP_TRY_RLOCK(inp)	rw_try_rlock(&(inp)->inp_lock)
588 #define INP_TRY_WLOCK(inp)	rw_try_wlock(&(inp)->inp_lock)
589 #define INP_RUNLOCK(inp)	rw_runlock(&(inp)->inp_lock)
590 #define INP_WUNLOCK(inp)	rw_wunlock(&(inp)->inp_lock)
591 #define	INP_TRY_UPGRADE(inp)	rw_try_upgrade(&(inp)->inp_lock)
592 #define	INP_DOWNGRADE(inp)	rw_downgrade(&(inp)->inp_lock)
593 #define	INP_WLOCKED(inp)	rw_wowned(&(inp)->inp_lock)
594 #define	INP_LOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_LOCKED)
595 #define	INP_RLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_RLOCKED)
596 #define	INP_WLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_WLOCKED)
597 #define	INP_UNLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_UNLOCKED)
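
/*
 * Illustrative sketch, not part of the original header: the common pattern of
 * write-locking an inpcb, checking that the connection has not entered
 * TIMEWAIT or been dropped, and releasing the lock.  The helper name and the
 * ECONNRESET return value are assumptions for the example; the block is
 * compiled out.
 */
#if 0	/* example only */
static int
example_modify_inpcb(struct inpcb *inp)
{
        INP_WLOCK(inp);
        if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
                INP_WUNLOCK(inp);
                return (ECONNRESET);    /* connection no longer usable */
        }
        /* ... modify fields keyed (i) in struct inpcb ... */
        INP_WUNLOCK(inp);
        return (0);
}
#endif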
598 
599 /*
600  * These locking functions are for inpcb consumers outside of sys/netinet;
601  * more specifically, they were added for the benefit of TOE drivers.  The
602  * macros are reserved for use by the stack.
603  */
604 void inp_wlock(struct inpcb *);
605 void inp_wunlock(struct inpcb *);
606 void inp_rlock(struct inpcb *);
607 void inp_runlock(struct inpcb *);
608 
609 #ifdef INVARIANT_SUPPORT
610 void inp_lock_assert(struct inpcb *);
611 void inp_unlock_assert(struct inpcb *);
612 #else
613 #define	inp_lock_assert(inp)	do {} while (0)
614 #define	inp_unlock_assert(inp)	do {} while (0)
615 #endif
616 
617 void	inp_apply_all(void (*func)(struct inpcb *, void *), void *arg);
618 int 	inp_ip_tos_get(const struct inpcb *inp);
619 void 	inp_ip_tos_set(struct inpcb *inp, int val);
620 struct socket *
621 	inp_inpcbtosocket(struct inpcb *inp);
622 struct tcpcb *
623 	inp_inpcbtotcpcb(struct inpcb *inp);
624 void 	inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
625 		uint32_t *faddr, uint16_t *fp);
626 int	inp_so_options(const struct inpcb *inp);
627 
628 #endif /* _KERNEL */
629 
630 #define INP_INFO_LOCK_INIT(ipi, d) \
631 	mtx_init(&(ipi)->ipi_lock, (d), NULL, MTX_DEF| MTX_RECURSE)
632 #define INP_INFO_LOCK_DESTROY(ipi)  mtx_destroy(&(ipi)->ipi_lock)
633 #define INP_INFO_RLOCK_ET(ipi, et)	NET_EPOCH_ENTER_ET((et))
634 #define INP_INFO_WLOCK(ipi) mtx_lock(&(ipi)->ipi_lock)
635 #define INP_INFO_TRY_WLOCK(ipi)	mtx_trylock(&(ipi)->ipi_lock)
636 #define INP_INFO_WLOCKED(ipi)	mtx_owned(&(ipi)->ipi_lock)
637 #define INP_INFO_RUNLOCK_ET(ipi, et)	NET_EPOCH_EXIT_ET((et))
638 #define INP_INFO_RUNLOCK_TP(ipi, tp)	NET_EPOCH_EXIT_ET(*(tp)->t_inpcb->inp_et)
639 #define INP_INFO_WUNLOCK(ipi)	mtx_unlock(&(ipi)->ipi_lock)
640 #define	INP_INFO_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ipi)->ipi_lock))
641 #define INP_INFO_RLOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt))
642 #define INP_INFO_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_lock, MA_OWNED)
643 #define INP_INFO_UNLOCK_ASSERT(ipi)	MPASS(!in_epoch(net_epoch_preempt) && !mtx_owned(&(ipi)->ipi_lock))
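
/*
 * Illustrative sketch, not part of the original header: the epoch-based
 * "read lock" of a pcbinfo used around lookups, versus the mutex-based write
 * lock used for structural changes.  V_tcbinfo (TCP's pcbinfo, declared in
 * netinet/tcp_var.h) is used only as a familiar example; the block is
 * compiled out.
 */
#if 0	/* example only */
static void
example_pcbinfo_locking(void)
{
        struct epoch_tracker et;

        INP_INFO_RLOCK_ET(&V_tcbinfo, et);      /* enter net_epoch_preempt */
        /* ... read-only lookups and traversal ... */
        INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);

        INP_INFO_WLOCK(&V_tcbinfo);             /* exclusive, for modifications */
        /* ... */
        INP_INFO_WUNLOCK(&V_tcbinfo);
}
#endif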
644 
645 #define INP_LIST_LOCK_INIT(ipi, d) \
646         rw_init_flags(&(ipi)->ipi_list_lock, (d), 0)
647 #define INP_LIST_LOCK_DESTROY(ipi)  rw_destroy(&(ipi)->ipi_list_lock)
648 #define INP_LIST_RLOCK(ipi)     rw_rlock(&(ipi)->ipi_list_lock)
649 #define INP_LIST_WLOCK(ipi)     rw_wlock(&(ipi)->ipi_list_lock)
650 #define INP_LIST_TRY_RLOCK(ipi) rw_try_rlock(&(ipi)->ipi_list_lock)
651 #define INP_LIST_TRY_WLOCK(ipi) rw_try_wlock(&(ipi)->ipi_list_lock)
652 #define INP_LIST_TRY_UPGRADE(ipi)       rw_try_upgrade(&(ipi)->ipi_list_lock)
653 #define INP_LIST_RUNLOCK(ipi)   rw_runlock(&(ipi)->ipi_list_lock)
654 #define INP_LIST_WUNLOCK(ipi)   rw_wunlock(&(ipi)->ipi_list_lock)
655 #define INP_LIST_LOCK_ASSERT(ipi) \
656 	rw_assert(&(ipi)->ipi_list_lock, RA_LOCKED)
657 #define INP_LIST_RLOCK_ASSERT(ipi) \
658 	rw_assert(&(ipi)->ipi_list_lock, RA_RLOCKED)
659 #define INP_LIST_WLOCK_ASSERT(ipi) \
660 	rw_assert(&(ipi)->ipi_list_lock, RA_WLOCKED)
661 #define INP_LIST_UNLOCK_ASSERT(ipi) \
662 	rw_assert(&(ipi)->ipi_list_lock, RA_UNLOCKED)
663 
664 #define	INP_HASH_LOCK_INIT(ipi, d) mtx_init(&(ipi)->ipi_hash_lock, (d), NULL, MTX_DEF)
665 #define	INP_HASH_LOCK_DESTROY(ipi)	mtx_destroy(&(ipi)->ipi_hash_lock)
666 #define	INP_HASH_RLOCK(ipi)		struct epoch_tracker inp_hash_et; epoch_enter_preempt(net_epoch_preempt, &inp_hash_et)
667 #define	INP_HASH_RLOCK_ET(ipi, et)		epoch_enter_preempt(net_epoch_preempt, &(et))
668 #define	INP_HASH_WLOCK(ipi)		mtx_lock(&(ipi)->ipi_hash_lock)
669 #define	INP_HASH_RUNLOCK(ipi)		NET_EPOCH_EXIT_ET(inp_hash_et)
670 #define	INP_HASH_RUNLOCK_ET(ipi, et)		NET_EPOCH_EXIT_ET((et))
671 #define	INP_HASH_WUNLOCK(ipi)		mtx_unlock(&(ipi)->ipi_hash_lock)
672 #define	INP_HASH_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ipi)->ipi_hash_lock))
673 #define	INP_HASH_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_hash_lock, MA_OWNED)
674 
675 #define	INP_GROUP_LOCK_INIT(ipg, d)	mtx_init(&(ipg)->ipg_lock, (d), NULL, \
676 					    MTX_DEF | MTX_DUPOK)
677 #define	INP_GROUP_LOCK_DESTROY(ipg)	mtx_destroy(&(ipg)->ipg_lock)
678 
679 #define	INP_GROUP_LOCK(ipg)		mtx_lock(&(ipg)->ipg_lock)
680 #define	INP_GROUP_LOCK_ASSERT(ipg)	mtx_assert(&(ipg)->ipg_lock, MA_OWNED)
681 #define	INP_GROUP_UNLOCK(ipg)		mtx_unlock(&(ipg)->ipg_lock)
682 
683 #define INP_PCBHASH(faddr, lport, fport, mask) \
684 	(((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))
685 #define INP_PCBPORTHASH(lport, mask) \
686 	(ntohs((lport)) & (mask))
687 #define	INP_PCBLBGROUP_PORTHASH(lport, mask) \
688 	(ntohs((lport)) & (mask))
689 #define	INP_PCBLBGROUP_PKTHASH(faddr, lport, fport) \
690 	((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport)))
691 #define	INP6_PCBHASHKEY(faddr)	((faddr)->s6_addr32[3])
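
/*
 * Illustrative sketch, not part of the original header: using INP_PCBHASH to
 * pick a connection hash chain and walking it for an exact four-tuple match,
 * in the spirit of the in_pcblookup_*() internals.  Addresses and ports are
 * assumed to be in network byte order, as stored in the pcb; the helper name
 * is an assumption and the block is compiled out.
 */
#if 0	/* example only */
static struct inpcb *
example_hash_lookup(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    u_short fport, struct in_addr laddr, u_short lport)
{
        struct inpcbhead *head;
        struct inpcb *inp;

        head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
            pcbinfo->ipi_hashmask)];
        CK_LIST_FOREACH(inp, head, inp_hash) {
                if (inp->inp_faddr.s_addr == faddr.s_addr &&
                    inp->inp_laddr.s_addr == laddr.s_addr &&
                    inp->inp_fport == fport && inp->inp_lport == lport)
                        return (inp);   /* exact match */
        }
        return (NULL);
}
#endif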
692 
693 /*
694  * Flags for inp_vflag -- historically version flags only
695  */
696 #define	INP_IPV4	0x1
697 #define	INP_IPV6	0x2
698 #define	INP_IPV6PROTO	0x4		/* opened under IPv6 protocol */
699 
700 /*
701  * Flags for inp_flags.
702  */
703 #define	INP_RECVOPTS		0x00000001 /* receive incoming IP options */
704 #define	INP_RECVRETOPTS		0x00000002 /* receive IP options for reply */
705 #define	INP_RECVDSTADDR		0x00000004 /* receive IP dst address */
706 #define	INP_HDRINCL		0x00000008 /* user supplies entire IP header */
707 #define	INP_HIGHPORT		0x00000010 /* user wants "high" port binding */
708 #define	INP_LOWPORT		0x00000020 /* user wants "low" port binding */
709 #define	INP_ANONPORT		0x00000040 /* port chosen for user */
710 #define	INP_RECVIF		0x00000080 /* receive incoming interface */
711 #define	INP_MTUDISC		0x00000100 /* user can do MTU discovery */
712 				   	   /* 0x000200 unused: was INP_FAITH */
713 #define	INP_RECVTTL		0x00000400 /* receive incoming IP TTL */
714 #define	INP_DONTFRAG		0x00000800 /* don't fragment packet */
715 #define	INP_BINDANY		0x00001000 /* allow bind to any address */
716 #define	INP_INHASHLIST		0x00002000 /* in_pcbinshash() has been called */
717 #define	INP_RECVTOS		0x00004000 /* receive incoming IP TOS */
718 #define	IN6P_IPV6_V6ONLY	0x00008000 /* restrict AF_INET6 socket for v6 */
719 #define	IN6P_PKTINFO		0x00010000 /* receive IP6 dst and I/F */
720 #define	IN6P_HOPLIMIT		0x00020000 /* receive hoplimit */
721 #define	IN6P_HOPOPTS		0x00040000 /* receive hop-by-hop options */
722 #define	IN6P_DSTOPTS		0x00080000 /* receive dst options after rthdr */
723 #define	IN6P_RTHDR		0x00100000 /* receive routing header */
724 #define	IN6P_RTHDRDSTOPTS	0x00200000 /* receive dstoptions before rthdr */
725 #define	IN6P_TCLASS		0x00400000 /* receive traffic class value */
726 #define	IN6P_AUTOFLOWLABEL	0x00800000 /* attach flowlabel automatically */
727 #define	INP_TIMEWAIT		0x01000000 /* in TIMEWAIT, ppcb is tcptw */
728 #define	INP_ONESBCAST		0x02000000 /* send all-ones broadcast */
729 #define	INP_DROPPED		0x04000000 /* protocol drop flag */
730 #define	INP_SOCKREF		0x08000000 /* strong socket reference */
731 #define	INP_RESERVED_0          0x10000000 /* reserved field */
732 #define	INP_RESERVED_1          0x20000000 /* reserved field */
733 #define	IN6P_RFC2292		0x40000000 /* used RFC2292 API on the socket */
734 #define	IN6P_MTU		0x80000000 /* receive path MTU */
735 
736 #define	INP_CONTROLOPTS		(INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\
737 				 INP_RECVIF|INP_RECVTTL|INP_RECVTOS|\
738 				 IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\
739 				 IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\
740 				 IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\
741 				 IN6P_MTU)
742 
743 /*
744  * Flags for inp_flags2.
745  */
746 #define	INP_LLE_VALID		0x00000001 /* cached lle is valid */
747 #define	INP_RT_VALID		0x00000002 /* cached rtentry is valid */
748 #define	INP_PCBGROUPWILD	0x00000004 /* in pcbgroup wildcard list */
749 #define	INP_REUSEPORT		0x00000008 /* SO_REUSEPORT option is set */
750 #define	INP_FREED		0x00000010 /* inp itself is not valid */
751 #define	INP_REUSEADDR		0x00000020 /* SO_REUSEADDR option is set */
752 #define	INP_BINDMULTI		0x00000040 /* IP_BINDMULTI option is set */
753 #define	INP_RSS_BUCKET_SET	0x00000080 /* IP_RSS_LISTEN_BUCKET is set */
754 #define	INP_RECVFLOWID		0x00000100 /* populate recv datagram with flow info */
755 #define	INP_RECVRSSBUCKETID	0x00000200 /* populate recv datagram with bucket id */
756 #define	INP_RATE_LIMIT_CHANGED	0x00000400 /* rate limit needs attention */
757 #define	INP_ORIGDSTADDR		0x00000800 /* receive IP dst address/port */
758 #define INP_CANNOT_DO_ECN	0x00001000 /* The stack does not do ECN */
759 #define	INP_REUSEPORT_LB	0x00002000 /* SO_REUSEPORT_LB option is set */
760 
761 /*
762  * Flags passed to in_pcblookup*() functions.
763  */
764 #define	INPLOOKUP_WILDCARD	0x00000001	/* Allow wildcard sockets. */
765 #define	INPLOOKUP_RLOCKPCB	0x00000002	/* Return inpcb read-locked. */
766 #define	INPLOOKUP_WLOCKPCB	0x00000004	/* Return inpcb write-locked. */
767 
768 #define	INPLOOKUP_MASK	(INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB | \
769 			    INPLOOKUP_WLOCKPCB)
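
/*
 * Illustrative sketch, not part of the original header: a lookup that accepts
 * wildcard (listening) sockets and asks for the inpcb back read-locked, as a
 * protocol input path typically does.  struct ip and struct udphdr come from
 * netinet/ip.h and netinet/udp.h, and the helper name is an assumption; the
 * block is compiled out.
 */
#if 0	/* example only */
static void
example_input_lookup(struct inpcbinfo *pcbinfo, struct ip *ip,
    struct udphdr *uh, struct ifnet *ifp)
{
        struct inpcb *inp;

        inp = in_pcblookup(pcbinfo, ip->ip_src, uh->uh_sport, ip->ip_dst,
            uh->uh_dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, ifp);
        if (inp != NULL) {
                /* ... hand the datagram to the owning socket ... */
                INP_RUNLOCK(inp);
        }
}
#endif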
770 
771 #define	sotoinpcb(so)	((struct inpcb *)(so)->so_pcb)
772 #define	sotoin6pcb(so)	sotoinpcb(so) /* for KAME src sync over BSD*'s */
773 
774 #define	INP_SOCKAF(so) so->so_proto->pr_domain->dom_family
775 
776 #define	INP_CHECK_SOCKAF(so, af)	(INP_SOCKAF(so) == af)
777 
778 /*
779  * Constants for pcbinfo.ipi_hashfields.
780  */
781 #define	IPI_HASHFIELDS_NONE	0
782 #define	IPI_HASHFIELDS_2TUPLE	1
783 #define	IPI_HASHFIELDS_4TUPLE	2
784 
785 #ifdef _KERNEL
786 VNET_DECLARE(int, ipport_reservedhigh);
787 VNET_DECLARE(int, ipport_reservedlow);
788 VNET_DECLARE(int, ipport_lowfirstauto);
789 VNET_DECLARE(int, ipport_lowlastauto);
790 VNET_DECLARE(int, ipport_firstauto);
791 VNET_DECLARE(int, ipport_lastauto);
792 VNET_DECLARE(int, ipport_hifirstauto);
793 VNET_DECLARE(int, ipport_hilastauto);
794 VNET_DECLARE(int, ipport_randomized);
795 VNET_DECLARE(int, ipport_randomcps);
796 VNET_DECLARE(int, ipport_randomtime);
797 VNET_DECLARE(int, ipport_stoprandom);
798 VNET_DECLARE(int, ipport_tcpallocs);
799 
800 #define	V_ipport_reservedhigh	VNET(ipport_reservedhigh)
801 #define	V_ipport_reservedlow	VNET(ipport_reservedlow)
802 #define	V_ipport_lowfirstauto	VNET(ipport_lowfirstauto)
803 #define	V_ipport_lowlastauto	VNET(ipport_lowlastauto)
804 #define	V_ipport_firstauto	VNET(ipport_firstauto)
805 #define	V_ipport_lastauto	VNET(ipport_lastauto)
806 #define	V_ipport_hifirstauto	VNET(ipport_hifirstauto)
807 #define	V_ipport_hilastauto	VNET(ipport_hilastauto)
808 #define	V_ipport_randomized	VNET(ipport_randomized)
809 #define	V_ipport_randomcps	VNET(ipport_randomcps)
810 #define	V_ipport_randomtime	VNET(ipport_randomtime)
811 #define	V_ipport_stoprandom	VNET(ipport_stoprandom)
812 #define	V_ipport_tcpallocs	VNET(ipport_tcpallocs)
813 
814 void	in_pcbinfo_destroy(struct inpcbinfo *);
815 void	in_pcbinfo_init(struct inpcbinfo *, const char *, struct inpcbhead *,
816 	    int, int, char *, uma_init, u_int);
817 
818 int	in_pcbbind_check_bindmulti(const struct inpcb *ni,
819 	    const struct inpcb *oi);
820 
821 struct inpcbgroup *
822 	in_pcbgroup_byhash(struct inpcbinfo *, u_int, uint32_t);
823 struct inpcbgroup *
824 	in_pcbgroup_byinpcb(struct inpcb *);
825 struct inpcbgroup *
826 	in_pcbgroup_bytuple(struct inpcbinfo *, struct in_addr, u_short,
827 	    struct in_addr, u_short);
828 void	in_pcbgroup_destroy(struct inpcbinfo *);
829 int	in_pcbgroup_enabled(struct inpcbinfo *);
830 void	in_pcbgroup_init(struct inpcbinfo *, u_int, int);
831 void	in_pcbgroup_remove(struct inpcb *);
832 void	in_pcbgroup_update(struct inpcb *);
833 void	in_pcbgroup_update_mbuf(struct inpcb *, struct mbuf *);
834 
835 void	in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *);
836 int	in_pcballoc(struct socket *, struct inpcbinfo *);
837 int	in_pcbbind(struct inpcb *, struct sockaddr *, struct ucred *);
838 int	in_pcb_lport(struct inpcb *, struct in_addr *, u_short *,
839 	    struct ucred *, int);
840 int	in_pcbbind_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
841 	    u_short *, struct ucred *);
842 int	in_pcbconnect(struct inpcb *, struct sockaddr *, struct ucred *);
843 int	in_pcbconnect_mbuf(struct inpcb *, struct sockaddr *, struct ucred *,
844 	    struct mbuf *);
845 int	in_pcbconnect_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
846 	    u_short *, in_addr_t *, u_short *, struct inpcb **,
847 	    struct ucred *);
848 void	in_pcbdetach(struct inpcb *);
849 void	in_pcbdisconnect(struct inpcb *);
850 void	in_pcbdrop(struct inpcb *);
851 void	in_pcbfree(struct inpcb *);
852 int	in_pcbinshash(struct inpcb *);
853 int	in_pcbinshash_nopcbgroup(struct inpcb *);
854 int	in_pcbladdr(struct inpcb *, struct in_addr *, struct in_addr *,
855 	    struct ucred *);
856 struct inpcb *
857 	in_pcblookup_local(struct inpcbinfo *,
858 	    struct in_addr, u_short, int, struct ucred *);
859 struct inpcb *
860 	in_pcblookup(struct inpcbinfo *, struct in_addr, u_int,
861 	    struct in_addr, u_int, int, struct ifnet *);
862 struct inpcb *
863 	in_pcblookup_mbuf(struct inpcbinfo *, struct in_addr, u_int,
864 	    struct in_addr, u_int, int, struct ifnet *, struct mbuf *);
865 void	in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr,
866 	    int, struct inpcb *(*)(struct inpcb *, int));
867 void	in_pcbref(struct inpcb *);
868 void	in_pcbrehash(struct inpcb *);
869 void	in_pcbrehash_mbuf(struct inpcb *, struct mbuf *);
870 int	in_pcbrele(struct inpcb *);
871 int	in_pcbrele_rlocked(struct inpcb *);
872 int	in_pcbrele_wlocked(struct inpcb *);
873 void	in_pcblist_rele_rlocked(epoch_context_t ctx);
874 void	in_losing(struct inpcb *);
875 void	in_pcbsetsolabel(struct socket *so);
876 int	in_getpeeraddr(struct socket *so, struct sockaddr **nam);
877 int	in_getsockaddr(struct socket *so, struct sockaddr **nam);
878 struct sockaddr *
879 	in_sockaddr(in_port_t port, struct in_addr *addr);
880 void	in_pcbsosetlabel(struct socket *so);
881 #ifdef RATELIMIT
882 int	in_pcbattach_txrtlmt(struct inpcb *, struct ifnet *, uint32_t, uint32_t, uint32_t);
883 void	in_pcbdetach_txrtlmt(struct inpcb *);
884 int	in_pcbmodify_txrtlmt(struct inpcb *, uint32_t);
885 int	in_pcbquery_txrtlmt(struct inpcb *, uint32_t *);
886 int	in_pcbquery_txrlevel(struct inpcb *, uint32_t *);
887 void	in_pcboutput_txrtlmt(struct inpcb *, struct ifnet *, struct mbuf *);
888 void	in_pcboutput_eagain(struct inpcb *);
889 #endif
890 #endif /* _KERNEL */
891 
892 #endif /* !_NETINET_IN_PCB_H_ */
893