/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)in_pcb.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#ifndef _NETINET_IN_PCB_H_
#define _NETINET_IN_PCB_H_

#include <sys/queue.h>
#include <sys/epoch.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_rwlock.h>
#include <net/route.h>

#ifdef _KERNEL
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <net/vnet.h>
#include <vm/uma.h>
#endif
#include <sys/ck.h>

/*
 * struct inpcb is the common protocol control block structure used in most
 * IP transport protocols.
 *
 * Pointers to local and foreign host table entries, local and foreign socket
 * numbers, and pointers up (to a socket structure) and down (to a
 * protocol-specific control block) are stored here.
 */
CK_LIST_HEAD(inpcbhead, inpcb);
CK_LIST_HEAD(inpcbporthead, inpcbport);
CK_LIST_HEAD(inpcblbgrouphead, inpcblbgroup);
typedef	uint64_t inp_gen_t;

/*
 * A PCB bound to the AF_INET6 null laddr can also receive AF_INET input
 * packets.  The AF_INET6 null laddr therefore doubles as the AF_INET null
 * laddr, via the following structure.
 */
struct in_addr_4in6 {
	u_int32_t	ia46_pad32[3];
	struct	in_addr	ia46_addr4;
};

union in_dependaddr {
	struct in_addr_4in6 id46_addr;
	struct in6_addr	id6_addr;
};

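/*
 * Illustrative sketch only (not part of this header): the 4-in-6 overlay
 * above places an IPv4 address in the low-order 32 bits of the 128-bit
 * field, so the same storage can be viewed through either arm of the union
 * (here using the kernel-visible s6_addr32 accessor):
 *
 *	union in_dependaddr a;
 *
 *	bzero(&a, sizeof(a));
 *	a.id46_addr.ia46_addr4.s_addr = htonl(INADDR_LOOPBACK);
 *	KASSERT(a.id6_addr.s6_addr32[3] == htonl(INADDR_LOOPBACK),
 *	    ("4-in-6 overlay mismatch"));
 */
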
/*
 * NOTE: ipv6 addrs should be 64-bit aligned, per RFC 2553.  in_conninfo has
 * some extra padding to accomplish this.
 * NOTE 2: tcp_syncache.c uses the first 5 32-bit words, which identify fport,
 * lport and faddr, to generate the hash, so these fields shouldn't be moved.
 */
struct in_endpoints {
	u_int16_t	ie_fport;		/* foreign port */
	u_int16_t	ie_lport;		/* local port */
	/* protocol dependent part, local and foreign addr */
	union in_dependaddr ie_dependfaddr;	/* foreign host table entry */
	union in_dependaddr ie_dependladdr;	/* local host table entry */
#define	ie_faddr	ie_dependfaddr.id46_addr.ia46_addr4
#define	ie_laddr	ie_dependladdr.id46_addr.ia46_addr4
#define	ie6_faddr	ie_dependfaddr.id6_addr
#define	ie6_laddr	ie_dependladdr.id6_addr
	u_int32_t	ie6_zoneid;		/* scope zone id */
};

/*
 * XXX The defines for inc_* are hacks and should be changed to direct
 * references.
 */
struct in_conninfo {
	u_int8_t	inc_flags;
	u_int8_t	inc_len;
	u_int16_t	inc_fibnum;	/* XXX was pad, 16 bits is plenty */
	/* protocol dependent part */
	struct	in_endpoints inc_ie;
};

/*
 * Flags for inc_flags.
 */
#define	INC_ISIPV6	0x01
#define	INC_IPV6MINMTU	0x02

#define	inc_fport	inc_ie.ie_fport
#define	inc_lport	inc_ie.ie_lport
#define	inc_faddr	inc_ie.ie_faddr
#define	inc_laddr	inc_ie.ie_laddr
#define	inc6_faddr	inc_ie.ie6_faddr
#define	inc6_laddr	inc_ie.ie6_laddr
#define	inc6_zoneid	inc_ie.ie6_zoneid

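/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * code that consumes an in_conninfo typically branches on INC_ISIPV6 before
 * deciding which member of the address union to read, e.g.:
 *
 *	static void
 *	conninfo_print(const struct in_conninfo *inc)
 *	{
 *		char buf[INET6_ADDRSTRLEN];
 *
 *		if (inc->inc_flags & INC_ISIPV6)
 *			inet_ntop(AF_INET6, &inc->inc6_faddr, buf, sizeof(buf));
 *		else
 *			inet_ntop(AF_INET, &inc->inc_faddr, buf, sizeof(buf));
 *		printf("%s:%u\n", buf, ntohs(inc->inc_fport));
 *	}
 */
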
#if defined(_KERNEL) || defined(_WANT_INPCB)
/*
 * struct inpcb captures the network layer state for TCP, UDP, and raw IPv4 and
 * IPv6 sockets.  In the case of TCP and UDP, further per-connection state is
 * hung off of inp_ppcb most of the time.  Almost all fields of struct inpcb
 * are static after creation or protected by a per-inpcb rwlock, inp_lock.  A
 * few fields are protected by multiple locks as indicated in the locking notes
 * below.  For these fields, all of the listed locks must be write-locked for
 * any modifications.  However, these fields can be safely read while any one
 * of the listed locks is read-locked.  This model can permit greater
 * concurrency for read operations.  For example, connections can be looked up
 * while only holding a read lock on the global pcblist lock.  This is
 * important for performance when attempting to find the connection for a
 * packet given its IP and port tuple.
 *
 * One noteworthy exception is that the global pcbinfo lock follows a different
 * set of rules in relation to the inp_list field.  Rather than being
 * write-locked for modifications and read-locked for list iterations, it must
 * be read-locked during modifications and write-locked during list iterations.
 * This ensures that the relatively rare global list iterations safely walk a
 * stable snapshot of connections while allowing more common list modifications
 * to safely grab the pcblist lock just while adding or removing a connection
 * from the global list.
 *
 * Key:
 * (b) - Protected by the hpts lock.
 * (c) - Constant after initialization
 * (e) - Protected by the net_epoch_preempt epoch
 * (g) - Protected by the pcbgroup lock
 * (i) - Protected by the inpcb lock
 * (p) - Protected by the pcbinfo lock for the inpcb
 * (l) - Protected by the pcblist lock for the inpcb
 * (h) - Protected by the pcbhash lock for the inpcb
 * (s) - Protected by another subsystem's locks
 * (x) - Undefined locking
 *
 * Notes on the tcp_hpts:
 *
 * First, the hpts lock order is:
 *	1) INP_WLOCK()
 *	2) HPTS_LOCK(), i.e. hpts->pmtx
 *
 * To insert a TCB on the hpts you *must* be holding the INP_WLOCK().
 * You may check the inp->inp_in_hpts flag without the hpts lock.
 * The hpts is the only one that will clear this flag while holding
 * only the hpts lock.  This means that in your tcp_output()
 * routine, when you test the inp_in_hpts flag for 1,
 * it may be transitioning to 0 (by the hpts).
 * That is ok, since it just means an extra call to tcp_output():
 * most likely the call that executed when the mismatch occurred
 * will already have put the TCB back on the hpts, and the extra
 * call will simply return.  If your call did not add the inp back
 * to the hpts, then you will either over-send or the cwnd will
 * block you from sending more.
 *
 * Note that you should also be holding the INP_WLOCK() when you
 * remove the inp from the hpts.  Usually you are doing this either
 * from a timer, where you need and have the INP_WLOCK(), or when
 * destroying your TCB, where again you should already have the
 * INP_WLOCK().
 *
 * The inp_hpts_cpu, inp_hpts_cpu_set, inp_input_cpu and
 * inp_input_cpu_set fields are controlled completely by
 * the hpts.  Do not ever set these.  The inp_hpts_cpu_set
 * and inp_input_cpu_set fields indicate whether the hpts has
 * set up the respective cpu field.  If the _set field is 0, it is
 * advised to enqueue the packet with the appropriate
 * hpts_immediate() call.  If the _set field is 1, then
 * you may compare the inp_*_cpu field to curcpu and
 * may want to insert onto the hpts again if these fields
 * are not equal (i.e. you are not on the expected CPU).
 *
 * A note on inp_hpts_calls and inp_input_calls: these
 * flags are set when the hpts calls the output or the
 * do_segment routine, respectively.  If the routine
 * being called wants to use this, then it needs to
 * clear the flag before returning.  The hpts will not
 * clear the flag.  The flags can be used to tell if
 * the hpts is the function calling the respective
 * routine.
 *
 * A few other notes:
 *
 * When a read lock is held, stability of the field is guaranteed; to write
 * to a field, a write lock must generally be held.
 *
 * netinet/netinet6-layer code should not assume that the inp_socket pointer
 * is safe to dereference without inp_lock being held, even for protocols
 * other than TCP (where the inpcb persists during TIMEWAIT even after the
 * socket has been freed), or there may be close(2)-related races.
 *
 * The inp_vflag field is overloaded, and would otherwise ideally be (c).
 *
 * TODO: Currently only the TCP stack leverages the global pcbinfo lock
 * read-lock usage during modification; this model can be applied to other
 * protocols (especially SCTP).
 */
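/*
 * Illustrative sketch only (not part of this header): honoring the hpts lock
 * order described above when putting a TCB on the output hpts.  The
 * HPTS_LOCK()/HPTS_UNLOCK() and hpts_insert_locked() names here are
 * hypothetical stand-ins for taking hpts->pmtx and performing the insertion;
 * the real insertion path lives in tcp_hpts.c.
 *
 *	INP_WLOCK(inp);				// 1) inpcb write lock first
 *	if (inp->inp_in_hpts == 0) {		// readable without the hpts lock
 *		HPTS_LOCK(hpts);		// 2) then the hpts lock
 *		hpts_insert_locked(hpts, inp);
 *		HPTS_UNLOCK(hpts);
 *	}
 *	INP_WUNLOCK(inp);
 */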
struct icmp6_filter;
struct inpcbpolicy;
struct m_snd_tag;
struct inpcb {
	/* Cache line #1 (amd64) */
	CK_LIST_ENTRY(inpcb) inp_hash;	/* [w](h/i) [r](e/i) hash list */
	CK_LIST_ENTRY(inpcb) inp_pcbgrouphash;	/* (g/i) hash list */
	struct rwlock	inp_lock;
	/* Cache line #2 (amd64) */
#define	inp_start_zero	inp_hpts
#define	inp_zero_size	(sizeof(struct inpcb) - \
			    offsetof(struct inpcb, inp_start_zero))
	TAILQ_ENTRY(inpcb) inp_hpts;	/* pacing out queue next lock(b) */

	uint32_t inp_hpts_request;	/* Current hpts request, zero if
					 * fits in the pacing window (i&b). */
	/*
	 * Note that the next fields are protected by a
	 * different lock (hpts-lock).  This means that
	 * they must correspond in size to the smallest
	 * protectable bit field (uint8_t on x86, and
	 * potentially uint32_t on other platforms).  Also,
	 * since CPU switches can occur at different times, the two
	 * fields can *not* be collapsed into a single bit field.
	 */
#if defined(__amd64__) || defined(__i386__)
	volatile uint8_t inp_in_hpts;	/* on output hpts (lock b) */
	volatile uint8_t inp_in_input;	/* on input hpts (lock b) */
#else
	volatile uint32_t inp_in_hpts;	/* on output hpts (lock b) */
	volatile uint32_t inp_in_input;	/* on input hpts (lock b) */
#endif
	volatile uint16_t inp_hpts_cpu;	/* Lock (i) */
	volatile uint16_t inp_irq_cpu;	/* Set by LRO on behalf of the driver */
	u_int	inp_refcount;		/* (i) refcount */
	int	inp_flags;		/* (i) generic IP/datagram flags */
	int	inp_flags2;		/* (i) generic IP/datagram flags #2 */
	volatile uint16_t inp_input_cpu; /* Lock (i) */
	volatile uint8_t inp_hpts_cpu_set :1,	/* on output hpts (i) */
			 inp_input_cpu_set :1,	/* on input hpts (i) */
			 inp_hpts_calls :1,	/* (i) from output hpts */
			 inp_input_calls :1,	/* (i) from input hpts */
			 inp_irq_cpu_set :1,	/* (i) from LRO/Driver */
			 inp_spare_bits2 :3;
	uint8_t	inp_numa_domain;	/* numa domain */
	void	*inp_ppcb;		/* (i) pointer to per-protocol pcb */
	struct	socket *inp_socket;	/* (i) back pointer to socket */
	uint32_t inp_hptsslot;		/* hpts wheel slot this tcb is on, lock (i&b) */
	uint32_t inp_hpts_drop_reas;	/* reason we are dropping the PCB (lock i&b) */
	TAILQ_ENTRY(inpcb) inp_input;	/* pacing in queue next lock(b) */
	struct	inpcbinfo *inp_pcbinfo;	/* (c) PCB list info */
	struct	inpcbgroup *inp_pcbgroup; /* (g/i) PCB group list */
	CK_LIST_ENTRY(inpcb) inp_pcbgroup_wild; /* (g/i/h) group wildcard entry */
	struct	ucred *inp_cred;	/* (c) cache of socket cred */
	u_int32_t inp_flow;		/* (i) IPv6 flow information */
	u_char	inp_vflag;		/* (i) IP version flag (v4/v6) */
	u_char	inp_ip_ttl;		/* (i) time to live proto */
	u_char	inp_ip_p;		/* (c) protocol proto */
	u_char	inp_ip_minttl;		/* (i) minimum TTL or drop */
	uint32_t inp_flowid;		/* (x) flow id / queue id */
	struct m_snd_tag *inp_snd_tag;	/* (i) send tag for outgoing mbufs */
	uint32_t inp_flowtype;		/* (x) M_HASHTYPE value */
	uint32_t inp_rss_listen_bucket;	/* (x) overridden RSS listen bucket */

	/* Local and foreign ports, local and foreign addr. */
	struct	in_conninfo inp_inc;	/* (i) list for PCB's local port */

	/* MAC and IPSEC policy information. */
	struct	label *inp_label;	/* (i) MAC label */
	struct	inpcbpolicy *inp_sp;	/* (s) for IPSEC */

	/* Protocol-dependent part; options. */
	struct {
		u_char	inp_ip_tos;		/* (i) type of service proto */
		struct mbuf		*inp_options;	/* (i) IP options */
		struct ip_moptions	*inp_moptions;	/* (i) mcast options */
	};
	struct {
		/* (i) IP options */
		struct mbuf		*in6p_options;
		/* (i) IP6 options for outgoing packets */
		struct ip6_pktopts	*in6p_outputopts;
		/* (i) IP multicast options */
		struct ip6_moptions	*in6p_moptions;
		/* (i) ICMPv6 code type filter */
		struct icmp6_filter	*in6p_icmp6filt;
		/* (i) IPV6_CHECKSUM setsockopt */
		int	in6p_cksum;
		short	in6p_hops;
	};
	CK_LIST_ENTRY(inpcb) inp_portlist;	/* (i/h) */
	struct	inpcbport *inp_phd;	/* (i/h) head of this list */
	inp_gen_t	inp_gencnt;	/* (c) generation count */
	void	*spare_ptr;		/* Spare pointer. */
	rt_gen_t	inp_rt_cookie;	/* generation for route entry */
	union {				/* cached L3 information */
		struct route inp_route;
		struct route_in6 inp_route6;
	};
	CK_LIST_ENTRY(inpcb) inp_list;	/* (p/l) list for all PCBs for proto */
					/* (e[r]) for list iteration */
					/* (p[w]/l) for addition/removal */
	struct epoch_context inp_epoch_ctx;
};
#endif	/* _KERNEL || _WANT_INPCB */

#define	inp_fport	inp_inc.inc_fport
#define	inp_lport	inp_inc.inc_lport
#define	inp_faddr	inp_inc.inc_faddr
#define	inp_laddr	inp_inc.inc_laddr

#define	in6p_faddr	inp_inc.inc6_faddr
#define	in6p_laddr	inp_inc.inc6_laddr
#define	in6p_zoneid	inp_inc.inc6_zoneid

#define	inp_vnet	inp_pcbinfo->ipi_vnet

/*
 * The range of the generation count, as used in this implementation, is 9e19.
 * We would have to create 300 billion connections per second for this number
 * to roll over in a year.  This seems sufficiently unlikely that we simply
 * don't concern ourselves with that possibility.
 */

/*
 * Interface exported to userland by various protocols which use inpcbs.  Hack
 * alert -- only define if struct xsocket is in scope.
 * Fields prefixed with "xi_" are unique to this structure, and the rest
 * match fields in the struct inpcb, to ease coding and porting.
 *
 * Legend:
 * (s) - used by userland utilities in src
 * (p) - used by utilities in ports
 * (3) - is known to be used by third party software not in ports
 * (n) - no known usage
 */
#ifdef _SYS_SOCKETVAR_H_
struct xinpcb {
	ksize_t		xi_len;			/* length of this structure */
	struct xsocket	xi_socket;		/* (s,p) */
	struct in_conninfo inp_inc;		/* (s,p) */
	uint64_t	inp_gencnt;		/* (s,p) */
	kvaddr_t	inp_ppcb;		/* (s) netstat(1) */
	int64_t		inp_spare64[4];
	uint32_t	inp_flow;		/* (s) */
	uint32_t	inp_flowid;		/* (s) */
	uint32_t	inp_flowtype;		/* (s) */
	int32_t		inp_flags;		/* (s,p) */
	int32_t		inp_flags2;		/* (s) */
	int32_t		inp_rss_listen_bucket;	/* (n) */
	int32_t		in6p_cksum;		/* (n) */
	int32_t		inp_spare32[4];
	uint16_t	in6p_hops;		/* (n) */
	uint8_t		inp_ip_tos;		/* (n) */
	int8_t		pad8;
	uint8_t		inp_vflag;		/* (s,p) */
	uint8_t		inp_ip_ttl;		/* (n) */
	uint8_t		inp_ip_p;		/* (n) */
	uint8_t		inp_ip_minttl;		/* (n) */
	int8_t		inp_spare8[4];
} __aligned(8);

struct xinpgen {
	ksize_t		xig_len;	/* length of this structure */
	u_int		xig_count;	/* number of PCBs at this time */
	uint32_t	_xig_spare32;
	inp_gen_t	xig_gen;	/* generation count at this time */
	so_gen_t	xig_sogen;	/* socket generation count this time */
	uint64_t	_xig_spare64[4];
} __aligned(8);
#ifdef	_KERNEL
void	in_pcbtoxinpcb(const struct inpcb *, struct xinpcb *);
#endif
#endif /* _SYS_SOCKETVAR_H_ */
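
/*
 * Userland example (illustrative only, not part of this header): the
 * xinpgen/xinpcb layout above is what list sysctls such as
 * net.inet.udp.pcblist export -- a leading struct xinpgen, a series of
 * records whose first field gives the record length, and a trailing struct
 * xinpgen.  A minimal sketch of walking such a buffer, assuming the
 * netstat(1)-style layout and eliding error handling:
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.udp.pcblist", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("net.inet.udp.pcblist", buf, &len, NULL, 0);
 *
 *	struct xinpgen *xig = (struct xinpgen *)buf;
 *	for (xig = (struct xinpgen *)((char *)xig + xig->xig_len);
 *	    xig->xig_len > sizeof(struct xinpgen);
 *	    xig = (struct xinpgen *)((char *)xig + xig->xig_len)) {
 *		struct xinpcb *xip = (struct xinpcb *)xig;
 *		// xip->inp_inc, xip->inp_vflag, ... describe one PCB
 *	}
 *	free(buf);
 */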

struct inpcbport {
	struct epoch_context phd_epoch_ctx;
	CK_LIST_ENTRY(inpcbport) phd_hash;
	struct	inpcbhead phd_pcblist;
	u_short	phd_port;
};

struct in_pcblist {
	int	il_count;
	struct	epoch_context il_epoch_ctx;
	struct	inpcbinfo *il_pcbinfo;
	struct	inpcb *il_inp_list[0];
};

/*-
 * Global data structure for each high-level protocol (UDP, TCP, ...) in both
 * IPv4 and IPv6.  Holds inpcb lists and information for managing them.
 *
 * Each pcbinfo is protected by three locks: ipi_lock, ipi_hash_lock and
 * ipi_list_lock:
 *  - ipi_lock covering the global pcb list stability during loop iteration,
 *  - ipi_hash_lock covering the hashed lookup tables,
 *  - ipi_list_lock covering mutable global fields (such as the global
 *    pcb list)
 *
 * The lock order is:
 *
 *    ipi_lock (before)
 *    inpcb locks (before)
 *    ipi_list locks (before)
 *    {ipi_hash_lock, pcbgroup locks}
 *
 * Locking key:
 *
 * (c) Constant or nearly constant after initialisation
 * (e) Protected by the net_epoch_preempt epoch
 * (g) Locked by ipi_lock
 * (l) Locked by ipi_list_lock
 * (h) Read using either net_epoch_preempt or inpcb lock; write requires both
 *     ipi_hash_lock and inpcb lock
 * (p) Protected by one or more pcbgroup locks
 * (x) Synchronisation properties poorly defined
 */
struct inpcbinfo {
	/*
	 * Global lock protecting inpcb list modification
	 */
	struct mtx		 ipi_lock;

	/*
	 * Global list of inpcbs on the protocol.
	 */
	struct inpcbhead	*ipi_listhead;		/* [r](e) [w](g/l) */
	u_int			 ipi_count;		/* (l) */

	/*
	 * Generation count -- incremented each time a connection is allocated
	 * or freed.
	 */
	u_quad_t		 ipi_gencnt;		/* (l) */

	/*
	 * Fields associated with port lookup and allocation.
	 */
	u_short			 ipi_lastport;		/* (x) */
	u_short			 ipi_lastlow;		/* (x) */
	u_short			 ipi_lasthi;		/* (x) */

	/*
	 * UMA zone from which inpcbs are allocated for this protocol.
	 */
	struct	uma_zone	*ipi_zone;		/* (c) */

	/*
	 * Connection groups associated with this protocol.  These fields are
	 * constant, but pcbgroup structures themselves are protected by
	 * per-pcbgroup locks.
	 */
	struct inpcbgroup	*ipi_pcbgroups;		/* (c) */
	u_int			 ipi_npcbgroups;	/* (c) */
	u_int			 ipi_hashfields;	/* (c) */

	/*
	 * Global lock protecting modification of the non-pcbgroup hash
	 * lookup tables.
	 */
	struct mtx		 ipi_hash_lock;

	/*
	 * Global hash of inpcbs, hashed by local and foreign addresses and
	 * port numbers.
	 */
	struct inpcbhead	*ipi_hashbase;		/* (h) */
	u_long			 ipi_hashmask;		/* (h) */

	/*
	 * Global hash of inpcbs, hashed by only local port number.
	 */
	struct inpcbporthead	*ipi_porthashbase;	/* (h) */
	u_long			 ipi_porthashmask;	/* (h) */

	/*
	 * List of wildcard inpcbs for use with pcbgroups.  In the past, was
	 * per-pcbgroup but is now global.  All pcbgroup locks must be held
	 * to modify the list, so any is sufficient to read it.
	 */
	struct inpcbhead	*ipi_wildbase;		/* (p) */
	u_long			 ipi_wildmask;		/* (p) */

	/*
	 * Load balance groups used for the SO_REUSEPORT_LB option,
	 * hashed by local port.
	 */
	struct	inpcblbgrouphead *ipi_lbgrouphashbase;	/* (h) */
	u_long			 ipi_lbgrouphashmask;	/* (h) */

	/*
	 * Pointer to network stack instance
	 */
	struct vnet		*ipi_vnet;		/* (c) */

	/*
	 * Spare pointers for general use.
	 */
	void			*ipi_pspare[2];

	/*
	 * Global lock protecting global inpcb list, inpcb count, etc.
	 */
	struct rwlock		 ipi_list_lock;
};

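/*
 * Illustrative sketch only (not part of this header): taking several of the
 * locks named above in the documented order, using the INP_* macros defined
 * later in this file.  Real insertion/removal paths live in in_pcb.c; this
 * merely shows the ordering.
 *
 *	INP_INFO_WLOCK(pcbinfo);	// ipi_lock
 *	INP_WLOCK(inp);			// per-inpcb lock
 *	INP_LIST_WLOCK(pcbinfo);	// ipi_list_lock
 *	INP_HASH_WLOCK(pcbinfo);	// ipi_hash_lock
 *	// ... modify global pcb state ...
 *	INP_HASH_WUNLOCK(pcbinfo);
 *	INP_LIST_WUNLOCK(pcbinfo);
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WUNLOCK(pcbinfo);
 */
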
#ifdef _KERNEL
/*
 * Connection groups hold sets of connections that have similar CPU/thread
 * affinity.  Each connection belongs to exactly one connection group.
 */
struct inpcbgroup {
	/*
	 * Per-connection group hash of inpcbs, hashed by local and foreign
	 * addresses and port numbers.
	 */
	struct inpcbhead	*ipg_hashbase;		/* (c) */
	u_long			 ipg_hashmask;		/* (c) */

	/*
	 * Notional affinity of this pcbgroup.
	 */
	u_int			 ipg_cpu;		/* (p) */

	/*
	 * Per-connection group lock, not to be confused with ipi_lock.
	 * Protects the hash table hung off the group, but also the global
	 * wildcard list in inpcbinfo.
	 */
	struct mtx		 ipg_lock;
} __aligned(CACHE_LINE_SIZE);

/*
 * Load balance groups used for the SO_REUSEPORT_LB socket option.  Each group
 * (or unique address:port combination) can be re-used at most
 * INPCBLBGROUP_SIZMAX (256) times.  The inpcbs are stored in il_inp which
 * is dynamically resized as processes bind/unbind to that specific group.
 */
struct inpcblbgroup {
	CK_LIST_ENTRY(inpcblbgroup) il_list;
	struct epoch_context il_epoch_ctx;
	uint16_t	il_lport;			/* (c) */
	u_char		il_vflag;			/* (c) */
	u_int8_t	il_numa_domain;
	uint32_t	il_pad2;
	union in_dependaddr il_dependladdr;		/* (c) */
#define	il_laddr	il_dependladdr.id46_addr.ia46_addr4
#define	il6_laddr	il_dependladdr.id6_addr
	uint32_t	il_inpsiz;	/* max count in il_inp[] (h) */
	uint32_t	il_inpcnt;	/* cur count in il_inp[] (h) */
	struct inpcb	*il_inp[];			/* (h) */
};

#define	INP_LOCK_INIT(inp, d, t) \
	rw_init_flags(&(inp)->inp_lock, (t), RW_RECURSE | RW_DUPOK)
#define	INP_LOCK_DESTROY(inp)	rw_destroy(&(inp)->inp_lock)
#define	INP_RLOCK(inp)		rw_rlock(&(inp)->inp_lock)
#define	INP_WLOCK(inp)		rw_wlock(&(inp)->inp_lock)
#define	INP_TRY_RLOCK(inp)	rw_try_rlock(&(inp)->inp_lock)
#define	INP_TRY_WLOCK(inp)	rw_try_wlock(&(inp)->inp_lock)
#define	INP_RUNLOCK(inp)	rw_runlock(&(inp)->inp_lock)
#define	INP_WUNLOCK(inp)	rw_wunlock(&(inp)->inp_lock)
#define	INP_UNLOCK(inp)		rw_unlock(&(inp)->inp_lock)
#define	INP_TRY_UPGRADE(inp)	rw_try_upgrade(&(inp)->inp_lock)
#define	INP_DOWNGRADE(inp)	rw_downgrade(&(inp)->inp_lock)
#define	INP_WLOCKED(inp)	rw_wowned(&(inp)->inp_lock)
#define	INP_LOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_LOCKED)
#define	INP_RLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_RLOCKED)
#define	INP_WLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_WLOCKED)
#define	INP_UNLOCK_ASSERT(inp)	rw_assert(&(inp)->inp_lock, RA_UNLOCKED)

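/*
 * Illustrative sketch only (not part of this header): a common pattern when
 * operating on an inpcb is to take the write lock and then re-check whether
 * the connection is still usable, using the INP_TIMEWAIT and INP_DROPPED
 * flags defined further below.  Error handling is schematic.
 *
 *	INP_WLOCK(inp);
 *	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
 *		INP_WUNLOCK(inp);
 *		return (ECONNRESET);	// connection is gone or in TIMEWAIT
 *	}
 *	// ... operate on the connection ...
 *	INP_WUNLOCK(inp);
 */
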
/*
 * These locking functions are for inpcb consumers outside of sys/netinet;
 * more specifically, they were added for the benefit of TOE drivers.  The
 * macros are reserved for use by the stack.
 */
void inp_wlock(struct inpcb *);
void inp_wunlock(struct inpcb *);
void inp_rlock(struct inpcb *);
void inp_runlock(struct inpcb *);

#ifdef INVARIANT_SUPPORT
void inp_lock_assert(struct inpcb *);
void inp_unlock_assert(struct inpcb *);
#else
#define	inp_lock_assert(inp)	do {} while (0)
#define	inp_unlock_assert(inp)	do {} while (0)
#endif

void	inp_apply_all(void (*func)(struct inpcb *, void *), void *arg);
int	inp_ip_tos_get(const struct inpcb *inp);
void	inp_ip_tos_set(struct inpcb *inp, int val);
struct socket *
	inp_inpcbtosocket(struct inpcb *inp);
struct tcpcb *
	inp_inpcbtotcpcb(struct inpcb *inp);
void	inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
	    uint32_t *faddr, uint16_t *fp);
int	inp_so_options(const struct inpcb *inp);

#endif /* _KERNEL */

#define	INP_INFO_LOCK_INIT(ipi, d) \
	mtx_init(&(ipi)->ipi_lock, (d), NULL, MTX_DEF | MTX_RECURSE)
#define	INP_INFO_LOCK_DESTROY(ipi)	mtx_destroy(&(ipi)->ipi_lock)
#define	INP_INFO_WLOCK(ipi)	mtx_lock(&(ipi)->ipi_lock)
#define	INP_INFO_TRY_WLOCK(ipi)	mtx_trylock(&(ipi)->ipi_lock)
#define	INP_INFO_WLOCKED(ipi)	mtx_owned(&(ipi)->ipi_lock)
#define	INP_INFO_WUNLOCK(ipi)	mtx_unlock(&(ipi)->ipi_lock)
#define	INP_INFO_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || \
					    mtx_owned(&(ipi)->ipi_lock))
#define	INP_INFO_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_lock, MA_OWNED)
#define	INP_INFO_WUNLOCK_ASSERT(ipi)	\
	mtx_assert(&(ipi)->ipi_lock, MA_NOTOWNED)

#define	INP_LIST_LOCK_INIT(ipi, d) \
	rw_init_flags(&(ipi)->ipi_list_lock, (d), 0)
#define	INP_LIST_LOCK_DESTROY(ipi)	rw_destroy(&(ipi)->ipi_list_lock)
#define	INP_LIST_RLOCK(ipi)	rw_rlock(&(ipi)->ipi_list_lock)
#define	INP_LIST_WLOCK(ipi)	rw_wlock(&(ipi)->ipi_list_lock)
#define	INP_LIST_TRY_RLOCK(ipi)	rw_try_rlock(&(ipi)->ipi_list_lock)
#define	INP_LIST_TRY_WLOCK(ipi)	rw_try_wlock(&(ipi)->ipi_list_lock)
#define	INP_LIST_TRY_UPGRADE(ipi)	rw_try_upgrade(&(ipi)->ipi_list_lock)
#define	INP_LIST_RUNLOCK(ipi)	rw_runlock(&(ipi)->ipi_list_lock)
#define	INP_LIST_WUNLOCK(ipi)	rw_wunlock(&(ipi)->ipi_list_lock)
#define	INP_LIST_LOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_LOCKED)
#define	INP_LIST_RLOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_RLOCKED)
#define	INP_LIST_WLOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_WLOCKED)
#define	INP_LIST_UNLOCK_ASSERT(ipi) \
	rw_assert(&(ipi)->ipi_list_lock, RA_UNLOCKED)

#define	INP_HASH_LOCK_INIT(ipi, d)	mtx_init(&(ipi)->ipi_hash_lock, (d), NULL, MTX_DEF)
#define	INP_HASH_LOCK_DESTROY(ipi)	mtx_destroy(&(ipi)->ipi_hash_lock)
#define	INP_HASH_WLOCK(ipi)		mtx_lock(&(ipi)->ipi_hash_lock)
#define	INP_HASH_WUNLOCK(ipi)		mtx_unlock(&(ipi)->ipi_hash_lock)
#define	INP_HASH_LOCK_ASSERT(ipi)	MPASS(in_epoch(net_epoch_preempt) || \
					    mtx_owned(&(ipi)->ipi_hash_lock))
#define	INP_HASH_WLOCK_ASSERT(ipi)	mtx_assert(&(ipi)->ipi_hash_lock, MA_OWNED)

#define	INP_GROUP_LOCK_INIT(ipg, d)	mtx_init(&(ipg)->ipg_lock, (d), NULL, \
					    MTX_DEF | MTX_DUPOK)
#define	INP_GROUP_LOCK_DESTROY(ipg)	mtx_destroy(&(ipg)->ipg_lock)

#define	INP_GROUP_LOCK(ipg)		mtx_lock(&(ipg)->ipg_lock)
#define	INP_GROUP_LOCK_ASSERT(ipg)	mtx_assert(&(ipg)->ipg_lock, MA_OWNED)
#define	INP_GROUP_UNLOCK(ipg)		mtx_unlock(&(ipg)->ipg_lock)

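/*
 * Illustrative sketch only (not part of this header): as described in the
 * struct inpcb comments, iterating the global PCB list takes the pcblist
 * lock in write mode so that the walk sees a stable snapshot:
 *
 *	struct inpcb *inp;
 *
 *	INP_LIST_WLOCK(pcbinfo);
 *	CK_LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
 *		// examine one PCB; take INP_RLOCK(inp) before
 *		// dereferencing fields marked (i)
 *	}
 *	INP_LIST_WUNLOCK(pcbinfo);
 */
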
#define	INP_PCBHASH(faddr, lport, fport, mask) \
	(((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))
#define	INP_PCBPORTHASH(lport, mask) \
	(ntohs((lport)) & (mask))
#define	INP_PCBLBGROUP_PKTHASH(faddr, lport, fport) \
	((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport)))
#define	INP6_PCBHASHKEY(faddr)	((faddr)->s6_addr32[3])

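/*
 * Illustrative sketch only (not part of this header): how the hash macros
 * above are typically combined with the masks stored in struct inpcbinfo to
 * pick a hash chain for an IPv4 4-tuple (values in network byte order; the
 * variable names are hypothetical):
 *
 *	struct inpcbhead *head;
 *
 *	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
 *	    pcbinfo->ipi_hashmask)];
 *	// CK_LIST_FOREACH over 'head' then compares the full tuple, since
 *	// different tuples can hash to the same chain.
 */
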
/*
 * Flags for inp_vflag -- historically version flags only
 */
#define	INP_IPV4	0x1
#define	INP_IPV6	0x2
#define	INP_IPV6PROTO	0x4		/* opened under IPv6 protocol */

/*
 * Flags for inp_flags.
 */
#define	INP_RECVOPTS		0x00000001 /* receive incoming IP options */
#define	INP_RECVRETOPTS		0x00000002 /* receive IP options for reply */
#define	INP_RECVDSTADDR		0x00000004 /* receive IP dst address */
#define	INP_HDRINCL		0x00000008 /* user supplies entire IP header */
#define	INP_HIGHPORT		0x00000010 /* user wants "high" port binding */
#define	INP_LOWPORT		0x00000020 /* user wants "low" port binding */
#define	INP_ANONPORT		0x00000040 /* port chosen for user */
#define	INP_RECVIF		0x00000080 /* receive incoming interface */
#define	INP_MTUDISC		0x00000100 /* user can do MTU discovery */
				/*	   0x00000200 unused: was INP_FAITH */
#define	INP_RECVTTL		0x00000400 /* receive incoming IP TTL */
#define	INP_DONTFRAG		0x00000800 /* don't fragment packet */
#define	INP_BINDANY		0x00001000 /* allow bind to any address */
#define	INP_INHASHLIST		0x00002000 /* in_pcbinshash() has been called */
#define	INP_RECVTOS		0x00004000 /* receive incoming IP TOS */
#define	IN6P_IPV6_V6ONLY	0x00008000 /* restrict AF_INET6 socket for v6 */
#define	IN6P_PKTINFO		0x00010000 /* receive IP6 dst and I/F */
#define	IN6P_HOPLIMIT		0x00020000 /* receive hoplimit */
#define	IN6P_HOPOPTS		0x00040000 /* receive hop-by-hop options */
#define	IN6P_DSTOPTS		0x00080000 /* receive dst options after rthdr */
#define	IN6P_RTHDR		0x00100000 /* receive routing header */
#define	IN6P_RTHDRDSTOPTS	0x00200000 /* receive dstoptions before rthdr */
#define	IN6P_TCLASS		0x00400000 /* receive traffic class value */
#define	IN6P_AUTOFLOWLABEL	0x00800000 /* attach flowlabel automatically */
#define	INP_TIMEWAIT		0x01000000 /* in TIMEWAIT, ppcb is tcptw */
#define	INP_ONESBCAST		0x02000000 /* send all-ones broadcast */
#define	INP_DROPPED		0x04000000 /* protocol drop flag */
#define	INP_SOCKREF		0x08000000 /* strong socket reference */
#define	INP_RESERVED_0		0x10000000 /* reserved field */
#define	INP_RESERVED_1		0x20000000 /* reserved field */
#define	IN6P_RFC2292		0x40000000 /* used RFC2292 API on the socket */
#define	IN6P_MTU		0x80000000 /* receive path MTU */

#define	INP_CONTROLOPTS		(INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\
				 INP_RECVIF|INP_RECVTTL|INP_RECVTOS|\
				 IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\
				 IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\
				 IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\
				 IN6P_MTU)

/*
 * Flags for inp_flags2.
 */
#define	INP_MBUF_L_ACKS		0x00000001 /* We need large mbufs for ack compression */
#define	INP_MBUF_ACKCMP		0x00000002 /* TCP mbuf ack compression ok */
#define	INP_PCBGROUPWILD	0x00000004 /* in pcbgroup wildcard list */
#define	INP_REUSEPORT		0x00000008 /* SO_REUSEPORT option is set */
#define	INP_FREED		0x00000010 /* inp itself is not valid */
#define	INP_REUSEADDR		0x00000020 /* SO_REUSEADDR option is set */
#define	INP_BINDMULTI		0x00000040 /* IP_BINDMULTI option is set */
#define	INP_RSS_BUCKET_SET	0x00000080 /* IP_RSS_LISTEN_BUCKET is set */
#define	INP_RECVFLOWID		0x00000100 /* populate recv datagram with flow info */
#define	INP_RECVRSSBUCKETID	0x00000200 /* populate recv datagram with bucket id */
#define	INP_RATE_LIMIT_CHANGED	0x00000400 /* rate limit needs attention */
#define	INP_ORIGDSTADDR		0x00000800 /* receive IP dst address/port */
#define	INP_CANNOT_DO_ECN	0x00001000 /* The stack does not do ECN */
#define	INP_REUSEPORT_LB	0x00002000 /* SO_REUSEPORT_LB option is set */
#define	INP_SUPPORTS_MBUFQ	0x00004000 /* Supports the mbuf queue method of LRO */
#define	INP_MBUF_QUEUE_READY	0x00008000 /* The transport is pacing, inputs can be queued */
#define	INP_DONT_SACK_QUEUE	0x00010000 /* If a sack arrives do not wake me */
#define	INP_2PCP_SET		0x00020000 /* If the Eth PCP should be set explicitly */
#define	INP_2PCP_BIT0		0x00040000 /* Eth PCP Bit 0 */
#define	INP_2PCP_BIT1		0x00080000 /* Eth PCP Bit 1 */
#define	INP_2PCP_BIT2		0x00100000 /* Eth PCP Bit 2 */
#define	INP_2PCP_BASE		INP_2PCP_BIT0
#define	INP_2PCP_MASK		(INP_2PCP_BIT0 | INP_2PCP_BIT1 | INP_2PCP_BIT2)
#define	INP_2PCP_SHIFT		18 /* shift PCP field in/out of inp_flags2 */
/*
 * Flags passed to in_pcblookup*() functions.
 */
#define	INPLOOKUP_WILDCARD	0x00000001	/* Allow wildcard sockets. */
#define	INPLOOKUP_RLOCKPCB	0x00000002	/* Return inpcb read-locked. */
#define	INPLOOKUP_WLOCKPCB	0x00000004	/* Return inpcb write-locked. */

#define	INPLOOKUP_MASK	(INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB | \
			 INPLOOKUP_WLOCKPCB)

#define	sotoinpcb(so)	((struct inpcb *)(so)->so_pcb)

#define	INP_SOCKAF(so)	so->so_proto->pr_domain->dom_family

#define	INP_CHECK_SOCKAF(so, af)	(INP_SOCKAF(so) == af)

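/*
 * Illustrative sketch only (not part of this header): the INP_2PCP_* bits
 * carry an 802.1p priority (PCP) value inside inp_flags2.  Storing and
 * retrieving it uses the base/mask/shift definitions above, e.g.:
 *
 *	// store a 3-bit PCP value (0..7) and mark it as explicitly set
 *	inp->inp_flags2 &= ~INP_2PCP_MASK;
 *	inp->inp_flags2 |= (pcp << INP_2PCP_SHIFT) & INP_2PCP_MASK;
 *	inp->inp_flags2 |= INP_2PCP_SET;
 *
 *	// retrieve it when building the outgoing packet
 *	if (inp->inp_flags2 & INP_2PCP_SET)
 *		pcp = (inp->inp_flags2 & INP_2PCP_MASK) >> INP_2PCP_SHIFT;
 */
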
/*
 * Constants for pcbinfo.ipi_hashfields.
 */
#define	IPI_HASHFIELDS_NONE	0
#define	IPI_HASHFIELDS_2TUPLE	1
#define	IPI_HASHFIELDS_4TUPLE	2

#ifdef _KERNEL
VNET_DECLARE(int, ipport_reservedhigh);
VNET_DECLARE(int, ipport_reservedlow);
VNET_DECLARE(int, ipport_lowfirstauto);
VNET_DECLARE(int, ipport_lowlastauto);
VNET_DECLARE(int, ipport_firstauto);
VNET_DECLARE(int, ipport_lastauto);
VNET_DECLARE(int, ipport_hifirstauto);
VNET_DECLARE(int, ipport_hilastauto);
VNET_DECLARE(int, ipport_randomized);
VNET_DECLARE(int, ipport_randomcps);
VNET_DECLARE(int, ipport_randomtime);
VNET_DECLARE(int, ipport_stoprandom);
VNET_DECLARE(int, ipport_tcpallocs);

#define	V_ipport_reservedhigh	VNET(ipport_reservedhigh)
#define	V_ipport_reservedlow	VNET(ipport_reservedlow)
#define	V_ipport_lowfirstauto	VNET(ipport_lowfirstauto)
#define	V_ipport_lowlastauto	VNET(ipport_lowlastauto)
#define	V_ipport_firstauto	VNET(ipport_firstauto)
#define	V_ipport_lastauto	VNET(ipport_lastauto)
#define	V_ipport_hifirstauto	VNET(ipport_hifirstauto)
#define	V_ipport_hilastauto	VNET(ipport_hilastauto)
#define	V_ipport_randomized	VNET(ipport_randomized)
#define	V_ipport_randomcps	VNET(ipport_randomcps)
#define	V_ipport_randomtime	VNET(ipport_randomtime)
#define	V_ipport_stoprandom	VNET(ipport_stoprandom)
#define	V_ipport_tcpallocs	VNET(ipport_tcpallocs)

void	in_pcbinfo_destroy(struct inpcbinfo *);
void	in_pcbinfo_init(struct inpcbinfo *, const char *, struct inpcbhead *,
	    int, int, char *, uma_init, u_int);

int	in_pcbbind_check_bindmulti(const struct inpcb *ni,
	    const struct inpcb *oi);

struct inpcbgroup *
	in_pcbgroup_byhash(struct inpcbinfo *, u_int, uint32_t);
struct inpcbgroup *
	in_pcbgroup_byinpcb(struct inpcb *);
struct inpcbgroup *
	in_pcbgroup_bytuple(struct inpcbinfo *, struct in_addr, u_short,
	    struct in_addr, u_short);
void	in_pcbgroup_destroy(struct inpcbinfo *);
int	in_pcbgroup_enabled(struct inpcbinfo *);
void	in_pcbgroup_init(struct inpcbinfo *, u_int, int);
void	in_pcbgroup_remove(struct inpcb *);
void	in_pcbgroup_update(struct inpcb *);
void	in_pcbgroup_update_mbuf(struct inpcb *, struct mbuf *);

void	in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *);
int	in_pcballoc(struct socket *, struct inpcbinfo *);
int	in_pcbbind(struct inpcb *, struct sockaddr *, struct ucred *);
int	in_pcb_lport_dest(struct inpcb *inp, struct sockaddr *lsa,
	    u_short *lportp, struct sockaddr *fsa, u_short fport,
	    struct ucred *cred, int lookupflags);
int	in_pcb_lport(struct inpcb *, struct in_addr *, u_short *,
	    struct ucred *, int);
int	in_pcbbind_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
	    u_short *, struct ucred *);
int	in_pcbconnect(struct inpcb *, struct sockaddr *, struct ucred *);
int	in_pcbconnect_mbuf(struct inpcb *, struct sockaddr *, struct ucred *,
	    struct mbuf *, bool);
int	in_pcbconnect_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
	    u_short *, in_addr_t *, u_short *, struct inpcb **,
	    struct ucred *);
void	in_pcbdetach(struct inpcb *);
void	in_pcbdisconnect(struct inpcb *);
void	in_pcbdrop(struct inpcb *);
void	in_pcbfree(struct inpcb *);
int	in_pcbinshash(struct inpcb *);
int	in_pcbinshash_mbuf(struct inpcb *, struct mbuf *);
int	in_pcbladdr(struct inpcb *, struct in_addr *, struct in_addr *,
	    struct ucred *);
int	in_pcblbgroup_numa(struct inpcb *, int arg);
struct inpcb *
	in_pcblookup_local(struct inpcbinfo *,
	    struct in_addr, u_short, int, struct ucred *);
struct inpcb *
	in_pcblookup(struct inpcbinfo *, struct in_addr, u_int,
	    struct in_addr, u_int, int, struct ifnet *);
struct inpcb *
	in_pcblookup_mbuf(struct inpcbinfo *, struct in_addr, u_int,
	    struct in_addr, u_int, int, struct ifnet *, struct mbuf *);
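
/*
 * Illustrative sketch only (not part of this header): looking up the PCB for
 * an inbound IPv4 TCP segment with in_pcblookup().  Addresses and ports are
 * in network byte order; INPLOOKUP_WILDCARD allows a match against a
 * listening (unconnected) socket, and INPLOOKUP_RLOCKPCB asks for the result
 * to be returned read-locked.
 *
 *	struct inpcb *inp;
 *
 *	inp = in_pcblookup(&V_tcbinfo, ip->ip_src, th->th_sport,
 *	    ip->ip_dst, th->th_dport,
 *	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, ifp);
 *	if (inp != NULL) {
 *		// ... use the connection ...
 *		INP_RUNLOCK(inp);
 *	}
 */
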
void	in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr,
	    int, struct inpcb *(*)(struct inpcb *, int));
void	in_pcbref(struct inpcb *);
void	in_pcbrehash(struct inpcb *);
void	in_pcbrehash_mbuf(struct inpcb *, struct mbuf *);
int	in_pcbrele(struct inpcb *);
int	in_pcbrele_rlocked(struct inpcb *);
int	in_pcbrele_wlocked(struct inpcb *);
void	in_pcblist_rele_rlocked(epoch_context_t ctx);
void	in_losing(struct inpcb *);
void	in_pcbsetsolabel(struct socket *so);
int	in_getpeeraddr(struct socket *so, struct sockaddr **nam);
int	in_getsockaddr(struct socket *so, struct sockaddr **nam);
struct sockaddr *
	in_sockaddr(in_port_t port, struct in_addr *addr);
void	in_pcbsosetlabel(struct socket *so);
#ifdef RATELIMIT
int	in_pcboutput_txrtlmt_locked(struct inpcb *, struct ifnet *,
	    struct mbuf *, uint32_t);
int	in_pcbattach_txrtlmt(struct inpcb *, struct ifnet *, uint32_t, uint32_t,
	    uint32_t, struct m_snd_tag **);
void	in_pcbdetach_txrtlmt(struct inpcb *);
void	in_pcbdetach_tag(struct m_snd_tag *);
int	in_pcbmodify_txrtlmt(struct inpcb *, uint32_t);
int	in_pcbquery_txrtlmt(struct inpcb *, uint32_t *);
int	in_pcbquery_txrlevel(struct inpcb *, uint32_t *);
void	in_pcboutput_txrtlmt(struct inpcb *, struct ifnet *, struct mbuf *);
void	in_pcboutput_eagain(struct inpcb *);
#endif
#endif /* _KERNEL */

#endif /* !_NETINET_IN_PCB_H_ */