1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Generic INET transport hashtables 8 * 9 * Authors: Lotsa people, from code originally in tcp 10 */ 11 12 #include <linux/module.h> 13 #include <linux/random.h> 14 #include <linux/sched.h> 15 #include <linux/slab.h> 16 #include <linux/wait.h> 17 #include <linux/vmalloc.h> 18 #include <linux/memblock.h> 19 20 #include <net/addrconf.h> 21 #include <net/inet_connection_sock.h> 22 #include <net/inet_hashtables.h> 23 #if IS_ENABLED(CONFIG_IPV6) 24 #include <net/inet6_hashtables.h> 25 #endif 26 #include <net/secure_seq.h> 27 #include <net/hotdata.h> 28 #include <net/ip.h> 29 #include <net/tcp.h> 30 #include <net/sock_reuseport.h> 31 32 u32 inet_ehashfn(const struct net *net, const __be32 laddr, 33 const __u16 lport, const __be32 faddr, 34 const __be16 fport) 35 { 36 net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret)); 37 38 return __inet_ehashfn(laddr, lport, faddr, fport, 39 inet_ehash_secret + net_hash_mix(net)); 40 } 41 EXPORT_SYMBOL_GPL(inet_ehashfn); 42 43 /* This function handles inet_sock, but also timewait and request sockets 44 * for IPv4/IPv6. 45 */ 46 static u32 sk_ehashfn(const struct sock *sk) 47 { 48 #if IS_ENABLED(CONFIG_IPV6) 49 if (sk->sk_family == AF_INET6 && 50 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) 51 return inet6_ehashfn(sock_net(sk), 52 &sk->sk_v6_rcv_saddr, sk->sk_num, 53 &sk->sk_v6_daddr, sk->sk_dport); 54 #endif 55 return inet_ehashfn(sock_net(sk), 56 sk->sk_rcv_saddr, sk->sk_num, 57 sk->sk_daddr, sk->sk_dport); 58 } 59 60 /* 61 * Allocate and initialize a new local port bind bucket. 62 * The bindhash mutex for snum's hash chain must be held here. 63 */ 64 struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, 65 struct net *net, 66 struct inet_bind_hashbucket *head, 67 const unsigned short snum, 68 int l3mdev) 69 { 70 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); 71 72 if (tb) { 73 write_pnet(&tb->ib_net, net); 74 tb->l3mdev = l3mdev; 75 tb->port = snum; 76 tb->fastreuse = 0; 77 tb->fastreuseport = 0; 78 INIT_HLIST_HEAD(&tb->bhash2); 79 hlist_add_head(&tb->node, &head->chain); 80 } 81 return tb; 82 } 83 84 /* 85 * Caller must hold hashbucket lock for this tb with local BH disabled 86 */ 87 void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) 88 { 89 if (hlist_empty(&tb->bhash2)) { 90 __hlist_del(&tb->node); 91 kmem_cache_free(cachep, tb); 92 } 93 } 94 95 bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net, 96 unsigned short port, int l3mdev) 97 { 98 return net_eq(ib_net(tb), net) && tb->port == port && 99 tb->l3mdev == l3mdev; 100 } 101 102 static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2, 103 struct net *net, 104 struct inet_bind_hashbucket *head, 105 struct inet_bind_bucket *tb, 106 const struct sock *sk) 107 { 108 write_pnet(&tb2->ib_net, net); 109 tb2->l3mdev = tb->l3mdev; 110 tb2->port = tb->port; 111 #if IS_ENABLED(CONFIG_IPV6) 112 BUILD_BUG_ON(USHRT_MAX < (IPV6_ADDR_ANY | IPV6_ADDR_MAPPED)); 113 if (sk->sk_family == AF_INET6) { 114 tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); 115 tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr; 116 } else { 117 tb2->addr_type = IPV6_ADDR_MAPPED; 118 ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr); 119 } 120 #else 121 tb2->rcv_saddr = sk->sk_rcv_saddr; 122 #endif 123 INIT_HLIST_HEAD(&tb2->owners); 124 hlist_add_head(&tb2->node, &head->chain); 125 hlist_add_head(&tb2->bhash_node, &tb->bhash2); 126 } 127 128 struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep, 129 struct net *net, 130 struct inet_bind_hashbucket *head, 131 struct inet_bind_bucket *tb, 132 const struct sock *sk) 133 { 134 struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC); 135 136 if (tb2) 137 inet_bind2_bucket_init(tb2, net, head, tb, sk); 138 139 return tb2; 140 } 141 142 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 143 void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb) 144 { 145 if (hlist_empty(&tb->owners)) { 146 __hlist_del(&tb->node); 147 __hlist_del(&tb->bhash_node); 148 kmem_cache_free(cachep, tb); 149 } 150 } 151 152 static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2, 153 const struct sock *sk) 154 { 155 #if IS_ENABLED(CONFIG_IPV6) 156 if (sk->sk_family == AF_INET6) 157 return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); 158 159 if (tb2->addr_type != IPV6_ADDR_MAPPED) 160 return false; 161 #endif 162 return tb2->rcv_saddr == sk->sk_rcv_saddr; 163 } 164 165 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, 166 struct inet_bind2_bucket *tb2, unsigned short port) 167 { 168 inet_sk(sk)->inet_num = port; 169 inet_csk(sk)->icsk_bind_hash = tb; 170 inet_csk(sk)->icsk_bind2_hash = tb2; 171 sk_add_bind_node(sk, &tb2->owners); 172 } 173 174 /* 175 * Get rid of any references to a local port held by the given sock. 176 */ 177 static void __inet_put_port(struct sock *sk) 178 { 179 struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk); 180 struct inet_bind_hashbucket *head, *head2; 181 struct net *net = sock_net(sk); 182 struct inet_bind_bucket *tb; 183 int bhash; 184 185 bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size); 186 head = &hashinfo->bhash[bhash]; 187 head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num); 188 189 spin_lock(&head->lock); 190 tb = inet_csk(sk)->icsk_bind_hash; 191 inet_csk(sk)->icsk_bind_hash = NULL; 192 inet_sk(sk)->inet_num = 0; 193 194 spin_lock(&head2->lock); 195 if (inet_csk(sk)->icsk_bind2_hash) { 196 struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash; 197 198 __sk_del_bind_node(sk); 199 inet_csk(sk)->icsk_bind2_hash = NULL; 200 inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2); 201 } 202 spin_unlock(&head2->lock); 203 204 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); 205 spin_unlock(&head->lock); 206 } 207 208 void inet_put_port(struct sock *sk) 209 { 210 local_bh_disable(); 211 __inet_put_port(sk); 212 local_bh_enable(); 213 } 214 EXPORT_SYMBOL(inet_put_port); 215 216 int __inet_inherit_port(const struct sock *sk, struct sock *child) 217 { 218 struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk); 219 unsigned short port = inet_sk(child)->inet_num; 220 struct inet_bind_hashbucket *head, *head2; 221 bool created_inet_bind_bucket = false; 222 struct net *net = sock_net(sk); 223 bool update_fastreuse = false; 224 struct inet_bind2_bucket *tb2; 225 struct inet_bind_bucket *tb; 226 int bhash, l3mdev; 227 228 bhash = inet_bhashfn(net, port, table->bhash_size); 229 head = &table->bhash[bhash]; 230 head2 = inet_bhashfn_portaddr(table, child, net, port); 231 232 spin_lock(&head->lock); 233 spin_lock(&head2->lock); 234 tb = inet_csk(sk)->icsk_bind_hash; 235 tb2 = inet_csk(sk)->icsk_bind2_hash; 236 if (unlikely(!tb || !tb2)) { 237 spin_unlock(&head2->lock); 238 spin_unlock(&head->lock); 239 return -ENOENT; 240 } 241 if (tb->port != port) { 242 l3mdev = inet_sk_bound_l3mdev(sk); 243 244 /* NOTE: using tproxy and redirecting skbs to a proxy 245 * on a different listener port breaks the assumption 246 * that the listener socket's icsk_bind_hash is the same 247 * as that of the child socket. We have to look up or 248 * create a new bind bucket for the child here. */ 249 inet_bind_bucket_for_each(tb, &head->chain) { 250 if (inet_bind_bucket_match(tb, net, port, l3mdev)) 251 break; 252 } 253 if (!tb) { 254 tb = inet_bind_bucket_create(table->bind_bucket_cachep, 255 net, head, port, l3mdev); 256 if (!tb) { 257 spin_unlock(&head2->lock); 258 spin_unlock(&head->lock); 259 return -ENOMEM; 260 } 261 created_inet_bind_bucket = true; 262 } 263 update_fastreuse = true; 264 265 goto bhash2_find; 266 } else if (!inet_bind2_bucket_addr_match(tb2, child)) { 267 l3mdev = inet_sk_bound_l3mdev(sk); 268 269 bhash2_find: 270 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child); 271 if (!tb2) { 272 tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep, 273 net, head2, tb, child); 274 if (!tb2) 275 goto error; 276 } 277 } 278 if (update_fastreuse) 279 inet_csk_update_fastreuse(tb, child); 280 inet_bind_hash(child, tb, tb2, port); 281 spin_unlock(&head2->lock); 282 spin_unlock(&head->lock); 283 284 return 0; 285 286 error: 287 if (created_inet_bind_bucket) 288 inet_bind_bucket_destroy(table->bind_bucket_cachep, tb); 289 spin_unlock(&head2->lock); 290 spin_unlock(&head->lock); 291 return -ENOMEM; 292 } 293 EXPORT_SYMBOL_GPL(__inet_inherit_port); 294 295 static struct inet_listen_hashbucket * 296 inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk) 297 { 298 u32 hash; 299 300 #if IS_ENABLED(CONFIG_IPV6) 301 if (sk->sk_family == AF_INET6) 302 hash = ipv6_portaddr_hash(sock_net(sk), 303 &sk->sk_v6_rcv_saddr, 304 inet_sk(sk)->inet_num); 305 else 306 #endif 307 hash = ipv4_portaddr_hash(sock_net(sk), 308 inet_sk(sk)->inet_rcv_saddr, 309 inet_sk(sk)->inet_num); 310 return inet_lhash2_bucket(h, hash); 311 } 312 313 static inline int compute_score(struct sock *sk, const struct net *net, 314 const unsigned short hnum, const __be32 daddr, 315 const int dif, const int sdif) 316 { 317 int score = -1; 318 319 if (net_eq(sock_net(sk), net) && sk->sk_num == hnum && 320 !ipv6_only_sock(sk)) { 321 if (sk->sk_rcv_saddr != daddr) 322 return -1; 323 324 if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) 325 return -1; 326 score = sk->sk_bound_dev_if ? 2 : 1; 327 328 if (sk->sk_family == PF_INET) 329 score++; 330 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 331 score++; 332 } 333 return score; 334 } 335 336 /** 337 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary. 338 * @net: network namespace. 339 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP. 340 * @skb: context for a potential SK_REUSEPORT program. 341 * @doff: header offset. 342 * @saddr: source address. 343 * @sport: source port. 344 * @daddr: destination address. 345 * @hnum: destination port in host byte order. 346 * @ehashfn: hash function used to generate the fallback hash. 347 * 348 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to 349 * the selected sock or an error. 350 */ 351 struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk, 352 struct sk_buff *skb, int doff, 353 __be32 saddr, __be16 sport, 354 __be32 daddr, unsigned short hnum, 355 inet_ehashfn_t *ehashfn) 356 { 357 struct sock *reuse_sk = NULL; 358 u32 phash; 359 360 if (sk->sk_reuseport) { 361 phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn, 362 net, daddr, hnum, saddr, sport); 363 reuse_sk = reuseport_select_sock(sk, phash, skb, doff); 364 } 365 return reuse_sk; 366 } 367 EXPORT_SYMBOL_GPL(inet_lookup_reuseport); 368 369 /* 370 * Here are some nice properties to exploit here. The BSD API 371 * does not allow a listening sock to specify the remote port nor the 372 * remote address for the connection. So always assume those are both 373 * wildcarded during the search since they can never be otherwise. 374 */ 375 376 /* called with rcu_read_lock() : No refcount taken on the socket */ 377 static struct sock *inet_lhash2_lookup(const struct net *net, 378 struct inet_listen_hashbucket *ilb2, 379 struct sk_buff *skb, int doff, 380 const __be32 saddr, __be16 sport, 381 const __be32 daddr, const unsigned short hnum, 382 const int dif, const int sdif) 383 { 384 struct sock *sk, *result = NULL; 385 struct hlist_nulls_node *node; 386 int score, hiscore = 0; 387 388 sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) { 389 score = compute_score(sk, net, hnum, daddr, dif, sdif); 390 if (score > hiscore) { 391 result = inet_lookup_reuseport(net, sk, skb, doff, 392 saddr, sport, daddr, hnum, inet_ehashfn); 393 if (result) 394 return result; 395 396 result = sk; 397 hiscore = score; 398 } 399 } 400 401 return result; 402 } 403 404 struct sock *inet_lookup_run_sk_lookup(const struct net *net, 405 int protocol, 406 struct sk_buff *skb, int doff, 407 __be32 saddr, __be16 sport, 408 __be32 daddr, u16 hnum, const int dif, 409 inet_ehashfn_t *ehashfn) 410 { 411 struct sock *sk, *reuse_sk; 412 bool no_reuseport; 413 414 no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport, 415 daddr, hnum, dif, &sk); 416 if (no_reuseport || IS_ERR_OR_NULL(sk)) 417 return sk; 418 419 reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, 420 ehashfn); 421 if (reuse_sk) 422 sk = reuse_sk; 423 return sk; 424 } 425 426 struct sock *__inet_lookup_listener(const struct net *net, 427 struct inet_hashinfo *hashinfo, 428 struct sk_buff *skb, int doff, 429 const __be32 saddr, __be16 sport, 430 const __be32 daddr, const unsigned short hnum, 431 const int dif, const int sdif) 432 { 433 struct inet_listen_hashbucket *ilb2; 434 struct sock *result = NULL; 435 unsigned int hash2; 436 437 /* Lookup redirect from BPF */ 438 if (static_branch_unlikely(&bpf_sk_lookup_enabled) && 439 hashinfo == net->ipv4.tcp_death_row.hashinfo) { 440 result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff, 441 saddr, sport, daddr, hnum, dif, 442 inet_ehashfn); 443 if (result) 444 goto done; 445 } 446 447 hash2 = ipv4_portaddr_hash(net, daddr, hnum); 448 ilb2 = inet_lhash2_bucket(hashinfo, hash2); 449 450 result = inet_lhash2_lookup(net, ilb2, skb, doff, 451 saddr, sport, daddr, hnum, 452 dif, sdif); 453 if (result) 454 goto done; 455 456 /* Lookup lhash2 with INADDR_ANY */ 457 hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); 458 ilb2 = inet_lhash2_bucket(hashinfo, hash2); 459 460 result = inet_lhash2_lookup(net, ilb2, skb, doff, 461 saddr, sport, htonl(INADDR_ANY), hnum, 462 dif, sdif); 463 done: 464 if (IS_ERR(result)) 465 return NULL; 466 return result; 467 } 468 EXPORT_SYMBOL_GPL(__inet_lookup_listener); 469 470 /* All sockets share common refcount, but have different destructors */ 471 void sock_gen_put(struct sock *sk) 472 { 473 if (!refcount_dec_and_test(&sk->sk_refcnt)) 474 return; 475 476 if (sk->sk_state == TCP_TIME_WAIT) 477 inet_twsk_free(inet_twsk(sk)); 478 else if (sk->sk_state == TCP_NEW_SYN_RECV) 479 reqsk_free(inet_reqsk(sk)); 480 else 481 sk_free(sk); 482 } 483 EXPORT_SYMBOL_GPL(sock_gen_put); 484 485 void sock_edemux(struct sk_buff *skb) 486 { 487 sock_gen_put(skb->sk); 488 } 489 EXPORT_SYMBOL(sock_edemux); 490 491 struct sock *__inet_lookup_established(const struct net *net, 492 struct inet_hashinfo *hashinfo, 493 const __be32 saddr, const __be16 sport, 494 const __be32 daddr, const u16 hnum, 495 const int dif, const int sdif) 496 { 497 INET_ADDR_COOKIE(acookie, saddr, daddr); 498 const __portpair ports = INET_COMBINED_PORTS(sport, hnum); 499 struct sock *sk; 500 const struct hlist_nulls_node *node; 501 /* Optimize here for direct hit, only listening connections can 502 * have wildcards anyways. 503 */ 504 unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); 505 unsigned int slot = hash & hashinfo->ehash_mask; 506 struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; 507 508 begin: 509 sk_nulls_for_each_rcu(sk, node, &head->chain) { 510 if (sk->sk_hash != hash) 511 continue; 512 if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) { 513 if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) 514 goto out; 515 if (unlikely(!inet_match(net, sk, acookie, 516 ports, dif, sdif))) { 517 sock_gen_put(sk); 518 goto begin; 519 } 520 goto found; 521 } 522 } 523 /* 524 * if the nulls value we got at the end of this lookup is 525 * not the expected one, we must restart lookup. 526 * We probably met an item that was moved to another chain. 527 */ 528 if (get_nulls_value(node) != slot) 529 goto begin; 530 out: 531 sk = NULL; 532 found: 533 return sk; 534 } 535 EXPORT_SYMBOL_GPL(__inet_lookup_established); 536 537 /* called with local bh disabled */ 538 static int __inet_check_established(struct inet_timewait_death_row *death_row, 539 struct sock *sk, __u16 lport, 540 struct inet_timewait_sock **twp) 541 { 542 struct inet_hashinfo *hinfo = death_row->hashinfo; 543 struct inet_sock *inet = inet_sk(sk); 544 __be32 daddr = inet->inet_rcv_saddr; 545 __be32 saddr = inet->inet_daddr; 546 int dif = sk->sk_bound_dev_if; 547 struct net *net = sock_net(sk); 548 int sdif = l3mdev_master_ifindex_by_index(net, dif); 549 INET_ADDR_COOKIE(acookie, saddr, daddr); 550 const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); 551 unsigned int hash = inet_ehashfn(net, daddr, lport, 552 saddr, inet->inet_dport); 553 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 554 spinlock_t *lock = inet_ehash_lockp(hinfo, hash); 555 struct sock *sk2; 556 const struct hlist_nulls_node *node; 557 struct inet_timewait_sock *tw = NULL; 558 559 spin_lock(lock); 560 561 sk_nulls_for_each(sk2, node, &head->chain) { 562 if (sk2->sk_hash != hash) 563 continue; 564 565 if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) { 566 if (sk2->sk_state == TCP_TIME_WAIT) { 567 tw = inet_twsk(sk2); 568 if (sk->sk_protocol == IPPROTO_TCP && 569 tcp_twsk_unique(sk, sk2, twp)) 570 break; 571 } 572 goto not_unique; 573 } 574 } 575 576 /* Must record num and sport now. Otherwise we will see 577 * in hash table socket with a funny identity. 578 */ 579 inet->inet_num = lport; 580 inet->inet_sport = htons(lport); 581 sk->sk_hash = hash; 582 WARN_ON(!sk_unhashed(sk)); 583 __sk_nulls_add_node_rcu(sk, &head->chain); 584 if (tw) { 585 sk_nulls_del_node_init_rcu((struct sock *)tw); 586 __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED); 587 } 588 spin_unlock(lock); 589 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 590 591 if (twp) { 592 *twp = tw; 593 } else if (tw) { 594 /* Silly. Should hash-dance instead... */ 595 inet_twsk_deschedule_put(tw); 596 } 597 return 0; 598 599 not_unique: 600 spin_unlock(lock); 601 return -EADDRNOTAVAIL; 602 } 603 604 static u64 inet_sk_port_offset(const struct sock *sk) 605 { 606 const struct inet_sock *inet = inet_sk(sk); 607 608 return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, 609 inet->inet_daddr, 610 inet->inet_dport); 611 } 612 613 /* Searches for an exsiting socket in the ehash bucket list. 614 * Returns true if found, false otherwise. 615 */ 616 static bool inet_ehash_lookup_by_sk(struct sock *sk, 617 struct hlist_nulls_head *list) 618 { 619 const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num); 620 const int sdif = sk->sk_bound_dev_if; 621 const int dif = sk->sk_bound_dev_if; 622 const struct hlist_nulls_node *node; 623 struct net *net = sock_net(sk); 624 struct sock *esk; 625 626 INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr); 627 628 sk_nulls_for_each_rcu(esk, node, list) { 629 if (esk->sk_hash != sk->sk_hash) 630 continue; 631 if (sk->sk_family == AF_INET) { 632 if (unlikely(inet_match(net, esk, acookie, 633 ports, dif, sdif))) { 634 return true; 635 } 636 } 637 #if IS_ENABLED(CONFIG_IPV6) 638 else if (sk->sk_family == AF_INET6) { 639 if (unlikely(inet6_match(net, esk, 640 &sk->sk_v6_daddr, 641 &sk->sk_v6_rcv_saddr, 642 ports, dif, sdif))) { 643 return true; 644 } 645 } 646 #endif 647 } 648 return false; 649 } 650 651 /* Insert a socket into ehash, and eventually remove another one 652 * (The another one can be a SYN_RECV or TIMEWAIT) 653 * If an existing socket already exists, socket sk is not inserted, 654 * and sets found_dup_sk parameter to true. 655 */ 656 bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) 657 { 658 struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk); 659 struct inet_ehash_bucket *head; 660 struct hlist_nulls_head *list; 661 spinlock_t *lock; 662 bool ret = true; 663 664 WARN_ON_ONCE(!sk_unhashed(sk)); 665 666 sk->sk_hash = sk_ehashfn(sk); 667 head = inet_ehash_bucket(hashinfo, sk->sk_hash); 668 list = &head->chain; 669 lock = inet_ehash_lockp(hashinfo, sk->sk_hash); 670 671 spin_lock(lock); 672 if (osk) { 673 WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); 674 ret = sk_nulls_del_node_init_rcu(osk); 675 } else if (found_dup_sk) { 676 *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); 677 if (*found_dup_sk) 678 ret = false; 679 } 680 681 if (ret) 682 __sk_nulls_add_node_rcu(sk, list); 683 684 spin_unlock(lock); 685 686 return ret; 687 } 688 689 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk) 690 { 691 bool ok = inet_ehash_insert(sk, osk, found_dup_sk); 692 693 if (ok) { 694 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 695 } else { 696 this_cpu_inc(*sk->sk_prot->orphan_count); 697 inet_sk_set_state(sk, TCP_CLOSE); 698 sock_set_flag(sk, SOCK_DEAD); 699 inet_csk_destroy_sock(sk); 700 } 701 return ok; 702 } 703 EXPORT_SYMBOL_GPL(inet_ehash_nolisten); 704 705 static int inet_reuseport_add_sock(struct sock *sk, 706 struct inet_listen_hashbucket *ilb) 707 { 708 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; 709 const struct hlist_nulls_node *node; 710 struct sock *sk2; 711 kuid_t uid = sock_i_uid(sk); 712 713 sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) { 714 if (sk2 != sk && 715 sk2->sk_family == sk->sk_family && 716 ipv6_only_sock(sk2) == ipv6_only_sock(sk) && 717 sk2->sk_bound_dev_if == sk->sk_bound_dev_if && 718 inet_csk(sk2)->icsk_bind_hash == tb && 719 sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && 720 inet_rcv_saddr_equal(sk, sk2, false)) 721 return reuseport_add_sock(sk, sk2, 722 inet_rcv_saddr_any(sk)); 723 } 724 725 return reuseport_alloc(sk, inet_rcv_saddr_any(sk)); 726 } 727 728 int __inet_hash(struct sock *sk, struct sock *osk) 729 { 730 struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk); 731 struct inet_listen_hashbucket *ilb2; 732 int err = 0; 733 734 if (sk->sk_state != TCP_LISTEN) { 735 local_bh_disable(); 736 inet_ehash_nolisten(sk, osk, NULL); 737 local_bh_enable(); 738 return 0; 739 } 740 WARN_ON(!sk_unhashed(sk)); 741 ilb2 = inet_lhash2_bucket_sk(hashinfo, sk); 742 743 spin_lock(&ilb2->lock); 744 if (sk->sk_reuseport) { 745 err = inet_reuseport_add_sock(sk, ilb2); 746 if (err) 747 goto unlock; 748 } 749 sock_set_flag(sk, SOCK_RCU_FREE); 750 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && 751 sk->sk_family == AF_INET6) 752 __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head); 753 else 754 __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head); 755 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 756 unlock: 757 spin_unlock(&ilb2->lock); 758 759 return err; 760 } 761 EXPORT_SYMBOL(__inet_hash); 762 763 int inet_hash(struct sock *sk) 764 { 765 int err = 0; 766 767 if (sk->sk_state != TCP_CLOSE) 768 err = __inet_hash(sk, NULL); 769 770 return err; 771 } 772 EXPORT_SYMBOL_GPL(inet_hash); 773 774 void inet_unhash(struct sock *sk) 775 { 776 struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk); 777 778 if (sk_unhashed(sk)) 779 return; 780 781 if (sk->sk_state == TCP_LISTEN) { 782 struct inet_listen_hashbucket *ilb2; 783 784 ilb2 = inet_lhash2_bucket_sk(hashinfo, sk); 785 /* Don't disable bottom halves while acquiring the lock to 786 * avoid circular locking dependency on PREEMPT_RT. 787 */ 788 spin_lock(&ilb2->lock); 789 if (sk_unhashed(sk)) { 790 spin_unlock(&ilb2->lock); 791 return; 792 } 793 794 if (rcu_access_pointer(sk->sk_reuseport_cb)) 795 reuseport_stop_listen_sock(sk); 796 797 __sk_nulls_del_node_init_rcu(sk); 798 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 799 spin_unlock(&ilb2->lock); 800 } else { 801 spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); 802 803 spin_lock_bh(lock); 804 if (sk_unhashed(sk)) { 805 spin_unlock_bh(lock); 806 return; 807 } 808 __sk_nulls_del_node_init_rcu(sk); 809 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 810 spin_unlock_bh(lock); 811 } 812 } 813 EXPORT_SYMBOL_GPL(inet_unhash); 814 815 static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb, 816 const struct net *net, unsigned short port, 817 int l3mdev, const struct sock *sk) 818 { 819 if (!net_eq(ib2_net(tb), net) || tb->port != port || 820 tb->l3mdev != l3mdev) 821 return false; 822 823 return inet_bind2_bucket_addr_match(tb, sk); 824 } 825 826 bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net, 827 unsigned short port, int l3mdev, const struct sock *sk) 828 { 829 if (!net_eq(ib2_net(tb), net) || tb->port != port || 830 tb->l3mdev != l3mdev) 831 return false; 832 833 #if IS_ENABLED(CONFIG_IPV6) 834 if (tb->addr_type == IPV6_ADDR_ANY) 835 return true; 836 837 if (tb->addr_type != IPV6_ADDR_MAPPED) 838 return false; 839 840 if (sk->sk_family == AF_INET6 && 841 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) 842 return false; 843 #endif 844 return tb->rcv_saddr == 0; 845 } 846 847 /* The socket's bhash2 hashbucket spinlock must be held when this is called */ 848 struct inet_bind2_bucket * 849 inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net, 850 unsigned short port, int l3mdev, const struct sock *sk) 851 { 852 struct inet_bind2_bucket *bhash2 = NULL; 853 854 inet_bind_bucket_for_each(bhash2, &head->chain) 855 if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk)) 856 break; 857 858 return bhash2; 859 } 860 861 struct inet_bind_hashbucket * 862 inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port) 863 { 864 struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk); 865 u32 hash; 866 867 #if IS_ENABLED(CONFIG_IPV6) 868 if (sk->sk_family == AF_INET6) 869 hash = ipv6_portaddr_hash(net, &in6addr_any, port); 870 else 871 #endif 872 hash = ipv4_portaddr_hash(net, 0, port); 873 874 return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)]; 875 } 876 877 static void inet_update_saddr(struct sock *sk, void *saddr, int family) 878 { 879 if (family == AF_INET) { 880 inet_sk(sk)->inet_saddr = *(__be32 *)saddr; 881 sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr); 882 } 883 #if IS_ENABLED(CONFIG_IPV6) 884 else { 885 sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr; 886 } 887 #endif 888 } 889 890 static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset) 891 { 892 struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk); 893 struct inet_bind_hashbucket *head, *head2; 894 struct inet_bind2_bucket *tb2, *new_tb2; 895 int l3mdev = inet_sk_bound_l3mdev(sk); 896 int port = inet_sk(sk)->inet_num; 897 struct net *net = sock_net(sk); 898 int bhash; 899 900 if (!inet_csk(sk)->icsk_bind2_hash) { 901 /* Not bind()ed before. */ 902 if (reset) 903 inet_reset_saddr(sk); 904 else 905 inet_update_saddr(sk, saddr, family); 906 907 return 0; 908 } 909 910 /* Allocate a bind2 bucket ahead of time to avoid permanently putting 911 * the bhash2 table in an inconsistent state if a new tb2 bucket 912 * allocation fails. 913 */ 914 new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC); 915 if (!new_tb2) { 916 if (reset) { 917 /* The (INADDR_ANY, port) bucket might have already 918 * been freed, then we cannot fixup icsk_bind2_hash, 919 * so we give up and unlink sk from bhash/bhash2 not 920 * to leave inconsistency in bhash2. 921 */ 922 inet_put_port(sk); 923 inet_reset_saddr(sk); 924 } 925 926 return -ENOMEM; 927 } 928 929 bhash = inet_bhashfn(net, port, hinfo->bhash_size); 930 head = &hinfo->bhash[bhash]; 931 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); 932 933 /* If we change saddr locklessly, another thread 934 * iterating over bhash might see corrupted address. 935 */ 936 spin_lock_bh(&head->lock); 937 938 spin_lock(&head2->lock); 939 __sk_del_bind_node(sk); 940 inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash); 941 spin_unlock(&head2->lock); 942 943 if (reset) 944 inet_reset_saddr(sk); 945 else 946 inet_update_saddr(sk, saddr, family); 947 948 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); 949 950 spin_lock(&head2->lock); 951 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); 952 if (!tb2) { 953 tb2 = new_tb2; 954 inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk); 955 } 956 inet_csk(sk)->icsk_bind2_hash = tb2; 957 sk_add_bind_node(sk, &tb2->owners); 958 spin_unlock(&head2->lock); 959 960 spin_unlock_bh(&head->lock); 961 962 if (tb2 != new_tb2) 963 kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2); 964 965 return 0; 966 } 967 968 int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family) 969 { 970 return __inet_bhash2_update_saddr(sk, saddr, family, false); 971 } 972 EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr); 973 974 void inet_bhash2_reset_saddr(struct sock *sk) 975 { 976 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 977 __inet_bhash2_update_saddr(sk, NULL, 0, true); 978 } 979 EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr); 980 981 /* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm 982 * Note that we use 32bit integers (vs RFC 'short integers') 983 * because 2^16 is not a multiple of num_ephemeral and this 984 * property might be used by clever attacker. 985 * 986 * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though 987 * attacks were since demonstrated, thus we use 65536 by default instead 988 * to really give more isolation and privacy, at the expense of 256kB 989 * of kernel memory. 990 */ 991 #define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER) 992 static u32 *table_perturb; 993 994 int __inet_hash_connect(struct inet_timewait_death_row *death_row, 995 struct sock *sk, u64 port_offset, 996 int (*check_established)(struct inet_timewait_death_row *, 997 struct sock *, __u16, struct inet_timewait_sock **)) 998 { 999 struct inet_hashinfo *hinfo = death_row->hashinfo; 1000 struct inet_bind_hashbucket *head, *head2; 1001 struct inet_timewait_sock *tw = NULL; 1002 int port = inet_sk(sk)->inet_num; 1003 struct net *net = sock_net(sk); 1004 struct inet_bind2_bucket *tb2; 1005 struct inet_bind_bucket *tb; 1006 bool tb_created = false; 1007 u32 remaining, offset; 1008 int ret, i, low, high; 1009 bool local_ports; 1010 int step, l3mdev; 1011 u32 index; 1012 1013 if (port) { 1014 local_bh_disable(); 1015 ret = check_established(death_row, sk, port, NULL); 1016 local_bh_enable(); 1017 return ret; 1018 } 1019 1020 l3mdev = inet_sk_bound_l3mdev(sk); 1021 1022 local_ports = inet_sk_get_local_port_range(sk, &low, &high); 1023 step = local_ports ? 1 : 2; 1024 1025 high++; /* [32768, 60999] -> [32768, 61000[ */ 1026 remaining = high - low; 1027 if (!local_ports && remaining > 1) 1028 remaining &= ~1U; 1029 1030 get_random_sleepable_once(table_perturb, 1031 INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb)); 1032 index = port_offset & (INET_TABLE_PERTURB_SIZE - 1); 1033 1034 offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32); 1035 offset %= remaining; 1036 1037 /* In first pass we try ports of @low parity. 1038 * inet_csk_get_port() does the opposite choice. 1039 */ 1040 if (!local_ports) 1041 offset &= ~1U; 1042 other_parity_scan: 1043 port = low + offset; 1044 for (i = 0; i < remaining; i += step, port += step) { 1045 if (unlikely(port >= high)) 1046 port -= remaining; 1047 if (inet_is_local_reserved_port(net, port)) 1048 continue; 1049 head = &hinfo->bhash[inet_bhashfn(net, port, 1050 hinfo->bhash_size)]; 1051 spin_lock_bh(&head->lock); 1052 1053 /* Does not bother with rcv_saddr checks, because 1054 * the established check is already unique enough. 1055 */ 1056 inet_bind_bucket_for_each(tb, &head->chain) { 1057 if (inet_bind_bucket_match(tb, net, port, l3mdev)) { 1058 if (tb->fastreuse >= 0 || 1059 tb->fastreuseport >= 0) 1060 goto next_port; 1061 WARN_ON(hlist_empty(&tb->bhash2)); 1062 if (!check_established(death_row, sk, 1063 port, &tw)) 1064 goto ok; 1065 goto next_port; 1066 } 1067 } 1068 1069 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, 1070 net, head, port, l3mdev); 1071 if (!tb) { 1072 spin_unlock_bh(&head->lock); 1073 return -ENOMEM; 1074 } 1075 tb_created = true; 1076 tb->fastreuse = -1; 1077 tb->fastreuseport = -1; 1078 goto ok; 1079 next_port: 1080 spin_unlock_bh(&head->lock); 1081 cond_resched(); 1082 } 1083 1084 if (!local_ports) { 1085 offset++; 1086 if ((offset & 1) && remaining > 1) 1087 goto other_parity_scan; 1088 } 1089 return -EADDRNOTAVAIL; 1090 1091 ok: 1092 /* Find the corresponding tb2 bucket since we need to 1093 * add the socket to the bhash2 table as well 1094 */ 1095 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); 1096 spin_lock(&head2->lock); 1097 1098 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); 1099 if (!tb2) { 1100 tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net, 1101 head2, tb, sk); 1102 if (!tb2) 1103 goto error; 1104 } 1105 1106 /* Here we want to add a little bit of randomness to the next source 1107 * port that will be chosen. We use a max() with a random here so that 1108 * on low contention the randomness is maximal and on high contention 1109 * it may be inexistent. 1110 */ 1111 i = max_t(int, i, get_random_u32_below(8) * step); 1112 WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + step); 1113 1114 /* Head lock still held and bh's disabled */ 1115 inet_bind_hash(sk, tb, tb2, port); 1116 1117 if (sk_unhashed(sk)) { 1118 inet_sk(sk)->inet_sport = htons(port); 1119 inet_ehash_nolisten(sk, (struct sock *)tw, NULL); 1120 } 1121 if (tw) 1122 inet_twsk_bind_unhash(tw, hinfo); 1123 1124 spin_unlock(&head2->lock); 1125 spin_unlock(&head->lock); 1126 1127 if (tw) 1128 inet_twsk_deschedule_put(tw); 1129 local_bh_enable(); 1130 return 0; 1131 1132 error: 1133 if (sk_hashed(sk)) { 1134 spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash); 1135 1136 sock_prot_inuse_add(net, sk->sk_prot, -1); 1137 1138 spin_lock(lock); 1139 __sk_nulls_del_node_init_rcu(sk); 1140 spin_unlock(lock); 1141 1142 sk->sk_hash = 0; 1143 inet_sk(sk)->inet_sport = 0; 1144 inet_sk(sk)->inet_num = 0; 1145 1146 if (tw) 1147 inet_twsk_bind_unhash(tw, hinfo); 1148 } 1149 1150 spin_unlock(&head2->lock); 1151 if (tb_created) 1152 inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); 1153 spin_unlock(&head->lock); 1154 1155 if (tw) 1156 inet_twsk_deschedule_put(tw); 1157 1158 local_bh_enable(); 1159 1160 return -ENOMEM; 1161 } 1162 1163 /* 1164 * Bind a port for a connect operation and hash it. 1165 */ 1166 int inet_hash_connect(struct inet_timewait_death_row *death_row, 1167 struct sock *sk) 1168 { 1169 u64 port_offset = 0; 1170 1171 if (!inet_sk(sk)->inet_num) 1172 port_offset = inet_sk_port_offset(sk); 1173 return __inet_hash_connect(death_row, sk, port_offset, 1174 __inet_check_established); 1175 } 1176 EXPORT_SYMBOL_GPL(inet_hash_connect); 1177 1178 static void init_hashinfo_lhash2(struct inet_hashinfo *h) 1179 { 1180 int i; 1181 1182 for (i = 0; i <= h->lhash2_mask; i++) { 1183 spin_lock_init(&h->lhash2[i].lock); 1184 INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head, 1185 i + LISTENING_NULLS_BASE); 1186 } 1187 } 1188 1189 void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, 1190 unsigned long numentries, int scale, 1191 unsigned long low_limit, 1192 unsigned long high_limit) 1193 { 1194 h->lhash2 = alloc_large_system_hash(name, 1195 sizeof(*h->lhash2), 1196 numentries, 1197 scale, 1198 0, 1199 NULL, 1200 &h->lhash2_mask, 1201 low_limit, 1202 high_limit); 1203 init_hashinfo_lhash2(h); 1204 1205 /* this one is used for source ports of outgoing connections */ 1206 table_perturb = alloc_large_system_hash("Table-perturb", 1207 sizeof(*table_perturb), 1208 INET_TABLE_PERTURB_SIZE, 1209 0, 0, NULL, NULL, 1210 INET_TABLE_PERTURB_SIZE, 1211 INET_TABLE_PERTURB_SIZE); 1212 } 1213 1214 int inet_hashinfo2_init_mod(struct inet_hashinfo *h) 1215 { 1216 h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL); 1217 if (!h->lhash2) 1218 return -ENOMEM; 1219 1220 h->lhash2_mask = INET_LHTABLE_SIZE - 1; 1221 /* INET_LHTABLE_SIZE must be a power of 2 */ 1222 BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask); 1223 1224 init_hashinfo_lhash2(h); 1225 return 0; 1226 } 1227 EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod); 1228 1229 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 1230 { 1231 unsigned int locksz = sizeof(spinlock_t); 1232 unsigned int i, nblocks = 1; 1233 1234 if (locksz != 0) { 1235 /* allocate 2 cache lines or at least one spinlock per cpu */ 1236 nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); 1237 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 1238 1239 /* no more locks than number of hash buckets */ 1240 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 1241 1242 hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); 1243 if (!hashinfo->ehash_locks) 1244 return -ENOMEM; 1245 1246 for (i = 0; i < nblocks; i++) 1247 spin_lock_init(&hashinfo->ehash_locks[i]); 1248 } 1249 hashinfo->ehash_locks_mask = nblocks - 1; 1250 return 0; 1251 } 1252 EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc); 1253 1254 struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo, 1255 unsigned int ehash_entries) 1256 { 1257 struct inet_hashinfo *new_hashinfo; 1258 int i; 1259 1260 new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL); 1261 if (!new_hashinfo) 1262 goto err; 1263 1264 new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket), 1265 GFP_KERNEL_ACCOUNT); 1266 if (!new_hashinfo->ehash) 1267 goto free_hashinfo; 1268 1269 new_hashinfo->ehash_mask = ehash_entries - 1; 1270 1271 if (inet_ehash_locks_alloc(new_hashinfo)) 1272 goto free_ehash; 1273 1274 for (i = 0; i < ehash_entries; i++) 1275 INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i); 1276 1277 new_hashinfo->pernet = true; 1278 1279 return new_hashinfo; 1280 1281 free_ehash: 1282 vfree(new_hashinfo->ehash); 1283 free_hashinfo: 1284 kfree(new_hashinfo); 1285 err: 1286 return NULL; 1287 } 1288 EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc); 1289 1290 void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo) 1291 { 1292 if (!hashinfo->pernet) 1293 return; 1294 1295 inet_ehash_locks_free(hashinfo); 1296 vfree(hashinfo->ehash); 1297 kfree(hashinfo); 1298 } 1299 EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free); 1300