// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/hotdata.h>
#include <net/ip.h>
#include <net/rps.h>
#include <net/secure_seq.h>
#include <net/sock_reuseport.h>
#include <net/tcp.h>

u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return lport + __inet_ehashfn(laddr, 0, faddr, fport,
				      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}
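/*
 * Bound ports are tracked at two levels: bhash buckets are keyed by
 * (net, port, l3mdev) via struct inet_bind_bucket, and struct
 * inet_bind2_bucket entries, hashed both in the bhash2 table and on the
 * owning inet_bind_bucket's list, add the bound local address to the
 * key. The second level keeps bind(2) conflict checks cheap when many
 * sockets share one port across distinct addresses.
 */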
/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev = l3mdev;
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->bhash2);
		hlist_add_head_rcu(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->bhash2)) {
		hlist_del_rcu(&tb->node);
		kfree_rcu(tb, rcu);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
	       tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   struct inet_bind_bucket *tb,
				   const struct sock *sk)
{
	write_pnet(&tb2->ib_net, net);
	tb2->l3mdev = tb->l3mdev;
	tb2->port = tb->port;
#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(USHRT_MAX < (IPV6_ADDR_ANY | IPV6_ADDR_MAPPED));
	if (sk->sk_family == AF_INET6) {
		tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	} else {
		tb2->addr_type = IPV6_ADDR_MAPPED;
		ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
	}
#else
	tb2->rcv_saddr = sk->sk_rcv_saddr;
#endif
	INIT_HLIST_HEAD(&tb2->owners);
	hlist_add_head(&tb2->node, &head->chain);
	hlist_add_head(&tb2->bhash_node, &tb->bhash2);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   struct inet_bind_bucket *tb,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb2)
		inet_bind2_bucket_init(tb2, net, head, tb, sk);

	return tb2;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		__hlist_del(&tb->bhash_node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);

	if (tb2->addr_type != IPV6_ADDR_MAPPED)
		return false;
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	inet_csk(sk)->icsk_bind_hash = tb;
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
}
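/*
 * Lock ordering for the bind tables: the bhash bucket lock is taken
 * before the corresponding bhash2 bucket lock, with BHs disabled.
 * __inet_put_port() and __inet_inherit_port() below both follow this
 * order.
 */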
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	inet_bind_bucket_destroy(tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = tcp_get_hashinfo(sk);
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, tb, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

static inline int compute_score(struct sock *sk, const struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
	    !ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

/**
 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 * @net: network namespace.
 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 * @skb: context for a potential SK_REUSEPORT program.
 * @doff: header offset.
 * @saddr: source address.
 * @sport: source port.
 * @daddr: destination address.
 * @hnum: destination port in host byte order.
 * @ehashfn: hash function used to generate the fallback hash.
 *
 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 *         the selected sock or an error.
 */
struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
					net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
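/*
 * Listener lookup is two-pass: first the lhash2 bucket for the exact
 * destination address, then the bucket for INADDR_ANY. Within a bucket,
 * compute_score() prefers a socket bound to a device, an exact-family
 * AF_INET socket over a v4-mapped IPv6 one, and a socket whose
 * sk_incoming_cpu matches the current CPU.
 */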
/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(const struct net *net,
				       struct inet_listen_hashbucket *ilb2,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, __be16 sport,
				       const __be32 daddr, const unsigned short hnum,
				       const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet_lookup_reuseport(net, sk, skb, doff,
						       saddr, sport, daddr, hnum,
						       inet_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

struct sock *inet_lookup_run_sk_lookup(const struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
					 ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(const struct net *net,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct inet_hashinfo *hashinfo;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
						   saddr, sport, daddr, hnum, dif,
						   inet_ehashfn);
		if (result)
			goto done;
	}

	hashinfo = net->ipv4.tcp_death_row.hashinfo;
	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
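/*
 * Established lookup runs locklessly under RCU on an hlist_nulls chain:
 * a candidate match is confirmed by taking a reference with
 * refcount_inc_not_zero() and re-checking the keys, and a chain that
 * ends with an unexpected nulls value means the entry moved to another
 * bucket, so the walk restarts from the beginning.
 */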
struct sock *__inet_lookup_established(const struct net *net,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const struct hlist_nulls_node *node;
	struct inet_ehash_bucket *head;
	struct inet_hashinfo *hashinfo;
	unsigned int hash, slot;
	struct sock *sk;

	hashinfo = net->ipv4.tcp_death_row.hashinfo;
	hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	slot = hash & hashinfo->ehash_mask;
	head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
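/*
 * __inet_check_established() decides whether @sk may use @lport for its
 * four-tuple: any live socket with a matching tuple makes the port
 * unusable, except a TIME_WAIT socket that tcp_twsk_unique() lets us
 * recycle. The rcu_lookup pass is an optimistic, lockless pre-check
 * used by __inet_hash_connect() before taking the bucket lock.
 */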
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp,
				    bool rcu_lookup,
				    u32 hash)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	struct inet_timewait_sock *tw = NULL;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	spinlock_t *lock;

	if (rcu_lookup) {
		sk_nulls_for_each(sk2, node, &head->chain) {
			if (sk2->sk_hash != hash ||
			    !inet_match(net, sk2, acookie, ports, dif, sdif))
				continue;
			if (sk2->sk_state == TCP_TIME_WAIT)
				break;
			return -EADDRNOTAVAIL;
		}
		return 0;
	}

	lock = inet_ehash_lockp(hinfo, hash);
	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (tcp_twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in the hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a duplicate socket already exists, sk is not inserted
 * and the found_dup_sk parameter is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		tcp_orphan_count_inc();
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_IPV6_MOD(inet_ehash_nolisten);
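/*
 * A listening socket joins an existing SO_REUSEPORT group only when the
 * family, bound device, bind bucket, owning uid and local address all
 * match a group member that also has SO_REUSEPORT set; otherwise a new
 * group is allocated for it.
 */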
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	kuid_t uid = sk_uid(sk);
	struct sock *sk2;

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state == TCP_CLOSE)
		return 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, NULL, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_IPV6_MOD(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	sock_rps_delete_flow(sk);
	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_IPV6_MOD(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

	return inet_bind2_bucket_addr_match(tb, sk);
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

#if IS_ENABLED(CONFIG_IPV6)
	if (tb->addr_type == IPV6_ADDR_ANY)
		return true;

	if (tb->addr_type != IPV6_ADDR_MAPPED)
		return false;

	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
		return false;
#endif
	return tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}
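/*
 * bind(2) conflict checks consult both the bhash2 bucket for the
 * socket's own address and the wildcard bucket returned below, since a
 * socket bound to INADDR_ANY (or in6addr_any) conflicts with every more
 * specific address on the same port.
 */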
struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, then we cannot fixup icsk_bind2_hash,
			 * so we give up and unlink sk from bhash/bhash2 rather
			 * than leave bhash2 inconsistent.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we change saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk);
	}
	inet_csk(sk)->icsk_bind2_hash = tb2;
	sk_add_bind_node(sk, &tb2->owners);
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_IPV6_MOD(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_IPV6_MOD(inet_bhash2_reset_saddr);
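/*
 * Roughly, the double-hash port selection below works like this: the
 * low bits of the keyed port_offset select a slot in table_perturb[],
 * that slot's value plus port_offset's upper 32 bits pick the starting
 * offset within the ephemeral range, and the slot is bumped after each
 * successful connect so that consecutive connections to the same
 * destination do not pick adjacent ports.
 */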
/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims using TABLE_LENGTH=10 buckets gives an improvement, but
 * attacks have since been demonstrated, so we use 65536 by default instead,
 * to really give more isolation and privacy, at the expense of 256kB
 * of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		u32 hash_port0,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **,
			bool rcu_lookup, u32 hash))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	bool local_ports;
	int step, l3mdev;
	u32 index;

	if (port) {
		local_bh_disable();
		ret = check_established(death_row, sk, port, NULL, false,
					hash_port0 + port);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	local_ports = inet_sk_get_local_port_range(sk, &low, &high);
	step = local_ports ? 1 : 2;

	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (!local_ports && remaining > 1)
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	if (!local_ports)
		offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += step, port += step) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		rcu_read_lock();
		hlist_for_each_entry_rcu(tb, &head->chain, node) {
			if (!inet_bind_bucket_match(tb, net, port, l3mdev))
				continue;
			if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) {
				rcu_read_unlock();
				goto next_port;
			}
			if (!check_established(death_row, sk, port, &tw, true,
					       hash_port0 + port))
				break;
			rcu_read_unlock();
			goto next_port;
		}
		rcu_read_unlock();

		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port_unlock;
				WARN_ON(hlist_empty(&tb->bhash2));
				if (!check_established(death_row, sk,
						       port, &tw, false,
						       hash_port0 + port))
					goto ok;
				goto next_port_unlock;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port_unlock:
		spin_unlock_bh(&head->lock);
next_port:
		cond_resched();
	}

	if (!local_ports) {
		offset++;
		if ((offset & 1) && remaining > 1)
			goto other_parity_scan;
	}
	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, tb, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random here so that
	 * on low contention the randomness is maximal and on high contention
	 * it may be nonexistent.
	 */
	i = max_t(int, i, get_random_u32_below(8) * step);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + step);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);

	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	if (sk_hashed(sk)) {
		spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);

		sock_prot_inuse_add(net, sk->sk_prot, -1);

		spin_lock(lock);
		__sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);

		sk->sk_hash = 0;
		inet_sk(sk)->inet_sport = 0;
		inet_sk(sk)->inet_num = 0;

		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
	}

	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(tb);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);

	local_bh_enable();

	return -ENOMEM;
}
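/*
 * port_offset below is derived from the connection's (saddr, daddr,
 * dport) via secure_ipv4_port_ephemeral(), so different destinations
 * walk the ephemeral range in unrelated orders while repeated connects
 * to one destination reuse a stable search sequence.
 */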
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct net *net = sock_net(sk);
	u64 port_offset = 0;
	u32 hash_port0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);

	hash_port0 = inet_ehashfn(net, inet->inet_rcv_saddr, 0,
				  inet->inet_daddr, inet->inet_dport);

	return __inet_hash_connect(death_row, sk, port_offset, hash_port0,
				   __inet_check_established);
}

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
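/*
 * The ehash spinlock array below is sized as a trade-off: enough locks
 * to give each possible cpu a couple of cache lines' worth and each
 * NUMA node at least a page, rounded up to a power of two, but never
 * more locks than hash buckets.
 */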
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;
	spinlock_t *ptr = NULL;

	if (locksz == 0)
		goto set_mask;

	/* Allocate 2 cache lines or at least one spinlock per cpu. */
	nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();

	/* At least one page per NUMA node. */
	nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);

	nblocks = roundup_pow_of_two(nblocks);

	/* No more locks than number of hash buckets. */
	nblocks = min(nblocks, hashinfo->ehash_mask + 1);

	if (num_online_nodes() > 1) {
		/* Use vmalloc() to allow NUMA policy to spread pages
		 * on all available nodes if desired.
		 */
		ptr = vmalloc_array(nblocks, locksz);
	}
	if (!ptr) {
		ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!ptr)
			return -ENOMEM;
	}
	for (i = 0; i < nblocks; i++)
		spin_lock_init(&ptr[i]);
	hashinfo->ehash_locks = ptr;
set_mask:
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;

	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}

void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}