// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPVS         An implementation of the IP virtual server support for the
 *              LINUX operating system.  IPVS is now implemented as a module
 *              over the Netfilter framework. IPVS can be used to build a
 *              high-performance and highly available server based on a
 *              cluster of servers.
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Peter Kese <peter.kese@ijs.si>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
 * and others. Much of the code here is taken from the IP MASQ code of
 * kernel 2.2.
 *
 * Changes:
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>		/* for proc_net_* */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rcupdate_wait.h>

#include <net/net_namespace.h>
#include <net/ip_vs.h>


#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS	12
#endif

/*
 * Connection hash size. Default is what was selected at compile time.
 */
static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");

/* size and mask values */
int ip_vs_conn_tab_size __read_mostly;
static int ip_vs_conn_tab_mask __read_mostly;

/*
 *  Connection hash table: for input and output packets lookups of IPVS
 */
static struct hlist_head *ip_vs_conn_tab __read_mostly;

/*  SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;

/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);

/* random value for IPVS connection hash */
static unsigned int ip_vs_conn_rnd __read_mostly;

/*
 *  Fine locking granularity for big connection hash table
 */
#define CT_LOCKARRAY_BITS  5
#define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)

/* We need an addrstrlen that works with or without v6 */
#ifdef CONFIG_IP_VS_IPV6
#define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
#else
#define IP_VS_ADDRSTRLEN (8+1)
#endif

struct ip_vs_aligned_lock
{
	spinlock_t	l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

/* lock array for conn table */
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;

static inline void ct_write_lock_bh(unsigned int key)
{
	spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}

static inline void ct_write_unlock_bh(unsigned int key)
{
	spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}

static void ip_vs_conn_expire(struct timer_list *t);
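/*
 * Writers pick one of the CT_LOCKARRAY_SIZE (32) cache-aligned stripe
 * locks above by masking the bucket hash: e.g. a hash of 0x1234 maps to
 * lock 0x1234 & CT_LOCKARRAY_MASK == 0x14.  Lookups never take these
 * locks; they walk the buckets under RCU, so the stripes only serialize
 * concurrent add/unlink on buckets that share a stripe.
 */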
/*
 *	Returns hash value for IPVS connection entry
 */
static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
				       const union nf_inet_addr *addr,
				       __be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
				    (__force u32)port, proto, ip_vs_conn_rnd) ^
			((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
#endif
	return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
			    ip_vs_conn_rnd) ^
		((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
}

static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
					     bool inverse)
{
	const union nf_inet_addr *addr;
	__be16 port;

	if (p->pe_data && p->pe->hashkey_raw)
		return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
			ip_vs_conn_tab_mask;

	if (likely(!inverse)) {
		addr = p->caddr;
		port = p->cport;
	} else {
		addr = p->vaddr;
		port = p->vport;
	}

	return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
}

static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
	struct ip_vs_conn_param p;

	ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
			      &cp->caddr, cp->cport, NULL, 0, &p);

	if (cp->pe) {
		p.pe = cp->pe;
		p.pe_data = cp->pe_data;
		p.pe_data_len = cp->pe_data_len;
	}

	return ip_vs_conn_hashkey_param(&p, false);
}

/*
 *	Hashes ip_vs_conn in ip_vs_conn_tab by netns, proto, addr, port.
 *	Returns bool success.
 */
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
	unsigned int hash;
	int ret;

	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
		return 0;

	/* Hash by protocol, client address and port */
	hash = ip_vs_conn_hashkey_conn(cp);

	ct_write_lock_bh(hash);
	spin_lock(&cp->lock);

	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
		cp->flags |= IP_VS_CONN_F_HASHED;
		refcount_inc(&cp->refcnt);
		hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
		ret = 1;
	} else {
		pr_err("%s(): request for already hashed, called from %pS\n",
		       __func__, __builtin_return_address(0));
		ret = 0;
	}

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

	return ret;
}


/*
 *	UNhashes ip_vs_conn from ip_vs_conn_tab.
 *	Returns bool success. Caller should hold conn reference.
 */
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
	unsigned int hash;
	int ret;

	/* unhash it and decrease its reference counter */
	hash = ip_vs_conn_hashkey_conn(cp);

	ct_write_lock_bh(hash);
	spin_lock(&cp->lock);

	if (cp->flags & IP_VS_CONN_F_HASHED) {
		hlist_del_rcu(&cp->c_list);
		cp->flags &= ~IP_VS_CONN_F_HASHED;
		refcount_dec(&cp->refcnt);
		ret = 1;
	} else
		ret = 0;

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

	return ret;
}

/* Try to unlink ip_vs_conn from ip_vs_conn_tab.
 * Returns bool success.
 */
static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
{
	unsigned int hash;
	bool ret = false;

	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
		return refcount_dec_if_one(&cp->refcnt);

	hash = ip_vs_conn_hashkey_conn(cp);

	ct_write_lock_bh(hash);
	spin_lock(&cp->lock);

	if (cp->flags & IP_VS_CONN_F_HASHED) {
		/* Decrease refcnt and unlink conn only if we are last user */
		if (refcount_dec_if_one(&cp->refcnt)) {
			hlist_del_rcu(&cp->c_list);
			cp->flags &= ~IP_VS_CONN_F_HASHED;
			ret = true;
		}
	}

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

	return ret;
}
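/*
 * Reference counting convention for the table: the IP_VS_CONN_F_HASHED
 * flag owns exactly one reference, taken in ip_vs_conn_hash() and
 * dropped in ip_vs_conn_unhash().  ip_vs_conn_unlink() combines the two
 * steps but relies on refcount_dec_if_one(), so it only succeeds when
 * the hash table is the last user; an expiring connection can never be
 * unlinked while a concurrent lookup still holds it.
 */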
/*
 *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
 *  Called for pkts coming from OUTside-to-INside.
 *	p->caddr, p->cport: pkt source address (foreign host)
 *	p->vaddr, p->vport: pkt dest address (load balancer)
 */
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
	unsigned int hash;
	struct ip_vs_conn *cp;

	hash = ip_vs_conn_hashkey_param(p, false);

	rcu_read_lock();

	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
		if (p->cport == cp->cport && p->vport == cp->vport &&
		    cp->af == p->af &&
		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
		    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
		    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
		    p->protocol == cp->protocol &&
		    cp->ipvs == p->ipvs) {
			if (!__ip_vs_conn_get(cp))
				continue;
			/* HIT */
			rcu_read_unlock();
			return cp;
		}
	}

	rcu_read_unlock();

	return NULL;
}

struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
	struct ip_vs_conn *cp;

	cp = __ip_vs_conn_in_get(p);
	if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
		struct ip_vs_conn_param cport_zero_p = *p;
		cport_zero_p.cport = 0;
		cp = __ip_vs_conn_in_get(&cport_zero_p);
	}

	IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
		      ip_vs_proto_name(p->protocol),
		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
		      cp ? "hit" : "not hit");

	return cp;
}

static int
ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
			    int af, const struct sk_buff *skb,
			    const struct ip_vs_iphdr *iph,
			    struct ip_vs_conn_param *p)
{
	__be16 _ports[2], *pptr;

	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
	if (pptr == NULL)
		return 1;

	if (likely(!ip_vs_iph_inverse(iph)))
		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
				      pptr[0], &iph->daddr, pptr[1], p);
	else
		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
				      pptr[1], &iph->saddr, pptr[0], p);
	return 0;
}

struct ip_vs_conn *
ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
			const struct sk_buff *skb,
			const struct ip_vs_iphdr *iph)
{
	struct ip_vs_conn_param p;

	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
		return NULL;

	return ip_vs_conn_in_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
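/*
 * Typical use from packet processing (a sketch, not an exact call site):
 *
 *	cp = ip_vs_conn_in_get_proto(ipvs, af, skb, &iph);
 *	if (cp) {
 *		... transmit using cp->packet_xmit ...
 *		ip_vs_conn_put(cp);	(re-arms the expire timer)
 *	}
 *
 * The cport==0 fallback in ip_vs_conn_in_get() serves connections
 * created before the client port was known (e.g. FTP data); such
 * entries are filled in later via ip_vs_conn_fill_cport().
 */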
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
	unsigned int hash;
	struct ip_vs_conn *cp;

	hash = ip_vs_conn_hashkey_param(p, false);

	rcu_read_lock();

	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
		if (unlikely(p->pe_data && p->pe->ct_match)) {
			if (cp->ipvs != p->ipvs)
				continue;
			if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
				if (__ip_vs_conn_get(cp))
					goto out;
			}
			continue;
		}

		if (cp->af == p->af &&
		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
		    /* protocol should only be IPPROTO_IP if
		     * p->vaddr is a fwmark */
		    ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
				     p->af, p->vaddr, &cp->vaddr) &&
		    p->vport == cp->vport && p->cport == cp->cport &&
		    cp->flags & IP_VS_CONN_F_TEMPLATE &&
		    p->protocol == cp->protocol &&
		    cp->ipvs == p->ipvs) {
			if (__ip_vs_conn_get(cp))
				goto out;
		}
	}
	cp = NULL;

  out:
	rcu_read_unlock();

	IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
		      ip_vs_proto_name(p->protocol),
		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
		      cp ? "hit" : "not hit");

	return cp;
}

/* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
 * Called for pkts coming from inside-to-OUTside.
 *	p->caddr, p->cport: pkt source address (inside host)
 *	p->vaddr, p->vport: pkt dest address (foreign host)
 */
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
	unsigned int hash;
	struct ip_vs_conn *cp, *ret = NULL;
	const union nf_inet_addr *saddr;
	__be16 sport;

	/*
	 *	Check for "full" addressed entries
	 */
	hash = ip_vs_conn_hashkey_param(p, true);

	rcu_read_lock();

	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
		if (p->vport != cp->cport)
			continue;

		if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
			sport = cp->vport;
			saddr = &cp->vaddr;
		} else {
			sport = cp->dport;
			saddr = &cp->daddr;
		}

		if (p->cport == sport && cp->af == p->af &&
		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
		    ip_vs_addr_equal(p->af, p->caddr, saddr) &&
		    p->protocol == cp->protocol &&
		    cp->ipvs == p->ipvs) {
			if (!__ip_vs_conn_get(cp))
				continue;
			/* HIT */
			ret = cp;
			break;
		}
	}

	rcu_read_unlock();

	IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
		      ip_vs_proto_name(p->protocol),
		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
		      ret ? "hit" : "not hit");

	return ret;
}

struct ip_vs_conn *
ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
			 const struct sk_buff *skb,
			 const struct ip_vs_iphdr *iph)
{
	struct ip_vs_conn_param p;

	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
		return NULL;

	return ip_vs_conn_out_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
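/*
 * Note on the reply direction in ip_vs_conn_out_get(): for NAT (MASQ)
 * connections the reply arrives from the real server, so the packet
 * source is matched against cp->daddr/cp->dport; for all other
 * forwarding methods the reply appears to come from the virtual
 * address, so cp->vaddr/cp->vport are matched instead.
 */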
/*
 *      Put back the conn and restart its timer with its timeout
 */
static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
{
	unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
		0 : cp->timeout;
	mod_timer(&cp->timer, jiffies+t);

	__ip_vs_conn_put(cp);
}

void ip_vs_conn_put(struct ip_vs_conn *cp)
{
	if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
	    (refcount_read(&cp->refcnt) == 1) &&
	    !timer_pending(&cp->timer))
		/* expire connection immediately */
		ip_vs_conn_expire(&cp->timer);
	else
		__ip_vs_conn_put_timer(cp);
}

/*
 *	Fill a no_client_port connection with a client port number
 */
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
	if (ip_vs_conn_unhash(cp)) {
		spin_lock_bh(&cp->lock);
		if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
			atomic_dec(&ip_vs_conn_no_cport_cnt);
			cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
			cp->cport = cport;
		}
		spin_unlock_bh(&cp->lock);

		/* hash on new cport */
		ip_vs_conn_hash(cp);
	}
}


/*
 *	Bind a connection entry with the corresponding packet_xmit.
 *	Called by ip_vs_conn_new.
 */
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		cp->packet_xmit = ip_vs_nat_xmit;
		break;

	case IP_VS_CONN_F_TUNNEL:
#ifdef CONFIG_IP_VS_IPV6
		if (cp->daf == AF_INET6)
			cp->packet_xmit = ip_vs_tunnel_xmit_v6;
		else
#endif
			cp->packet_xmit = ip_vs_tunnel_xmit;
		break;

	case IP_VS_CONN_F_DROUTE:
		cp->packet_xmit = ip_vs_dr_xmit;
		break;

	case IP_VS_CONN_F_LOCALNODE:
		cp->packet_xmit = ip_vs_null_xmit;
		break;

	case IP_VS_CONN_F_BYPASS:
		cp->packet_xmit = ip_vs_bypass_xmit;
		break;
	}
}

#ifdef CONFIG_IP_VS_IPV6
static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
{
	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		cp->packet_xmit = ip_vs_nat_xmit_v6;
		break;

	case IP_VS_CONN_F_TUNNEL:
		if (cp->daf == AF_INET6)
			cp->packet_xmit = ip_vs_tunnel_xmit_v6;
		else
			cp->packet_xmit = ip_vs_tunnel_xmit;
		break;

	case IP_VS_CONN_F_DROUTE:
		cp->packet_xmit = ip_vs_dr_xmit_v6;
		break;

	case IP_VS_CONN_F_LOCALNODE:
		cp->packet_xmit = ip_vs_null_xmit;
		break;

	case IP_VS_CONN_F_BYPASS:
		cp->packet_xmit = ip_vs_bypass_xmit_v6;
		break;
	}
}
#endif


static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
{
	return atomic_read(&dest->activeconns)
		+ atomic_read(&dest->inactconns);
}
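/*
 * The dispatch in ip_vs_bind_xmit()/ip_vs_bind_xmit_v6() above maps the
 * forwarding method bits to a transmitter:
 *
 *	MASQ      -> ip_vs_nat_xmit / ip_vs_nat_xmit_v6
 *	TUNNEL    -> ip_vs_tunnel_xmit / ip_vs_tunnel_xmit_v6
 *	DROUTE    -> ip_vs_dr_xmit / ip_vs_dr_xmit_v6
 *	LOCALNODE -> ip_vs_null_xmit
 *	BYPASS    -> ip_vs_bypass_xmit / ip_vs_bypass_xmit_v6
 *
 * TUNNEL is the odd one out: the tunnel endpoint family (cp->daf) may
 * differ from the client family (cp->af), so both variants check
 * cp->daf rather than cp->af.
 */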
/*
 *	Bind a connection entry with a virtual service destination
 *	Called just after a new connection entry is created.
 */
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
	unsigned int conn_flags;
	__u32 flags;

	/* if dest is NULL, then return directly */
	if (!dest)
		return;

	/* Increase the refcnt counter of the dest */
	ip_vs_dest_hold(dest);

	conn_flags = atomic_read(&dest->conn_flags);
	if (cp->protocol != IPPROTO_UDP)
		conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
	flags = cp->flags;
	/* Bind with the destination and its corresponding transmitter */
	if (flags & IP_VS_CONN_F_SYNC) {
		/* if the connection is not a template and is created
		 * by sync, preserve the activity flag.
		 */
		if (!(flags & IP_VS_CONN_F_TEMPLATE))
			conn_flags &= ~IP_VS_CONN_F_INACTIVE;
		/* connections inherit forwarding method from dest */
		flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
	}
	flags |= conn_flags;
	cp->flags = flags;
	cp->dest = dest;

	IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
		      "dest->refcnt:%d\n",
		      ip_vs_proto_name(cp->protocol),
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
		      ip_vs_fwd_tag(cp), cp->state,
		      cp->flags, refcount_read(&cp->refcnt),
		      refcount_read(&dest->refcnt));

	/* Update the connection counters */
	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
		/* It is a normal connection, so modify the counters
		 * according to the flags, later the protocol can
		 * update them on state change
		 */
		if (!(flags & IP_VS_CONN_F_INACTIVE))
			atomic_inc(&dest->activeconns);
		else
			atomic_inc(&dest->inactconns);
	} else {
		/* It is a persistent connection/template, so increase
		   the persistent connection counter */
		atomic_inc(&dest->persistconns);
	}

	if (dest->u_threshold != 0 &&
	    ip_vs_dest_totalconns(dest) >= dest->u_threshold)
		dest->flags |= IP_VS_DEST_F_OVERLOAD;
}


/*
 * Check if there is a destination for the connection, if so
 * bind the connection to the destination.
 */
void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
	struct ip_vs_dest *dest;

	rcu_read_lock();

	/* This function is only invoked by the synchronization code. We do
	 * not currently support heterogeneous pools with synchronization,
	 * so we can make the assumption that the svc_af is the same as the
	 * dest_af
	 */
	dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
			       cp->dport, &cp->vaddr, cp->vport,
			       cp->protocol, cp->fwmark, cp->flags);
	if (dest) {
		struct ip_vs_proto_data *pd;

		spin_lock_bh(&cp->lock);
		if (cp->dest) {
			spin_unlock_bh(&cp->lock);
			rcu_read_unlock();
			return;
		}

		/* Applications work depending on the forwarding method
		 * but better to reassign them always when binding dest */
		if (cp->app)
			ip_vs_unbind_app(cp);

		ip_vs_bind_dest(cp, dest);
		spin_unlock_bh(&cp->lock);

		/* Update its packet transmitter */
		cp->packet_xmit = NULL;
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			ip_vs_bind_xmit_v6(cp);
		else
#endif
			ip_vs_bind_xmit(cp);

		pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
		if (pd && atomic_read(&pd->appcnt))
			ip_vs_bind_app(cp, pd->pp);
	}
	rcu_read_unlock();
}
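/*
 * Threshold handling: ip_vs_bind_dest() above sets IP_VS_DEST_F_OVERLOAD
 * once active + inactive connections reach dest->u_threshold (e.g. with
 * u_threshold == 1000 the 1000th bound connection marks the server
 * overloaded).  ip_vs_unbind_dest() below clears the flag with
 * hysteresis: below l_threshold when one is configured, otherwise when
 * the total drops under 3/4 of u_threshold; with no thresholds set the
 * flag is simply cleared.
 */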
/*
 *	Unbind a connection entry with its VS destination
 *	Called by the ip_vs_conn_expire function.
 */
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
	struct ip_vs_dest *dest = cp->dest;

	if (!dest)
		return;

	IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
		      "dest->refcnt:%d\n",
		      ip_vs_proto_name(cp->protocol),
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
		      ip_vs_fwd_tag(cp), cp->state,
		      cp->flags, refcount_read(&cp->refcnt),
		      refcount_read(&dest->refcnt));

	/* Update the connection counters */
	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
		/* It is a normal connection, so decrease the inactconns
		   or activeconns counter */
		if (cp->flags & IP_VS_CONN_F_INACTIVE) {
			atomic_dec(&dest->inactconns);
		} else {
			atomic_dec(&dest->activeconns);
		}
	} else {
		/* It is a persistent connection/template, so decrease
		   the persistent connection counter */
		atomic_dec(&dest->persistconns);
	}

	if (dest->l_threshold != 0) {
		if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	} else if (dest->u_threshold != 0) {
		if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	} else {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	}

	ip_vs_dest_put(dest);
}

static int expire_quiescent_template(struct netns_ipvs *ipvs,
				     struct ip_vs_dest *dest)
{
#ifdef CONFIG_SYSCTL
	return ipvs->sysctl_expire_quiescent_template &&
		(atomic_read(&dest->weight) == 0);
#else
	return 0;
#endif
}

/*
 *	Checking if the destination of a connection template is available.
 *	If available, return 1, otherwise invalidate this connection
 *	template and return 0.
 */
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
{
	struct ip_vs_dest *dest = ct->dest;
	struct netns_ipvs *ipvs = ct->ipvs;

	/*
	 * Checking the dest server status.
	 */
	if ((dest == NULL) ||
	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
	    expire_quiescent_template(ipvs, dest) ||
	    (cdest && (dest != cdest))) {
		IP_VS_DBG_BUF(9, "check_template: dest not available for "
			      "protocol %s s:%s:%d v:%s:%d "
			      "-> d:%s:%d\n",
			      ip_vs_proto_name(ct->protocol),
			      IP_VS_DBG_ADDR(ct->af, &ct->caddr),
			      ntohs(ct->cport),
			      IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
			      ntohs(ct->vport),
			      IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
			      ntohs(ct->dport));

		/*
		 * Invalidate the connection template
		 */
		if (ct->vport != htons(0xffff)) {
			if (ip_vs_conn_unhash(ct)) {
				ct->dport = htons(0xffff);
				ct->vport = htons(0xffff);
				ct->cport = 0;
				ip_vs_conn_hash(ct);
			}
		}

		/*
		 * Simply decrease the refcnt of the template,
		 * don't restart its timer.
		 */
		__ip_vs_conn_put(ct);
		return 0;
	}
	return 1;
}
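/*
 * Note that the invalidation above does not free the template: the
 * entry is re-hashed with cport 0 and dport/vport 0xffff, values no
 * real lookup can match, so it simply ages out when its timer fires.
 * The ct->vport check avoids invalidating the same template twice.
 */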
static void ip_vs_conn_rcu_free(struct rcu_head *head)
{
	struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
					     rcu_head);

	ip_vs_pe_put(cp->pe);
	kfree(cp->pe_data);
	kmem_cache_free(ip_vs_conn_cachep, cp);
}

/* Try to delete connection while not holding reference */
static void ip_vs_conn_del(struct ip_vs_conn *cp)
{
	if (timer_delete(&cp->timer)) {
		/* Drop cp->control chain too */
		if (cp->control)
			cp->timeout = 0;
		ip_vs_conn_expire(&cp->timer);
	}
}

/* Try to delete connection while holding reference */
static void ip_vs_conn_del_put(struct ip_vs_conn *cp)
{
	if (timer_delete(&cp->timer)) {
		/* Drop cp->control chain too */
		if (cp->control)
			cp->timeout = 0;
		__ip_vs_conn_put(cp);
		ip_vs_conn_expire(&cp->timer);
	} else {
		__ip_vs_conn_put(cp);
	}
}

static void ip_vs_conn_expire(struct timer_list *t)
{
	struct ip_vs_conn *cp = timer_container_of(cp, t, timer);
	struct netns_ipvs *ipvs = cp->ipvs;

	/*
	 *	do I control anybody?
	 */
	if (atomic_read(&cp->n_control))
		goto expire_later;

	/* Unlink conn if not referenced anymore */
	if (likely(ip_vs_conn_unlink(cp))) {
		struct ip_vs_conn *ct = cp->control;

		/* delete the timer if it is activated by other users */
		timer_delete(&cp->timer);

		/* does anybody control me? */
		if (ct) {
			bool has_ref = !cp->timeout && __ip_vs_conn_get(ct);

			ip_vs_control_del(cp);
			/* Drop CTL or non-assured TPL if not used anymore */
			if (has_ref && !atomic_read(&ct->n_control) &&
			    (!(ct->flags & IP_VS_CONN_F_TEMPLATE) ||
			     !(ct->state & IP_VS_CTPL_S_ASSURED))) {
				IP_VS_DBG(4, "drop controlling connection\n");
				ip_vs_conn_del_put(ct);
			} else if (has_ref) {
				__ip_vs_conn_put(ct);
			}
		}

		if ((cp->flags & IP_VS_CONN_F_NFCT) &&
		    !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
			/* Do not access conntracks during subsys cleanup
			 * because nf_conntrack_find_get can not be used after
			 * conntrack cleanup for the net.
			 */
			smp_rmb();
			if (READ_ONCE(ipvs->enable))
				ip_vs_conn_drop_conntrack(cp);
		}

		if (unlikely(cp->app != NULL))
			ip_vs_unbind_app(cp);
		ip_vs_unbind_dest(cp);
		if (cp->flags & IP_VS_CONN_F_NO_CPORT)
			atomic_dec(&ip_vs_conn_no_cport_cnt);
		if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
			ip_vs_conn_rcu_free(&cp->rcu_head);
		else
			call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
		atomic_dec(&ipvs->conn_count);
		return;
	}

  expire_later:
	IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
		  refcount_read(&cp->refcnt),
		  atomic_read(&cp->n_control));

	refcount_inc(&cp->refcnt);
	cp->timeout = 60*HZ;

	if (ipvs->sync_state & IP_VS_STATE_MASTER)
		ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));

	__ip_vs_conn_put_timer(cp);
}
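/*
 * Summary of the expiry path above: a connection is torn down only when
 * it controls nobody and the hash table holds the last reference.  When
 * it was deleted explicitly (cp->timeout == 0), the controlling CTL or
 * template entry is dropped as well, unless it is still in use or is an
 * assured template.  ONE_PACKET connections were never hashed and are
 * freed directly instead of via call_rcu().  If teardown is not yet
 * possible, the timer is re-armed for another 60 seconds.
 */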
/* Modify timer, so that it expires as soon as possible.
 * Can be called without reference only if under RCU lock.
 * We can have such chain of conns linked with ->control: DATA->CTL->TPL
 * - DATA (eg. FTP) and TPL (persistence) can be present depending on setup
 * - cp->timeout=0 indicates all conns from chain should be dropped but
 *   TPL is not dropped if in assured state
 */
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
	/* Using mod_timer_pending will ensure the timer is not
	 * modified after the final timer_delete in ip_vs_conn_expire.
	 */
	if (timer_pending(&cp->timer) &&
	    time_after(cp->timer.expires, jiffies))
		mod_timer_pending(&cp->timer, jiffies);
}
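/*
 * ip_vs_conn_new() below returns a connection with one reference held
 * by the caller (a second one is taken by the hash table).  The caller
 * is expected to hand its reference back with ip_vs_conn_put(), which
 * also arms the expire timer for the first time.
 */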
/*
 *	Create a new connection entry and hash it into the ip_vs_conn_tab
 */
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
	       const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
	       struct ip_vs_dest *dest, __u32 fwmark)
{
	struct ip_vs_conn *cp;
	struct netns_ipvs *ipvs = p->ipvs;
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
							   p->protocol);

	cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
	if (cp == NULL) {
		IP_VS_ERR_RL("%s(): no memory\n", __func__);
		return NULL;
	}

	INIT_HLIST_NODE(&cp->c_list);
	timer_setup(&cp->timer, ip_vs_conn_expire, 0);
	cp->ipvs = ipvs;
	cp->af = p->af;
	cp->daf = dest_af;
	cp->protocol = p->protocol;
	ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
	cp->cport = p->cport;
	/* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
	ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
		       &cp->vaddr, p->vaddr);
	cp->vport = p->vport;
	ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
	cp->dport = dport;
	cp->flags = flags;
	cp->fwmark = fwmark;
	if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
		ip_vs_pe_get(p->pe);
		cp->pe = p->pe;
		cp->pe_data = p->pe_data;
		cp->pe_data_len = p->pe_data_len;
	} else {
		cp->pe = NULL;
		cp->pe_data = NULL;
		cp->pe_data_len = 0;
	}
	spin_lock_init(&cp->lock);

	/*
	 * Mark the entry as referenced by the current thread before
	 * hashing it into the table, so that another thread running
	 * ip_vs_random_dropentry() cannot drop it.
	 */
	refcount_set(&cp->refcnt, 1);

	cp->control = NULL;
	atomic_set(&cp->n_control, 0);
	atomic_set(&cp->in_pkts, 0);

	cp->packet_xmit = NULL;
	cp->app = NULL;
	cp->app_data = NULL;
	/* reset struct ip_vs_seq */
	cp->in_seq.delta = 0;
	cp->out_seq.delta = 0;

	atomic_inc(&ipvs->conn_count);
	if (flags & IP_VS_CONN_F_NO_CPORT)
		atomic_inc(&ip_vs_conn_no_cport_cnt);

	/* Bind the connection with a destination server */
	cp->dest = NULL;
	ip_vs_bind_dest(cp, dest);

	/* Set its state and timeout */
	cp->state = 0;
	cp->old_state = 0;
	cp->timeout = 3*HZ;
	cp->sync_endtime = jiffies & ~3UL;

	/* Bind its packet transmitter */
#ifdef CONFIG_IP_VS_IPV6
	if (p->af == AF_INET6)
		ip_vs_bind_xmit_v6(cp);
	else
#endif
		ip_vs_bind_xmit(cp);

	if (unlikely(pd && atomic_read(&pd->appcnt)))
		ip_vs_bind_app(cp, pd->pp);

	/*
	 * Allow conntrack to be preserved. By default, conntrack
	 * is created and destroyed for every packet.
	 * Sometimes keeping conntrack can be useful for
	 * IP_VS_CONN_F_ONE_PACKET too.
	 */

	if (ip_vs_conntrack_enabled(ipvs))
		cp->flags |= IP_VS_CONN_F_NFCT;

	/* Hash it in the ip_vs_conn_tab finally */
	ip_vs_conn_hash(cp);

	return cp;
}

/*
 *	/proc/net/ip_vs_conn entries
 */
#ifdef CONFIG_PROC_FS
struct ip_vs_iter_state {
	struct seq_net_private	p;
	unsigned int		bucket;
	unsigned int		skip_elems;
};

static void *ip_vs_conn_array(struct ip_vs_iter_state *iter)
{
	int idx;
	struct ip_vs_conn *cp;

	for (idx = iter->bucket; idx < ip_vs_conn_tab_size; idx++) {
		unsigned int skip = 0;

		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
			/* __ip_vs_conn_get() is not needed by
			 * ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
			 */
			if (skip >= iter->skip_elems) {
				iter->bucket = idx;
				return cp;
			}

			++skip;
		}

		iter->skip_elems = 0;
		cond_resched_rcu();
	}

	iter->bucket = idx;
	return NULL;
}

static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip_vs_iter_state *iter = seq->private;

	rcu_read_lock();
	if (*pos == 0) {
		iter->skip_elems = 0;
		iter->bucket = 0;
		return SEQ_START_TOKEN;
	}

	return ip_vs_conn_array(iter);
}

static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_conn *cp = v;
	struct ip_vs_iter_state *iter = seq->private;
	struct hlist_node *e;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_conn_array(iter);

	/* more on same hash chain? */
	e = rcu_dereference(hlist_next_rcu(&cp->c_list));
	if (e) {
		iter->skip_elems++;
		return hlist_entry(e, struct ip_vs_conn, c_list);
	}

	iter->skip_elems = 0;
	iter->bucket++;

	return ip_vs_conn_array(iter);
}

static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
   "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData\n");
	else {
		const struct ip_vs_conn *cp = v;
		struct net *net = seq_file_net(seq);
		char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
		size_t len = 0;
		char dbuf[IP_VS_ADDRSTRLEN];

		if (!net_eq(cp->ipvs->net, net))
			return 0;
		if (cp->pe_data) {
			pe_data[0] = ' ';
			len = strlen(cp->pe->name);
			memcpy(pe_data + 1, cp->pe->name, len);
			pe_data[len + 1] = ' ';
			len += 2;
			len += cp->pe->show_pe_data(cp, pe_data + len);
		}
		pe_data[len] = '\0';

#ifdef CONFIG_IP_VS_IPV6
		if (cp->daf == AF_INET6)
			snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
		else
#endif
			snprintf(dbuf, sizeof(dbuf), "%08X",
				 ntohl(cp->daddr.ip));

#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
				"%s %04X %-11s %7u%s\n",
				ip_vs_proto_name(cp->protocol),
				&cp->caddr.in6, ntohs(cp->cport),
				&cp->vaddr.in6, ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
				jiffies_delta_to_msecs(cp->timer.expires -
						       jiffies) / 1000,
				pe_data);
		else
#endif
			seq_printf(seq,
				"%-3s %08X %04X %08X %04X"
				" %s %04X %-11s %7u%s\n",
				ip_vs_proto_name(cp->protocol),
				ntohl(cp->caddr.ip), ntohs(cp->cport),
				ntohl(cp->vaddr.ip), ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
				jiffies_delta_to_msecs(cp->timer.expires -
						       jiffies) / 1000,
				pe_data);
	}
	return 0;
}
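/*
 * An illustrative (made-up) IPv4 entry as rendered by the format above,
 * with hexadecimal addresses/ports and Expires in seconds:
 *
 * Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData
 * TCP C0A80001 D431 C0A80164 0050 0A000001 1F90 ESTABLISHED     897
 */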
static const struct seq_operations ip_vs_conn_seq_ops = {
	.start = ip_vs_conn_seq_start,
	.next  = ip_vs_conn_seq_next,
	.stop  = ip_vs_conn_seq_stop,
	.show  = ip_vs_conn_seq_show,
};

static const char *ip_vs_origin_name(unsigned int flags)
{
	if (flags & IP_VS_CONN_F_SYNC)
		return "SYNC";
	else
		return "LOCAL";
}

static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
	char dbuf[IP_VS_ADDRSTRLEN];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
   "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
	else {
		const struct ip_vs_conn *cp = v;
		struct net *net = seq_file_net(seq);

		if (!net_eq(cp->ipvs->net, net))
			return 0;

#ifdef CONFIG_IP_VS_IPV6
		if (cp->daf == AF_INET6)
			snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
		else
#endif
			snprintf(dbuf, sizeof(dbuf), "%08X",
				 ntohl(cp->daddr.ip));

#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
				"%s %04X %-11s %-6s %7u\n",
				ip_vs_proto_name(cp->protocol),
				&cp->caddr.in6, ntohs(cp->cport),
				&cp->vaddr.in6, ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
				ip_vs_origin_name(cp->flags),
				jiffies_delta_to_msecs(cp->timer.expires -
						       jiffies) / 1000);
		else
#endif
			seq_printf(seq,
				"%-3s %08X %04X %08X %04X "
				"%s %04X %-11s %-6s %7u\n",
				ip_vs_proto_name(cp->protocol),
				ntohl(cp->caddr.ip), ntohs(cp->cport),
				ntohl(cp->vaddr.ip), ntohs(cp->vport),
				dbuf, ntohs(cp->dport),
				ip_vs_state_name(cp),
				ip_vs_origin_name(cp->flags),
				jiffies_delta_to_msecs(cp->timer.expires -
						       jiffies) / 1000);
	}
	return 0;
}

static const struct seq_operations ip_vs_conn_sync_seq_ops = {
	.start = ip_vs_conn_seq_start,
	.next  = ip_vs_conn_seq_next,
	.stop  = ip_vs_conn_seq_stop,
	.show  = ip_vs_conn_sync_seq_show,
};
#endif


/* Randomly drop connection entries before running out of memory
 * Can be used for DATA and CTL conns. For TPL conns there are exceptions:
 * - traffic for services in OPS mode increases ct->in_pkts, so it is supported
 * - traffic for services not in OPS mode does not increase ct->in_pkts in
 *   all cases, so it is not supported
 */
static inline int todrop_entry(struct ip_vs_conn *cp)
{
	/*
	 * The drop rate array needs tuning for real environments.
	 * Called from timer bh only => no locking
	 */
	static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
	static signed char todrop_counter[9] = {0};
	int i;

	/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
	   This will leave enough time for normal connection to get
	   through. */
	if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
		return 0;

	/* Don't drop the entry if its number of incoming packets is not
	   located in [0, 8] */
	i = atomic_read(&cp->in_pkts);
	if (i > 8 || i < 0) return 0;

	if (!todrop_rate[i]) return 0;
	if (--todrop_counter[i] > 0) return 0;

	todrop_counter[i] = todrop_rate[i];
	return 1;
}
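/*
 * How the drop rate above plays out: todrop_rate[] is indexed by the
 * connection's packet count, and todrop_entry() returns 1 roughly once
 * per todrop_rate[i] eligible calls (the counters are shared across all
 * connections).  So an entry with 1 packet is dropped on nearly every
 * scan, one with 8 packets only on every 8th scan, and entries with 0
 * or more than 8 packets are never dropped here.
 */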
static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
{
	struct ip_vs_service *svc;

	if (!cp->dest)
		return false;
	svc = rcu_dereference(cp->dest->svc);
	return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET);
}

/* Called from keventd and must protect itself from softirqs */
void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
{
	int idx;
	struct ip_vs_conn *cp;

	rcu_read_lock();
	/*
	 * Randomly scan 1/32 of the whole table every second
	 */
	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
		unsigned int hash = get_random_u32() & ip_vs_conn_tab_mask;

		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
			if (cp->ipvs != ipvs)
				continue;
			if (atomic_read(&cp->n_control))
				continue;
			if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
				/* connection template of OPS */
				if (ip_vs_conn_ops_mode(cp))
					goto try_drop;
				if (!(cp->state & IP_VS_CTPL_S_ASSURED))
					goto drop;
				continue;
			}
			if (cp->protocol == IPPROTO_TCP) {
				switch(cp->state) {
				case IP_VS_TCP_S_SYN_RECV:
				case IP_VS_TCP_S_SYNACK:
					break;

				case IP_VS_TCP_S_ESTABLISHED:
					if (todrop_entry(cp))
						break;
					continue;

				default:
					continue;
				}
			} else if (cp->protocol == IPPROTO_SCTP) {
				switch (cp->state) {
				case IP_VS_SCTP_S_INIT1:
				case IP_VS_SCTP_S_INIT:
					break;
				case IP_VS_SCTP_S_ESTABLISHED:
					if (todrop_entry(cp))
						break;
					continue;
				default:
					continue;
				}
			} else {
try_drop:
				if (!todrop_entry(cp))
					continue;
			}

drop:
			IP_VS_DBG(4, "drop connection\n");
			ip_vs_conn_del(cp);
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}


/*
 *      Flush all the connection entries in the ip_vs_conn_tab
 */
static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
{
	int idx;
	struct ip_vs_conn *cp, *cp_c;

flush_again:
	rcu_read_lock();
	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {

		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
			if (cp->ipvs != ipvs)
				continue;
			if (atomic_read(&cp->n_control))
				continue;
			cp_c = cp->control;
			IP_VS_DBG(4, "del connection\n");
			ip_vs_conn_del(cp);
			if (cp_c && !atomic_read(&cp_c->n_control)) {
				IP_VS_DBG(4, "del controlling connection\n");
				ip_vs_conn_del(cp_c);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();

	/* the counter may not be zero, because some conn entries may
	   still be run by slow timer handlers or be unhashed but still
	   referenced */
	if (atomic_read(&ipvs->conn_count) != 0) {
		schedule();
		goto flush_again;
	}
}
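/*
 * ip_vs_conn_flush() above cannot guarantee an empty table in a single
 * pass: entries may still be held by timer handlers or by temporary
 * references, and controlled connections are skipped until their
 * controller goes away.  Hence the poll-and-retry loop on conn_count.
 */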
#ifdef CONFIG_SYSCTL
void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
{
	int idx;
	struct ip_vs_conn *cp, *cp_c;
	struct ip_vs_dest *dest;

	rcu_read_lock();
	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
			if (cp->ipvs != ipvs)
				continue;

			dest = cp->dest;
			if (!dest || (dest->flags & IP_VS_DEST_F_AVAILABLE))
				continue;

			if (atomic_read(&cp->n_control))
				continue;

			cp_c = cp->control;
			IP_VS_DBG(4, "del connection\n");
			ip_vs_conn_del(cp);
			if (cp_c && !atomic_read(&cp_c->n_control)) {
				IP_VS_DBG(4, "del controlling connection\n");
				ip_vs_conn_del(cp_c);
			}
		}
		cond_resched_rcu();

		/* netns clean up started, abort delayed work */
		if (!READ_ONCE(ipvs->enable))
			break;
	}
	rcu_read_unlock();
}
#endif

/*
 * per netns init and exit
 */
int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
{
	atomic_set(&ipvs->conn_count, 0);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
			     &ip_vs_conn_seq_ops,
			     sizeof(struct ip_vs_iter_state)))
		goto err_conn;

	if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
			     &ip_vs_conn_sync_seq_ops,
			     sizeof(struct ip_vs_iter_state)))
		goto err_conn_sync;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
err_conn_sync:
	remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
err_conn:
	return -ENOMEM;
#endif
}

void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
{
	/* flush all the connection entries first */
	ip_vs_conn_flush(ipvs);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
	remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
#endif
}

int __init ip_vs_conn_init(void)
{
	size_t tab_array_size;
	int max_avail;
#if BITS_PER_LONG > 32
	int max = 27;
#else
	int max = 20;
#endif
	int min = 8;
	int idx;

	max_avail = order_base_2(totalram_pages()) + PAGE_SHIFT;
	max_avail -= 2;		/* ~4 in hash row */
	max_avail -= 1;		/* IPVS up to 1/2 of mem */
	max_avail -= order_base_2(sizeof(struct ip_vs_conn));
	max = clamp(max_avail, min, max);
	ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
	ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
	ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;

	/*
	 * Allocate the connection hash table and initialize its list heads
	 */
	tab_array_size = array_size(ip_vs_conn_tab_size,
				    sizeof(*ip_vs_conn_tab));
	ip_vs_conn_tab = kvmalloc_array(ip_vs_conn_tab_size,
					sizeof(*ip_vs_conn_tab), GFP_KERNEL);
	if (!ip_vs_conn_tab)
		return -ENOMEM;

	/* Allocate ip_vs_conn slab cache */
	ip_vs_conn_cachep = KMEM_CACHE(ip_vs_conn, SLAB_HWCACHE_ALIGN);
	if (!ip_vs_conn_cachep) {
		kvfree(ip_vs_conn_tab);
		return -ENOMEM;
	}

	pr_info("Connection hash table configured (size=%d, memory=%zdKbytes)\n",
		ip_vs_conn_tab_size, tab_array_size / 1024);
	IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
		  sizeof(struct ip_vs_conn));

	for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
		INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);

	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
		spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
	}

	/* calculate the random value for connection hash */
	get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));

	return 0;
}
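/*
 * Worked example for the sizing logic above (illustrative numbers only,
 * assuming 4 GiB of RAM, 4 KiB pages and a ~320 byte struct ip_vs_conn):
 * order_base_2(2^20 pages) + PAGE_SHIFT(12) = 32, minus 2 (about four
 * entries per hash row), minus 1 (IPVS may use up to half of memory),
 * minus order_base_2(320) = 9, gives max_avail = 20.  A conn_tab_bits
 * module parameter above 20 would then be clamped to a 2^20 bucket table.
 */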
void ip_vs_conn_cleanup(void)
{
	/* Wait for all ip_vs_conn_rcu_free() callbacks to complete */
	rcu_barrier();
	/* Release the empty cache */
	kmem_cache_destroy(ip_vs_conn_cachep);
	kvfree(ip_vs_conn_tab);
}