// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
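/*
 * Illustrative walk-through of the pseudo code above (hypothetical
 * numbers, not from the original source): suppose dest_ip 10.0.0.1 is
 * first mapped to server A, so serverSet[10.0.0.1] = {A}.  While A
 * stays below its weight in active connections, every request for
 * 10.0.0.1 sticks to A.  If A reaches A.conns > A.weight while some
 * server B has B.conns < B.weight/2, the weighted least-connection
 * pick (say B) is added, giving {A, B} -- the "replication" part.
 * Once the set has been unchanged for longer than T, the busiest
 * member is dropped again, shrinking the set back toward one server
 * per destination IP.
 */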
#define pr_fmt(fmt) "IPVS: " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/hash.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>


/*
 * Used for garbage collection of stale IPVS lblcr entries when the
 * table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION	(24*60*60*HZ)

/*
 * Used for the full expiration check.
 * When no partial expiration check (garbage collection) has run for
 * half an hour, do a full expiration check to collect stale entries
 * that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30

/*
 * for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)


/*
 * IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
	struct list_head	list;		/* list link */
	struct ip_vs_dest	*dest;		/* destination server */
	struct rcu_head		rcu_head;
};

struct ip_vs_dest_set {
	atomic_t		size;		/* set size */
	unsigned long		lastmod;	/* last modified time */
	struct list_head	list;		/* destination list */
};


static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
				  struct ip_vs_dest *dest, bool check)
{
	struct ip_vs_dest_set_elem *e;

	if (check) {
		list_for_each_entry(e, &set->list, list) {
			if (e->dest == dest)
				return;
		}
	}

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	ip_vs_dest_hold(dest);
	e->dest = dest;

	list_add_rcu(&e->list, &set->list);
	atomic_inc(&set->size);

	set->lastmod = jiffies;
}

static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
{
	struct ip_vs_dest_set_elem *e;

	e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
	ip_vs_dest_put_and_free(e->dest);
	kfree(e);
}

static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest) {
			/* HIT */
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			list_del_rcu(&e->list);
			call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
			break;
		}
	}
}

static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e, *ep;

	list_for_each_entry_safe(e, ep, &set->list, list) {
		list_del_rcu(&e->list);
		call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
	}
}

/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry_rcu(e, &set->list, list) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	list_for_each_entry_continue_rcu(e, &set->list, list) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if (((__s64)loh * atomic_read(&dest->weight) >
		     (__s64)doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      refcount_read(&least->refcnt),
		      atomic_read(&least->weight), loh);
	return least;
}
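/*
 * Worked example of the integer comparison above (hypothetical numbers):
 * the current pick "least" has overhead loh=300 and weight 2; a candidate
 * "dest" has overhead doh=400 and weight 4.  Comparing loads without
 * floats, 300/2 > 400/4 becomes 300*4 > 400*2, i.e. 1200 > 800, so the
 * candidate carries the lighter weighted load and replaces the current
 * pick.  The products are taken in __s64 so overhead*weight cannot
 * overflow a 32-bit int.
 */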
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = ip_vs_dest_conn_overhead(most);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	list_for_each_entry_continue(e, &set->list, list) {
		dest = e->dest;
		doh = ip_vs_dest_conn_overhead(dest);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if (((__s64)moh * atomic_read(&dest->weight) <
		     (__s64)doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
		      atomic_read(&most->activeconns),
		      refcount_read(&most->refcnt),
		      atomic_read(&most->weight), moh);
	return most;
}


/*
 * IPVS lblcr entry represents an association between destination
 * IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct hlist_node	list;
	int			af;		/* address family */
	union nf_inet_addr	addr;		/* destination IP address */
	struct ip_vs_dest_set	set;		/* destination server set */
	unsigned long		lastuse;	/* last used time */
	struct rcu_head		rcu_head;
};


/*
 * IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	struct rcu_head		rcu_head;
	struct hlist_head	bucket[IP_VS_LBLCR_TAB_SIZE];	/* hash bucket */
	atomic_t		entries;	/* number of entries */
	int			max_size;	/* maximum size of entries */
	struct timer_list	periodic_timer;	/* collect stale entries */
	struct ip_vs_service	*svc;		/* pointer back to service */
	int			rover;		/* rover for expire check */
	int			counter;	/* counter for no expire */
	bool			dead;
};


#ifdef CONFIG_SYSCTL
/*
 * IPVS LBLCR sysctl table
 */

static struct ctl_table vs_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};
#endif

static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	hlist_del_rcu(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree_rcu(en, rcu_head);
}


/*
 * Returns hash value for IPVS LBLCR entry
 */
static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return hash_32(ntohl(addr_fold), IP_VS_LBLCR_TAB_BITS);
}


/*
 * Hash an entry in the ip_vs_lblcr_table.
 */
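/*
 * Example (assuming the default IP_VS_LBLCR_TAB_BITS of 10): the table
 * has 1 << 10 = 1024 buckets.  An IPv4 destination address is used as
 * the key directly; an IPv6 address has its four 32-bit words
 * XOR-folded first.  hash_32(ntohl(addr_fold), 10) then yields a
 * bucket index in 0..1023 for the insertion below.
 */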
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}


/* Get ip_vs_lblcr_entry associated with supplied parameters. */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
		const union nf_inet_addr *addr)
{
	unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
	struct ip_vs_lblcr_entry *en;

	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;

	return NULL;
}


/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
		u16 af, struct ip_vs_dest *dest)
{
	struct ip_vs_lblcr_entry *en;

	en = ip_vs_lblcr_get(af, tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en)
			return NULL;

		en->af = af;
		ip_vs_addr_copy(af, &en->addr, daddr);
		en->lastuse = jiffies;

		/* initialize its dest set */
		atomic_set(&(en->set.size), 0);
		INIT_LIST_HEAD(&en->set.list);

		ip_vs_dest_set_insert(&en->set, dest, false);

		ip_vs_lblcr_hash(tbl, en);
		return en;
	}

	ip_vs_dest_set_insert(&en->set, dest, true);

	return en;
}


/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	int i;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	spin_lock_bh(&svc->sched_lock);
	tbl->dead = true;
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
		}
	}
	spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
	return svc->ipvs->sysctl_lblcr_expiration;
#else
	return DEFAULT_EXPIRATION;
#endif
}

static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_after(en->lastuse +
				       sysctl_lblcr_expiration(svc), now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}
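/*
 * Worked example for the partial garbage collection below (hypothetical
 * load figures): with the default table, max_size = 1024 * 16 = 16384.
 * If the table holds 20000 entries, goal = (20000 - 16384) * 4/3 = 4821,
 * which is already under the max_size/2 = 8192 cap, so roughly that many
 * entries idle for longer than ENTRY_TIMEOUT (6 minutes) are reclaimed
 * in one run, resuming at the rover bucket where the last run stopped.
 */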
/*
 * Periodic timer handler for IPVS lblcr table.
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 * The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(struct timer_list *t)
{
	struct ip_vs_lblcr_table *tbl = timer_container_of(tbl, t,
							   periodic_timer);
	struct ip_vs_service *svc = tbl->svc;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}

static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/*
	 *    Allocate the ip_vs_lblcr_table for this service
	 */
	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/*
	 *    Initialize the hash buckets
	 */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		INIT_HLIST_HEAD(&tbl->bucket[i]);
	}
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;
	tbl->dead = false;
	tbl->svc = svc;
	atomic_set(&tbl->entries, 0);

	/*
	 *    Hook periodic timer for garbage collection
	 */
	timer_setup(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 0);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}


static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	timer_shutdown_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(svc);

	/* release the table itself */
	kfree_rcu(tbl, rcu_head);
	IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) released\n",
		  sizeof(*tbl));
}
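/*
 * Note on the teardown ordering above: the periodic timer is shut down
 * first so no expiry pass can race with the flush; the flush then marks
 * the table dead under sched_lock and frees the entries; finally the
 * table itself is released via kfree_rcu(), so RCU readers still walking
 * it from the scheduling path finish before the memory goes away.
 */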
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We use the following formula to estimate the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      refcount_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}


/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry_rcu(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}


/*
 * Locality-Based (weighted) Least-Connection with Replication scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		     struct ip_vs_iphdr *iph)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	struct ip_vs_dest *dest;
	struct ip_vs_lblcr_entry *en;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	en = ip_vs_lblcr_get(svc->af, tbl, &iph->daddr);
	if (en) {
		en->lastuse = jiffies;

		/* Get the least loaded destination */
		dest = ip_vs_dest_set_min(&en->set);

		/* More than one destination + enough time passed by, cleanup */
		if (atomic_read(&en->set.size) > 1 &&
		    time_after(jiffies, en->set.lastmod +
				sysctl_lblcr_expiration(svc))) {
			spin_lock_bh(&svc->sched_lock);
			if (atomic_read(&en->set.size) > 1) {
				struct ip_vs_dest *m;

				m = ip_vs_dest_set_max(&en->set);
				if (m)
					ip_vs_dest_set_erase(&en->set, m);
			}
			spin_unlock_bh(&svc->sched_lock);
		}

		/* If the destination is not overloaded, use it */
		if (dest && !is_overloaded(dest, svc))
			goto out;

		/* The cache entry is invalid, time to schedule */
		dest = __ip_vs_lblcr_schedule(svc);
		if (!dest) {
			ip_vs_scheduler_err(svc, "no destination available");
			return NULL;
		}

		/* Update our cache entry */
		spin_lock_bh(&svc->sched_lock);
		if (!tbl->dead)
			ip_vs_dest_set_insert(&en->set, dest, true);
		spin_unlock_bh(&svc->sched_lock);
		goto out;
	}

	/* No cache entry, time to schedule */
	dest = __ip_vs_lblcr_schedule(svc);
	if (!dest) {
		IP_VS_DBG(1, "no destination available\n");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	spin_lock_bh(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblcr_new(tbl, &iph->daddr, svc->af, dest);
	spin_unlock_bh(&svc->sched_lock);

  out:
	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph->daddr),
		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));

	return dest;
}


/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
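/*
 * Usage sketch (illustrative commands; the virtual service address is
 * hypothetical and ipvsadm is assumed to be installed):
 *
 *   # select this scheduler for a virtual service
 *   ipvsadm -A -t 192.0.2.10:80 -s lblcr
 *
 *   # tune the set/entry expiration, in seconds (stored as jiffies via
 *   # proc_dointvec_jiffies; the default is one day)
 *   sysctl -w net.ipv4.vs.lblcr_expiration=86400
 */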
/*
 * per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	size_t vars_table_size = ARRAY_SIZE(vs_vars_table);

	if (!ipvs)
		return -ENOENT;

	if (!net_eq(net, &init_net)) {
		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			vars_table_size = 0;
	} else
		ipvs->lblcr_ctl_table = vs_vars_table;
	ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

	ipvs->lblcr_ctl_header = register_net_sysctl_sz(net, "net/ipv4/vs",
							ipvs->lblcr_ctl_table,
							vars_table_size);
	if (!ipvs->lblcr_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblcr_ctl_table);
		return -ENOMEM;
	}

	return 0;
}

static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblcr_ctl_table);
}

#else

static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }

#endif

static struct pernet_operations ip_vs_lblcr_ops = {
	.init = __ip_vs_lblcr_init,
	.exit = __ip_vs_lblcr_exit,
};

static int __init ip_vs_lblcr_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblcr_ops);
	return ret;
}

static void __exit ip_vs_lblcr_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	unregister_pernet_subsys(&ip_vs_lblcr_ops);
	rcu_barrier();
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ipvs locality-based least-connection with replication scheduler");