// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPVS:	Locality-Based Least-Connection scheduling module
 *
 * Authors:	Wensong Zhang <wensong@gnuchina.org>
 *
 * Changes:
 *	Martin Hamilton		:	fixed the terrible locking bugs
 *					*lock(tbl->lock) ==> *lock(&tbl->lock)
 *	Wensong Zhang		:	fixed the uninitialized tbl->lock bug
 *	Wensong Zhang		:	added doing full expiration check to
 *					collect stale entries of 24+ hours when
 *					no partial expire check in a half hour
 *	Julian Anastasov	:	replaced del_timer call with del_timer_sync
 *					to avoid the possible race between timer
 *					handler and del_timer thread in SMP
 */

/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *	if cachenode[dest_ip] is null then
 *		n, cachenode[dest_ip] <- {weighted least-conn node};
 *	else
 *		n <- cachenode[dest_ip];
 *		if (n is dead) OR
 *		   (n.conns>n.weight AND
 *		    there is a node m with m.conns<m.weight/2) then
 *			n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *	return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
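/*
 * Illustrative walk-through (hypothetical numbers, not taken from the
 * code below): suppose destination 10.0.0.1 is cached to server A with
 * weight 3.  While A has conns <= 3, the cached mapping is reused.  If
 * A reaches conns = 5 (> weight) and some server B has conns = 1 with
 * weight 4 (1 < 4/2), the entry is replaced by a fresh weighted
 * least-conn pick, so the mapping can migrate off an overloaded node.
 */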

#define pr_fmt(fmt) "IPVS: " fmt

#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/hash.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>


/*
 * These are for garbage collection of stale IPVS lblc entries,
 * used when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL	(60*HZ)
#define ENTRY_TIMEOUT		(6*60*HZ)

#define DEFAULT_EXPIRATION	(24*60*60*HZ)

/*
 * This is for the full expiration check.
 * When there has been no partial expiration check (garbage collection)
 * in a half hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
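/*
 * Worked timing example (derived from the constants above): the
 * periodic timer fires every CHECK_EXPIRE_INTERVAL = 60*HZ, i.e. once
 * a minute, so COUNT_FOR_FULL_EXPIRATION = 30 firings amount to the
 * "half hour" mentioned above; DEFAULT_EXPIRATION = 24*60*60*HZ is the
 * one-day staleness threshold used by the full check.
 */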


/*
 * for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)
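/*
 * With the default of 10 bits this gives IP_VS_LBLC_TAB_SIZE =
 * 1 << 10 = 1024 buckets and IP_VS_LBLC_TAB_MASK = 0x3ff, so a hash
 * value reduces to a bucket index with a single AND.
 */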


/*
 * IPVS lblc entry represents an association between destination
 * IP address and its destination server
 */
struct ip_vs_lblc_entry {
	struct hlist_node	list;
	int			af;		/* address family */
	union nf_inet_addr	addr;		/* destination IP address */
	struct ip_vs_dest	*dest;		/* real server (cache) */
	unsigned long		lastuse;	/* last used time */
	struct rcu_head		rcu_head;
};


/*
 * IPVS lblc hash table
 */
struct ip_vs_lblc_table {
	struct rcu_head		rcu_head;
	struct hlist_head	bucket[IP_VS_LBLC_TAB_SIZE];	/* hash bucket */
	struct timer_list	periodic_timer;	/* collect stale entries */
	struct ip_vs_service	*svc;		/* pointer back to service */
	atomic_t		entries;	/* number of entries */
	int			max_size;	/* maximum size of entries */
	int			rover;		/* rover for expire check */
	int			counter;	/* counter for no expire */
	bool			dead;
};


/*
 * IPVS LBLC sysctl table
 */
#ifdef CONFIG_SYSCTL
static struct ctl_table vs_vars_table[] = {
	{
		.procname	= "lblc_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
};
#endif
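/*
 * This knob appears as /proc/sys/net/ipv4/vs/lblc_expiration (per
 * netns, registered below).  proc_dointvec_jiffies converts between
 * seconds in userspace and jiffies in the kernel, so e.g.
 * "echo 86400 > /proc/sys/net/ipv4/vs/lblc_expiration" restores the
 * one-day default expiration.
 */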

static void ip_vs_lblc_rcu_free(struct rcu_head *head)
{
	struct ip_vs_lblc_entry *en = container_of(head,
						   struct ip_vs_lblc_entry,
						   rcu_head);

	ip_vs_dest_put_and_free(en->dest);
	kfree(en);
}

static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
{
	hlist_del_rcu(&en->list);
	call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
}

/*
 * Returns hash value for IPVS LBLC entry
 */
static inline unsigned int
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return hash_32(ntohl(addr_fold), IP_VS_LBLC_TAB_BITS);
}
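/*
 * Example (illustrative): an IPv4 address is hashed directly, while an
 * IPv6 address such as 2001:db8::1 is first folded by XOR-ing its four
 * 32-bit words into one __be32 before hash_32() spreads it over
 * IP_VS_LBLC_TAB_BITS bits.  Folding loses information, but only the
 * bucket choice depends on it; lookups still compare the full address
 * with ip_vs_addr_equal().
 */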


/*
 * Hash an entry in the ip_vs_lblc_table.
 */
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
	unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);

	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}


/* Get ip_vs_lblc_entry associated with supplied parameters. */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
	       const union nf_inet_addr *addr)
{
	unsigned int hash = ip_vs_lblc_hashkey(af, addr);
	struct ip_vs_lblc_entry *en;

	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;

	return NULL;
}


/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
 * address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
	       u16 af, struct ip_vs_dest *dest)
{
	struct ip_vs_lblc_entry *en;

	en = ip_vs_lblc_get(af, tbl, daddr);
	if (en) {
		if (en->dest == dest)
			return en;
		ip_vs_lblc_del(en);
	}
	en = kmalloc_obj(*en, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->af = af;
	ip_vs_addr_copy(af, &en->addr, daddr);
	en->lastuse = jiffies;

	ip_vs_dest_hold(dest);
	en->dest = dest;

	ip_vs_lblc_hash(tbl, en);

	return en;
}


/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_lblc_entry *en;
	struct hlist_node *next;
	int i;

	spin_lock_bh(&svc->sched_lock);
	tbl->dead = true;
	for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			ip_vs_lblc_del(en);
			atomic_dec(&tbl->entries);
		}
	}
	spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblc_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
	return svc->ipvs->sysctl_lblc_expiration;
#else
	return DEFAULT_EXPIRATION;
#endif
}

static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_lblc_entry *en;
	struct hlist_node *next;
	unsigned long now = jiffies;
	int i, j;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now,
					en->lastuse +
					sysctl_lblc_expiration(svc)))
				continue;

			ip_vs_lblc_del(en);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}


/*
 * Periodic timer handler for the IPVS lblc table.
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 * The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(struct timer_list *t)
{
	struct ip_vs_lblc_table *tbl = timer_container_of(tbl, t,
							  periodic_timer);
	struct ip_vs_service *svc = tbl->svc;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblc_entry *en;
	struct hlist_node *next;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblc_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;
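	/*
	 * Worked example (hypothetical numbers): with max_size = 16384
	 * and 19384 entries, goal = (19384 - 16384)*4/3 = 4000, i.e. we
	 * try to evict a third more than the overshoot so the table
	 * doesn't sit right at the limit; the clamp to max_size/2 =
	 * 8192 bounds the work done in one timer tick.
	 */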

	for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
				continue;

			ip_vs_lblc_del(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

out:
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}


static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblc_table *tbl;

	/*
	 * Allocate the ip_vs_lblc_table for this service
	 */
	tbl = kmalloc_obj(*tbl, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/*
	 * Initialize the hash buckets
	 */
	for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
		INIT_HLIST_HEAD(&tbl->bucket[i]);
	}
	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
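	/*
	 * With the default 1024-bucket table this permits up to
	 * 1024*16 = 16384 cached entries (an average hash-chain length
	 * of 16) before the garbage collector above starts evicting
	 * entries idle longer than ENTRY_TIMEOUT.
	 */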
	tbl->rover = 0;
	tbl->counter = 1;
	tbl->dead = false;
	tbl->svc = svc;
	atomic_set(&tbl->entries, 0);

	/*
	 * Hook periodic timer for garbage collection
	 */
	timer_setup(&tbl->periodic_timer, ip_vs_lblc_check_expire, 0);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}


static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;

	/* remove periodic timer */
	timer_shutdown_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblc_flush(svc);

	/* release the table itself */
	kfree_rcu(tbl, rcu_head);
	IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) released\n",
		  sizeof(*tbl));
}


static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We use the following formula to estimate the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *                h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
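	/*
	 * Numeric example (hypothetical): servers A (overhead 10,
	 * weight 2) and B (overhead 4, weight 1).  Cross-multiplying,
	 * 10*1 > 4*2, so A's load 10/2 = 5 exceeds B's 4/1 = 4 and B
	 * is preferred -- the same ordering as the division, computed
	 * entirely in integers.
	 */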
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 * Find the destination with the least load.
	 */
nextstage:
	list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      refcount_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}


/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry_rcu(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
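/*
 * This implements the "n.conns > n.weight AND there is a node m with
 * m.conns < m.weight/2" test from the pseudo code at the top of the
 * file (written as d->activeconns*2 < d->weight to stay in integer
 * arithmetic).  E.g., a cached server with 5 active connections and
 * weight 4 is overloaded only if some peer has, say, 1 connection and
 * weight 3 (1*2 < 3).
 */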


/*
 * Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		    struct ip_vs_iphdr *iph)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_dest *dest = NULL;
	struct ip_vs_lblc_entry *en;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	en = ip_vs_lblc_get(svc->af, tbl, &iph->daddr);
	if (en) {
		/* We only hold a read lock, but this is atomic */
		en->lastuse = jiffies;

		/*
		 * If the destination is not available, i.e. it's in the trash,
		 * we must ignore it, as it may be removed from under our feet
		 * if someone drops our reference count. Our caller only makes
		 * sure that destinations that are not in the trash are not
		 * moved to the trash while we are scheduling. But anyone can
		 * free up entries from the trash at any time.
		 */

		dest = en->dest;
		if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
		    atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
			goto out;
	}

	/* No cache entry or it is invalid, time to schedule */
	dest = __ip_vs_lblc_schedule(svc);
	if (!dest) {
		ip_vs_scheduler_err(svc, "no destination available");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	spin_lock_bh(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblc_new(tbl, &iph->daddr, svc->af, dest);
	spin_unlock_bh(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph->daddr),
		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));

	return dest;
}


/*
 * IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler = {
	.name =			"lblc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
	.init_service =		ip_vs_lblc_init_svc,
	.done_service =		ip_vs_lblc_done_svc,
	.schedule =		ip_vs_lblc_schedule,
};
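/*
 * Once the module is loaded, userspace selects this scheduler by name,
 * e.g. (illustrative VIP): "ipvsadm -A -t 192.0.2.1:80 -s lblc";
 * ip_vs_lblc_init_svc() then runs for that virtual service and
 * allocates its private hash table.
 */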

/*
 * per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblc_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	size_t vars_table_size = ARRAY_SIZE(vs_vars_table);

	if (!ipvs)
		return -ENOENT;

	if (!net_eq(net, &init_net)) {
		ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
					       sizeof(vs_vars_table),
					       GFP_KERNEL);
		if (ipvs->lblc_ctl_table == NULL)
			return -ENOMEM;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			vars_table_size = 0;

	} else
		ipvs->lblc_ctl_table = vs_vars_table;
	ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
	ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;

	ipvs->lblc_ctl_header = register_net_sysctl_sz(net, "net/ipv4/vs",
						       ipvs->lblc_ctl_table,
						       vars_table_size);
	if (!ipvs->lblc_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblc_ctl_table);
		return -ENOMEM;
	}

	return 0;
}

static void __net_exit __ip_vs_lblc_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblc_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblc_ctl_table);
}

#else

static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblc_exit(struct net *net) { }

#endif

static struct pernet_operations ip_vs_lblc_ops = {
	.init = __ip_vs_lblc_init,
	.exit = __ip_vs_lblc_exit,
};

static int __init ip_vs_lblc_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblc_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblc_ops);
	return ret;
}

static void __exit ip_vs_lblc_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
	unregister_pernet_subsys(&ip_vs_lblc_ops);
	rcu_barrier();
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ipvs locality-based least-connection scheduler");