// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>

/*
 * Rx hook: LACPDUs (ETH_P_SLOW frames to the link-local address ending
 * in 0x02) must be delivered exactly on the port they arrived on;
 * everything else continues through the normal team rx handling.
 */
static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
		/* LACPDU packets should go to exact delivery */
		const unsigned char *dest = eth_hdr(skb)->h_dest;

		if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
			return RX_HANDLER_EXACT;
	}
	return RX_HANDLER_ANOTHER;
}

struct lb_priv;

/* Signature of a tx port selector: maps an 8-bit hash to a team port. */
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
						   unsigned char);

#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */

struct lb_stats {
	u64 tx_bytes;
};

/* Per-CPU tx byte counters, one slot per possible hash value. */
struct lb_pcpu_stats {
	struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
	struct u64_stats_sync syncp;
};

/* Aggregated counters plus the previous snapshot used to detect change. */
struct lb_stats_info {
	struct lb_stats stats;
	struct lb_stats last_stats;
	struct team_option_inst_info *opt_inst_info;
};

/* One hash-table slot: port mapped to this hash (RCU-protected). */
struct lb_port_mapping {
	struct team_port __rcu *port;
	struct team_option_inst_info *opt_inst_info;
};

/* Extension of the mode-private data, allocated separately in lb_init(). */
struct lb_priv_ex {
	struct team *team;
	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
	struct sock_fprog_kern *orig_fprog;
	struct {
		unsigned int refresh_interval; /* in tenths of second */
		struct delayed_work refresh_dw;
		struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
	} stats;
};

/* Mode-private data; lives inline in team->mode_priv (see get_lb_priv). */
struct lb_priv {
	struct bpf_prog __rcu *fp;
	lb_select_tx_port_func_t __rcu *select_tx_port_func;
	struct lb_pcpu_stats __percpu *pcpu_stats;
	struct lb_priv_ex *ex; /* priv extension */
};
74 75 static struct lb_priv *get_lb_priv(struct team *team) 76 { 77 return (struct lb_priv *) &team->mode_priv; 78 } 79 80 struct lb_port_priv { 81 struct lb_stats __percpu *pcpu_stats; 82 struct lb_stats_info stats_info; 83 }; 84 85 static struct lb_port_priv *get_lb_port_priv(struct team_port *port) 86 { 87 return (struct lb_port_priv *) &port->mode_priv; 88 } 89 90 #define LB_HTPM_PORT_BY_HASH(lp_priv, hash) \ 91 (lb_priv)->ex->tx_hash_to_port_mapping[hash].port 92 93 #define LB_HTPM_OPT_INST_INFO_BY_HASH(lp_priv, hash) \ 94 (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info 95 96 static void lb_tx_hash_to_port_mapping_null_port(struct team *team, 97 struct team_port *port) 98 { 99 struct lb_priv *lb_priv = get_lb_priv(team); 100 bool changed = false; 101 int i; 102 103 for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) { 104 struct lb_port_mapping *pm; 105 106 pm = &lb_priv->ex->tx_hash_to_port_mapping[i]; 107 if (rcu_access_pointer(pm->port) == port) { 108 RCU_INIT_POINTER(pm->port, NULL); 109 team_option_inst_set_change(pm->opt_inst_info); 110 changed = true; 111 } 112 } 113 if (changed) 114 team_options_change_check(team); 115 } 116 117 /* Basic tx selection based solely by hash */ 118 static struct team_port *lb_hash_select_tx_port(struct team *team, 119 unsigned char hash) 120 { 121 int port_index = team_num_to_port_index(team, hash); 122 123 return team_get_port_by_index_rcu(team, port_index); 124 } 125 126 /* Hash to port mapping select tx port */ 127 static struct team_port *lb_htpm_select_tx_port(struct team *team, 128 unsigned char hash) 129 { 130 struct lb_priv *lb_priv = get_lb_priv(team); 131 struct team_port *port; 132 133 port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash)); 134 if (likely(port)) 135 return port; 136 /* If no valid port in the table, fall back to simple hash */ 137 return lb_hash_select_tx_port(team, hash); 138 } 139 140 struct lb_select_tx_port { 141 char *name; 142 lb_select_tx_port_func_t *func; 143 }; 144 145 static 
const struct lb_select_tx_port lb_select_tx_port_list[] = { 146 { 147 .name = "hash", 148 .func = lb_hash_select_tx_port, 149 }, 150 { 151 .name = "hash_to_port_mapping", 152 .func = lb_htpm_select_tx_port, 153 }, 154 }; 155 #define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list) 156 157 static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func) 158 { 159 int i; 160 161 for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) { 162 const struct lb_select_tx_port *item; 163 164 item = &lb_select_tx_port_list[i]; 165 if (item->func == func) 166 return item->name; 167 } 168 return NULL; 169 } 170 171 static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name) 172 { 173 int i; 174 175 for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) { 176 const struct lb_select_tx_port *item; 177 178 item = &lb_select_tx_port_list[i]; 179 if (!strcmp(item->name, name)) 180 return item->func; 181 } 182 return NULL; 183 } 184 185 static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv, 186 struct sk_buff *skb) 187 { 188 struct bpf_prog *fp; 189 uint32_t lhash; 190 unsigned char *c; 191 192 fp = rcu_dereference_bh(lb_priv->fp); 193 if (unlikely(!fp)) 194 return 0; 195 lhash = bpf_prog_run(fp, skb); 196 c = (char *) &lhash; 197 return c[0] ^ c[1] ^ c[2] ^ c[3]; 198 } 199 200 static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv, 201 struct lb_port_priv *lb_port_priv, 202 unsigned char hash) 203 { 204 struct lb_pcpu_stats *pcpu_stats; 205 struct lb_stats *port_stats; 206 struct lb_stats *hash_stats; 207 208 pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats); 209 port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats); 210 hash_stats = &pcpu_stats->hash_stats[hash]; 211 u64_stats_update_begin(&pcpu_stats->syncp); 212 port_stats->tx_bytes += tx_bytes; 213 hash_stats->tx_bytes += tx_bytes; 214 u64_stats_update_end(&pcpu_stats->syncp); 215 } 216 217 static bool lb_transmit(struct team *team, struct sk_buff *skb) 218 { 219 struct 
lb_priv *lb_priv = get_lb_priv(team); 220 lb_select_tx_port_func_t *select_tx_port_func; 221 struct team_port *port; 222 unsigned char hash; 223 unsigned int tx_bytes = skb->len; 224 225 hash = lb_get_skb_hash(lb_priv, skb); 226 select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func); 227 port = select_tx_port_func(team, hash); 228 if (unlikely(!port)) 229 goto drop; 230 if (team_dev_queue_xmit(team, port, skb)) 231 return false; 232 lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash); 233 return true; 234 235 drop: 236 dev_kfree_skb_any(skb); 237 return false; 238 } 239 240 static void lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) 241 { 242 struct lb_priv *lb_priv = get_lb_priv(team); 243 244 if (!lb_priv->ex->orig_fprog) { 245 ctx->data.bin_val.len = 0; 246 ctx->data.bin_val.ptr = NULL; 247 return; 248 } 249 ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len * 250 sizeof(struct sock_filter); 251 ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter; 252 } 253 254 static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len, 255 const void *data) 256 { 257 struct sock_fprog_kern *fprog; 258 struct sock_filter *filter = (struct sock_filter *) data; 259 260 if (data_len % sizeof(struct sock_filter)) 261 return -EINVAL; 262 fprog = kmalloc(sizeof(*fprog), GFP_KERNEL); 263 if (!fprog) 264 return -ENOMEM; 265 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL); 266 if (!fprog->filter) { 267 kfree(fprog); 268 return -ENOMEM; 269 } 270 fprog->len = data_len / sizeof(struct sock_filter); 271 *pfprog = fprog; 272 return 0; 273 } 274 275 static void __fprog_destroy(struct sock_fprog_kern *fprog) 276 { 277 kfree(fprog->filter); 278 kfree(fprog); 279 } 280 281 static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) 282 { 283 struct lb_priv *lb_priv = get_lb_priv(team); 284 struct bpf_prog *fp = NULL; 285 struct bpf_prog *orig_fp = NULL; 286 struct sock_fprog_kern *fprog = NULL; 287 int err; 
288 289 if (ctx->data.bin_val.len) { 290 err = __fprog_create(&fprog, ctx->data.bin_val.len, 291 ctx->data.bin_val.ptr); 292 if (err) 293 return err; 294 err = bpf_prog_create(&fp, fprog); 295 if (err) { 296 __fprog_destroy(fprog); 297 return err; 298 } 299 } 300 301 if (lb_priv->ex->orig_fprog) { 302 /* Clear old filter data */ 303 __fprog_destroy(lb_priv->ex->orig_fprog); 304 orig_fp = rtnl_dereference(lb_priv->fp); 305 } 306 307 rcu_assign_pointer(lb_priv->fp, fp); 308 lb_priv->ex->orig_fprog = fprog; 309 310 if (orig_fp) { 311 synchronize_rcu(); 312 bpf_prog_destroy(orig_fp); 313 } 314 return 0; 315 } 316 317 static void lb_bpf_func_free(struct team *team) 318 { 319 struct lb_priv *lb_priv = get_lb_priv(team); 320 struct bpf_prog *fp; 321 322 if (!lb_priv->ex->orig_fprog) 323 return; 324 325 __fprog_destroy(lb_priv->ex->orig_fprog); 326 fp = rtnl_dereference(lb_priv->fp); 327 bpf_prog_destroy(fp); 328 } 329 330 static void lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) 331 { 332 struct lb_priv *lb_priv = get_lb_priv(team); 333 lb_select_tx_port_func_t *func; 334 char *name; 335 336 func = rtnl_dereference(lb_priv->select_tx_port_func); 337 name = lb_select_tx_port_get_name(func); 338 BUG_ON(!name); 339 ctx->data.str_val = name; 340 } 341 342 static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx) 343 { 344 struct lb_priv *lb_priv = get_lb_priv(team); 345 lb_select_tx_port_func_t *func; 346 347 func = lb_select_tx_port_get_func(ctx->data.str_val); 348 if (!func) 349 return -EINVAL; 350 rcu_assign_pointer(lb_priv->select_tx_port_func, func); 351 return 0; 352 } 353 354 static void lb_tx_hash_to_port_mapping_init(struct team *team, 355 struct team_option_inst_info *info) 356 { 357 struct lb_priv *lb_priv = get_lb_priv(team); 358 unsigned char hash = info->array_index; 359 360 LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info; 361 } 362 363 static void lb_tx_hash_to_port_mapping_get(struct team *team, 364 struct 
team_gsetter_ctx *ctx) 365 { 366 struct lb_priv *lb_priv = get_lb_priv(team); 367 struct team_port *port; 368 unsigned char hash = ctx->info->array_index; 369 370 port = LB_HTPM_PORT_BY_HASH(lb_priv, hash); 371 ctx->data.u32_val = port ? port->dev->ifindex : 0; 372 } 373 374 static int lb_tx_hash_to_port_mapping_set(struct team *team, 375 struct team_gsetter_ctx *ctx) 376 { 377 struct lb_priv *lb_priv = get_lb_priv(team); 378 struct team_port *port; 379 unsigned char hash = ctx->info->array_index; 380 381 list_for_each_entry(port, &team->port_list, list) { 382 if (ctx->data.u32_val == port->dev->ifindex && 383 team_port_enabled(port)) { 384 rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash), 385 port); 386 return 0; 387 } 388 } 389 return -ENODEV; 390 } 391 392 static void lb_hash_stats_init(struct team *team, 393 struct team_option_inst_info *info) 394 { 395 struct lb_priv *lb_priv = get_lb_priv(team); 396 unsigned char hash = info->array_index; 397 398 lb_priv->ex->stats.info[hash].opt_inst_info = info; 399 } 400 401 static void lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx) 402 { 403 struct lb_priv *lb_priv = get_lb_priv(team); 404 unsigned char hash = ctx->info->array_index; 405 406 ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats; 407 ctx->data.bin_val.len = sizeof(struct lb_stats); 408 } 409 410 static void lb_port_stats_init(struct team *team, 411 struct team_option_inst_info *info) 412 { 413 struct team_port *port = info->port; 414 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); 415 416 lb_port_priv->stats_info.opt_inst_info = info; 417 } 418 419 static void lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx) 420 { 421 struct team_port *port = ctx->info->port; 422 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); 423 424 ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats; 425 ctx->data.bin_val.len = sizeof(struct lb_stats); 426 } 427 428 static void 
__lb_stats_info_refresh_prepare(struct lb_stats_info *s_info) 429 { 430 memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats)); 431 memset(&s_info->stats, 0, sizeof(struct lb_stats)); 432 } 433 434 static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info, 435 struct team *team) 436 { 437 if (memcmp(&s_info->last_stats, &s_info->stats, 438 sizeof(struct lb_stats))) { 439 team_option_inst_set_change(s_info->opt_inst_info); 440 return true; 441 } 442 return false; 443 } 444 445 static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats, 446 struct lb_stats *cpu_stats, 447 struct u64_stats_sync *syncp) 448 { 449 unsigned int start; 450 struct lb_stats tmp; 451 452 do { 453 start = u64_stats_fetch_begin(syncp); 454 tmp.tx_bytes = cpu_stats->tx_bytes; 455 } while (u64_stats_fetch_retry(syncp, start)); 456 acc_stats->tx_bytes += tmp.tx_bytes; 457 } 458 459 static void lb_stats_refresh(struct work_struct *work) 460 { 461 struct team *team; 462 struct lb_priv *lb_priv; 463 struct lb_priv_ex *lb_priv_ex; 464 struct lb_pcpu_stats *pcpu_stats; 465 struct lb_stats *stats; 466 struct lb_stats_info *s_info; 467 struct team_port *port; 468 bool changed = false; 469 int i; 470 int j; 471 472 lb_priv_ex = container_of(work, struct lb_priv_ex, 473 stats.refresh_dw.work); 474 475 team = lb_priv_ex->team; 476 lb_priv = get_lb_priv(team); 477 478 if (!rtnl_trylock()) { 479 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0); 480 return; 481 } 482 483 for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) { 484 s_info = &lb_priv->ex->stats.info[j]; 485 __lb_stats_info_refresh_prepare(s_info); 486 for_each_possible_cpu(i) { 487 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); 488 stats = &pcpu_stats->hash_stats[j]; 489 __lb_one_cpu_stats_add(&s_info->stats, stats, 490 &pcpu_stats->syncp); 491 } 492 changed |= __lb_stats_info_refresh_check(s_info, team); 493 } 494 495 list_for_each_entry(port, &team->port_list, list) { 496 struct lb_port_priv *lb_port_priv = 
get_lb_port_priv(port); 497 498 s_info = &lb_port_priv->stats_info; 499 __lb_stats_info_refresh_prepare(s_info); 500 for_each_possible_cpu(i) { 501 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); 502 stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i); 503 __lb_one_cpu_stats_add(&s_info->stats, stats, 504 &pcpu_stats->syncp); 505 } 506 changed |= __lb_stats_info_refresh_check(s_info, team); 507 } 508 509 if (changed) 510 team_options_change_check(team); 511 512 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 513 (lb_priv_ex->stats.refresh_interval * HZ) / 10); 514 515 rtnl_unlock(); 516 } 517 518 static void lb_stats_refresh_interval_get(struct team *team, 519 struct team_gsetter_ctx *ctx) 520 { 521 struct lb_priv *lb_priv = get_lb_priv(team); 522 523 ctx->data.u32_val = lb_priv->ex->stats.refresh_interval; 524 } 525 526 static int lb_stats_refresh_interval_set(struct team *team, 527 struct team_gsetter_ctx *ctx) 528 { 529 struct lb_priv *lb_priv = get_lb_priv(team); 530 unsigned int interval; 531 532 interval = ctx->data.u32_val; 533 if (lb_priv->ex->stats.refresh_interval == interval) 534 return 0; 535 lb_priv->ex->stats.refresh_interval = interval; 536 if (interval) 537 schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0); 538 else 539 cancel_delayed_work(&lb_priv->ex->stats.refresh_dw); 540 return 0; 541 } 542 543 static const struct team_option lb_options[] = { 544 { 545 .name = "bpf_hash_func", 546 .type = TEAM_OPTION_TYPE_BINARY, 547 .getter = lb_bpf_func_get, 548 .setter = lb_bpf_func_set, 549 }, 550 { 551 .name = "lb_tx_method", 552 .type = TEAM_OPTION_TYPE_STRING, 553 .getter = lb_tx_method_get, 554 .setter = lb_tx_method_set, 555 }, 556 { 557 .name = "lb_tx_hash_to_port_mapping", 558 .array_size = LB_TX_HASHTABLE_SIZE, 559 .type = TEAM_OPTION_TYPE_U32, 560 .init = lb_tx_hash_to_port_mapping_init, 561 .getter = lb_tx_hash_to_port_mapping_get, 562 .setter = lb_tx_hash_to_port_mapping_set, 563 }, 564 { 565 .name = "lb_hash_stats", 566 .array_size = 
LB_TX_HASHTABLE_SIZE, 567 .type = TEAM_OPTION_TYPE_BINARY, 568 .init = lb_hash_stats_init, 569 .getter = lb_hash_stats_get, 570 }, 571 { 572 .name = "lb_port_stats", 573 .per_port = true, 574 .type = TEAM_OPTION_TYPE_BINARY, 575 .init = lb_port_stats_init, 576 .getter = lb_port_stats_get, 577 }, 578 { 579 .name = "lb_stats_refresh_interval", 580 .type = TEAM_OPTION_TYPE_U32, 581 .getter = lb_stats_refresh_interval_get, 582 .setter = lb_stats_refresh_interval_set, 583 }, 584 }; 585 586 static int lb_init(struct team *team) 587 { 588 struct lb_priv *lb_priv = get_lb_priv(team); 589 lb_select_tx_port_func_t *func; 590 int i, err; 591 592 /* set default tx port selector */ 593 func = lb_select_tx_port_get_func("hash"); 594 BUG_ON(!func); 595 rcu_assign_pointer(lb_priv->select_tx_port_func, func); 596 597 lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL); 598 if (!lb_priv->ex) 599 return -ENOMEM; 600 lb_priv->ex->team = team; 601 602 lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats); 603 if (!lb_priv->pcpu_stats) { 604 err = -ENOMEM; 605 goto err_alloc_pcpu_stats; 606 } 607 608 for_each_possible_cpu(i) { 609 struct lb_pcpu_stats *team_lb_stats; 610 team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); 611 u64_stats_init(&team_lb_stats->syncp); 612 } 613 614 615 INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh); 616 617 err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options)); 618 if (err) 619 goto err_options_register; 620 return 0; 621 622 err_options_register: 623 free_percpu(lb_priv->pcpu_stats); 624 err_alloc_pcpu_stats: 625 kfree(lb_priv->ex); 626 return err; 627 } 628 629 static void lb_exit(struct team *team) 630 { 631 struct lb_priv *lb_priv = get_lb_priv(team); 632 633 team_options_unregister(team, lb_options, 634 ARRAY_SIZE(lb_options)); 635 lb_bpf_func_free(team); 636 cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw); 637 free_percpu(lb_priv->pcpu_stats); 638 kfree(lb_priv->ex); 639 } 640 641 static int 
lb_port_enter(struct team *team, struct team_port *port) 642 { 643 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); 644 645 lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats); 646 if (!lb_port_priv->pcpu_stats) 647 return -ENOMEM; 648 return 0; 649 } 650 651 static void lb_port_leave(struct team *team, struct team_port *port) 652 { 653 struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); 654 655 free_percpu(lb_port_priv->pcpu_stats); 656 } 657 658 static void lb_port_disabled(struct team *team, struct team_port *port) 659 { 660 lb_tx_hash_to_port_mapping_null_port(team, port); 661 } 662 663 static const struct team_mode_ops lb_mode_ops = { 664 .init = lb_init, 665 .exit = lb_exit, 666 .port_enter = lb_port_enter, 667 .port_leave = lb_port_leave, 668 .port_disabled = lb_port_disabled, 669 .receive = lb_receive, 670 .transmit = lb_transmit, 671 }; 672 673 static const struct team_mode lb_mode = { 674 .kind = "loadbalance", 675 .owner = THIS_MODULE, 676 .priv_size = sizeof(struct lb_priv), 677 .port_priv_size = sizeof(struct lb_port_priv), 678 .ops = &lb_mode_ops, 679 .lag_tx_type = NETDEV_LAG_TX_TYPE_HASH, 680 }; 681 682 static int __init lb_init_module(void) 683 { 684 return team_mode_register(&lb_mode); 685 } 686 687 static void __exit lb_cleanup_module(void) 688 { 689 team_mode_unregister(&lb_mode); 690 } 691 692 module_init(lb_init_module); 693 module_exit(lb_cleanup_module); 694 695 MODULE_LICENSE("GPL v2"); 696 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); 697 MODULE_DESCRIPTION("Load-balancing mode for team"); 698 MODULE_ALIAS_TEAM_MODE("loadbalance"); 699