/*
 * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
 *
 * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * BUT, I'm the one who modified it for ethernet, so:
 *      (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
 *
 * This software may be used and distributed according to the terms
 * of the GNU Public License, incorporated herein by reference.
 *
 */

#ifndef _NET_BONDING_H
#define _NET_BONDING_H

#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/if_bonding.h>
#include <linux/cpumask.h>
#include <linux/in6.h>
#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/reciprocal_div.h>
#include <linux/if_link.h>

#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#include <net/bond_options.h>

#define BOND_MAX_ARP_TARGETS	16

#define BOND_DEFAULT_MIIMON	100

#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif

#define slave_info(bond_dev, slave_dev, fmt, ...) \
        netdev_info(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
#define slave_warn(bond_dev, slave_dev, fmt, ...) \
        netdev_warn(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
#define slave_dbg(bond_dev, slave_dev, fmt, ...) \
        netdev_dbg(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)
#define slave_err(bond_dev, slave_dev, fmt, ...) \
        netdev_err(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__)

#define BOND_MODE(bond) ((bond)->params.mode)

/* slave list primitives */
#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)

#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))

/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
#define bond_first_slave(bond) \
        (bond_has_slaves(bond) ? \
                netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
                NULL)
#define bond_last_slave(bond) \
        (bond_has_slaves(bond) ? \
                netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
                NULL)

/* Caller must have rcu_read_lock */
#define bond_first_slave_rcu(bond) \
        netdev_lower_get_first_private_rcu(bond->dev)

#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))

/**
 * bond_for_each_slave - iterate over all slaves
 * @bond: the bond holding this list
 * @pos: current slave
 * @iter: list_head * iterator
 *
 * Caller must hold RTNL
 */
#define bond_for_each_slave(bond, pos, iter) \
        netdev_for_each_lower_private((bond)->dev, pos, iter)

/* Caller must have rcu_read_lock */
#define bond_for_each_slave_rcu(bond, pos, iter) \
        netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
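/*
 * Example (illustrative sketch, not part of the bonding API): walking the
 * slave list from a reader that cannot take RTNL.  The hypothetical
 * my_count_up_slaves() below assumes only the iterator macros above and the
 * struct definitions further down in this header.
 *
 *      static int my_count_up_slaves(struct bonding *bond)
 *      {
 *              struct list_head *iter;
 *              struct slave *slave;
 *              int n = 0;
 *
 *              rcu_read_lock();
 *              bond_for_each_slave_rcu(bond, slave, iter)
 *                      if (netif_carrier_ok(slave->dev))
 *                              n++;
 *              rcu_read_unlock();
 *
 *              return n;
 *      }
 */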
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
                            NETIF_F_GSO_ESP)

#define BOND_TLS_FEATURES (NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX)

#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;

static inline void block_netpoll_tx(void)
{
        atomic_inc(&netpoll_block_tx);
}

static inline void unblock_netpoll_tx(void)
{
        atomic_dec(&netpoll_block_tx);
}

static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
        if (unlikely(netpoll_tx_running(dev)))
                return atomic_read(&netpoll_block_tx);
        return 0;
}
#else
#define block_netpoll_tx()
#define unblock_netpoll_tx()
#define is_netpoll_tx_blocked(dev) (0)
#endif

struct bond_params {
        int mode;
        int xmit_policy;
        int miimon;
        u8 num_peer_notif;
        int arp_interval;
        int arp_validate;
        int arp_all_targets;
        int use_carrier;
        int fail_over_mac;
        int updelay;
        int downdelay;
        int peer_notif_delay;
        int lacp_active;
        int lacp_fast;
        unsigned int min_links;
        int ad_select;
        char primary[IFNAMSIZ];
        int primary_reselect;
        __be32 arp_targets[BOND_MAX_ARP_TARGETS];
        int tx_queues;
        int all_slaves_active;
        int resend_igmp;
        int lp_interval;
        int packets_per_slave;
        int tlb_dynamic_lb;
        struct reciprocal_value reciprocal_packets_per_slave;
        u16 ad_actor_sys_prio;
        u16 ad_user_port_key;

        /* 2 bytes of padding : see ether_addr_equal_64bits() */
        u8 ad_actor_system[ETH_ALEN + 2];
};

struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
        int delay;
        /* all three in jiffies */
        unsigned long last_link_up;
        unsigned long last_rx;
        unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
        s8 link;                /* one of BOND_LINK_XXXX */
        s8 link_new_state;      /* one of BOND_LINK_XXXX */
        u8 backup:1,            /* indicates backup slave. Value corresponds with
                                   BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
           inactive:1,          /* indicates inactive slave */
           should_notify:1,     /* indicates whether the state changed */
           should_notify_link:1; /* indicates whether the link changed */
        u8 duplex;
        u32 original_mtu;
        u32 link_failure_count;
        u32 speed;
        u16 queue_id;
        u8 perm_hwaddr[MAX_ADDR_LEN];
        struct ad_slave_info *ad_info;
        struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
#endif
        struct delayed_work notify_work;
        struct kobject kobj;
        struct rtnl_link_stats64 slave_stats;
};

static inline struct slave *to_slave(struct kobject *kobj)
{
        return container_of(kobj, struct slave, kobj);
}

struct bond_up_slave {
        unsigned int count;
        struct rcu_head rcu;
        struct slave *arr[];
};

/*
 * Link pseudo-state only used internally by monitors
 */
#define BOND_LINK_NOCHANGE -1

struct bond_ipsec {
        struct list_head list;
        struct xfrm_state *xs;
};

/*
 * Here are the locking policies for the two bonding locks:
 * Get rcu_read_lock when reading or RTNL when writing slave list.
 */
struct bonding {
        struct net_device *dev; /* first - useful for panic debug */
        struct slave __rcu *curr_active_slave;
        struct slave __rcu *current_arp_slave;
        struct slave __rcu *primary_slave;
        struct bond_up_slave __rcu *usable_slaves;
        struct bond_up_slave __rcu *all_slaves;
        bool force_primary;
        s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
        int (*recv_probe)(const struct sk_buff *, struct bonding *,
                          struct slave *);
        /* mode_lock is used for mode-specific locking needs, currently used by:
         * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and
         *                bond_3ad_state_machine_handler() concurrently and also
         *                the access to the state machine shared variables.
         * TLB mode (5) - to sync the use and modifications of its hash table
         * ALB mode (6) - to sync the use and modifications of its hash table
         */
        spinlock_t mode_lock;
        spinlock_t stats_lock;
        u8 send_peer_notif;
        u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc_entry;
        char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
        struct list_head bond_list;
        u32 __percpu *rr_tx_counter;
        struct ad_bond_info ad_info;
        struct alb_bond_info alb_info;
        struct bond_params params;
        struct workqueue_struct *wq;
        struct delayed_work mii_work;
        struct delayed_work arp_work;
        struct delayed_work alb_work;
        struct delayed_work ad_work;
        struct delayed_work mcast_work;
        struct delayed_work slave_arr_work;
#ifdef CONFIG_DEBUG_FS
        /* debugging support via debugfs */
        struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
        struct rtnl_link_stats64 bond_stats;
#ifdef CONFIG_XFRM_OFFLOAD
        struct list_head ipsec_list;
        /* protecting ipsec_list */
        spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
        struct bpf_prog *xdp_prog;
};

#define bond_slave_get_rcu(dev) \
        ((struct slave *) rcu_dereference(dev->rx_handler_data))

#define bond_slave_get_rtnl(dev) \
        ((struct slave *) rtnl_dereference(dev->rx_handler_data))

void bond_queue_slave_event(struct slave *slave);
void bond_lower_state_changed(struct slave *slave);

struct bond_vlan_tag {
        __be16 vlan_proto;
        unsigned short vlan_id;
};

bool bond_sk_check(struct bonding *bond);
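/*
 * Example (illustrative sketch, not part of the bonding API): per the
 * locking policy documented above struct bonding, readers take
 * rcu_read_lock() and rcu_dereference() the slave pointers, while writers
 * hold RTNL.  The helper name below is hypothetical.
 */
static inline bool bond_example_active_has_carrier(struct bonding *bond)
{
        struct slave *slave;
        bool up = false;

        rcu_read_lock();
        slave = rcu_dereference(bond->curr_active_slave);
        if (slave)
                up = netif_carrier_ok(slave->dev);
        rcu_read_unlock();

        return up;
}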
/**
 * Returns NULL if the net_device does not belong to any of the bond's slaves
 *
 * Caller must hold bond lock for read
 */
static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
                                                  struct net_device *slave_dev)
{
        return netdev_lower_dev_get_private(bond->dev, slave_dev);
}

static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
        return slave->bond;
}

static inline bool bond_should_override_tx_queue(struct bonding *bond)
{
        return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
               BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
}

static inline bool bond_is_lb(const struct bonding *bond)
{
        return BOND_MODE(bond) == BOND_MODE_TLB ||
               BOND_MODE(bond) == BOND_MODE_ALB;
}

static inline bool bond_needs_speed_duplex(const struct bonding *bond)
{
        return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
}

static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
{
        return (bond_is_lb(bond) && bond->params.tlb_dynamic_lb == 0);
}

static inline bool bond_mode_can_use_xmit_hash(const struct bonding *bond)
{
        return (BOND_MODE(bond) == BOND_MODE_8023AD ||
                BOND_MODE(bond) == BOND_MODE_XOR ||
                BOND_MODE(bond) == BOND_MODE_TLB ||
                BOND_MODE(bond) == BOND_MODE_ALB);
}

static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond)
{
        return (BOND_MODE(bond) == BOND_MODE_8023AD ||
                BOND_MODE(bond) == BOND_MODE_XOR ||
                bond_is_nondyn_tlb(bond));
}

static inline bool bond_mode_uses_arp(int mode)
{
        return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
               mode != BOND_MODE_ALB;
}

static inline bool bond_mode_uses_primary(int mode)
{
        return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
               mode == BOND_MODE_ALB;
}

static inline bool bond_uses_primary(struct bonding *bond)
{
        return bond_mode_uses_primary(BOND_MODE(bond));
}

static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
{
        struct slave *slave = rcu_dereference(bond->curr_active_slave);

        return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}

static inline bool bond_slave_is_up(struct slave *slave)
{
        return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
}

static inline void bond_set_active_slave(struct slave *slave)
{
        if (slave->backup) {
                slave->backup = 0;
                bond_queue_slave_event(slave);
                bond_lower_state_changed(slave);
        }
}

static inline void bond_set_backup_slave(struct slave *slave)
{
        if (!slave->backup) {
                slave->backup = 1;
                bond_queue_slave_event(slave);
                bond_lower_state_changed(slave);
        }
}

static inline void bond_set_slave_state(struct slave *slave,
                                        int slave_state, bool notify)
{
        if (slave->backup == slave_state)
                return;

        slave->backup = slave_state;
        if (notify) {
                bond_lower_state_changed(slave);
                bond_queue_slave_event(slave);
                slave->should_notify = 0;
        } else {
                if (slave->should_notify)
                        slave->should_notify = 0;
                else
                        slave->should_notify = 1;
        }
}

static inline void bond_slave_state_change(struct bonding *bond)
{
        struct list_head *iter;
        struct slave *tmp;

        bond_for_each_slave(bond, tmp, iter) {
                if (tmp->link == BOND_LINK_UP)
                        bond_set_active_slave(tmp);
                else if (tmp->link == BOND_LINK_DOWN)
                        bond_set_backup_slave(tmp);
        }
}

static inline void bond_slave_state_notify(struct bonding *bond)
{
        struct list_head *iter;
        struct slave *tmp;

        bond_for_each_slave(bond, tmp, iter) {
                if (tmp->should_notify) {
                        bond_lower_state_changed(tmp);
                        tmp->should_notify = 0;
                }
        }
}

static inline int bond_slave_state(struct slave *slave)
{
        return slave->backup;
}

static inline bool bond_is_active_slave(struct slave *slave)
{
        return !bond_slave_state(slave);
}
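/*
 * Example (illustrative sketch, not part of the bonding API): state changes
 * can be batched by passing notify == false to bond_set_slave_state() and
 * flushing the pending notifications in one pass, which is what the
 * BOND_SLAVE_NOTIFY_LATER convention further below is for.  Caller must
 * hold RTNL.  The helper name is hypothetical.
 */
static inline void bond_example_mark_all_backup(struct bonding *bond)
{
        struct list_head *iter;
        struct slave *slave;

        bond_for_each_slave(bond, slave, iter)
                bond_set_slave_state(slave, BOND_STATE_BACKUP, false);

        bond_slave_state_notify(bond);
}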
static inline bool bond_slave_can_tx(struct slave *slave)
{
        return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
               bond_is_active_slave(slave);
}

static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
{
        struct slave *slave;
        bool active;

        rcu_read_lock();
        slave = bond_slave_get_rcu(slave_dev);
        active = bond_is_active_slave(slave);
        rcu_read_unlock();

        return active;
}

static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
{
        if (len == ETH_ALEN) {
                ether_addr_copy(dst, src);
                return;
        }

        memcpy(dst, src, len);
}

#define BOND_PRI_RESELECT_ALWAYS	0
#define BOND_PRI_RESELECT_BETTER	1
#define BOND_PRI_RESELECT_FAILURE	2

#define BOND_FOM_NONE			0
#define BOND_FOM_ACTIVE			1
#define BOND_FOM_FOLLOW			2

#define BOND_ARP_TARGETS_ANY		0
#define BOND_ARP_TARGETS_ALL		1

#define BOND_ARP_VALIDATE_NONE		0
#define BOND_ARP_VALIDATE_ACTIVE	(1 << BOND_STATE_ACTIVE)
#define BOND_ARP_VALIDATE_BACKUP	(1 << BOND_STATE_BACKUP)
#define BOND_ARP_VALIDATE_ALL		(BOND_ARP_VALIDATE_ACTIVE | \
					 BOND_ARP_VALIDATE_BACKUP)
#define BOND_ARP_FILTER			(BOND_ARP_VALIDATE_ALL + 1)
#define BOND_ARP_FILTER_ACTIVE		(BOND_ARP_VALIDATE_ACTIVE | \
					 BOND_ARP_FILTER)
#define BOND_ARP_FILTER_BACKUP		(BOND_ARP_VALIDATE_BACKUP | \
					 BOND_ARP_FILTER)

#define BOND_SLAVE_NOTIFY_NOW		true
#define BOND_SLAVE_NOTIFY_LATER		false

static inline int slave_do_arp_validate(struct bonding *bond,
                                        struct slave *slave)
{
        return bond->params.arp_validate & (1 << bond_slave_state(slave));
}

static inline int slave_do_arp_validate_only(struct bonding *bond)
{
        return bond->params.arp_validate & BOND_ARP_FILTER;
}

static inline int bond_is_ip_target_ok(__be32 addr)
{
        return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
}

/* Get the oldest arp which we've received on this slave for bond's
 * arp_targets.
 */
static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
                                                       struct slave *slave)
{
        int i = 1;
        unsigned long ret = slave->target_last_arp_rx[0];

        for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
                if (time_before(slave->target_last_arp_rx[i], ret))
                        ret = slave->target_last_arp_rx[i];

        return ret;
}

static inline unsigned long slave_last_rx(struct bonding *bond,
                                          struct slave *slave)
{
        if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
                return slave_oldest_target_arp_rx(bond, slave);

        return slave->last_rx;
}
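/*
 * Example (illustrative sketch, not part of the bonding API): how an ARP
 * style monitor might consume slave_last_rx(), treating the slave as
 * responsive if something arrived within the last @delta jiffies.  The
 * in-tree monitor in bond_main.c applies additional rules (updelay,
 * downdelay, validation); the helper name here is hypothetical.
 */
static inline bool bond_example_recent_rx(struct bonding *bond,
                                          struct slave *slave,
                                          unsigned long delta)
{
        unsigned long last_rx = slave_last_rx(bond, slave);

        return !time_after(jiffies, last_rx + delta);
}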
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
                                                struct sk_buff *skb)
{
        return netpoll_send_skb(slave->np, skb);
}
#else
static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
                                                struct sk_buff *skb)
{
        BUG();
        return NETDEV_TX_OK;
}
#endif

static inline void bond_set_slave_inactive_flags(struct slave *slave,
                                                 bool notify)
{
        if (!bond_is_lb(slave->bond))
                bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
        if (!slave->bond->params.all_slaves_active)
                slave->inactive = 1;
}

static inline void bond_set_slave_active_flags(struct slave *slave,
                                               bool notify)
{
        bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
        slave->inactive = 0;
}

static inline bool bond_is_slave_inactive(struct slave *slave)
{
        return slave->inactive;
}

static inline void bond_propose_link_state(struct slave *slave, int state)
{
        slave->link_new_state = state;
}

static inline void bond_commit_link_state(struct slave *slave, bool notify)
{
        if (slave->link_new_state == BOND_LINK_NOCHANGE)
                return;

        slave->link = slave->link_new_state;
        if (notify) {
                bond_queue_slave_event(slave);
                bond_lower_state_changed(slave);
                slave->should_notify_link = 0;
        } else {
                if (slave->should_notify_link)
                        slave->should_notify_link = 0;
                else
                        slave->should_notify_link = 1;
        }
}

static inline void bond_set_slave_link_state(struct slave *slave, int state,
                                             bool notify)
{
        bond_propose_link_state(slave, state);
        bond_commit_link_state(slave, notify);
}

static inline void bond_slave_link_notify(struct bonding *bond)
{
        struct list_head *iter;
        struct slave *tmp;

        bond_for_each_slave(bond, tmp, iter) {
                if (tmp->should_notify_link) {
                        bond_queue_slave_event(tmp);
                        bond_lower_state_changed(tmp);
                        tmp->should_notify_link = 0;
                }
        }
}
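/*
 * Example (illustrative sketch, not part of the bonding API): the monitors
 * stage a link transition with bond_propose_link_state() while inspecting
 * the slaves and apply it later with bond_commit_link_state(); with
 * BOND_SLAVE_NOTIFY_LATER the notifications are then flushed in one pass.
 * Caller must hold RTNL.  The helper name is hypothetical.
 */
static inline void bond_example_take_link_down(struct bonding *bond,
                                               struct slave *slave)
{
        bond_propose_link_state(slave, BOND_LINK_DOWN);
        /* ...an inspect phase may reset this to BOND_LINK_NOCHANGE... */
        bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
        bond_slave_link_notify(bond);
}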
static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst,
                                       __be32 local)
{
        struct in_device *in_dev;
        __be32 addr = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);

        if (in_dev)
                addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
                                         RT_SCOPE_HOST);
        rcu_read_unlock();
        return addr;
}
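/*
 * Example (illustrative sketch, not part of the bonding API): picking a
 * sender address for an ARP probe towards @target, preferring an address
 * configured on the bond device itself.  This mirrors how the in-tree ARP
 * monitor chooses its source IP; the helper name here is hypothetical.
 */
static inline __be32 bond_example_arp_source(struct bonding *bond,
                                             __be32 target)
{
        return bond_confirm_addr(bond->dev, target, 0);
}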
struct bond_net {
        struct net *net;        /* Associated network namespace */
        struct list_head dev_list;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc_dir;
#endif
        struct class_attribute class_attr_bonding_masters;
};

int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                 struct netlink_ext_ack *extack);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
int bond_set_carrier(struct bonding *bond);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
void bond_destroy_debugfs(void);
void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
void bond_netlink_fini(void);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
                                              struct net_device *end_dev,
                                              int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
void bond_work_init_all(struct bonding *bond);

#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
void bond_remove_proc_entry(struct bonding *bond);
void bond_create_proc_dir(struct bond_net *bn);
void bond_destroy_proc_dir(struct bond_net *bn);
#else
static inline void bond_create_proc_entry(struct bonding *bond)
{
}

static inline void bond_remove_proc_entry(struct bonding *bond)
{
}

static inline void bond_create_proc_dir(struct bond_net *bn)
{
}

static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
#endif

static inline struct slave *bond_slave_has_mac(struct bonding *bond,
                                               const u8 *mac)
{
        struct list_head *iter;
        struct slave *tmp;

        bond_for_each_slave(bond, tmp, iter)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;

        return NULL;
}

/* Caller must hold rcu_read_lock() for read */
static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
                                                   const u8 *mac)
{
        struct list_head *iter;
        struct slave *tmp;

        bond_for_each_slave_rcu(bond, tmp, iter)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;

        return NULL;
}

/* Caller must hold rcu_read_lock() for read */
static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
{
        struct list_head *iter;
        struct slave *tmp;
        struct netdev_hw_addr *ha;

        bond_for_each_slave_rcu(bond, tmp, iter)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return true;

        if (netdev_uc_empty(bond->dev))
                return false;

        netdev_for_each_uc_addr(ha, bond->dev)
                if (ether_addr_equal_64bits(mac, ha->addr))
                        return true;

        return false;
}

/* Check if the ip is present in arp ip list, or first free slot if ip == 0
 * Returns -1 if not found, index if found
 */
static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
{
        int i;

        for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
                if (targets[i] == ip)
                        return i;
                else if (targets[i] == 0)
                        break;

        return -1;
}
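/*
 * Example (illustrative sketch, not part of the bonding API): adding a new
 * ARP target uses both behaviours of bond_get_targets_ip() - look up the
 * address to reject duplicates, then pass 0 to find the first free slot.
 * The real option handling in bond_options.c also holds RTNL and updates
 * the slaves; the helper name here is hypothetical.
 */
static inline int bond_example_add_arp_target(struct bonding *bond, __be32 ip)
{
        __be32 *targets = bond->params.arp_targets;
        int slot;

        if (bond_get_targets_ip(targets, ip) != -1)
                return -EEXIST;                 /* already present */

        slot = bond_get_targets_ip(targets, 0); /* first free slot */
        if (slot == -1)
                return -ENOSPC;                 /* table full */

        targets[slot] = ip;
        return 0;
}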
/* exported from bond_main.c */
extern unsigned int bond_net_id;

/* exported from bond_netlink.c */
extern struct rtnl_link_ops bond_link_ops;

/* exported from bond_sysfs_slave.c */
extern const struct sysfs_ops slave_sysfs_ops;

static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
        atomic_long_inc(&dev->tx_dropped);
        dev_kfree_skb_any(skb);
        return NET_XMIT_DROP;
}

#endif /* _NET_BONDING_H */