// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 *
 * The code this is based on carried the following copyright notice:
 * ---
 * (C) Copyright 2001-2006
 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
 * Re-worked by Ben Greear <greearb@candelatech.com>
 * ---
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
#include <linux/phy.h>

/* Size of the per-port hash tables indexed by Ethernet address. */
#define MACVLAN_HASH_BITS	8
#define MACVLAN_HASH_SIZE	(1<<MACVLAN_HASH_BITS)
/* Default cap on the deferred-broadcast queue (see bc_queue below). */
#define MACVLAN_DEFAULT_BC_QUEUE_LEN	1000

/* Bits stored in macvlan_port::flags. */
#define MACVLAN_F_PASSTHRU	1	/* port operates in passthru mode */
#define MACVLAN_F_ADDRCHANGE	2	/* lowerdev MAC change initiated by us */

/*
 * Per-lower-device state, shared by all macvlan instances stacked on the
 * same underlying netdev.  Installed as the lowerdev's rx_handler_data.
 */
struct macvlan_port {
	struct net_device	*dev;		/* the lower (physical) device */
	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE]; /* macvlans by dev_addr */
	struct list_head	vlans;		/* all macvlans on this port */
	struct sk_buff_head	bc_queue;	/* deferred broadcast/multicast skbs */
	struct work_struct	bc_work;	/* drains bc_queue in process context */
	u32			bc_queue_len_used; /* effective bc_queue length cap */
	int			bc_cutoff;	/* broadcast-filter cutoff; <0 disables */
	u32			flags;		/* MACVLAN_F_* bits */
	int			count;		/* number of macvlans on this port */
	struct hlist_head	vlan_source_hash[MACVLAN_HASH_SIZE]; /* source-mode MACs */
	DECLARE_BITMAP(bc_filter, MACVLAN_MC_FILTER_SZ); /* broadcast wants, hashed */
	DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); /* multicast wants, hashed */
	unsigned char		perm_addr[ETH_ALEN]; /* lowerdev addr to restore (passthru) */
};

/*
 * One MAC address registered for a source-mode macvlan.  Hashed into
 * macvlan_port::vlan_source_hash; freed via RCU.
 */
struct macvlan_source_entry {
	struct hlist_node	hlist;
	struct macvlan_dev __rcu *vlan;	/* owning macvlan; NULL once unlinked */
	/* padded to 8 bytes so ether_addr_equal_64bits() may be used on it */
	unsigned char		addr[6+2] __aligned(sizeof(u16));
	struct rcu_head		rcu;
};

/* Per-skb control block used while an skb sits on bc_queue. */
struct macvlan_skb_cb {
	const struct macvlan_dev *src;	/* originating macvlan, NULL if external */
};
71 #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) 72 73 static void macvlan_port_destroy(struct net_device *dev); 74 static void update_port_bc_queue_len(struct macvlan_port *port); 75 76 static inline bool macvlan_passthru(const struct macvlan_port *port) 77 { 78 return port->flags & MACVLAN_F_PASSTHRU; 79 } 80 81 static inline void macvlan_set_passthru(struct macvlan_port *port) 82 { 83 port->flags |= MACVLAN_F_PASSTHRU; 84 } 85 86 static inline bool macvlan_addr_change(const struct macvlan_port *port) 87 { 88 return port->flags & MACVLAN_F_ADDRCHANGE; 89 } 90 91 static inline void macvlan_set_addr_change(struct macvlan_port *port) 92 { 93 port->flags |= MACVLAN_F_ADDRCHANGE; 94 } 95 96 static inline void macvlan_clear_addr_change(struct macvlan_port *port) 97 { 98 port->flags &= ~MACVLAN_F_ADDRCHANGE; 99 } 100 101 /* Hash Ethernet address */ 102 static u32 macvlan_eth_hash(const unsigned char *addr) 103 { 104 u64 value = get_unaligned((u64 *)addr); 105 106 /* only want 6 bytes */ 107 #ifdef __BIG_ENDIAN 108 value >>= 16; 109 #else 110 value <<= 16; 111 #endif 112 return hash_64(value, MACVLAN_HASH_BITS); 113 } 114 115 static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) 116 { 117 return rcu_dereference(dev->rx_handler_data); 118 } 119 120 static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev) 121 { 122 return rtnl_dereference(dev->rx_handler_data); 123 } 124 125 static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, 126 const unsigned char *addr) 127 { 128 struct macvlan_dev *vlan; 129 u32 idx = macvlan_eth_hash(addr); 130 131 hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist, 132 lockdep_rtnl_is_held()) { 133 if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr)) 134 return vlan; 135 } 136 return NULL; 137 } 138 139 static struct macvlan_source_entry *macvlan_hash_lookup_source( 140 const struct macvlan_dev *vlan, 141 const unsigned char *addr) 142 
{ 143 struct macvlan_source_entry *entry; 144 u32 idx = macvlan_eth_hash(addr); 145 struct hlist_head *h = &vlan->port->vlan_source_hash[idx]; 146 147 hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { 148 if (ether_addr_equal_64bits(entry->addr, addr) && 149 rcu_access_pointer(entry->vlan) == vlan) 150 return entry; 151 } 152 return NULL; 153 } 154 155 static int macvlan_hash_add_source(struct macvlan_dev *vlan, 156 const unsigned char *addr) 157 { 158 struct macvlan_port *port = vlan->port; 159 struct macvlan_source_entry *entry; 160 struct hlist_head *h; 161 162 entry = macvlan_hash_lookup_source(vlan, addr); 163 if (entry) 164 return 0; 165 166 entry = kmalloc_obj(*entry); 167 if (!entry) 168 return -ENOMEM; 169 170 ether_addr_copy(entry->addr, addr); 171 RCU_INIT_POINTER(entry->vlan, vlan); 172 h = &port->vlan_source_hash[macvlan_eth_hash(addr)]; 173 hlist_add_head_rcu(&entry->hlist, h); 174 vlan->macaddr_count++; 175 176 return 0; 177 } 178 179 static void macvlan_hash_add(struct macvlan_dev *vlan) 180 { 181 struct macvlan_port *port = vlan->port; 182 const unsigned char *addr = vlan->dev->dev_addr; 183 u32 idx = macvlan_eth_hash(addr); 184 185 hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[idx]); 186 } 187 188 static void macvlan_hash_del_source(struct macvlan_source_entry *entry) 189 { 190 RCU_INIT_POINTER(entry->vlan, NULL); 191 hlist_del_rcu(&entry->hlist); 192 kfree_rcu(entry, rcu); 193 } 194 195 static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync) 196 { 197 hlist_del_rcu(&vlan->hlist); 198 if (sync) 199 synchronize_rcu(); 200 } 201 202 static void macvlan_hash_change_addr(struct macvlan_dev *vlan, 203 const unsigned char *addr) 204 { 205 macvlan_hash_del(vlan, true); 206 /* Now that we are unhashed it is safe to change the device 207 * address without confusing packet delivery. 
208 */ 209 eth_hw_addr_set(vlan->dev, addr); 210 macvlan_hash_add(vlan); 211 } 212 213 static bool macvlan_addr_busy(const struct macvlan_port *port, 214 const unsigned char *addr) 215 { 216 /* Test to see if the specified address is 217 * currently in use by the underlying device or 218 * another macvlan. 219 */ 220 if (!macvlan_passthru(port) && !macvlan_addr_change(port) && 221 ether_addr_equal_64bits(port->dev->dev_addr, addr)) 222 return true; 223 224 if (macvlan_hash_lookup(port, addr)) 225 return true; 226 227 return false; 228 } 229 230 231 static int macvlan_broadcast_one(struct sk_buff *skb, 232 const struct macvlan_dev *vlan, 233 const struct ethhdr *eth, bool local) 234 { 235 struct net_device *dev = vlan->dev; 236 237 if (local) 238 return __dev_forward_skb(dev, skb); 239 240 skb->dev = dev; 241 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) 242 skb->pkt_type = PACKET_BROADCAST; 243 else 244 skb->pkt_type = PACKET_MULTICAST; 245 246 return 0; 247 } 248 249 static u32 macvlan_hash_mix(const struct macvlan_dev *vlan) 250 { 251 return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT); 252 } 253 254 255 static unsigned int mc_hash(const struct macvlan_dev *vlan, 256 const unsigned char *addr) 257 { 258 u32 val = get_unaligned((u32 *)(addr + 2)); 259 260 val ^= macvlan_hash_mix(vlan); 261 return hash_32(val, MACVLAN_MC_FILTER_BITS); 262 } 263 264 static void macvlan_broadcast(struct sk_buff *skb, 265 const struct macvlan_port *port, 266 struct net_device *src, 267 enum macvlan_mode mode) 268 { 269 const struct ethhdr *eth = eth_hdr(skb); 270 const struct macvlan_dev *vlan; 271 struct sk_buff *nskb; 272 unsigned int i; 273 int err; 274 unsigned int hash; 275 276 if (skb->protocol == htons(ETH_P_PAUSE)) 277 return; 278 279 hash_for_each_rcu(port->vlan_hash, i, vlan, hlist) { 280 if (vlan->dev == src || !(vlan->mode & mode)) 281 continue; 282 283 hash = mc_hash(vlan, eth->h_dest); 284 if (!test_bit(hash, vlan->mc_filter)) 285 continue; 286 287 err = 
NET_RX_DROP; 288 nskb = skb_clone(skb, GFP_ATOMIC); 289 if (likely(nskb)) 290 err = macvlan_broadcast_one(nskb, vlan, eth, 291 mode == MACVLAN_MODE_BRIDGE) ?: 292 netif_rx(nskb); 293 macvlan_count_rx(vlan, skb->len + ETH_HLEN, 294 err == NET_RX_SUCCESS, true); 295 } 296 } 297 298 static void macvlan_multicast_rx(const struct macvlan_port *port, 299 const struct macvlan_dev *src, 300 struct sk_buff *skb) 301 { 302 if (!src) 303 /* frame comes from an external address */ 304 macvlan_broadcast(skb, port, NULL, 305 MACVLAN_MODE_PRIVATE | 306 MACVLAN_MODE_VEPA | 307 MACVLAN_MODE_PASSTHRU| 308 MACVLAN_MODE_BRIDGE); 309 else if (src->mode == MACVLAN_MODE_VEPA) 310 /* flood to everyone except source */ 311 macvlan_broadcast(skb, port, src->dev, 312 MACVLAN_MODE_VEPA | 313 MACVLAN_MODE_BRIDGE); 314 else 315 /* 316 * Flood to VEPA and bridge ports. We cannot distinguish 317 * a looped-back locally-originated multicast from one 318 * sent by an external source sharing the same source MAC 319 * (e.g., VRRP virtual MAC), so deliver to bridge ports 320 * as well to ensure correct reception in all cases. 
321 */ 322 macvlan_broadcast(skb, port, NULL, 323 MACVLAN_MODE_VEPA | 324 MACVLAN_MODE_BRIDGE); 325 } 326 327 static void macvlan_process_broadcast(struct work_struct *w) 328 { 329 struct macvlan_port *port = container_of(w, struct macvlan_port, 330 bc_work); 331 struct sk_buff *skb; 332 struct sk_buff_head list; 333 334 __skb_queue_head_init(&list); 335 336 spin_lock_bh(&port->bc_queue.lock); 337 skb_queue_splice_tail_init(&port->bc_queue, &list); 338 spin_unlock_bh(&port->bc_queue.lock); 339 340 while ((skb = __skb_dequeue(&list))) { 341 const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; 342 343 rcu_read_lock(); 344 macvlan_multicast_rx(port, src, skb); 345 rcu_read_unlock(); 346 347 if (src) 348 dev_put(src->dev); 349 consume_skb(skb); 350 351 cond_resched(); 352 } 353 } 354 355 static void macvlan_broadcast_enqueue(struct macvlan_port *port, 356 const struct macvlan_dev *src, 357 struct sk_buff *skb) 358 { 359 u32 bc_queue_len_used = READ_ONCE(port->bc_queue_len_used); 360 struct sk_buff *nskb; 361 int err = -ENOMEM; 362 363 if (skb_queue_len_lockless(&port->bc_queue) >= bc_queue_len_used) 364 goto err; 365 366 nskb = skb_clone(skb, GFP_ATOMIC); 367 if (!nskb) 368 goto err; 369 370 MACVLAN_SKB_CB(nskb)->src = src; 371 372 spin_lock(&port->bc_queue.lock); 373 if (skb_queue_len(&port->bc_queue) < bc_queue_len_used) { 374 if (src) 375 dev_hold(src->dev); 376 __skb_queue_tail(&port->bc_queue, nskb); 377 err = 0; 378 } 379 spin_unlock(&port->bc_queue.lock); 380 381 queue_work(system_dfl_wq, &port->bc_work); 382 383 if (err) 384 goto free_nskb; 385 386 return; 387 388 free_nskb: 389 kfree_skb_reason(nskb, SKB_DROP_REASON_MACVLAN_BROADCAST_BACKLOG); 390 err: 391 dev_core_stats_rx_dropped_inc(skb->dev); 392 } 393 394 static void macvlan_flush_sources(struct macvlan_port *port, 395 struct macvlan_dev *vlan) 396 { 397 struct macvlan_source_entry *entry; 398 struct hlist_node *next; 399 int i; 400 401 hash_for_each_safe(port->vlan_source_hash, i, next, entry, 
hlist) 402 if (rcu_access_pointer(entry->vlan) == vlan) 403 macvlan_hash_del_source(entry); 404 405 vlan->macaddr_count = 0; 406 } 407 408 static void macvlan_forward_source_one(struct sk_buff *skb, 409 struct macvlan_dev *vlan) 410 { 411 struct sk_buff *nskb; 412 struct net_device *dev; 413 int len; 414 int ret; 415 416 dev = vlan->dev; 417 if (unlikely(!(dev->flags & IFF_UP))) 418 return; 419 420 nskb = skb_clone(skb, GFP_ATOMIC); 421 if (!nskb) 422 return; 423 424 len = nskb->len + ETH_HLEN; 425 nskb->dev = dev; 426 427 if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr)) 428 nskb->pkt_type = PACKET_HOST; 429 430 ret = __netif_rx(nskb); 431 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); 432 } 433 434 static bool macvlan_forward_source(struct sk_buff *skb, 435 struct macvlan_port *port, 436 const unsigned char *addr) 437 { 438 struct macvlan_source_entry *entry; 439 u32 idx = macvlan_eth_hash(addr); 440 struct hlist_head *h = &port->vlan_source_hash[idx]; 441 bool consume = false; 442 443 hlist_for_each_entry_rcu(entry, h, hlist) { 444 if (ether_addr_equal_64bits(entry->addr, addr)) { 445 struct macvlan_dev *vlan = rcu_dereference(entry->vlan); 446 447 if (!vlan) 448 continue; 449 450 if (vlan->flags & MACVLAN_FLAG_NODST) 451 consume = true; 452 macvlan_forward_source_one(skb, vlan); 453 } 454 } 455 456 return consume; 457 } 458 459 /* called under rcu_read_lock() from netif_receive_skb */ 460 static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) 461 { 462 struct macvlan_port *port; 463 struct sk_buff *skb = *pskb; 464 const struct ethhdr *eth = eth_hdr(skb); 465 const struct macvlan_dev *vlan; 466 const struct macvlan_dev *src; 467 struct net_device *dev; 468 unsigned int len = 0; 469 int ret; 470 rx_handler_result_t handle_res; 471 472 /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ 473 if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) 474 return RX_HANDLER_PASS; 475 476 port = 
macvlan_port_get_rcu(skb->dev); 477 if (is_multicast_ether_addr(eth->h_dest)) { 478 unsigned int hash; 479 480 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); 481 if (!skb) 482 return RX_HANDLER_CONSUMED; 483 *pskb = skb; 484 eth = eth_hdr(skb); 485 if (macvlan_forward_source(skb, port, eth->h_source)) { 486 kfree_skb(skb); 487 return RX_HANDLER_CONSUMED; 488 } 489 src = macvlan_hash_lookup(port, eth->h_source); 490 if (src && src->mode != MACVLAN_MODE_VEPA && 491 src->mode != MACVLAN_MODE_BRIDGE) { 492 /* forward to original port. */ 493 vlan = src; 494 ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: 495 __netif_rx(skb); 496 handle_res = RX_HANDLER_CONSUMED; 497 goto out; 498 } 499 500 hash = mc_hash(NULL, eth->h_dest); 501 if (test_bit(hash, port->bc_filter)) 502 macvlan_broadcast_enqueue(port, src, skb); 503 else if (test_bit(hash, port->mc_filter)) 504 macvlan_multicast_rx(port, src, skb); 505 506 return RX_HANDLER_PASS; 507 } 508 509 if (macvlan_forward_source(skb, port, eth->h_source)) { 510 kfree_skb(skb); 511 return RX_HANDLER_CONSUMED; 512 } 513 if (macvlan_passthru(port)) 514 vlan = list_first_or_null_rcu(&port->vlans, 515 struct macvlan_dev, list); 516 else 517 vlan = macvlan_hash_lookup(port, eth->h_dest); 518 if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) 519 return RX_HANDLER_PASS; 520 521 dev = vlan->dev; 522 if (unlikely(!(dev->flags & IFF_UP))) { 523 kfree_skb(skb); 524 return RX_HANDLER_CONSUMED; 525 } 526 len = skb->len + ETH_HLEN; 527 skb = skb_share_check(skb, GFP_ATOMIC); 528 if (!skb) { 529 ret = NET_RX_DROP; 530 handle_res = RX_HANDLER_CONSUMED; 531 goto out; 532 } 533 534 *pskb = skb; 535 skb->dev = dev; 536 skb->pkt_type = PACKET_HOST; 537 538 ret = NET_RX_SUCCESS; 539 handle_res = RX_HANDLER_ANOTHER; 540 out: 541 macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); 542 return handle_res; 543 } 544 545 static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 546 { 547 const struct macvlan_dev 
*vlan = netdev_priv(dev); 548 const struct macvlan_port *port = vlan->port; 549 const struct macvlan_dev *dest; 550 551 if (vlan->mode == MACVLAN_MODE_BRIDGE) { 552 const struct ethhdr *eth = skb_eth_hdr(skb); 553 554 /* send to other bridge ports directly */ 555 if (is_multicast_ether_addr(eth->h_dest)) { 556 skb_reset_mac_header(skb); 557 macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); 558 goto xmit_world; 559 } 560 561 dest = macvlan_hash_lookup(port, eth->h_dest); 562 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { 563 /* send to lowerdev first for its network taps */ 564 dev_forward_skb(vlan->lowerdev, skb); 565 566 return NET_XMIT_SUCCESS; 567 } 568 } 569 xmit_world: 570 skb->dev = vlan->lowerdev; 571 return dev_queue_xmit_accel(skb, 572 netdev_get_sb_channel(dev) ? dev : NULL); 573 } 574 575 static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) 576 { 577 #ifdef CONFIG_NET_POLL_CONTROLLER 578 return netpoll_send_skb(vlan->netpoll, skb); 579 #else 580 BUG(); 581 return NETDEV_TX_OK; 582 #endif 583 } 584 585 static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 586 struct net_device *dev) 587 { 588 struct macvlan_dev *vlan = netdev_priv(dev); 589 unsigned int len = skb->len; 590 int ret; 591 592 if (unlikely(netpoll_tx_running(dev))) 593 return macvlan_netpoll_send_skb(vlan, skb); 594 595 ret = macvlan_queue_xmit(skb, dev); 596 597 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 598 struct vlan_pcpu_stats *pcpu_stats; 599 600 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); 601 u64_stats_update_begin(&pcpu_stats->syncp); 602 u64_stats_inc(&pcpu_stats->tx_packets); 603 u64_stats_add(&pcpu_stats->tx_bytes, len); 604 u64_stats_update_end(&pcpu_stats->syncp); 605 } else { 606 this_cpu_inc(vlan->pcpu_stats->tx_dropped); 607 } 608 return ret; 609 } 610 611 static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 612 unsigned short type, const void *daddr, 613 const void *saddr, 
unsigned len) 614 { 615 const struct macvlan_dev *vlan = netdev_priv(dev); 616 struct net_device *lowerdev = vlan->lowerdev; 617 618 return dev_hard_header(skb, lowerdev, type, daddr, 619 saddr ? : dev->dev_addr, len); 620 } 621 622 static const struct header_ops macvlan_hard_header_ops = { 623 .create = macvlan_hard_header, 624 .parse = eth_header_parse, 625 .cache = eth_header_cache, 626 .cache_update = eth_header_cache_update, 627 .parse_protocol = eth_header_parse_protocol, 628 }; 629 630 static int macvlan_open(struct net_device *dev) 631 { 632 struct macvlan_dev *vlan = netdev_priv(dev); 633 struct net_device *lowerdev = vlan->lowerdev; 634 int err; 635 636 if (macvlan_passthru(vlan->port)) { 637 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { 638 err = dev_set_promiscuity(lowerdev, 1); 639 if (err < 0) 640 goto out; 641 } 642 goto hash_add; 643 } 644 645 err = -EADDRINUSE; 646 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 647 goto out; 648 649 /* Attempt to populate accel_priv which is used to offload the L2 650 * forwarding requests for unicast packets. 651 */ 652 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) 653 vlan->accel_priv = 654 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); 655 656 /* If earlier attempt to offload failed, or accel_priv is not 657 * populated we must add the unicast address to the lower device. 
658 */ 659 if (IS_ERR_OR_NULL(vlan->accel_priv)) { 660 vlan->accel_priv = NULL; 661 err = dev_uc_add(lowerdev, dev->dev_addr); 662 if (err < 0) 663 goto out; 664 } 665 666 if (dev->flags & IFF_ALLMULTI) { 667 err = dev_set_allmulti(lowerdev, 1); 668 if (err < 0) 669 goto del_unicast; 670 } 671 672 if (dev->flags & IFF_PROMISC) { 673 err = dev_set_promiscuity(lowerdev, 1); 674 if (err < 0) 675 goto clear_multi; 676 } 677 678 hash_add: 679 macvlan_hash_add(vlan); 680 return 0; 681 682 clear_multi: 683 if (dev->flags & IFF_ALLMULTI) 684 dev_set_allmulti(lowerdev, -1); 685 del_unicast: 686 if (vlan->accel_priv) { 687 lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, 688 vlan->accel_priv); 689 vlan->accel_priv = NULL; 690 } else { 691 dev_uc_del(lowerdev, dev->dev_addr); 692 } 693 out: 694 return err; 695 } 696 697 static int macvlan_stop(struct net_device *dev) 698 { 699 struct macvlan_dev *vlan = netdev_priv(dev); 700 struct net_device *lowerdev = vlan->lowerdev; 701 702 if (vlan->accel_priv) { 703 lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, 704 vlan->accel_priv); 705 vlan->accel_priv = NULL; 706 } 707 708 dev_uc_unsync(lowerdev, dev); 709 dev_mc_unsync(lowerdev, dev); 710 711 if (macvlan_passthru(vlan->port)) { 712 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) 713 dev_set_promiscuity(lowerdev, -1); 714 goto hash_del; 715 } 716 717 if (dev->flags & IFF_ALLMULTI) 718 dev_set_allmulti(lowerdev, -1); 719 720 if (dev->flags & IFF_PROMISC) 721 dev_set_promiscuity(lowerdev, -1); 722 723 dev_uc_del(lowerdev, dev->dev_addr); 724 725 hash_del: 726 macvlan_hash_del(vlan, !dev->dismantle); 727 return 0; 728 } 729 730 static int macvlan_sync_address(struct net_device *dev, 731 const unsigned char *addr) 732 { 733 struct macvlan_dev *vlan = netdev_priv(dev); 734 struct net_device *lowerdev = vlan->lowerdev; 735 struct macvlan_port *port = vlan->port; 736 int err; 737 738 if (!(dev->flags & IFF_UP)) { 739 /* Just copy in the new address */ 740 eth_hw_addr_set(dev, addr); 
741 } else { 742 /* Rehash and update the device filters */ 743 if (macvlan_addr_busy(vlan->port, addr)) 744 return -EADDRINUSE; 745 746 if (!macvlan_passthru(port)) { 747 err = dev_uc_add(lowerdev, addr); 748 if (err) 749 return err; 750 751 dev_uc_del(lowerdev, dev->dev_addr); 752 } 753 754 macvlan_hash_change_addr(vlan, addr); 755 } 756 if (macvlan_passthru(port) && !macvlan_addr_change(port)) { 757 /* Since addr_change isn't set, we are here due to lower 758 * device change. Save the lower-dev address so we can 759 * restore it later. 760 */ 761 ether_addr_copy(vlan->port->perm_addr, 762 lowerdev->dev_addr); 763 } 764 macvlan_clear_addr_change(port); 765 return 0; 766 } 767 768 static int macvlan_set_mac_address(struct net_device *dev, void *p) 769 { 770 struct macvlan_dev *vlan = netdev_priv(dev); 771 struct sockaddr_storage *addr = p; 772 773 if (!is_valid_ether_addr(addr->__data)) 774 return -EADDRNOTAVAIL; 775 776 /* If the addresses are the same, this is a no-op */ 777 if (ether_addr_equal(dev->dev_addr, addr->__data)) 778 return 0; 779 780 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 781 macvlan_set_addr_change(vlan->port); 782 return dev_set_mac_address(vlan->lowerdev, addr, NULL); 783 } 784 785 if (macvlan_addr_busy(vlan->port, addr->__data)) 786 return -EADDRINUSE; 787 788 return macvlan_sync_address(dev, addr->__data); 789 } 790 791 static void macvlan_change_rx_flags(struct net_device *dev, int change) 792 { 793 struct macvlan_dev *vlan = netdev_priv(dev); 794 struct net_device *lowerdev = vlan->lowerdev; 795 796 if (dev->flags & IFF_UP) { 797 if (change & IFF_ALLMULTI) 798 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 799 if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC) 800 dev_set_promiscuity(lowerdev, 801 dev->flags & IFF_PROMISC ? 
1 : -1); 802 803 } 804 } 805 806 static void macvlan_compute_filter(unsigned long *mc_filter, 807 struct net_device *dev, 808 struct macvlan_dev *vlan, int cutoff) 809 { 810 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 811 bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ); 812 } else { 813 DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ); 814 struct netdev_hw_addr *ha; 815 816 bitmap_zero(filter, MACVLAN_MC_FILTER_SZ); 817 netdev_for_each_mc_addr(ha, dev) { 818 if (!vlan && ha->synced <= cutoff) 819 continue; 820 821 __set_bit(mc_hash(vlan, ha->addr), filter); 822 } 823 824 __set_bit(mc_hash(vlan, dev->broadcast), filter); 825 826 bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ); 827 } 828 } 829 830 static void macvlan_recompute_bc_filter(struct macvlan_dev *vlan) 831 { 832 if (vlan->port->bc_cutoff < 0) { 833 bitmap_zero(vlan->port->bc_filter, MACVLAN_MC_FILTER_SZ); 834 return; 835 } 836 837 macvlan_compute_filter(vlan->port->bc_filter, vlan->lowerdev, NULL, 838 vlan->port->bc_cutoff); 839 } 840 841 static void macvlan_set_mac_lists(struct net_device *dev) 842 { 843 struct macvlan_dev *vlan = netdev_priv(dev); 844 845 macvlan_compute_filter(vlan->mc_filter, dev, vlan, 0); 846 847 dev_uc_sync(vlan->lowerdev, dev); 848 dev_mc_sync(vlan->lowerdev, dev); 849 850 /* This is slightly inaccurate as we're including the subscription 851 * list of vlan->lowerdev too. 852 * 853 * Bug alert: This only works if everyone has the same broadcast 854 * address as lowerdev. As soon as someone changes theirs this 855 * will break. 856 * 857 * However, this is already broken as when you change your broadcast 858 * address we don't get called. 859 * 860 * The solution is to maintain a list of broadcast addresses like 861 * we do for uc/mc, if you care. 
862 */ 863 macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL, 864 0); 865 macvlan_recompute_bc_filter(vlan); 866 } 867 868 static void update_port_bc_cutoff(struct macvlan_dev *vlan, int cutoff) 869 { 870 if (vlan->port->bc_cutoff == cutoff) 871 return; 872 873 vlan->port->bc_cutoff = cutoff; 874 macvlan_recompute_bc_filter(vlan); 875 } 876 877 static int macvlan_change_mtu(struct net_device *dev, int new_mtu) 878 { 879 struct macvlan_dev *vlan = netdev_priv(dev); 880 881 if (vlan->lowerdev->mtu < new_mtu) 882 return -EINVAL; 883 WRITE_ONCE(dev->mtu, new_mtu); 884 return 0; 885 } 886 887 static int macvlan_hwtstamp_get(struct net_device *dev, 888 struct kernel_hwtstamp_config *cfg) 889 { 890 struct net_device *real_dev = macvlan_dev_real_dev(dev); 891 892 return generic_hwtstamp_get_lower(real_dev, cfg); 893 } 894 895 static int macvlan_hwtstamp_set(struct net_device *dev, 896 struct kernel_hwtstamp_config *cfg, 897 struct netlink_ext_ack *extack) 898 { 899 struct net_device *real_dev = macvlan_dev_real_dev(dev); 900 901 if (!net_eq(dev_net(dev), &init_net)) 902 return -EOPNOTSUPP; 903 904 return generic_hwtstamp_set_lower(real_dev, cfg, extack); 905 } 906 907 /* 908 * macvlan network devices have devices nesting below it and are a special 909 * "super class" of normal network devices; split their locks off into a 910 * separate class since they always nest. 
911 */ 912 static struct lock_class_key macvlan_netdev_addr_lock_key; 913 914 #define ALWAYS_ON_OFFLOADS \ 915 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ 916 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) 917 918 #define ALWAYS_ON_FEATURES ALWAYS_ON_OFFLOADS 919 920 #define MACVLAN_FEATURES \ 921 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 922 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \ 923 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ 924 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) 925 926 #define MACVLAN_STATE_MASK \ 927 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 928 929 static void macvlan_set_lockdep_class(struct net_device *dev) 930 { 931 netdev_lockdep_set_classes(dev); 932 lockdep_set_class(&dev->addr_list_lock, 933 &macvlan_netdev_addr_lock_key); 934 } 935 936 static int macvlan_init(struct net_device *dev) 937 { 938 struct macvlan_dev *vlan = netdev_priv(dev); 939 struct net_device *lowerdev = vlan->lowerdev; 940 struct macvlan_port *port = vlan->port; 941 942 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 943 (lowerdev->state & MACVLAN_STATE_MASK); 944 dev->features = lowerdev->features & MACVLAN_FEATURES; 945 dev->features |= ALWAYS_ON_FEATURES; 946 dev->hw_features |= NETIF_F_LRO; 947 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 948 dev->vlan_features |= ALWAYS_ON_OFFLOADS; 949 dev->hw_enc_features |= dev->features; 950 dev->lltx = true; 951 netif_inherit_tso_max(dev, lowerdev); 952 dev->hard_header_len = lowerdev->hard_header_len; 953 macvlan_set_lockdep_class(dev); 954 955 vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 956 if (!vlan->pcpu_stats) 957 return -ENOMEM; 958 959 port->count += 1; 960 961 /* Get macvlan's reference to lowerdev */ 962 netdev_hold(lowerdev, &vlan->dev_tracker, GFP_KERNEL); 963 964 return 0; 965 } 966 967 static void macvlan_uninit(struct net_device *dev) 968 { 969 struct macvlan_dev *vlan = 
netdev_priv(dev); 970 struct macvlan_port *port = vlan->port; 971 972 free_percpu(vlan->pcpu_stats); 973 974 macvlan_flush_sources(port, vlan); 975 port->count -= 1; 976 if (!port->count) 977 macvlan_port_destroy(port->dev); 978 } 979 980 static void macvlan_dev_get_stats64(struct net_device *dev, 981 struct rtnl_link_stats64 *stats) 982 { 983 struct macvlan_dev *vlan = netdev_priv(dev); 984 985 if (vlan->pcpu_stats) { 986 struct vlan_pcpu_stats *p; 987 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; 988 u32 rx_errors = 0, tx_dropped = 0; 989 unsigned int start; 990 int i; 991 992 for_each_possible_cpu(i) { 993 p = per_cpu_ptr(vlan->pcpu_stats, i); 994 do { 995 start = u64_stats_fetch_begin(&p->syncp); 996 rx_packets = u64_stats_read(&p->rx_packets); 997 rx_bytes = u64_stats_read(&p->rx_bytes); 998 rx_multicast = u64_stats_read(&p->rx_multicast); 999 tx_packets = u64_stats_read(&p->tx_packets); 1000 tx_bytes = u64_stats_read(&p->tx_bytes); 1001 } while (u64_stats_fetch_retry(&p->syncp, start)); 1002 1003 stats->rx_packets += rx_packets; 1004 stats->rx_bytes += rx_bytes; 1005 stats->multicast += rx_multicast; 1006 stats->tx_packets += tx_packets; 1007 stats->tx_bytes += tx_bytes; 1008 /* rx_errors & tx_dropped are u32, updated 1009 * without syncp protection. 
1010 */ 1011 rx_errors += READ_ONCE(p->rx_errors); 1012 tx_dropped += READ_ONCE(p->tx_dropped); 1013 } 1014 stats->rx_errors = rx_errors; 1015 stats->rx_dropped = rx_errors; 1016 stats->tx_dropped = tx_dropped; 1017 } 1018 } 1019 1020 static int macvlan_vlan_rx_add_vid(struct net_device *dev, 1021 __be16 proto, u16 vid) 1022 { 1023 struct macvlan_dev *vlan = netdev_priv(dev); 1024 struct net_device *lowerdev = vlan->lowerdev; 1025 1026 return vlan_vid_add(lowerdev, proto, vid); 1027 } 1028 1029 static int macvlan_vlan_rx_kill_vid(struct net_device *dev, 1030 __be16 proto, u16 vid) 1031 { 1032 struct macvlan_dev *vlan = netdev_priv(dev); 1033 struct net_device *lowerdev = vlan->lowerdev; 1034 1035 vlan_vid_del(lowerdev, proto, vid); 1036 return 0; 1037 } 1038 1039 static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 1040 struct net_device *dev, 1041 const unsigned char *addr, u16 vid, 1042 u16 flags, bool *notified, 1043 struct netlink_ext_ack *extack) 1044 { 1045 struct macvlan_dev *vlan = netdev_priv(dev); 1046 int err = -EINVAL; 1047 1048 /* Support unicast filter only on passthru devices. 1049 * Multicast filter should be allowed on all devices. 1050 */ 1051 if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) 1052 return -EOPNOTSUPP; 1053 1054 if (flags & NLM_F_REPLACE) 1055 return -EOPNOTSUPP; 1056 1057 if (is_unicast_ether_addr(addr)) 1058 err = dev_uc_add_excl(dev, addr); 1059 else if (is_multicast_ether_addr(addr)) 1060 err = dev_mc_add_excl(dev, addr); 1061 1062 return err; 1063 } 1064 1065 static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], 1066 struct net_device *dev, 1067 const unsigned char *addr, u16 vid, bool *notified, 1068 struct netlink_ext_ack *extack) 1069 { 1070 struct macvlan_dev *vlan = netdev_priv(dev); 1071 int err = -EINVAL; 1072 1073 /* Support unicast filter only on passthru devices. 1074 * Multicast filter should be allowed on all devices. 
1075 */ 1076 if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) 1077 return -EOPNOTSUPP; 1078 1079 if (is_unicast_ether_addr(addr)) 1080 err = dev_uc_del(dev, addr); 1081 else if (is_multicast_ether_addr(addr)) 1082 err = dev_mc_del(dev, addr); 1083 1084 return err; 1085 } 1086 1087 static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 1088 struct ethtool_drvinfo *drvinfo) 1089 { 1090 strscpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver)); 1091 strscpy(drvinfo->version, "0.1", sizeof(drvinfo->version)); 1092 } 1093 1094 static int macvlan_ethtool_get_link_ksettings(struct net_device *dev, 1095 struct ethtool_link_ksettings *cmd) 1096 { 1097 const struct macvlan_dev *vlan = netdev_priv(dev); 1098 1099 return __ethtool_get_link_ksettings(vlan->lowerdev, cmd); 1100 } 1101 1102 static int macvlan_ethtool_get_ts_info(struct net_device *dev, 1103 struct kernel_ethtool_ts_info *info) 1104 { 1105 struct net_device *real_dev = macvlan_dev_real_dev(dev); 1106 1107 return ethtool_get_ts_info_by_layer(real_dev, info); 1108 } 1109 1110 static netdev_features_t macvlan_fix_features(struct net_device *dev, 1111 netdev_features_t features) 1112 { 1113 struct macvlan_dev *vlan = netdev_priv(dev); 1114 netdev_features_t lowerdev_features = vlan->lowerdev->features; 1115 netdev_features_t mask; 1116 1117 features |= NETIF_F_ALL_FOR_ALL; 1118 features &= (vlan->set_features | ~MACVLAN_FEATURES); 1119 mask = features; 1120 1121 lowerdev_features &= (features | ~NETIF_F_LRO); 1122 features = netdev_increment_features(lowerdev_features, features, mask); 1123 features |= ALWAYS_ON_FEATURES; 1124 features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); 1125 1126 return features; 1127 } 1128 1129 #ifdef CONFIG_NET_POLL_CONTROLLER 1130 static void macvlan_dev_poll_controller(struct net_device *dev) 1131 { 1132 return; 1133 } 1134 1135 static int macvlan_dev_netpoll_setup(struct net_device *dev) 1136 { 1137 struct macvlan_dev *vlan = netdev_priv(dev); 1138 struct 
net_device *real_dev = vlan->lowerdev;
	struct netpoll *netpoll;
	int err;

	netpoll = kzalloc_obj(*netpoll);
	err = -ENOMEM;
	if (!netpoll)
		goto out;

	/* Attach netpoll to the lower device; macvlan itself has no ring. */
	err = __netpoll_setup(netpoll, real_dev);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	vlan->netpoll = netpoll;

out:
	return err;
}

static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct netpoll *netpoll = vlan->netpoll;

	if (!netpoll)
		return;

	/* Clear the pointer before freeing so no-one finds a stale one. */
	vlan->netpoll = NULL;

	__netpoll_free(netpoll);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

/* ndo_get_iflink: report the lower device's ifindex.  READ_ONCE pairs
 * with lockless readers; the lowerdev may move between namespaces.
 */
static int macvlan_dev_get_iflink(const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	return READ_ONCE(vlan->lowerdev->ifindex);
}

static const struct ethtool_ops macvlan_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= macvlan_ethtool_get_link_ksettings,
	.get_drvinfo		= macvlan_ethtool_get_drvinfo,
	.get_ts_info		= macvlan_ethtool_get_ts_info,
};

static const struct net_device_ops macvlan_netdev_ops = {
	.ndo_init		= macvlan_init,
	.ndo_uninit		= macvlan_uninit,
	.ndo_open		= macvlan_open,
	.ndo_stop		= macvlan_stop,
	.ndo_start_xmit		= macvlan_start_xmit,
	.ndo_change_mtu		= macvlan_change_mtu,
	.ndo_fix_features	= macvlan_fix_features,
	.ndo_change_rx_flags	= macvlan_change_rx_flags,
	.ndo_set_mac_address	= macvlan_set_mac_address,
	.ndo_set_rx_mode	= macvlan_set_mac_lists,
	.ndo_get_stats64	= macvlan_dev_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
	.ndo_fdb_add		= macvlan_fdb_add,
	.ndo_fdb_del		= macvlan_fdb_del,
	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macvlan_dev_poll_controller,
	.ndo_netpoll_setup	= macvlan_dev_netpoll_setup,
	.ndo_netpoll_cleanup	= macvlan_dev_netpoll_cleanup,
#endif
	.ndo_get_iflink		= macvlan_dev_get_iflink,
	.ndo_features_check	= passthru_features_check,
	.ndo_hwtstamp_get	= macvlan_hwtstamp_get,
	.ndo_hwtstamp_set	= macvlan_hwtstamp_set,
};

/* priv_destructor: runs when the netdev is freed.  Only drops the
 * reference on the lower device; the netdev itself is freed by the core
 * (needs_free_netdev is set in macvlan_common_setup()).
 */
static void macvlan_dev_free(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	/* Get rid of the macvlan's reference to lowerdev */
	netdev_put(vlan->lowerdev, &vlan->dev_tracker);
}

/* Common netdev setup shared by macvlan and macvtap. */
void macvlan_common_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
	dev->max_mtu		= ETH_MAX_MTU;
	dev->priv_flags	       &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	dev->priv_flags	       |= IFF_UNICAST_FLT;
	dev->change_proto_down	= true;
	dev->netdev_ops		= &macvlan_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= macvlan_dev_free;
	dev->header_ops		= &macvlan_hard_header_ops;
	dev->ethtool_ops	= &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);

static void macvlan_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	/* pure software device: no TX queue of its own */
	dev->priv_flags |= IFF_NO_QUEUE;
}

/* Create the shared macvlan_port for a lower device and hook up the RX
 * handler.  Called for the first macvlan created on that device.
 */
static int macvlan_port_create(struct net_device *dev)
{
	struct macvlan_port *port;
	unsigned int i;
	int err;

	/* macvlans ride only on Ethernet-type, non-loopback devices */
	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
		return -EINVAL;

	/* only one rx_handler may be attached to a lower device */
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	port = kzalloc_obj(*port);
	if (port == NULL)
		return -ENOMEM;

	port->dev = dev;
	/* remember the original MAC so passthru can restore it on destroy */
	ether_addr_copy(port->perm_addr, dev->dev_addr);
	INIT_LIST_HEAD(&port->vlans);
	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&port->vlan_hash[i]);
	for (i = 0; i < MACVLAN_HASH_SIZE;
i++)
		INIT_HLIST_HEAD(&port->vlan_source_hash[i]);

	/* bc_queue_len_used is recomputed from the per-vlan requests in
	 * update_port_bc_queue_len(); bc_cutoff defaults to 1.
	 */
	port->bc_queue_len_used = 0;
	port->bc_cutoff = 1;
	skb_queue_head_init(&port->bc_queue);
	INIT_WORK(&port->bc_work, macvlan_process_broadcast);

	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
	if (err)
		kfree(port);
	else
		dev->priv_flags |= IFF_MACVLAN_PORT;
	return err;
}

/* Tear down the shared port: unhook the RX handler, drain the broadcast
 * queue and, for passthru, restore the lower device's original MAC.
 */
static void macvlan_port_destroy(struct net_device *dev)
{
	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
	struct sk_buff *skb;

	dev->priv_flags &= ~IFF_MACVLAN_PORT;
	netdev_rx_handler_unregister(dev);

	/* After this point, no packet can schedule bc_work anymore,
	 * but we need to cancel it and purge left skbs if any.
	 */
	cancel_work_sync(&port->bc_work);

	while ((skb = __skb_dequeue(&port->bc_queue))) {
		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;

		/* each queued skb holds a reference on its source device */
		if (src)
			dev_put(src->dev);

		kfree_skb(skb);
	}

	/* If the lower device address has been changed by passthru
	 * macvlan, put it back.
	 */
	if (macvlan_passthru(port) &&
	    !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
		struct sockaddr_storage ss;

		ss.ss_family = port->dev->type;
		memcpy(&ss.__data, port->perm_addr, port->dev->addr_len);
		dev_set_mac_address(port->dev, &ss, NULL);
	}

	kfree(port);
}

/* rtnl_link_ops->validate: sanity-check netlink attributes before any
 * state is touched.  Returns 0 or a negative errno.
 */
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct nlattr *nla, *head;
	int rem, len;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

	/* only NOPROMISC and NODST flags are defined */
	if (data[IFLA_MACVLAN_FLAGS] &&
	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~(MACVLAN_FLAG_NOPROMISC |
						      MACVLAN_FLAG_NODST))
		return -EINVAL;

	if (data[IFLA_MACVLAN_MODE]) {
		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
		case MACVLAN_MODE_PRIVATE:
		case MACVLAN_MODE_VEPA:
		case MACVLAN_MODE_BRIDGE:
		case MACVLAN_MODE_PASSTHRU:
		case MACVLAN_MODE_SOURCE:
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACVLAN_MACADDR_MODE]) {
		switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) {
		case MACVLAN_MACADDR_ADD:
		case MACVLAN_MACADDR_DEL:
		case MACVLAN_MACADDR_FLUSH:
		case MACVLAN_MACADDR_SET:
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACVLAN_MACADDR]) {
		if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(data[IFLA_MACVLAN_MACADDR])))
			return -EADDRNOTAVAIL;
	}

	/* MACADDR_DATA is a nested list of IFLA_MACVLAN_MACADDR entries */
	if (data[IFLA_MACVLAN_MACADDR_DATA]) {
		head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
		len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);

		nla_for_each_attr(nla, head, len, rem) {
			if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
nla_len(nla) != ETH_ALEN)
				return -EINVAL;

			if (!is_valid_ether_addr(nla_data(nla)))
				return -EADDRNOTAVAIL;
		}
	}

	/* MACADDR_COUNT is read-only (reported by fill_info) */
	if (data[IFLA_MACVLAN_MACADDR_COUNT])
		return -EINVAL;

	return 0;
}

/*
 * reconfigure list of remote source mac address
 * (only for macvlan devices in source mode)
 * Note regarding alignment: all netlink data is aligned to 4 Byte, which
 * suffices for both ether_addr_copy and ether_addr_equal_64bits usage.
 */
static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
				      struct nlattr *data[])
{
	char *addr = NULL;
	int ret, rem, len;
	struct nlattr *nla, *head;
	struct macvlan_source_entry *entry;

	if (data[IFLA_MACVLAN_MACADDR])
		addr = nla_data(data[IFLA_MACVLAN_MACADDR]);

	if (mode == MACVLAN_MACADDR_ADD) {
		if (!addr)
			return -EINVAL;

		return macvlan_hash_add_source(vlan, addr);

	} else if (mode == MACVLAN_MACADDR_DEL) {
		if (!addr)
			return -EINVAL;

		entry = macvlan_hash_lookup_source(vlan, addr);
		if (entry) {
			macvlan_hash_del_source(entry);
			vlan->macaddr_count--;
		}
	} else if (mode == MACVLAN_MACADDR_FLUSH) {
		macvlan_flush_sources(vlan->port, vlan);
	} else if (mode == MACVLAN_MACADDR_SET) {
		/* SET = flush, then re-add MACADDR and/or MACADDR_DATA */
		macvlan_flush_sources(vlan->port, vlan);

		if (addr) {
			ret = macvlan_hash_add_source(vlan, addr);
			if (ret)
				return ret;
		}

		if (!data[IFLA_MACVLAN_MACADDR_DATA])
			return 0;

		head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
		len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);

		nla_for_each_attr(nla, head, len, rem) {
			addr = nla_data(nla);
			ret = macvlan_hash_add_source(vlan, addr);
			if (ret)
				return ret;
		}
	} else {
		return -EINVAL;
	}

	return 0;
}

/* Shared RTM_NEWLINK handler for macvlan and macvtap: resolve the lower
 * device from IFLA_LINK, create the shared macvlan_port on first use,
 * apply the netlink configuration and register the new device.
 * Returns 0 or a negative errno; on failure a port created here is
 * destroyed again.
 */
int macvlan_common_newlink(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct net_device *lowerdev;
	struct macvlan_port *port;
	bool create = false;
	int macmode;
	int err;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	lowerdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	if (lowerdev == NULL)
		return -ENODEV;

	/* When creating macvlans or macvtaps on top of other macvlans - use
	 * the real device as the lowerdev.
	 */
	if (netif_is_macvlan(lowerdev))
		lowerdev = macvlan_dev_real_dev(lowerdev);

	if (!tb[IFLA_MTU])
		dev->mtu = lowerdev->mtu;
	else if (dev->mtu > lowerdev->mtu)
		return -EINVAL;

	/* MTU range: 68 - lowerdev->max_mtu */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = lowerdev->max_mtu;

	if (!tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	if (!netif_is_macvlan_port(lowerdev)) {
		err = macvlan_port_create(lowerdev);
		if (err < 0)
			return err;
		create = true;
	}
	port = macvlan_port_get_rtnl(lowerdev);

	/* Only 1 macvlan device can be created in passthru mode */
	if (macvlan_passthru(port)) {
		/* The macvlan port must be not created this time,
		 * still goto destroy_macvlan_port for readability.
		 */
		err = -EINVAL;
		goto destroy_macvlan_port;
	}

	vlan->lowerdev = lowerdev;
	vlan->dev      = dev;
	vlan->port     = port;
	vlan->set_features = MACVLAN_FEATURES;

	vlan->mode     = MACVLAN_MODE_VEPA;
	if (data && data[IFLA_MACVLAN_MODE])
		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);

	if (data && data[IFLA_MACVLAN_FLAGS])
		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);

	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
		/* passthru requires the port to be otherwise unused */
		if (port->count) {
			err = -EINVAL;
			goto destroy_macvlan_port;
		}
		macvlan_set_passthru(port);
		eth_hw_addr_inherit(dev, lowerdev);
	}

	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
		if (vlan->mode != MACVLAN_MODE_SOURCE) {
			err = -EINVAL;
			goto destroy_macvlan_port;
		}
		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
		err = macvlan_changelink_sources(vlan, macmode, data);
		if (err)
			goto destroy_macvlan_port;
	}

	vlan->bc_queue_len_req = MACVLAN_DEFAULT_BC_QUEUE_LEN;
	if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN])
		vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]);

	if (data && data[IFLA_MACVLAN_BC_CUTOFF])
		update_port_bc_cutoff(
			vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF]));

	err = register_netdevice(dev);
	if (err < 0)
		goto destroy_macvlan_port;

	dev->priv_flags |= IFF_MACVLAN;
	err = netdev_upper_dev_link(lowerdev, dev, extack);
	if (err)
		goto unregister_netdev;

	list_add_tail_rcu(&vlan->list, &port->vlans);
	update_port_bc_queue_len(vlan->port);
	netif_stacked_transfer_operstate(lowerdev, dev);
	linkwatch_fire_event(dev);

	return 0;

unregister_netdev:
	/* macvlan_uninit would free the macvlan port */
	unregister_netdevice(dev);
	return err;
destroy_macvlan_port:
	/* the macvlan port may be freed by macvlan_uninit when fail to register.
	 * so we destroy the macvlan port only when it's valid.
	 */
	if (macvlan_port_get_rtnl(lowerdev)) {
		macvlan_flush_sources(port, vlan);
		if (create)
			macvlan_port_destroy(port->dev);
	}
	/* @dev might have been made visible before an error was detected.
	 * Make sure to observe an RCU grace period before our caller
	 * (rtnl_newlink()) frees it.
	 */
	synchronize_net();
	return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);

static int macvlan_newlink(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	return macvlan_common_newlink(dev, params, extack);
}

/* rtnl_link_ops->dellink: detach from the port and queue the device for
 * unregistration; source-mode MAC entries are flushed first.
 */
void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->mode == MACVLAN_MODE_SOURCE)
		macvlan_flush_sources(vlan->port, vlan);
	list_del_rcu(&vlan->list);
	update_port_bc_queue_len(vlan->port);
	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);

static int macvlan_changelink(struct net_device *dev,
			      struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	enum macvlan_mode mode;
	bool set_mode = false;
	enum macvlan_macaddr_mode macmode;
	int ret;

	/* Validate mode, but don't set yet: setting flags may fail.
 */
	if (data && data[IFLA_MACVLAN_MODE]) {
		set_mode = true;
		mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
		/* Passthrough mode can't be set or cleared dynamically */
		if ((mode == MACVLAN_MODE_PASSTHRU) !=
		    (vlan->mode == MACVLAN_MODE_PASSTHRU))
			return -EINVAL;
		/* leaving source mode drops all remote source entries */
		if (vlan->mode == MACVLAN_MODE_SOURCE &&
		    vlan->mode != mode)
			macvlan_flush_sources(vlan->port, vlan);
	}

	if (data && data[IFLA_MACVLAN_FLAGS]) {
		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
		/* toggling NOPROMISC on a passthru device adjusts the lower
		 * device's promiscuity counter accordingly
		 */
		if (macvlan_passthru(vlan->port) && promisc) {
			int err;

			if (flags & MACVLAN_FLAG_NOPROMISC)
				err = dev_set_promiscuity(vlan->lowerdev, -1);
			else
				err = dev_set_promiscuity(vlan->lowerdev, 1);
			if (err < 0)
				return err;
		}
		vlan->flags = flags;
	}

	if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) {
		vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]);
		update_port_bc_queue_len(vlan->port);
	}

	if (data && data[IFLA_MACVLAN_BC_CUTOFF])
		update_port_bc_cutoff(
			vlan, nla_get_s32(data[IFLA_MACVLAN_BC_CUTOFF]));

	/* mode is committed only after the flag changes succeeded */
	if (set_mode)
		vlan->mode = mode;
	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
		if (vlan->mode != MACVLAN_MODE_SOURCE)
			return -EINVAL;
		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
		ret = macvlan_changelink_sources(vlan, macmode, data);
		if (ret)
			return ret;
	}
	return 0;
}

/* Netlink size of the source-MAC list: one nest header plus one
 * IFLA_MACVLAN_MACADDR attribute per stored address.
 */
static size_t macvlan_get_size_mac(const struct macvlan_dev *vlan)
{
	if (vlan->macaddr_count == 0)
		return 0;
	return nla_total_size(0) /* IFLA_MACVLAN_MACADDR_DATA */
		+ vlan->macaddr_count * nla_total_size(sizeof(u8) * ETH_ALEN);
}

/* rtnl_link_ops->get_size: upper bound for macvlan_fill_info() output. */
static size_t macvlan_get_size(const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	return (0
		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */
		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
		+ nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */
		+ macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */
		+ nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */
		+ nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */
		);
}

/* Emit every source-MAC entry of @vlan found in hash bucket @i.
 * Returns nonzero if the skb ran out of room.
 */
static int macvlan_fill_info_macaddr(struct sk_buff *skb,
				     const struct macvlan_dev *vlan,
				     const int i)
{
	struct hlist_head *h = &vlan->port->vlan_source_hash[i];
	struct macvlan_source_entry *entry;

	hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
		/* bucket is shared by all vlans on the port; filter ours */
		if (rcu_access_pointer(entry->vlan) != vlan)
			continue;
		if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
			return 1;
	}
	return 0;
}

/* rtnl_link_ops->fill_info: dump the device configuration to netlink. */
static int macvlan_fill_info(struct sk_buff *skb,
			     const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port = vlan->port;
	int i;
	struct nlattr *nest;

	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count))
		goto nla_put_failure;
	if (vlan->macaddr_count > 0) {
		nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA);
		if (nest == NULL)
			goto nla_put_failure;

		for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
			if (macvlan_fill_info_macaddr(skb, vlan, i))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN, vlan->bc_queue_len_req))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED,
			READ_ONCE(port->bc_queue_len_used)))
		goto nla_put_failure;
	/* BC_CUTOFF is only reported when it differs from the default (1) */
	if (port->bc_cutoff != 1 &&
	    nla_put_s32(skb, IFLA_MACVLAN_BC_CUTOFF, port->bc_cutoff))
		goto
nla_put_failure; 1744 return 0; 1745 1746 nla_put_failure: 1747 return -EMSGSIZE; 1748 } 1749 1750 static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { 1751 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 1752 [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 }, 1753 [IFLA_MACVLAN_MACADDR_MODE] = { .type = NLA_U32 }, 1754 [IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1755 [IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED }, 1756 [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 }, 1757 [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 }, 1758 [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT }, 1759 [IFLA_MACVLAN_BC_CUTOFF] = { .type = NLA_S32 }, 1760 }; 1761 1762 int macvlan_link_register(struct rtnl_link_ops *ops) 1763 { 1764 /* common fields */ 1765 ops->validate = macvlan_validate; 1766 ops->maxtype = IFLA_MACVLAN_MAX; 1767 ops->policy = macvlan_policy; 1768 ops->changelink = macvlan_changelink; 1769 ops->get_size = macvlan_get_size; 1770 ops->fill_info = macvlan_fill_info; 1771 1772 return rtnl_link_register(ops); 1773 }; 1774 EXPORT_SYMBOL_GPL(macvlan_link_register); 1775 1776 static struct net *macvlan_get_link_net(const struct net_device *dev) 1777 { 1778 return dev_net(macvlan_dev_real_dev(dev)); 1779 } 1780 1781 static struct rtnl_link_ops macvlan_link_ops = { 1782 .kind = "macvlan", 1783 .setup = macvlan_setup, 1784 .newlink = macvlan_newlink, 1785 .dellink = macvlan_dellink, 1786 .get_link_net = macvlan_get_link_net, 1787 .priv_size = sizeof(struct macvlan_dev), 1788 }; 1789 1790 static void update_port_bc_queue_len(struct macvlan_port *port) 1791 { 1792 u32 max_bc_queue_len_req = 0; 1793 struct macvlan_dev *vlan; 1794 1795 list_for_each_entry(vlan, &port->vlans, list) { 1796 if (vlan->bc_queue_len_req > max_bc_queue_len_req) 1797 max_bc_queue_len_req = vlan->bc_queue_len_req; 1798 } 1799 WRITE_ONCE(port->bc_queue_len_used, max_bc_queue_len_req); 1800 } 1801 1802 static int macvlan_device_event(struct notifier_block 
*unused, 1803 unsigned long event, void *ptr) 1804 { 1805 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1806 struct macvlan_dev *vlan, *next; 1807 struct macvlan_port *port; 1808 LIST_HEAD(list_kill); 1809 1810 if (!netif_is_macvlan_port(dev)) 1811 return NOTIFY_DONE; 1812 1813 port = macvlan_port_get_rtnl(dev); 1814 1815 switch (event) { 1816 case NETDEV_UP: 1817 case NETDEV_DOWN: 1818 case NETDEV_CHANGE: 1819 list_for_each_entry(vlan, &port->vlans, list) 1820 netif_stacked_transfer_operstate(vlan->lowerdev, 1821 vlan->dev); 1822 break; 1823 case NETDEV_FEAT_CHANGE: 1824 list_for_each_entry(vlan, &port->vlans, list) { 1825 netif_inherit_tso_max(vlan->dev, dev); 1826 netdev_update_features(vlan->dev); 1827 } 1828 break; 1829 case NETDEV_CHANGEMTU: 1830 list_for_each_entry(vlan, &port->vlans, list) { 1831 if (vlan->dev->mtu <= dev->mtu) 1832 continue; 1833 dev_set_mtu(vlan->dev, dev->mtu); 1834 } 1835 break; 1836 case NETDEV_CHANGEADDR: 1837 if (!macvlan_passthru(port)) 1838 return NOTIFY_DONE; 1839 1840 vlan = list_first_entry_or_null(&port->vlans, 1841 struct macvlan_dev, 1842 list); 1843 1844 if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) 1845 return NOTIFY_BAD; 1846 1847 break; 1848 case NETDEV_UNREGISTER: 1849 /* twiddle thumbs on netns device moves */ 1850 if (dev->reg_state != NETREG_UNREGISTERING) 1851 break; 1852 1853 list_for_each_entry_safe(vlan, next, &port->vlans, list) 1854 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); 1855 unregister_netdevice_many(&list_kill); 1856 break; 1857 case NETDEV_PRE_TYPE_CHANGE: 1858 /* Forbid underlying device to change its type. 
*/ 1859 return NOTIFY_BAD; 1860 1861 case NETDEV_NOTIFY_PEERS: 1862 case NETDEV_BONDING_FAILOVER: 1863 case NETDEV_RESEND_IGMP: 1864 /* Propagate to all vlans */ 1865 list_for_each_entry(vlan, &port->vlans, list) 1866 call_netdevice_notifiers(event, vlan->dev); 1867 } 1868 return NOTIFY_DONE; 1869 } 1870 1871 static struct notifier_block macvlan_notifier_block __read_mostly = { 1872 .notifier_call = macvlan_device_event, 1873 }; 1874 1875 static int __init macvlan_init_module(void) 1876 { 1877 int err; 1878 1879 register_netdevice_notifier(&macvlan_notifier_block); 1880 1881 err = macvlan_link_register(&macvlan_link_ops); 1882 if (err < 0) 1883 goto err1; 1884 return 0; 1885 err1: 1886 unregister_netdevice_notifier(&macvlan_notifier_block); 1887 return err; 1888 } 1889 1890 static void __exit macvlan_cleanup_module(void) 1891 { 1892 rtnl_link_unregister(&macvlan_link_ops); 1893 unregister_netdevice_notifier(&macvlan_notifier_block); 1894 } 1895 1896 module_init(macvlan_init_module); 1897 module_exit(macvlan_cleanup_module); 1898 1899 MODULE_LICENSE("GPL"); 1900 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 1901 MODULE_DESCRIPTION("Driver for MAC address based VLANs"); 1902 MODULE_ALIAS_RTNL_LINK("macvlan"); 1903