/*
 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * The code this is based on carried the following copyright notice:
 * ---
 * (C) Copyright 2001-2006
 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
 * Re-worked by Ben Greear <greearb@candelatech.com>
 * ---
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>

#define MACVLAN_HASH_SIZE	(1 << BITS_PER_BYTE)

struct macvlan_port {
	struct net_device	*dev;
	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
	struct list_head	vlans;
	struct rcu_head		rcu;
	struct sk_buff_head	bc_queue;
	struct work_struct	bc_work;
	bool			passthru;
	int			count;
};

struct macvlan_skb_cb {
	const struct macvlan_dev *src;
};

#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))

static void macvlan_port_destroy(struct net_device *dev);

static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)

static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
					       const unsigned char *addr)
{
	struct macvlan_dev *vlan;

	hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
		if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
			return vlan;
	}
	return NULL;
}

static void macvlan_hash_add(struct macvlan_dev *vlan)
{
	struct macvlan_port *port = vlan->port;
	const unsigned char *addr = vlan->dev->dev_addr;

	hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]);
}

static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
{
	hlist_del_rcu(&vlan->hlist);
	if (sync)
		synchronize_rcu();
}

static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
				     const unsigned char *addr)
{
	macvlan_hash_del(vlan, true);
	/* Now that we are unhashed it is safe to change the device
	 * address without confusing packet delivery.
	 */
	memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
	macvlan_hash_add(vlan);
}

static int macvlan_addr_busy(const struct macvlan_port *port,
			     const unsigned char *addr)
{
	/* Test to see if the specified address is
	 * currently in use by the underlying device or
	 * another macvlan.
	 */
	if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
		return 1;

	if (macvlan_hash_lookup(port, addr))
		return 1;

	return 0;
}

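/* Deliver one copy of a broadcast/multicast frame to a single macvlan.
 * Frames that originated on another macvlan of the same port (bridge
 * mode) reuse the generic local forwarding path; frames from the wire
 * only need their receiving device and pkt_type adjusted.
 */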
static int macvlan_broadcast_one(struct sk_buff *skb,
				 const struct macvlan_dev *vlan,
				 const struct ethhdr *eth, bool local)
{
	struct net_device *dev = vlan->dev;

	if (local)
		return __dev_forward_skb(dev, skb);

	skb->dev = dev;
	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	return 0;
}

static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
{
	return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
}

static unsigned int mc_hash(const struct macvlan_dev *vlan,
			    const unsigned char *addr)
{
	u32 val = __get_unaligned_cpu32(addr + 2);

	val ^= macvlan_hash_mix(vlan);
	return hash_32(val, MACVLAN_MC_FILTER_BITS);
}

static void macvlan_broadcast(struct sk_buff *skb,
			      const struct macvlan_port *port,
			      struct net_device *src,
			      enum macvlan_mode mode)
{
	const struct ethhdr *eth = eth_hdr(skb);
	const struct macvlan_dev *vlan;
	struct sk_buff *nskb;
	unsigned int i;
	int err;
	unsigned int hash;

	if (skb->protocol == htons(ETH_P_PAUSE))
		return;

	for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
			if (vlan->dev == src || !(vlan->mode & mode))
				continue;

			hash = mc_hash(vlan, eth->h_dest);
			if (!test_bit(hash, vlan->mc_filter))
				continue;

			err = NET_RX_DROP;
			nskb = skb_clone(skb, GFP_ATOMIC);
			if (likely(nskb))
				err = macvlan_broadcast_one(
					nskb, vlan, eth,
					mode == MACVLAN_MODE_BRIDGE) ?:
				      netif_rx_ni(nskb);
			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
					 err == NET_RX_SUCCESS, 1);
		}
	}
}

static void macvlan_process_broadcast(struct work_struct *w)
{
	struct macvlan_port *port = container_of(w, struct macvlan_port,
						 bc_work);
	struct sk_buff *skb;
	struct sk_buff_head list;

	skb_queue_head_init(&list);

	spin_lock_bh(&port->bc_queue.lock);
	skb_queue_splice_tail_init(&port->bc_queue, &list);
	spin_unlock_bh(&port->bc_queue.lock);

	while ((skb = __skb_dequeue(&list))) {
		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;

		rcu_read_lock();

		if (!src)
			/* frame comes from an external address */
			macvlan_broadcast(skb, port, NULL,
					  MACVLAN_MODE_PRIVATE |
					  MACVLAN_MODE_VEPA |
					  MACVLAN_MODE_PASSTHRU |
					  MACVLAN_MODE_BRIDGE);
		else if (src->mode == MACVLAN_MODE_VEPA)
			/* flood to everyone except source */
			macvlan_broadcast(skb, port, src->dev,
					  MACVLAN_MODE_VEPA |
					  MACVLAN_MODE_BRIDGE);
		else
			/*
			 * flood only to VEPA ports, bridge ports
			 * already saw the frame on the way out.
			 */
			macvlan_broadcast(skb, port, src->dev,
					  MACVLAN_MODE_VEPA);

		rcu_read_unlock();

		kfree_skb(skb);
	}
}

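/* Queue a broadcast/multicast frame for deferred flooding from the
 * workqueue.  The frame is cloned so the caller keeps ownership of the
 * original; the clone is dropped and rx_dropped incremented when the
 * per-port queue already holds tx_queue_len entries.
 */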
static void macvlan_broadcast_enqueue(struct macvlan_port *port,
				      struct sk_buff *skb)
{
	struct sk_buff *nskb;
	int err = -ENOMEM;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		goto err;

	spin_lock(&port->bc_queue.lock);
	if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
		__skb_queue_tail(&port->bc_queue, nskb);
		err = 0;
	}
	spin_unlock(&port->bc_queue.lock);

	if (err)
		goto free_nskb;

	schedule_work(&port->bc_work);
	return;

free_nskb:
	kfree_skb(nskb);
err:
	atomic_long_inc(&skb->dev->rx_dropped);
}

/* called under rcu_read_lock() from netif_receive_skb */
static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
{
	struct macvlan_port *port;
	struct sk_buff *skb = *pskb;
	const struct ethhdr *eth = eth_hdr(skb);
	const struct macvlan_dev *vlan;
	const struct macvlan_dev *src;
	struct net_device *dev;
	unsigned int len = 0;
	int ret = NET_RX_DROP;

	port = macvlan_port_get_rcu(skb->dev);
	if (is_multicast_ether_addr(eth->h_dest)) {
		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
		if (!skb)
			return RX_HANDLER_CONSUMED;
		eth = eth_hdr(skb);
		src = macvlan_hash_lookup(port, eth->h_source);
		if (src && src->mode != MACVLAN_MODE_VEPA &&
		    src->mode != MACVLAN_MODE_BRIDGE) {
			/* forward to original port. */
			vlan = src;
			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
			      netif_rx(skb);
			goto out;
		}

		MACVLAN_SKB_CB(skb)->src = src;
		macvlan_broadcast_enqueue(port, skb);

		return RX_HANDLER_PASS;
	}

	if (port->passthru)
		vlan = list_first_or_null_rcu(&port->vlans,
					      struct macvlan_dev, list);
	else
		vlan = macvlan_hash_lookup(port, eth->h_dest);
	if (vlan == NULL)
		return RX_HANDLER_PASS;

	dev = vlan->dev;
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
	len = skb->len + ETH_HLEN;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	skb->dev = dev;
	skb->pkt_type = PACKET_HOST;

	ret = netif_rx(skb);

out:
	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
	return RX_HANDLER_CONSUMED;
}

static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);
	const struct macvlan_port *port = vlan->port;
	const struct macvlan_dev *dest;
	__u8 ip_summed = skb->ip_summed;

	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
		const struct ethhdr *eth = (void *)skb->data;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* send to other bridge ports directly */
		if (is_multicast_ether_addr(eth->h_dest)) {
			macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
			goto xmit_world;
		}

		dest = macvlan_hash_lookup(port, eth->h_dest);
		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
			/* send to lowerdev first for its network taps */
			dev_forward_skb(vlan->lowerdev, skb);

			return NET_XMIT_SUCCESS;
		}
	}

xmit_world:
	skb->ip_summed = ip_summed;
	skb->dev = vlan->lowerdev;
	return dev_queue_xmit(skb);
}

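/* Transmit entry point: use the lower device's L2 forwarding
 * accelerator when one was handed out at open time, otherwise fall
 * back to the software bridge/VEPA path, and account the result in
 * the per-CPU transmit statistics.
 */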
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	unsigned int len = skb->len;
	int ret;
	const struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->fwd_priv) {
		skb->dev = vlan->lowerdev;
		ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
	} else {
		ret = macvlan_queue_xmit(skb, dev);
	}

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	}
	return ret;
}

static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
			       unsigned short type, const void *daddr,
			       const void *saddr, unsigned len)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	return dev_hard_header(skb, lowerdev, type, daddr,
			       saddr ? : dev->dev_addr, len);
}

static const struct header_ops macvlan_hard_header_ops = {
	.create		= macvlan_hard_header,
	.rebuild	= eth_rebuild_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};

static struct rtnl_link_ops macvlan_link_ops;

static int macvlan_open(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	int err;

	if (vlan->port->passthru) {
		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
			err = dev_set_promiscuity(lowerdev, 1);
			if (err < 0)
				goto out;
		}
		goto hash_add;
	}

	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
	    dev->rtnl_link_ops == &macvlan_link_ops) {
		vlan->fwd_priv =
		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);

		/* If we get a NULL pointer back, or if we get an error,
		 * then we should just fall through to the non-accelerated path.
		 */
		if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
			vlan->fwd_priv = NULL;
		} else
			return 0;
	}

	err = -EBUSY;
	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
		goto out;

	err = dev_uc_add(lowerdev, dev->dev_addr);
	if (err < 0)
		goto out;
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(lowerdev, 1);
		if (err < 0)
			goto del_unicast;
	}

hash_add:
	macvlan_hash_add(vlan);
	return 0;

del_unicast:
	dev_uc_del(lowerdev, dev->dev_addr);
out:
	if (vlan->fwd_priv) {
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
							   vlan->fwd_priv);
		vlan->fwd_priv = NULL;
	}
	return err;
}

static int macvlan_stop(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	if (vlan->fwd_priv) {
		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
							   vlan->fwd_priv);
		vlan->fwd_priv = NULL;
		return 0;
	}

	dev_uc_unsync(lowerdev, dev);
	dev_mc_unsync(lowerdev, dev);

	if (vlan->port->passthru) {
		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
			dev_set_promiscuity(lowerdev, -1);
		goto hash_del;
	}

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(lowerdev, -1);

	dev_uc_del(lowerdev, dev->dev_addr);

hash_del:
	macvlan_hash_del(vlan, !dev->dismantle);
	return 0;
}

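/* Changing the MAC of a running macvlan means updating the unicast
 * filter on the lower device and rehashing the entry so that receive
 * lookups keep working; while the interface is down a plain copy of
 * the address is enough.
 */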
static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP)) {
		/* Just copy in the new address */
		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	} else {
		/* Rehash and update the device filters */
		if (macvlan_addr_busy(vlan->port, addr->sa_data))
			return -EBUSY;

		err = dev_uc_add(lowerdev, addr->sa_data);
		if (err)
			return err;

		dev_uc_del(lowerdev, dev->dev_addr);

		macvlan_hash_change_addr(vlan, addr->sa_data);
	}
	return 0;
}

static void macvlan_change_rx_flags(struct net_device *dev, int change)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
}

static void macvlan_set_mac_lists(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
	} else {
		struct netdev_hw_addr *ha;
		DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);

		bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
		netdev_for_each_mc_addr(ha, dev) {
			__set_bit(mc_hash(vlan, ha->addr), filter);
		}

		__set_bit(mc_hash(vlan, dev->broadcast), filter);

		bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
	}
	dev_uc_sync(vlan->lowerdev, dev);
	dev_mc_sync(vlan->lowerdev, dev);
}

static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*
 * macvlan network devices have devices nesting below them and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;

#define ALWAYS_ON_FEATURES \
	(NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)

#define MACVLAN_FEATURES \
	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

#define MACVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

static void macvlan_set_lockdep_class_one(struct net_device *dev,
					  struct netdev_queue *txq,
					  void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &macvlan_netdev_xmit_lock_key);
}

static void macvlan_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock,
			  &macvlan_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}

static int macvlan_init(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	const struct net_device *lowerdev = vlan->lowerdev;

	dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
		     (lowerdev->state & MACVLAN_STATE_MASK);
	dev->features = lowerdev->features & MACVLAN_FEATURES;
	dev->features |= ALWAYS_ON_FEATURES;
	dev->gso_max_size = lowerdev->gso_max_size;
	dev->iflink = lowerdev->ifindex;
	dev->hard_header_len = lowerdev->hard_header_len;

	macvlan_set_lockdep_class(dev);

	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
	if (!vlan->pcpu_stats)
		return -ENOMEM;

	return 0;
}

static void macvlan_uninit(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port = vlan->port;

	free_percpu(vlan->pcpu_stats);

	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(port->dev);
}

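/* Fold the per-CPU counters into one rtnl_link_stats64 snapshot.  The
 * 64-bit packet/byte counters are read under the u64_stats sequence
 * counter; rx_errors and tx_dropped are plain 32-bit counters and are
 * simply summed.
 */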
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
							 struct rtnl_link_stats64 *stats)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->pcpu_stats) {
		struct vlan_pcpu_stats *p;
		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
		u32 rx_errors = 0, tx_dropped = 0;
		unsigned int start;
		int i;

		for_each_possible_cpu(i) {
			p = per_cpu_ptr(vlan->pcpu_stats, i);
			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				rx_packets = p->rx_packets;
				rx_bytes = p->rx_bytes;
				rx_multicast = p->rx_multicast;
				tx_packets = p->tx_packets;
				tx_bytes = p->tx_bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));

			stats->rx_packets += rx_packets;
			stats->rx_bytes += rx_bytes;
			stats->multicast += rx_multicast;
			stats->tx_packets += tx_packets;
			stats->tx_bytes += tx_bytes;
			/* rx_errors & tx_dropped are u32, updated
			 * without syncp protection.
			 */
			rx_errors += p->rx_errors;
			tx_dropped += p->tx_dropped;
		}
		stats->rx_errors = rx_errors;
		stats->tx_dropped = tx_dropped;
	}
	return stats;
}

static int macvlan_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	return vlan_vid_add(lowerdev, proto, vid);
}

static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	vlan_vid_del(lowerdev, proto, vid);
	return 0;
}

static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr,
			   u16 flags)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	if (!vlan->port->passthru)
		return -EOPNOTSUPP;

	if (flags & NLM_F_REPLACE)
		return -EOPNOTSUPP;

	if (is_unicast_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	return err;
}

static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	if (!vlan->port->passthru)
		return -EOPNOTSUPP;

	if (is_unicast_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}

static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}

static int macvlan_ethtool_get_settings(struct net_device *dev,
					struct ethtool_cmd *cmd)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);

	return __ethtool_get_settings(vlan->lowerdev, cmd);
}

static netdev_features_t macvlan_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	netdev_features_t mask;

	features |= NETIF_F_ALL_FOR_ALL;
	features &= (vlan->set_features | ~MACVLAN_FEATURES);
	mask = features;

	features = netdev_increment_features(vlan->lowerdev->features,
					     features,
					     mask);
	features |= ALWAYS_ON_FEATURES;

	return features;
}

static const struct ethtool_ops macvlan_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_settings	= macvlan_ethtool_get_settings,
	.get_drvinfo	= macvlan_ethtool_get_drvinfo,
};

static const struct net_device_ops macvlan_netdev_ops = {
	.ndo_init		= macvlan_init,
	.ndo_uninit		= macvlan_uninit,
	.ndo_open		= macvlan_open,
	.ndo_stop		= macvlan_stop,
	.ndo_start_xmit		= macvlan_start_xmit,
	.ndo_change_mtu		= macvlan_change_mtu,
	.ndo_fix_features	= macvlan_fix_features,
	.ndo_change_rx_flags	= macvlan_change_rx_flags,
	.ndo_set_mac_address	= macvlan_set_mac_address,
	.ndo_set_rx_mode	= macvlan_set_mac_lists,
	.ndo_get_stats64	= macvlan_dev_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
	.ndo_fdb_add		= macvlan_fdb_add,
	.ndo_fdb_del		= macvlan_fdb_del,
	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
};

void macvlan_common_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags	&= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags	|= IFF_UNICAST_FLT;
	dev->netdev_ops	= &macvlan_netdev_ops;
	dev->destructor	= free_netdev;
	dev->header_ops	= &macvlan_hard_header_ops;
	dev->ethtool_ops = &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);

static void macvlan_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = 0;
}

static int macvlan_port_create(struct net_device *dev)
{
	struct macvlan_port *port;
	unsigned int i;
	int err;

	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
		return -EINVAL;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	port->passthru = false;
	port->dev = dev;
	INIT_LIST_HEAD(&port->vlans);
	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&port->vlan_hash[i]);

	skb_queue_head_init(&port->bc_queue);
	INIT_WORK(&port->bc_work, macvlan_process_broadcast);

	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
	if (err)
		kfree(port);
	else
		dev->priv_flags |= IFF_MACVLAN_PORT;
	return err;
}

static void macvlan_port_destroy(struct net_device *dev)
{
	struct macvlan_port *port = macvlan_port_get_rtnl(dev);

	cancel_work_sync(&port->bc_work);
	dev->priv_flags &= ~IFF_MACVLAN_PORT;
	netdev_rx_handler_unregister(dev);
	kfree_rcu(port, rcu);
}

static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (data && data[IFLA_MACVLAN_FLAGS] &&
	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
		return -EINVAL;

	if (data && data[IFLA_MACVLAN_MODE]) {
		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
		case MACVLAN_MODE_PRIVATE:
		case MACVLAN_MODE_VEPA:
		case MACVLAN_MODE_BRIDGE:
		case MACVLAN_MODE_PASSTHRU:
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

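/* Shared newlink implementation for macvlan and macvtap, e.g.
 *
 *	ip link add link eth0 name macvlan0 type macvlan mode bridge
 *
 * A macvlan_port is attached to the lower device if it does not have
 * one yet; the new device is then registered, linked above the lower
 * device and added to the port's list of vlans.
 */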
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port;
	struct net_device *lowerdev;
	int err;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (lowerdev == NULL)
		return -ENODEV;

	/* When creating macvlans or macvtaps on top of other macvlans, use
	 * the real device as the lowerdev.
	 */
	if (netif_is_macvlan(lowerdev))
		lowerdev = macvlan_dev_real_dev(lowerdev);

	if (!tb[IFLA_MTU])
		dev->mtu = lowerdev->mtu;
	else if (dev->mtu > lowerdev->mtu)
		return -EINVAL;

	if (!tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	if (!macvlan_port_exists(lowerdev)) {
		err = macvlan_port_create(lowerdev);
		if (err < 0)
			return err;
	}
	port = macvlan_port_get_rtnl(lowerdev);

	/* Only 1 macvlan device can be created in passthru mode */
	if (port->passthru)
		return -EINVAL;

	vlan->lowerdev = lowerdev;
	vlan->dev      = dev;
	vlan->port     = port;
	vlan->set_features = MACVLAN_FEATURES;

	vlan->mode     = MACVLAN_MODE_VEPA;
	if (data && data[IFLA_MACVLAN_MODE])
		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);

	if (data && data[IFLA_MACVLAN_FLAGS])
		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);

	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
		if (port->count)
			return -EINVAL;
		port->passthru = true;
		eth_hw_addr_inherit(dev, lowerdev);
	}

	port->count += 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto destroy_port;

	dev->priv_flags |= IFF_MACVLAN;
	err = netdev_upper_dev_link(lowerdev, dev);
	if (err)
		goto unregister_netdev;

	list_add_tail_rcu(&vlan->list, &port->vlans);
	netif_stacked_transfer_operstate(lowerdev, dev);

	return 0;

unregister_netdev:
	unregister_netdevice(dev);
destroy_port:
	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(lowerdev);

	return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);

static int macvlan_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	return macvlan_common_newlink(src_net, dev, tb, data);
}

void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	list_del_rcu(&vlan->list);
	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);

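/* Handle "ip link set ... type macvlan ..." changes.  Mode and flag
 * updates are validated first; switching into or out of passthru mode
 * at runtime is rejected, and toggling MACVLAN_FLAG_NOPROMISC on a
 * passthru device adjusts the lower device's promiscuity accordingly.
 */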
static int macvlan_changelink(struct net_device *dev,
			      struct nlattr *tb[], struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	enum macvlan_mode mode;
	bool set_mode = false;

	/* Validate mode, but don't set yet: setting flags may fail. */
	if (data && data[IFLA_MACVLAN_MODE]) {
		set_mode = true;
		mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
		/* Passthrough mode can't be set or cleared dynamically */
		if ((mode == MACVLAN_MODE_PASSTHRU) !=
		    (vlan->mode == MACVLAN_MODE_PASSTHRU))
			return -EINVAL;
	}

	if (data && data[IFLA_MACVLAN_FLAGS]) {
		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;

		if (vlan->port->passthru && promisc) {
			int err;

			if (flags & MACVLAN_FLAG_NOPROMISC)
				err = dev_set_promiscuity(vlan->lowerdev, -1);
			else
				err = dev_set_promiscuity(vlan->lowerdev, 1);
			if (err < 0)
				return err;
		}
		vlan->flags = flags;
	}
	if (set_mode)
		vlan->mode = mode;
	return 0;
}

static size_t macvlan_get_size(const struct net_device *dev)
{
	return (0
		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */
		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
		);
}

static int macvlan_fill_info(struct sk_buff *skb,
			     const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
	[IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
	[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
};

int macvlan_link_register(struct rtnl_link_ops *ops)
{
	/* common fields */
	ops->priv_size		= sizeof(struct macvlan_dev);
	ops->validate		= macvlan_validate;
	ops->maxtype		= IFLA_MACVLAN_MAX;
	ops->policy		= macvlan_policy;
	ops->changelink		= macvlan_changelink;
	ops->get_size		= macvlan_get_size;
	ops->fill_info		= macvlan_fill_info;

	return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(macvlan_link_register);

static struct rtnl_link_ops macvlan_link_ops = {
	.kind		= "macvlan",
	.setup		= macvlan_setup,
	.newlink	= macvlan_newlink,
	.dellink	= macvlan_dellink,
};

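/* React to events on the lower device: propagate carrier and feature
 * changes to every macvlan stacked on it, tear the macvlans down when
 * the lower device itself is unregistered, and refuse lower-device
 * type changes.
 */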
static int macvlan_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan, *next;
	struct macvlan_port *port;
	LIST_HEAD(list_kill);

	if (!macvlan_port_exists(dev))
		return NOTIFY_DONE;

	port = macvlan_port_get_rtnl(dev);

	switch (event) {
	case NETDEV_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list)
			netif_stacked_transfer_operstate(vlan->lowerdev,
							 vlan->dev);
		break;
	case NETDEV_FEAT_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list) {
			vlan->dev->gso_max_size = dev->gso_max_size;
			netdev_update_features(vlan->dev);
		}
		break;
	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		list_for_each_entry_safe(vlan, next, &port->vlans, list)
			vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		list_del(&list_kill);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block macvlan_notifier_block __read_mostly = {
	.notifier_call	= macvlan_device_event,
};

static int __init macvlan_init_module(void)
{
	int err;

	register_netdevice_notifier(&macvlan_notifier_block);

	err = macvlan_link_register(&macvlan_link_ops);
	if (err < 0)
		goto err1;
	return 0;
err1:
	unregister_netdevice_notifier(&macvlan_notifier_block);
	return err;
}

static void __exit macvlan_cleanup_module(void)
{
	rtnl_link_unregister(&macvlan_link_ops);
	unregister_netdevice_notifier(&macvlan_notifier_block);
}

module_init(macvlan_init_module);
module_exit(macvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Driver for MAC address based VLANs");
MODULE_ALIAS_RTNL_LINK("macvlan");