// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*-
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:	Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
 *		  - reset skb->pkt_type on incoming packets when MAC was changed
 *		  - see that changed MAC is saddr for outgoing packets
 *		Oct 20, 2001: Ard van Breeman:
 *		  - Fix MC-list, finally.
 *		  - Flush MC-list on VLAN destroy.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <net/arp.h>
#include <net/macsec.h>
#include <net/netdev_lock.h>

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <linux/netpoll.h>

/*
 *	Create the VLAN header for an arbitrary protocol layer
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *	This is called when the SKB is moving down the stack towards the
 *	physical devices.
 */
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				unsigned short type,
				const void *daddr, const void *saddr,
				unsigned int len)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_hdr *vhdr;
	unsigned int vhdrlen = 0;
	u16 vlan_tci = 0;
	int rc;

	if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
		vhdr = skb_push(skb, VLAN_HLEN);

		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		vhdr->h_vlan_TCI = htons(vlan_tci);

		/*
		 *  Set the protocol type. For a packet of type ETH_P_802_3/2 we
		 *  put the length in here instead.
		 */
		if (type != ETH_P_802_3 && type != ETH_P_802_2)
			vhdr->h_vlan_encapsulated_proto = htons(type);
		else
			vhdr->h_vlan_encapsulated_proto = htons(len);

		skb->protocol = vlan->vlan_proto;
		type = ntohs(vlan->vlan_proto);
		vhdrlen = VLAN_HLEN;
	}

	/* Before delegating work to the lower layer, enter our MAC-address */
	if (saddr == NULL)
		saddr = dev->dev_addr;

	/* Now make the underlying real hard header */
	dev = vlan->real_dev;
	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
	if (rc > 0)
		rc += vhdrlen;
	return rc;
}

static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	return netpoll_send_skb(vlan->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
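
/* Transmit path: when REORDER_HDR is off, vlan_dev_hard_header() has already
 * built an in-band tag, so nothing more is needed; otherwise the tag is
 * attached here as hwaccel metadata before the skb is handed to the real
 * device. Per-CPU counters record the outcome of dev_queue_xmit().
 */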
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
	    veth->h_vlan_proto != vlan->vlan_proto) {
		u16 vlan_tci;
		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
	}

	skb->dev = vlan->real_dev;
	len = skb->len;
	if (unlikely(netpoll_tx_running(dev)))
		return vlan_netpoll_send_skb(vlan, skb);

	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct vlan_pcpu_stats *stats;

		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, len);
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
	}

	return ret;
}

static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	unsigned int max_mtu = real_dev->mtu;

	if (netif_reduces_vlan_mtu(real_dev))
		max_mtu -= VLAN_HLEN;
	if (max_mtu < new_mtu)
		return -ERANGE;

	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
}

void vlan_dev_set_ingress_priority(const struct net_device *dev,
				   u32 skb_prio, u16 vlan_prio)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
		vlan->nr_ingress_mappings--;
	else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
		vlan->nr_ingress_mappings++;

	vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
}
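
/* Egress mappings translate skb->priority into the PCP bits of the outgoing
 * tag. New entries are published with smp_wmb() below so the lockless reader
 * in vlan_dev_get_egress_qos_mask() never sees a half-initialised node.
 */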
int vlan_dev_set_egress_priority(const struct net_device *dev,
				 u32 skb_prio, u16 vlan_prio)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_priority_tci_mapping *mp = NULL;
	struct vlan_priority_tci_mapping *np;
	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;

	/* See if a priority mapping exists.. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	while (mp) {
		if (mp->priority == skb_prio) {
			if (mp->vlan_qos && !vlan_qos)
				vlan->nr_egress_mappings--;
			else if (!mp->vlan_qos && vlan_qos)
				vlan->nr_egress_mappings++;
			mp->vlan_qos = vlan_qos;
			return 0;
		}
		mp = mp->next;
	}

	/* Create a new mapping then. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
	if (!np)
		return -ENOBUFS;

	np->next = mp;
	np->priority = skb_prio;
	np->vlan_qos = vlan_qos;
	/* Before inserting this element in hash table, make sure all its fields
	 * are committed to memory.
	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
	 */
	smp_wmb();
	vlan->egress_priority_map[skb_prio & 0xF] = np;
	if (vlan_qos)
		vlan->nr_egress_mappings++;
	return 0;
}

/* Flags are defined in the vlan_flags enum in
 * include/uapi/linux/if_vlan.h file.
 */
int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	u32 old_flags = vlan->flags;

	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
		     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
		     VLAN_FLAG_BRIDGE_BINDING))
		return -EINVAL;

	vlan->flags = (old_flags & ~mask) | (flags & mask);

	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
		if (vlan->flags & VLAN_FLAG_GVRP)
			vlan_gvrp_request_join(dev);
		else
			vlan_gvrp_request_leave(dev);
	}

	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
		if (vlan->flags & VLAN_FLAG_MVRP)
			vlan_mvrp_request_join(dev);
		else
			vlan_mvrp_request_leave(dev);
	}
	return 0;
}

void vlan_dev_get_realdev_name(const struct net_device *dev, char *result, size_t size)
{
	strscpy_pad(result, vlan_dev_priv(dev)->real_dev->name, size);
}

bool vlan_dev_inherit_address(struct net_device *dev,
			      struct net_device *real_dev)
{
	if (dev->addr_assign_type != NET_ADDR_STOLEN)
		return false;

	eth_hw_addr_set(dev, real_dev->dev_addr);
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return true;
}

static int vlan_dev_open(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP) &&
	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
	    !vlan_dev_inherit_address(dev, real_dev)) {
		err = dev_uc_add(real_dev, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);

	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_join(dev);

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_join(dev);

	if (netif_carrier_ok(real_dev) &&
	    !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
		netif_carrier_on(dev);
	return 0;

out:
	netif_carrier_off(dev);
	return err;
}

static int vlan_dev_stop(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
		dev_uc_del(real_dev, dev->dev_addr);

	if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
		netif_carrier_off(dev);
	return 0;
}

static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
		err = dev_uc_add(real_dev, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
		dev_uc_del(real_dev, dev->dev_addr);

out:
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}
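
/* Hardware timestamping is not implemented by the VLAN device itself; both
 * the get and set paths below are forwarded to the lower (real) device.
 */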
static int vlan_hwtstamp_get(struct net_device *dev,
			     struct kernel_hwtstamp_config *cfg)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	return generic_hwtstamp_get_lower(real_dev, cfg);
}

static int vlan_hwtstamp_set(struct net_device *dev,
			     struct kernel_hwtstamp_config *cfg,
			     struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	if (!net_eq(dev_net(dev), dev_net(real_dev)))
		return -EOPNOTSUPP;

	return generic_hwtstamp_set_lower(real_dev, cfg, extack);
}

static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		err = dev_eth_ioctl(real_dev, &ifrr, cmd);
		break;
	}

	if (!err)
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}

static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int err = 0;

	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
		err = ops->ndo_neigh_setup(real_dev, pa);

	return err;
}

#if IS_ENABLED(CONFIG_FCOE)
static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
				   struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = 0;

	if (ops->ndo_fcoe_ddp_setup)
		rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);

	return rc;
}

static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int len = 0;

	if (ops->ndo_fcoe_ddp_done)
		len = ops->ndo_fcoe_ddp_done(real_dev, xid);

	return len;
}

static int vlan_dev_fcoe_enable(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = -EINVAL;

	if (ops->ndo_fcoe_enable)
		rc = ops->ndo_fcoe_enable(real_dev);
	return rc;
}

static int vlan_dev_fcoe_disable(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = -EINVAL;

	if (ops->ndo_fcoe_disable)
		rc = ops->ndo_fcoe_disable(real_dev);
	return rc;
}

static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
				    struct scatterlist *sgl, unsigned int sgc)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = 0;

	if (ops->ndo_fcoe_ddp_target)
		rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);

	return rc;
}
#endif

#ifdef NETDEV_FCOE_WWNN
static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	const struct net_device_ops *ops = real_dev->netdev_ops;
	int rc = -EINVAL;

	if (ops->ndo_fcoe_get_wwn)
		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
	return rc;
}
#endif
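
/* Receive-mode handling: promiscuity, allmulti and the unicast/multicast
 * address lists are reference-counted or synced onto the real device so that
 * frames destined for the VLAN device actually reach it.
 */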
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}

static __be16 vlan_parse_protocol(const struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	return __vlan_get_protocol(skb, veth->h_vlan_proto, NULL);
}

static const struct header_ops vlan_header_ops = {
	.create		= vlan_dev_hard_header,
	.parse		= eth_header_parse,
	.parse_protocol	= vlan_parse_protocol,
};

static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
				     unsigned short type,
				     const void *daddr, const void *saddr,
				     unsigned int len)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;

	if (saddr == NULL)
		saddr = dev->dev_addr;

	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}

static const struct header_ops vlan_passthru_header_ops = {
	.create		= vlan_passthru_hard_header,
	.parse		= eth_header_parse,
	.parse_protocol	= vlan_parse_protocol,
};

static const struct device_type vlan_type = {
	.name	= "vlan",
};

static const struct net_device_ops vlan_netdev_ops;
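
/* ndo_init: inherit flags, features, headroom and (if none is set yet) the
 * MAC address from the real device, choose the header_ops variant that
 * matches its VLAN offload capability, and allocate per-CPU statistics.
 */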
static int vlan_dev_init(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;

	netif_carrier_off(dev);

	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					 IFF_MASTER | IFF_SLAVE);
	dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
					 (1<<__LINK_STATE_DORMANT))) |
		     (1<<__LINK_STATE_PRESENT);

	if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
		dev->state |= (1 << __LINK_STATE_NOCARRIER);

	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
			   NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
			   NETIF_F_GSO_ENCAP_ALL |
			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
			   NETIF_F_FCOE_CRC | NETIF_F_FSO;

	if (real_dev->vlan_features & NETIF_F_HW_MACSEC)
		dev->hw_features |= NETIF_F_HW_MACSEC;

	dev->features |= dev->hw_features;
	dev->lltx = true;
	dev->fcoe_mtu = true;
	netif_inherit_tso_max(dev, real_dev);
	if (dev->features & NETIF_F_VLAN_FEATURES)
		netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");

	dev->vlan_features = real_dev->vlan_features &
			     ~(NETIF_F_FCOE_CRC | NETIF_F_FSO);
	dev->hw_enc_features = vlan_tnl_features(real_dev);
	dev->mpls_features = real_dev->mpls_features;

	/* ipv6 shared card related stuff */
	dev->dev_id = real_dev->dev_id;

	if (is_zero_ether_addr(dev->dev_addr)) {
		eth_hw_addr_set(dev, real_dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_STOLEN;
	}
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

#if IS_ENABLED(CONFIG_FCOE)
	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif

	dev->needed_headroom = real_dev->needed_headroom;
	if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
		dev->header_ops      = &vlan_passthru_header_ops;
		dev->hard_header_len = real_dev->hard_header_len;
	} else {
		dev->header_ops      = &vlan_header_ops;
		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
	}

	dev->netdev_ops = &vlan_netdev_ops;

	SET_NETDEV_DEVTYPE(dev, &vlan_type);

	netdev_lockdep_set_classes(dev);

	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
	if (!vlan->vlan_pcpu_stats)
		return -ENOMEM;

	/* Get vlan's reference to real_dev */
	netdev_hold(real_dev, &vlan->dev_tracker, GFP_KERNEL);

	return 0;
}

/* Note: this function might be called multiple times for the same device. */
void vlan_dev_free_egress_priority(const struct net_device *dev)
{
	struct vlan_priority_tci_mapping *pm;
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
		while ((pm = vlan->egress_priority_map[i]) != NULL) {
			vlan->egress_priority_map[i] = pm->next;
			kfree(pm);
		}
	}
}

static void vlan_dev_uninit(struct net_device *dev)
{
	vlan_dev_free_egress_priority(dev);
}
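
/* Keep the VLAN device's features consistent with what the lower device can
 * provide: intersect with the real device's VLAN feature set, then restore
 * the purely software features (e.g. software GSO) that need no hardware help.
 */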
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
					       netdev_features_t features)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	netdev_features_t old_features = features;
	netdev_features_t lower_features;

	lower_features = netdev_intersect_features((real_dev->vlan_features |
						    NETIF_F_RXCSUM),
						   real_dev->features);

	/* Add HW_CSUM setting to preserve user ability to control
	 * checksum offload on the vlan device.
	 */
	if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
		lower_features |= NETIF_F_HW_CSUM;
	features = netdev_intersect_features(features, lower_features);
	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);

	return features;
}

static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
					   struct ethtool_link_ksettings *cmd)
{
	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
}

static void vlan_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	strscpy(info->driver, vlan_fullname, sizeof(info->driver));
	strscpy(info->version, vlan_version, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static int vlan_ethtool_get_ts_info(struct net_device *dev,
				    struct kernel_ethtool_ts_info *info)
{
	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	return ethtool_get_ts_info_by_layer(vlan->real_dev, info);
}

static void vlan_dev_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	struct vlan_pcpu_stats *p;
	u32 rx_errors = 0, tx_dropped = 0;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= u64_stats_read(&p->rx_packets);
			rxbytes		= u64_stats_read(&p->rx_bytes);
			rxmulticast	= u64_stats_read(&p->rx_multicast);
			txpackets	= u64_stats_read(&p->tx_packets);
			txbytes		= u64_stats_read(&p->tx_bytes);
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->multicast	+= rxmulticast;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;
		/* rx_errors & tx_dropped are u32 */
		rx_errors	+= READ_ONCE(p->rx_errors);
		tx_dropped	+= READ_ONCE(p->tx_dropped);
	}
	stats->rx_errors  = rx_errors;
	stats->tx_dropped = tx_dropped;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void vlan_dev_poll_controller(struct net_device *dev)
{
	return;
}

static int vlan_dev_netpoll_setup(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	err = -ENOMEM;
	if (!netpoll)
		goto out;

	err = __netpoll_setup(netpoll, real_dev);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	vlan->netpoll = netpoll;

out:
	return err;
}

static void vlan_dev_netpoll_cleanup(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct netpoll *netpoll = vlan->netpoll;

	if (!netpoll)
		return;

	vlan->netpoll = NULL;
	__netpoll_free(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int vlan_dev_get_iflink(const struct net_device *dev)
{
	const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	return READ_ONCE(real_dev->ifindex);
}
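
/* Used by the flow-offload fastpath: describe one hop of the transmit path
 * by recording the tag to insert and pointing the walk at the real device.
 */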
static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
				      struct net_device_path *path)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev);

	path->type = DEV_PATH_VLAN;
	path->encap.id = vlan->vlan_id;
	path->encap.proto = vlan->vlan_proto;
	path->dev = ctx->dev;
	ctx->dev = vlan->real_dev;
	if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
		return -ENOSPC;

	ctx->vlan[ctx->num_vlans].id = vlan->vlan_id;
	ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto;
	ctx->num_vlans++;

	return 0;
}
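
/* MACsec offload pass-through: each mdo_* callback below simply looks up the
 * real device's macsec_ops and forwards the request, so MACsec can be
 * offloaded through a stacked VLAN device.
 */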
#if IS_ENABLED(CONFIG_MACSEC)

static const struct macsec_ops *vlan_get_macsec_ops(const struct macsec_context *ctx)
{
	return vlan_dev_priv(ctx->netdev)->real_dev->macsec_ops;
}

static int vlan_macsec_offload(int (* const func)(struct macsec_context *),
			       struct macsec_context *ctx)
{
	if (unlikely(!func))
		return 0;

	return (*func)(ctx);
}

static int vlan_macsec_dev_open(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_dev_open, ctx);
}

static int vlan_macsec_dev_stop(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_dev_stop, ctx);
}

static int vlan_macsec_add_secy(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_add_secy, ctx);
}

static int vlan_macsec_upd_secy(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_upd_secy, ctx);
}

static int vlan_macsec_del_secy(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_del_secy, ctx);
}

static int vlan_macsec_add_rxsc(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_add_rxsc, ctx);
}

static int vlan_macsec_upd_rxsc(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_upd_rxsc, ctx);
}

static int vlan_macsec_del_rxsc(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_del_rxsc, ctx);
}

static int vlan_macsec_add_rxsa(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_add_rxsa, ctx);
}

static int vlan_macsec_upd_rxsa(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_upd_rxsa, ctx);
}

static int vlan_macsec_del_rxsa(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_del_rxsa, ctx);
}

static int vlan_macsec_add_txsa(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_add_txsa, ctx);
}

static int vlan_macsec_upd_txsa(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_upd_txsa, ctx);
}

static int vlan_macsec_del_txsa(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_del_txsa, ctx);
}

static int vlan_macsec_get_dev_stats(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_get_dev_stats, ctx);
}

static int vlan_macsec_get_tx_sc_stats(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_get_tx_sc_stats, ctx);
}

static int vlan_macsec_get_tx_sa_stats(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_get_tx_sa_stats, ctx);
}

static int vlan_macsec_get_rx_sc_stats(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_get_rx_sc_stats, ctx);
}

static int vlan_macsec_get_rx_sa_stats(struct macsec_context *ctx)
{
	const struct macsec_ops *ops = vlan_get_macsec_ops(ctx);

	if (!ops)
		return -EOPNOTSUPP;

	return vlan_macsec_offload(ops->mdo_get_rx_sa_stats, ctx);
}

static const struct macsec_ops macsec_offload_ops = {
	/* Device wide */
	.mdo_dev_open = vlan_macsec_dev_open,
	.mdo_dev_stop = vlan_macsec_dev_stop,
	/* SecY */
	.mdo_add_secy = vlan_macsec_add_secy,
	.mdo_upd_secy = vlan_macsec_upd_secy,
	.mdo_del_secy = vlan_macsec_del_secy,
	/* Security channels */
	.mdo_add_rxsc = vlan_macsec_add_rxsc,
	.mdo_upd_rxsc = vlan_macsec_upd_rxsc,
	.mdo_del_rxsc = vlan_macsec_del_rxsc,
	/* Security associations */
	.mdo_add_rxsa = vlan_macsec_add_rxsa,
	.mdo_upd_rxsa = vlan_macsec_upd_rxsa,
	.mdo_del_rxsa = vlan_macsec_del_rxsa,
	.mdo_add_txsa = vlan_macsec_add_txsa,
	.mdo_upd_txsa = vlan_macsec_upd_txsa,
	.mdo_del_txsa = vlan_macsec_del_txsa,
	/* Statistics */
	.mdo_get_dev_stats = vlan_macsec_get_dev_stats,
	.mdo_get_tx_sc_stats = vlan_macsec_get_tx_sc_stats,
	.mdo_get_tx_sa_stats = vlan_macsec_get_tx_sa_stats,
	.mdo_get_rx_sc_stats = vlan_macsec_get_rx_sc_stats,
	.mdo_get_rx_sa_stats = vlan_macsec_get_rx_sa_stats,
};

#endif

static const struct ethtool_ops vlan_ethtool_ops = {
	.get_link_ksettings	= vlan_ethtool_get_link_ksettings,
	.get_drvinfo		= vlan_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= vlan_ethtool_get_ts_info,
};
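
/* Most ndo callbacks above act on the VLAN device but delegate the real work
 * to vlan->real_dev; the table below wires them up.
 */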
static const struct net_device_ops vlan_netdev_ops = {
	.ndo_change_mtu		= vlan_dev_change_mtu,
	.ndo_init		= vlan_dev_init,
	.ndo_uninit		= vlan_dev_uninit,
	.ndo_open		= vlan_dev_open,
	.ndo_stop		= vlan_dev_stop,
	.ndo_start_xmit		= vlan_dev_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= vlan_dev_set_mac_address,
	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
	.ndo_eth_ioctl		= vlan_dev_ioctl,
	.ndo_neigh_setup	= vlan_dev_neigh_setup,
	.ndo_get_stats64	= vlan_dev_get_stats64,
#if IS_ENABLED(CONFIG_FCOE)
	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
#endif
#ifdef NETDEV_FCOE_WWNN
	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= vlan_dev_poll_controller,
	.ndo_netpoll_setup	= vlan_dev_netpoll_setup,
	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
#endif
	.ndo_fix_features	= vlan_dev_fix_features,
	.ndo_get_iflink		= vlan_dev_get_iflink,
	.ndo_fill_forward_path	= vlan_dev_fill_forward_path,
	.ndo_hwtstamp_get	= vlan_hwtstamp_get,
	.ndo_hwtstamp_set	= vlan_hwtstamp_set,
};

static void vlan_dev_free(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	free_percpu(vlan->vlan_pcpu_stats);
	vlan->vlan_pcpu_stats = NULL;

	/* Get rid of the vlan's reference to real_dev */
	netdev_put(vlan->real_dev, &vlan->dev_tracker);
}

void vlan_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags		|= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
	dev->priv_flags		|= IFF_UNICAST_FLT;
	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);

	dev->netdev_ops		= &vlan_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= vlan_dev_free;
	dev->ethtool_ops	= &vlan_ethtool_ops;

#if IS_ENABLED(CONFIG_MACSEC)
	dev->macsec_ops		= &macsec_offload_ops;
#endif
	dev->min_mtu		= 0;
	dev->max_mtu		= ETH_MAX_MTU;

	eth_zero_addr(dev->broadcast);
}
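
/* Userspace note (illustrative, device names are examples): a VLAN device
 * backed by the setup above is typically created with iproute2, e.g.
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *
 * which instantiates a net_device initialised by vlan_setup() and driven by
 * vlan_netdev_ops.
 */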