#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	struct net_device *vlan_dev;
	u16 vlan_id;

	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id)
		goto drop;

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

/* Second half of the hw-accel receive path: deliver the tagged frame to
 * taps on the real device, map the ingress priority, and account the
 * packet in the VLAN device's per-CPU rx statistics. */
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats *rx_stats;

	skb->dev = vlan_dev_info(dev)->real_dev;
	netif_nit_deliver(skb);

	skb->dev = dev;
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);
	return 0;
}

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* Shared GRO entry: re-attach the stripped tag, switch skb->dev to the
 * matching VLAN device, and hand the skb to GRO. */
static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct net_device *vlan_dev;
	u16 vlan_id;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id)
		goto drop;

	/* Re-evaluate flow matching for packets already held by GRO,
	 * since skb->dev may just have changed to the VLAN device. */
	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return GRO_DROP;
}

gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
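/*
 * Usage sketch (illustrative only, not part of this file): how a NAPI
 * driver with hardware VLAN tag stripping might hand a linear skb to the
 * stack via vlan_gro_receive().  The my_adapter/my_rx_desc types and the
 * MY_RXD_VLAN status bit are hypothetical placeholders; only
 * eth_type_trans(), vlan_gro_receive() and napi_gro_receive() are real
 * kernel interfaces.
 */
#if 0
static void my_receive_skb(struct my_adapter *adapter,
			   struct my_rx_desc *desc, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (adapter->vlgrp && (desc->status & MY_RXD_VLAN))
		/* Hardware stripped the 802.1Q tag: let the VLAN code
		 * re-attach it and pick the vlan device before GRO
		 * sees the packet. */
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(desc->vlan_tci), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}
#endif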
gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	if (netpoll_rx_on(skb)) {
		skb->protocol = eth_type_trans(skb, skb->dev);
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;
	}

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);
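/*
 * Usage sketch (illustrative only, not part of this file): the frag-based
 * variant for drivers that build packets with napi_get_frags() instead of
 * allocating their own skb.  As above, my_adapter/my_rx_desc and
 * MY_RXD_VLAN are hypothetical; napi_get_frags(), skb_fill_page_desc(),
 * vlan_gro_frags() and napi_gro_frags() are real kernel interfaces.
 * Note that eth_type_trans() is done by the frags path itself.
 */
#if 0
static void my_receive_page(struct my_adapter *adapter,
			    struct my_rx_desc *desc,
			    struct page *page, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(&adapter->napi);

	if (!skb) {
		put_page(page);	/* no skb available: drop the frame */
		return;
	}

	/* Attach the receive buffer as the first fragment. */
	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	if (adapter->vlgrp && (desc->status & MY_RXD_VLAN))
		vlan_gro_frags(&adapter->napi, adapter->vlgrp,
			       le16_to_cpu(desc->vlan_tci));
	else
		napi_gro_frags(&adapter->napi);
}
#endif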