/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>

#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */

/**
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

/**
 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 * @h_dest: destination ethernet address
 * @h_source: source ethernet address
 * @h_vlan_proto: ethernet protocol
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

#include <linux/skbuff.h>

static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));

static inline int is_vlan_dev(struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

#define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define vlan_tx_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
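/*
 * Example (illustrative sketch only, not part of this header's API): how the
 * masks above combine into a 16-bit TCI, and how a tag already parsed by the
 * NIC is read back.  "prio", "vid" and "skb" are hypothetical locals in a
 * driver or filtering module.
 *
 *	u16 tci = ((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK) |
 *		  (vid & VLAN_VID_MASK);
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		u16 rx_vid  = vlan_tx_tag_get_id(skb);
 *		u16 rx_prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >>
 *			      VLAN_PRIO_SHIFT;
 *	}
 */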
/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 * @rx_packets: number of received packets
 * @rx_bytes: number of received bytes
 * @rx_multicast: number of received multicast packets
 * @tx_packets: number of transmitted packets
 * @tx_bytes: number of transmitted bytes
 * @syncp: synchronization point for 64bit counters
 * @rx_errors: number of rx errors
 * @tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);

/**
 * struct vlan_priority_tci_mapping - vlan egress priority mappings
 * @priority: skb priority
 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 * @next: pointer to next struct
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};

struct proc_dir_entry;
struct netpoll;

/**
 * struct vlan_dev_priv - VLAN private device data
 * @nr_ingress_mappings: number of ingress priority mappings
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice
 * @real_dev_addr: address of underlying netdevice
 * @dent: proc dir entry
 * @vlan_pcpu_stats: ptr to percpu rx/tx stats
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
};

static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
	while (mp) {
		if (mp->priority == skprio) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}

extern bool vlan_do_receive(struct sk_buff **skb);
extern struct sk_buff *vlan_untag(struct sk_buff *skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);
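/*
 * Example (illustrative sketch only): a common pattern in drivers and
 * protocol code is to map a VLAN netdevice back to its lower device and
 * 802.1Q ID before programming hardware.  "dev" is a hypothetical
 * struct net_device pointer; error handling is omitted for brevity.
 *
 *	if (is_vlan_dev(dev)) {
 *		struct net_device *lower = vlan_dev_real_dev(dev);
 *		u16 vid = vlan_dev_vlan_id(dev);
 *
 *		vlan_vid_add(lower, htons(ETH_P_8021Q), vid);
 *	}
 *
 * Here vlan_vid_add() stands in for whatever per-VID setup the lower device
 * needs; its return value is ignored only to keep the sketch short.
 */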
#else
static inline struct net_device *
__vlan_find_dev_deep(struct net_device *real_dev,
		     __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	return skb;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
#endif

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
		return true;
	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
		return true;
	return false;
}

/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
		kfree_skb(skb);
		return NULL;
	}
	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

	/* Move the mac addresses to the beginning of the new header. */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
	skb->mac_header -= VLAN_HLEN;

	/* first, the ethernet type */
	veth->h_vlan_proto = vlan_proto;

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return skb;
}

/**
 * __vlan_put_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
					     __be16 vlan_proto, u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
	if (skb)
		skb->protocol = vlan_proto;
	return skb;
}

/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
						     __be16 vlan_proto,
						     u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
	return skb;
}
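/*
 * Example (illustrative sketch only): tagging a frame on the transmit path.
 * A caller that knows the target device can let vlan_put_tag() (below) pick
 * between in-payload insertion and hardware offload.  "prio", "vid" and the
 * surrounding ndo_start_xmit()-style context are hypothetical.
 *
 *	u16 tci = ((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK) |
 *		  (vid & VLAN_VID_MASK);
 *
 *	skb = vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
 *	if (!skb)
 *		return NETDEV_TX_OK;
 *
 * On failure the original skb has already been freed, so the caller only
 * reports the frame as handled.  With pure hardware offload,
 * __vlan_hwaccel_put_tag() just records the tag in skb->vlan_tci and never
 * reallocates, so it cannot fail.
 */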
/**
 * vlan_put_tag - inserts VLAN tag according to device features
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Assumes skb->dev is the target that will xmit this frame.
 * Returns a VLAN tagged skb.
 */
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
					   __be16 vlan_proto, u16 vlan_tci)
{
	if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
		return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	} else {
		return __vlan_put_tag(skb, vlan_proto, vlan_tci);
	}
}

/**
 * __vlan_get_tag - get the VLAN TCI that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
	    veth->h_vlan_proto != htons(ETH_P_8021AD))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}

/**
 * __vlan_hwaccel_get_tag - get the VLAN TCI that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (vlan_tx_tag_present(skb)) {
		*vlan_tci = vlan_tx_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}

#define HAVE_VLAN_GET_TAG

/**
 * vlan_get_tag - get the VLAN TCI from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}

/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
	__be16 protocol = 0;

	if (vlan_tx_tag_present(skb) ||
	    skb->protocol != cpu_to_be16(ETH_P_8021Q))
		protocol = skb->protocol;
	else {
		__be16 proto, *protop;
		protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
						h_vlan_encapsulated_proto),
						sizeof(proto), &proto);
		if (likely(protop))
			protocol = *protop;
	}

	return protocol;
}

static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (ntohs(proto) >= ETH_P_802_3_MIN) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}
#endif /* !(_LINUX_IF_VLAN_H_) */