xref: /linux/net/8021q/vlan.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *              Please send support related email to: netdev@vger.kernel.org
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <asm/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

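/* Ensure the array chunk covering this protocol/VID pair is allocated,
 * so that a later vlan_group_set_device() cannot fail.  Runs under RTNL.
 */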
static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int pidx, vidx;
	unsigned int size;

	ASSERT_RTNL();

	pidx  = vlan_proto_idx(vlan_proto);
	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL);
	if (array == NULL)
		return -ENOBUFS;

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}

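/* Detach a VLAN device from its real device: drop it from the VLAN group,
 * leave MVRP/GVRP if they were requested, queue the netdev for
 * unregistration and release the reference on the real device.
 * Caller holds RTNL.
 */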
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	/* Take it out of our own structures, but be sure to interlock with
	 * HW accelerating devices or SW vlan input packet processing if
	 * VLAN is not 0 (leave it there for 802.1p).
	 */
	if (vlan_id)
		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}

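/* Check whether a VLAN with this protocol/VID may be created on top of
 * real_dev: the device must not be VLAN-challenged and the VID must not
 * already be in use on it.
 */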
int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
		return -EEXIST;

	return 0;
}

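/* Register a fully set up VLAN device: program the VID into the real
 * device, bring up the GVRP/MVRP applicants for the first VLAN, register
 * the netdevice, link it as an upper device and record it in the group.
 */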
int register_vlan_dev(struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}

/*  Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 *  Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like:	 eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;
	new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	free_netdev(new_dev);
	return err;
}

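/* The address of the real device changed; add or remove the VLAN device's
 * address in the real device's unicast filter as needed, then remember the
 * new real-device address.
 */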
static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

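/* Propagate feature-related settings (GSO limits, hard header length, FCoE
 * DDP exchange id) from the real device to the VLAN device and re-evaluate
 * the VLAN device's features.
 */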
static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	netdev_update_features(vlandev);
}

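/* Keep the /proc/net/vlan entry of a VLAN device in sync with its
 * registration state and name.
 */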
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}

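/* Netdevice notifier: follow state changes of the real device (link state,
 * address, MTU, features, up/down, unregistration, type change) and mirror
 * them onto all VLAN devices stacked on top of it.
 */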
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate real device features to the vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

		/* Put all VLANs for this dev in the down state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			netif_stacked_transfer_operstate(dev, vlandev);
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

/*
 *	VLAN IOCTL handler.
 *	o  execute requested action or pass command to the device driver
 *	arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[23] = 0;
	args.u.device2[23] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if ((args.u.name_type >= 0) &&
		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}

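/* GRO receive handler for VLAN-tagged packets: parse the VLAN header, mark
 * held packets whose VLAN header differs so they are not aggregated, then
 * hand off to the GRO handler of the encapsulated protocol.
 */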
static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vlan_hdr *vhdr;
	unsigned int hlen, off_vlan;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	for (p = *head; p; p = p->next) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

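/* GRO complete handler: finish aggregation by calling the complete callback
 * of the protocol encapsulated behind the VLAN header.
 */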
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

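/* GRO offload handlers for both 802.1Q and 802.1ad tagged traffic. */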
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

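/* Per network namespace state: pick the default interface naming scheme and
 * create the /proc/net/vlan entries on init, remove them on exit.
 */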
static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};

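/* Module init: register the pernet ops, the netdevice notifier, the
 * GVRP/MVRP and netlink handlers, the GRO offloads and the VLAN ioctl hook,
 * unwinding in reverse order on failure.
 */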
static int __init vlan_proto_init(void)
{
	int err;
	unsigned int i;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}

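/* Module exit: tear everything down in roughly the reverse order of
 * vlan_proto_init() and wait for in-flight call_rcu() callbacks to finish
 * before the module text goes away.
 */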
static void __exit vlan_cleanup_module(void)
{
	unsigned int i;

	vlan_ioctl_set(NULL);

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_remove_offload(&vlan_packet_offloads[i]);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);