xref: /linux/net/8021q/vlan.c (revision b68fc09be48edbc47de1a0f3d42ef8adf6c0ac55)
/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *              Please send support related email to: netdev@vger.kernel.org
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

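/* Lazily allocate the slice of vlan_devices_arrays[][] that will hold the
 * net_device pointer for this protocol/VID pair.  Called under RTNL before
 * a new VLAN device is inserted into the group.
 */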
static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int pidx, vidx;
	unsigned int size;

	ASSERT_RTNL();

	pidx  = vlan_proto_idx(vlan_proto);
	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL);
	if (array == NULL)
		return -ENOBUFS;

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}

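/* Tear down a VLAN device: leave MVRP/GVRP if they were requested, clear the
 * group slot, unlink from the real device and queue the netdev for
 * unregistration.  The VID filter and the reference on the real device are
 * dropped; the GVRP/MVRP applicants go away with the last VLAN.  The caller
 * holds RTNL; @head may be used to batch several unregistrations.
 */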
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}

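/* Refuse to stack a VLAN on a device that is VLAN-challenged or that
 * already carries this protocol/VID combination.
 */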
int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id,
			struct netlink_ext_ack *extack)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
		return -EEXIST;
	}

	return 0;
}

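/* Common registration path used by both the ioctl and the netlink code:
 * install the VID filter, bring up the GVRP/MVRP applicants for the first
 * VLAN on @real_dev, register the netdev, stack it on top of the real
 * device and insert it into the VLAN group.
 */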
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}

/*  Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 *  Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
				  NULL);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like:	 eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	if (new_dev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(new_dev);
	return err;
}

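/* Called when the real device's MAC address changes: update the real
 * device's unicast filter so that a VLAN device with its own address keeps
 * receiving frames, then remember the new lower address.
 */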
static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

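/* Copy offload-related attributes (GSO limits, needed header length, FCoE
 * DDP exchange id, xmit dst_release policy) from the real device and
 * recompute the VLAN device's feature set.
 */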
static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;
	vlandev->gso_max_segs = dev->gso_max_segs;

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);

	netdev_update_features(vlandev);
}

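/* Events on the VLAN devices themselves only affect their /proc/net/vlan
 * entries: create on register, remove on unregister, recreate on rename.
 */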
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}

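/* Notifier run for every netdevice event: keep the /proc entries of VLAN
 * devices current, maintain the VID 0 hardware filter entry on devices that
 * do CTAG filtering, and propagate state, address, MTU and feature changes
 * from a real device to the VLANs stacked on top of it.
 */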
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);
	int err;

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate real device features to the vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			netif_stacked_transfer_operstate(dev, vlandev);
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid the underlying device from changing its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;

	case NETDEV_CVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_CVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
		break;

	case NETDEV_SVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_SVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

/*
 *	VLAN IOCTL handler.
 *	o execute requested action or pass command to the device driver
 *	  arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[sizeof(args.device1) - 1] = 0;
	args.u.device2[sizeof(args.u.device2) - 1] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}

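/* GRO receive handler for VLAN-tagged frames: packets only aggregate when
 * their VLAN headers match, after which the encapsulated protocol's handler
 * is called with the 4-byte tag pulled.
 */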
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

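/* Finish GRO for a tagged frame by handing the packet to the completion
 * callback of the encapsulated protocol.
 */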
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

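/* GRO offload entries for the 802.1Q (CTAG) and 802.1AD (STAG) ethertypes,
 * registered at module init.
 */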
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

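/* Per-network-namespace setup: pick the default interface naming scheme and
 * create the /proc/net/vlan entries.
 */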
static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};

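/* Module init: register the pernet ops, the netdevice notifier, the
 * GVRP/MVRP applicants, the rtnl_link ops, the GRO offloads and the ioctl
 * hook, unwinding in reverse order on failure.
 */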
static int __init vlan_proto_init(void)
{
	int err;
	unsigned int i;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}

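/* Module unload: undo vlan_proto_init() in reverse order and wait for
 * outstanding call_rcu() callbacks before the applicants are torn down.
 */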
static void __exit vlan_cleanup_module(void)
{
	unsigned int i;

	vlan_ioctl_set(NULL);

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_remove_offload(&vlan_packet_offloads[i]);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);