xref: /linux/net/8021q/vlan_core.c (revision f9c41a62bba3f3f7ef3541b2a025e3371bcbba97)
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local; let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point to the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

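/*
 * Illustrative only: a minimal sketch of how a caller such as the core
 * receive path might use vlan_do_receive(). The surrounding function and
 * the "another_round" label are hypothetical stand-ins, not the actual
 * __netif_receive_skb() implementation; the block is not compiled.
 */
#if 0
static int example_rx_path(struct sk_buff *skb)
{
another_round:
	if (vlan_tx_tag_present(skb)) {
		/* vlan_do_receive() may replace the skb and retargets
		 * skb->dev to the VLAN device; reprocess from the top on
		 * success. */
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			return NET_RX_DROP;	/* skb was consumed/freed */
	}
	/* ... continue normal protocol demux on skb->dev ... */
	return NET_RX_SUCCESS;
}
#endif
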
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
					u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves. The grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep(upper_dev, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

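/*
 * Illustrative only: a hypothetical caller resolving a VLAN ID on a lower
 * device to the corresponding VLAN net_device. The lookup must run inside
 * an RCU read-side critical section, and the returned pointer is only
 * valid within it. Not compiled.
 */
#if 0
static void example_find_vlan(struct net_device *real_dev, u16 vid)
{
	struct net_device *vlan_dev;

	rcu_read_lock();
	vlan_dev = __vlan_find_dev_deep(real_dev, vid);
	if (vlan_dev)
		pr_info("vid %u on %s maps to %s\n",
			vid, real_dev->name, vlan_dev->name);
	rcu_read_unlock();
}
#endif
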
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

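/*
 * Illustrative only: a sketch showing how a driver might peel a VLAN
 * device back to its real device and VLAN ID. is_vlan_dev() is the usual
 * guard; calling these accessors on a non-VLAN device is invalid. The
 * surrounding function is hypothetical and not compiled.
 */
#if 0
static void example_describe_vlan(struct net_device *dev)
{
	if (is_vlan_dev(dev))
		pr_info("%s is VLAN %u on %s\n", dev->name,
			vlan_dev_vlan_id(dev),
			vlan_dev_real_dev(dev)->name);
}
#endif
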
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	/* Move the destination and source MAC addresses forward over the
	 * 4-byte VLAN tag, then advance the mac header so the frame looks
	 * like an ordinary untagged Ethernet frame. */
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(vlan_untag);

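/*
 * Illustrative only: a sketch of how a receive path without hardware VLAN
 * acceleration might use vlan_untag() to move the tag from the packet
 * data into skb->vlan_tci before VLAN demux. The surrounding function is
 * hypothetical and not compiled.
 */
#if 0
static struct sk_buff *example_strip_tag(struct sk_buff *skb)
{
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		/* On success the tag is gone from the payload and is
		 * available via vlan_tx_tag_get(skb); on failure the skb
		 * has been freed and NULL is returned. */
		skb = vlan_untag(skb);
	}
	return skb;
}
#endif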

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	unsigned short vid;
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(vid);
	if (!vid_info)
		return -ENOMEM;

	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
		err = ops->ndo_vlan_rx_add_vid(dev, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

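/*
 * Illustrative only: a sketch of how a stacked driver might pin a VLAN ID
 * on its lower device so that hardware VLAN filtering keeps accepting the
 * tag. Both calls must run under RTNL; the function and parameter names
 * are hypothetical and the block is not compiled.
 */
#if 0
static int example_pin_vid(struct net_device *lower_dev, unsigned short vid)
{
	int err;

	ASSERT_RTNL();

	err = vlan_vid_add(lower_dev, vid);	/* takes a reference on vid */
	if (err)
		return err;

	/* ... use the vid ... */

	vlan_vid_del(lower_dev, vid);		/* drops the reference */
	return 0;
}
#endif
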
static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned short vid = vid_info->vid;
	int err;

	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
		if (err) {
			pr_warn("failed to kill vid %d for device %s\n",
				vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

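/*
 * Illustrative only: a sketch of the enslave/release pattern (as used by
 * stacked drivers such as bonding or team) that copies all VIDs known on
 * the upper device onto a newly added lower device, and removes them again
 * when the lower device leaves. Function names other than the vlan_vids_*
 * helpers are hypothetical; the block is not compiled.
 */
#if 0
static int example_enslave(struct net_device *upper_dev,
			   struct net_device *lower_dev)
{
	int err;

	ASSERT_RTNL();

	err = vlan_vids_add_by_dev(lower_dev, upper_dev);
	if (err)
		return err;	/* partial additions already unwound */

	/* ... rest of enslave ... */
	return 0;
}

static void example_release(struct net_device *upper_dev,
			    struct net_device *lower_dev)
{
	ASSERT_RTNL();
	vlan_vids_del_by_dev(lower_dev, upper_dev);
}
#endif
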
void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
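
/*
 * Illustrative only: a sketch of a driver refusing an operation while VLAN
 * devices are still stacked on top of it. The check must run under RTNL;
 * the surrounding function is hypothetical and not compiled.
 */
#if 0
static int example_prepare_teardown(struct net_device *dev)
{
	ASSERT_RTNL();

	if (vlan_uses_dev(dev))
		return -EBUSY;	/* VLANs still configured on this device */

	/* ... safe to proceed ... */
	return 0;
}
#endif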