/* net/8021q/vlan_core.c (xref revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac) */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

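/**
 * vlan_do_receive - deliver a VLAN-tagged skb to the matching VLAN device
 * @skbp: pointer to the skb; may be updated if the skb is copied or retagged
 *
 * Looks up the VLAN device configured on the receiving device for the tag
 * carried in the skb.  On a match, the skb is re-targeted at the VLAN
 * device, the tag is consumed (or reinserted into the payload when
 * REORDER_HDR is off) and per-cpu RX stats are updated.
 *
 * Returns true when the skb now belongs to a VLAN device.  Returns false
 * either when no VLAN device matches (the skb is left untouched) or when
 * an allocation fails (the skb has been freed and *skbp is NULL).
 */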
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = vlan_tx_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC address than
		 * the underlying device, and still route correctly. */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/* vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * move it back to the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
					      skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock held. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
					__be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/* Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves; the grp is assigned to the
		 * upper device instead, so recurse up the stack.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep(upper_dev,
						    vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

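/**
 * vlan_dev_real_dev - find the non-VLAN device a VLAN device sits on
 * @dev: the VLAN device
 *
 * Walks down through stacked VLAN devices (e.g. QinQ setups) and returns
 * the lowest underlying real device.
 */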
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

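/* Rebuild a plain Ethernet header in front of the payload: copy the
 * destination and source MAC addresses forward over the VLAN header that
 * the caller has already parsed out.  On copy-on-write failure the skb
 * is freed and NULL is returned.
 */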
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

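/**
 * vlan_untag - pull an in-band VLAN header out of the packet
 * @skb: the VLAN-tagged skb
 *
 * Moves the 802.1Q tag from the packet payload into skb->vlan_tci /
 * skb->vlan_proto, as if the NIC had stripped it in hardware, and
 * restores a plain Ethernet header in front of the payload.
 *
 * Returns the (possibly copied) untagged skb, or NULL if the skb had to
 * be dropped; in that case the skb has already been freed.
 */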
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	/* vlan_reorder_header() frees the skb itself on failure;
	 * kfree_skb(NULL) below is then a harmless no-op.
	 */
	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(vlan_untag);

/*
 * vlan info and vid list
 */

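/* Each real device that carries VLANs owns one struct vlan_info,
 * reachable via dev->vlan_info under RTNL or RCU.  It holds the
 * vlan_group used for RX demux and a list of refcounted vlan_vid_info
 * entries, one per (protocol, vid) pair in use, so that the hardware
 * VLAN filter is programmed exactly once per VID.
 */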
static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

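/* Can the device filter this (protocol, vid) pair in hardware?
 * CTAG (802.1Q) and STAG (802.1ad) filtering are advertised through
 * separate netdev feature flags.
 */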
static bool vlan_hw_filter_capable(const struct net_device *dev,
				   const struct vlan_vid_info *vid_info)
{
	if (vid_info->proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (vid_info->proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

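/**
 * vlan_vid_add - register interest in a (protocol, vid) pair on a device
 * @dev: the real device
 * @proto: VLAN protocol, e.g. htons(ETH_P_8021Q)
 * @vid: VLAN id
 *
 * Allocates dev->vlan_info on first use, programs the hardware filter
 * for new pairs, and takes a reference on pairs that are already
 * registered.  Must be called with RTNL held; balanced by vlan_vid_del().
 */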
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
		if (err) {
			/* %04x on a raw __be16 would print the wrong byte
			 * order on little-endian hosts, so convert first.
			 */
			pr_warn("failed to kill vid %04x/%d for device %s\n",
				ntohs(proto), vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

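/**
 * vlan_vid_del - drop a reference taken by vlan_vid_add()
 * @dev: the real device
 * @proto: VLAN protocol
 * @vid: VLAN id
 *
 * When the last reference on a (protocol, vid) pair goes away, the
 * hardware filter entry is removed; when no VIDs remain at all,
 * dev->vlan_info itself is torn down after an RCU grace period.
 */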
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

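/* Replay every (protocol, vid) pair registered on by_dev onto dev.
 * Used when stacking devices (e.g. a bond enslaving a port) so the
 * lower device filters the same VLANs as its upper.  On failure, all
 * pairs added so far are unwound and the error is returned.
 */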
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

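/* Reverse of vlan_vids_add_by_dev(): drop one reference on dev for
 * every (protocol, vid) pair registered on by_dev.
 */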
void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

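/**
 * vlan_uses_dev - does any VLAN device currently sit on top of @dev?
 * @dev: the device to check
 *
 * Must be called with RTNL held.
 */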
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs != 0;
}
EXPORT_SYMBOL(vlan_uses_dev);
407