/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef __DSA_TAG_H
#define __DSA_TAG_H

#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/types.h>
#include <net/dsa.h>

#include "port.h"
#include "user.h"

struct dsa_tag_driver {
	const struct dsa_device_ops *ops;
	struct list_head list;
	struct module *owner;
};

extern struct packet_type dsa_pack_type;

const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol);
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}

static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
						       int device, int port)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == device && dp->index == port &&
		    dp->type == DSA_PORT_TYPE_USER)
			return dp->user;

	return NULL;
}

/**
 * dsa_software_untag_vlan_aware_bridge: Software untagging for VLAN-aware bridge
 * @skb: Pointer to received socket buffer (packet)
 * @br: Pointer to bridge upper interface of ingress port
 * @vid: Parsed VID from packet
 *
 * The bridge can process tagged packets. Software like STP/PTP may not. The
 * bridge can also process untagged packets, to the same effect as if they were
 * tagged with the PVID of the ingress port. So packets tagged with the PVID of
 * the bridge port must be software-untagged, to support both use cases.
 */
static inline void dsa_software_untag_vlan_aware_bridge(struct sk_buff *skb,
							struct net_device *br,
							u16 vid)
{
	u16 pvid, proto;
	int err;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return;

	err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
	if (err)
		return;

	if (vid == pvid && skb->vlan_proto == htons(proto))
		__vlan_hwaccel_clear_tag(skb);
}

/**
 * dsa_software_untag_vlan_unaware_bridge: Software untagging for VLAN-unaware bridge
 * @skb: Pointer to received socket buffer (packet)
 * @br: Pointer to bridge upper interface of ingress port
 * @vid: Parsed VID from packet
 *
 * The bridge ignores all VLAN tags. Software like STP/PTP may not (it may run
 * on the plain port, or on a VLAN upper interface). Packets may reach software
 * tagged with a driver-defined VID which is NOT equal to the PVID of the
 * bridge port (since the bridge is VLAN-unaware, its configuration should NOT
 * be committed to hardware). DSA needs a way for the driver to communicate
 * this private VID to it, so that packets tagged with it can be
 * software-untagged. Note: the private VID may differ per bridge, to support
 * the FDB isolation use case.
 *
 * FIXME: this is currently implemented based on the broken assumption that
 * the "private VID" used by the driver in VLAN-unaware mode is equal to the
 * bridge PVID. It should not be, except by coincidence; the bridge PVID is
 * irrelevant to the data path in VLAN-unaware mode. Thus, the VID that this
 * function removes is wrong.
 *
 * All users of ds->untag_bridge_pvid should fix their drivers, if necessary,
 * to make the two independent. Only then, if there still remains a need to
 * strip the private VID from packets, should a new ds->ops->get_private_vid()
 * API be introduced to communicate to DSA what this VID is, so that it can be
 * stripped here.
 */
static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
							  struct net_device *br,
							  u16 vid)
{
	struct net_device *upper_dev;
	u16 pvid, proto;
	int err;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return;

	err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
	if (err)
		return;

	if (vid != pvid || skb->vlan_proto != htons(proto))
		return;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (!upper_dev)
		__vlan_hwaccel_clear_tag(skb);
}

/**
 * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
 * @skb: Pointer to socket buffer (packet)
 *
 * Receive path method for switches which cannot avoid tagging all packets
 * towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
 * ds->untag_vlan_aware_bridge_pvid is set to true.
 *
 * As a side effect of this method, any VLAN tag from the skb head is moved
 * to hwaccel.
 */
static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_user_to_port(skb->dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 vid;

	/* software untagging for standalone ports not yet necessary */
	if (!br)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	if (br_vlan_enabled(br)) {
		if (dp->ds->untag_vlan_aware_bridge_pvid)
			dsa_software_untag_vlan_aware_bridge(skb, br, vid);
	} else {
		if (dp->ds->untag_bridge_pvid)
			dsa_software_untag_vlan_unaware_bridge(skb, br, vid);
	}

	return skb;
}

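/* Illustrative sketch (not taken from any real tagger): a driver that sets
 * ds->untag_bridge_pvid or ds->untag_vlan_aware_bridge_pvid would typically
 * have its tagger call the helper above at the very end of its rcv hook, once
 * skb->dev already points at the user port:
 *
 *	static struct sk_buff *foo_tag_rcv(struct sk_buff *skb,
 *					   struct net_device *dev)
 *	{
 *		... strip the hardware tag, assign skb->dev ...
 *
 *		return dsa_software_vlan_untag(skb);
 *	}
 */
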
/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *conduit, u16 vid)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *user;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		if (!dp->bridge)
			continue;

		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		user = dp->user;

		err = br_vlan_get_info_rcu(user, vid, &vinfo);
		if (err)
			continue;

		return user;
	}

	return NULL;
}

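/* Illustrative sketch only: a hypothetical tagger for such a switch could fall
 * back to the VID of the packet's VLAN tag in order to pick a bridge port on
 * which to terminate the packet, e.g. in its rcv hook:
 *
 *	if (!skb_vlan_tag_present(skb))
 *		return NULL;
 *
 *	vid = skb_vlan_tag_get_id(skb);
 *	skb->dev = dsa_find_designated_bridge_port_by_vid(dev, vid);
 *	if (!skb->dev)
 *		return NULL;
 *
 * where "dev" is the DSA conduit on which the packet was received.
 */
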
/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
 * to us, because we already did. However, if we're in fallback mode and we do
 * software bridging, we are not offloading it, therefore the dp->bridge
 * pointer is not populated, and flooding needs to be done by software (we are
 * effectively operating in standalone ports mode).
 */
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_user_to_port(skb->dev);

	skb->offload_fwd_mark = !!(dp->bridge);
}

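/* Illustrative sketch only: in a tagger's rcv hook, this typically runs right
 * after the source port has been resolved from the hardware tag
 * (source_device and source_port below are placeholders for whatever the tag
 * encodes):
 *
 *	skb->dev = dsa_conduit_find_user(dev, source_device, source_port);
 *	if (!skb->dev)
 *		return NULL;
 *
 *	dsa_default_offload_fwd_mark(skb);
 */
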
/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 *                                                                 skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                                 |               |
 * <----- len ----->                               <----- len ----->
 *                 |
 *       >>>>>>>   v
 *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 *       >>>>>>>   +-----------------------+-----------------------+-------+
 *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *                 +-----------------------+-----------------------+-------+
 *                                                                         ^
 *                                                                         |
 *                                                                 skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}

/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}

/* On RX, eth_type_trans() on the DSA conduit pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA conduit perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}

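/* Illustrative RX sketch for a hypothetical 4-byte EtherType-based tag (the
 * length and decoding are made up, not those of any real tagger):
 *
 *	if (unlikely(!pskb_may_pull(skb, 4)))
 *		return NULL;
 *
 *	dsa_header = dsa_etype_header_pos_rx(skb);
 *	... decode the source device/port from dsa_header, set skb->dev ...
 *
 *	skb_pull_rcsum(skb, 4);
 *	dsa_strip_etype_header(skb, 4);
 */
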
/* On TX, skb->data points to the MAC header, which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	return skb->data + 2 * ETH_ALEN;
}

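/* Illustrative TX sketch, mirroring the RX one above (again assuming a made-up
 * 4-byte tag, with .needed_headroom = 4 in the dsa_device_ops so that the
 * headroom is already reserved):
 *
 *	skb_push(skb, 4);
 *	dsa_alloc_etype_header(skb, 4);
 *
 *	dsa_header = dsa_etype_header_pos_tx(skb);
 *	... write the 4-byte tag at dsa_header ...
 */
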
/* Create 2 modaliases per tagging protocol, one to auto-load the module
 * given the ID reported by get_tag_protocol(), and the other by name.
 */
#define DSA_TAG_DRIVER_ALIAS "dsa_tag:"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto, __name) \
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __name); \
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS "id-" \
		     __stringify(__proto##_VALUE))

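/* Illustrative expansion, for a made-up protocol: assuming DSA_TAG_PROTO_FOO
 * with DSA_TAG_PROTO_FOO_VALUE defined as 27 in <net/dsa.h>,
 * MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO, "foo") emits the two aliases
 * "dsa_tag:foo" and "dsa_tag:id-27".
 */
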
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count,
			      struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count);

#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count)	\
static int __init dsa_tag_driver_module_init(void)			\
{									\
	dsa_tag_drivers_register(__dsa_tag_drivers_array, __count,	\
				 THIS_MODULE);				\
	return 0;							\
}									\
module_init(dsa_tag_driver_module_init);				\
									\
static void __exit dsa_tag_driver_module_exit(void)			\
{									\
	dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count);	\
}									\
module_exit(dsa_tag_driver_module_exit)

/**
 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
 * drivers
 * @__ops_array: Array of pointers to tag driver structures
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_drivers(__ops_array)				\
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))

#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops

/* Create a static structure that we can use to build a linked list of dsa_tag
 * drivers
 */
#define DSA_TAG_DRIVER(__ops)						\
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = {		\
	.ops = &__ops,							\
}

/**
 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
 * driver
 * @__ops: Single tag driver structure
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_driver(__ops)					\
DSA_TAG_DRIVER(__ops);							\
									\
static struct dsa_tag_driver *dsa_tag_driver_array[] =	{		\
	&DSA_TAG_DRIVER_NAME(__ops)					\
};									\
module_dsa_tag_drivers(dsa_tag_driver_array)

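/* Illustrative sketch of how a tagger module would typically put the pieces
 * above together ("foo" is a made-up protocol and the callbacks are
 * placeholders):
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name		 = "foo",
 *		.proto		 = DSA_TAG_PROTO_FOO,
 *		.xmit		 = foo_tag_xmit,
 *		.rcv		 = foo_tag_rcv,
 *		.needed_headroom = 4,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO, "foo");
 *	MODULE_DESCRIPTION("DSA tag driver for foo switches");
 *	MODULE_LICENSE("GPL");
 *	module_dsa_tag_driver(foo_netdev_ops);
 */
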
#endif