// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
#include <net/dst_metadata.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

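/* Register an array of tag drivers, associating each one with the module
 * that owns it.
 */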
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

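/* Unregister an array of tag drivers previously registered with
 * dsa_tag_drivers_register().
 */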
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put must be called afterwards.
 */
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
{
	const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
	struct dsa_tag_driver *dsa_tag_driver;

	request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

		if (strcmp(name, tmp->name))
			continue;

		if (!try_module_get(dsa_tag_driver->owner))
			break;

		ops = tmp;
		break;
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

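/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put must be called afterwards.
 */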
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

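/* Drop the module reference taken by dsa_tag_driver_get_by_name() or
 * dsa_tag_driver_get_by_id().
 */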
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

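/* Match callback for device_find_child(): returns nonzero if the device
 * belongs to the device class named by "class".
 */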
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

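/* Resolve a struct device to the net_device it represents, taking a
 * reference on the net_device. Returns NULL if no "net" class device is
 * found.
 */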
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

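/* Receive handler for the ETH_P_XDSA packet type: use the tagger's rcv()
 * (or the hardware port muxing metadata) to map a frame received on the
 * DSA master to the slave interface it belongs to, then pass it up the
 * stack.
 */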
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
		unsigned int port = md_dst->u.port_info.port_id;

		skb_dst_drop(skb);
		if (!skb_has_extensions(skb))
			skb->slow_gro = 0;

		skb->dev = dsa_master_find_slave(dev, 0, port);
		if (likely(skb->dev)) {
			dsa_default_offload_fwd_mark(skb);
			nskb = skb;
		}
	} else {
		nskb = cpu_dp->rcv(skb, dev);
	}

	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);
		return 0;
	}

	p = netdev_priv(skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	dev_sw_netstats_rx_add(skb->dev, skb->len);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

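/* Ordered workqueue on which DSA defers work that cannot run in the calling
 * context; dsa_schedule_work() queues onto it and dsa_flush_workqueue()
 * drains it.
 */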
static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);

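/* Thin wrappers exposing the devlink API to DSA drivers, dispatching to the
 * switch's devlink instance and, for parameters, to the driver's
 * devlink_param_get/devlink_param_set operations.
 */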
int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size)
{
	return devlink_region_create(ds->devlink, ops, region_max_snapshots,
				     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	return devlink_port_region_create(&dp->devlink_port, ops,
					  region_max_snapshots,
					  region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
	devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

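/* Return the dsa_port behind a DSA slave net_device, or ERR_PTR(-ENODEV) if
 * the net_device is not a DSA slave.
 */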
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

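/* Compare two address databases (port, LAG or bridge) for equality. */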
bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

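/* Check whether an FDB entry with the same address and VID is tracked on
 * this port by a database of the same type other than "db". Caller must
 * hold the port's addr_lists_lock.
 */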
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

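/* Same check as dsa_fdb_present_in_other_db(), but for multicast (MDB)
 * entries on this port.
 */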
bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	return 0;

netlink_register_fail:
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	rtnl_link_unregister(&dsa_link_ops);
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");