// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/user.c - user device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>
#include <linux/string.h>

#include "conduit.h"
#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "switch.h"
#include "tag.h"
#include "user.h"

struct dsa_switchdev_event_work {
	struct net_device *dev;
	struct net_device *orig_dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

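/* Deferred work events for programming host addresses into the FDB/MDB of
 * standalone ports. The callers run in atomic context (e.g. with the netdev
 * address list lock held), so the actual hardware access is done from the
 * DSA workqueue.
 */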
enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

struct dsa_host_vlan_rx_filtering_ctx {
	struct net_device *dev;
	const unsigned char *addr;
	enum dsa_standalone_event event;
};

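/* Standalone unicast filtering is only safe when the switch can program host
 * FDB entries, keeps the FDBs of standalone ports isolated from those of
 * bridged ports, and has no quirk that ties VLAN filtering to global state.
 */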
static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static void dsa_user_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;

	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(standalone_work);
}

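/* Allocate and queue a work item which programs @addr at @vid into the host
 * FDB or MDB of @dev's switch. Uses GFP_ATOMIC because it is called from
 * atomic context (under the netdev address list lock).
 */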
static int dsa_user_schedule_standalone_work(struct net_device *dev,
					     enum dsa_standalone_event event,
					     const unsigned char *addr,
					     u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

static int dsa_user_host_vlan_rx_filtering(void *arg, int vid)
{
	struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;

	return dsa_user_schedule_standalone_work(ctx->dev, ctx->event,
						 ctx->addr, vid);
}

static int dsa_user_vlan_for_each(struct net_device *dev,
				  int (*cb)(void *arg, int vid), void *arg)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_vlan *v;
	int err;

	lockdep_assert_held(&dev->addr_list_lock);

	err = cb(arg, 0);
	if (err)
		return err;

	list_for_each_entry(v, &dp->user_vlans, list) {
		err = cb(arg, v->vid);
		if (err)
			return err;
	}

	return 0;
}

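/* Add a secondary unicast address to the conduit, and, if the switch filters
 * unicast in standalone mode, also to the host FDB for VID 0 and for each
 * VLAN upper's VID.
 */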
static int dsa_user_sync_uc(struct net_device *dev,
			    const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_UC_ADD,
	};

	dev_uc_add(conduit, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

static int dsa_user_unsync_uc(struct net_device *dev,
			      const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_UC_DEL,
	};

	dev_uc_del(conduit, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

static int dsa_user_sync_mc(struct net_device *dev,
			    const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_MC_ADD,
	};

	dev_mc_add(conduit, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

static int dsa_user_unsync_mc(struct net_device *dev,
			      const unsigned char *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_MC_DEL,
	};

	dev_mc_del(conduit, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
				      &ctx);
}

void dsa_user_sync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_user_sync_mc(dev, ha->addr);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_user_sync_uc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

void dsa_user_unsync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_user_unsync_uc(dev, ha->addr);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_user_unsync_mc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

/* user mii_bus handling ***************************************************/
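/* Accesses to PHY addresses outside ds->phys_mii_mask are not forwarded to
 * the switch driver: reads return 0xffff ("no PHY present") and writes are
 * silently discarded.
 */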
static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_user_mii_bus_init(struct dsa_switch *ds)
{
	ds->user_mii_bus->priv = (void *)ds;
	ds->user_mii_bus->name = "dsa user smi";
	ds->user_mii_bus->read = dsa_user_phy_read;
	ds->user_mii_bus->write = dsa_user_phy_write;
	snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->user_mii_bus->parent = ds->dev;
	ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
}

/* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
{
	return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}

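/* Install @addr as a host FDB entry (if the switch filters unicast in
 * standalone mode) and as a secondary unicast address on the conduit, unless
 * it already is the conduit's own MAC address.
 */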
int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(addr, conduit->dev_addr)) {
		err = dev_uc_add(conduit, addr);
		if (err < 0)
			goto del_host_addr;
	}

	return 0;

del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr, 0);
out:
	return err;
}

void dsa_user_host_uc_uninstall(struct net_device *dev)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
		dev_uc_del(conduit, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
}

static int dsa_user_open(struct net_device *dev)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	int err;

	err = dev_open(conduit, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open conduit %s\n", conduit->name);
		goto out;
	}

	err = dsa_user_host_uc_install(dev, dev->dev_addr);
	if (err)
		goto out;

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto out_del_host_uc;

	return 0;

out_del_host_uc:
	dsa_user_host_uc_uninstall(dev);
out:
	return err;
}

static int dsa_user_close(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	dsa_port_disable_rt(dp);

	dsa_user_host_uc_uninstall(dev);

	return 0;
}

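/* IFF_PROMISC requires flooding of both unknown unicast and unknown
 * multicast towards the CPU port, while IFF_ALLMULTI only requires unknown
 * multicast flooding.
 */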
static void dsa_user_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_user_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_user_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(conduit,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(conduit,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_user_manage_host_flood(dev);
}

static void dsa_user_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc);
	__dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc);
}

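/* For an interface which is up, install the new address before uninstalling
 * the old one, so that RX filtering on the conduit and in the host FDB is
 * never left without a valid entry for this port.
 */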
static int dsa_user_set_mac_address(struct net_device *dev, void *a)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ds->ops->port_set_mac_address) {
		err = ds->ops->port_set_mac_address(ds, dp->index,
						    addr->sa_data);
		if (err)
			return err;
	}

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA conduit, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	err = dsa_user_host_uc_install(dev, addr->sa_data);
	if (err)
		return err;

	dsa_user_host_uc_uninstall(dev);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;
}

struct dsa_user_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			  bool is_static, void *data)
{
	struct dsa_user_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		  struct net_device *dev, struct net_device *filter_dev,
		  int *idx)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_user_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
				     const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_user_vlan_add(struct net_device *dev,
			     const struct switchdev_obj *obj,
			     struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_user_host_vlan_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_user_vlan_add(dev, obj, extack);
		else
			err = dsa_user_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_user_vlan_del(struct net_device *dev,
			     const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_user_host_vlan_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_user_vlan_del(dev, obj);
		else
			err = dsa_user_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
					     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_user_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

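/* Give the switch driver a chance to act upon skbs for which hardware TX
 * timestamping was requested, before the tagger transmits them.
 */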
static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKB for netpoll still needs to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_user_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_user_to_conduit(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

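/* ndo_start_xmit: account the packet in software counters, handle TX
 * timestamping, ensure the skb is writable and padded, then let the tagging
 * protocol's xmit op add its switch-specific headers before handing the
 * result to the conduit.
 */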
static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (skb_ensure_writable_head_tail(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * skb_ensure_writable_head_tail(), which has also ensured that
	 * padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_user_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_user_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_user_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_user_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_user_get_eeprom(struct net_device *dev,
			       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_user_set_eeprom(struct net_device *dev,
			       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_user_get_strings(struct net_device *dev,
				 uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strscpy_pad(data, "tx_packets", len);
		strscpy_pad(data + len, "tx_bytes", len);
		strscpy_pad(data + 2 * len, "rx_packets", len);
		strscpy_pad(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

static void dsa_user_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       uint64_t *data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_user_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_user_get_eth_phy_stats(struct net_device *dev,
				       struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_user_get_eth_mac_stats(struct net_device *dev,
				       struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_user_get_eth_ctrl_stats(struct net_device *dev,
			    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_user_get_rmon_stats(struct net_device *dev,
			struct ethtool_rmon_stats *rmon_stats,
			const struct ethtool_rmon_hist_range **ranges)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_rmon_stats)
		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_user_net_selftest(struct net_device *ndev,
				  struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_user_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static int dsa_user_get_mm(struct net_device *dev,
			   struct ethtool_mm_state *state)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_mm)
		return -EOPNOTSUPP;

	return ds->ops->get_mm(ds, dp->index, state);
}

static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
			   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_mm)
		return -EOPNOTSUPP;

	return ds->ops->set_mm(ds, dp->index, cfg, extack);
}

static void dsa_user_get_mm_stats(struct net_device *dev,
				  struct ethtool_mm_stats *stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_mm_stats)
		ds->ops->get_mm_stats(ds, dp->index, stats);
}

static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_user_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_user_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_user_get_pause_stats(struct net_device *dev,
				     struct ethtool_pause_stats *pause_stats)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_pause_stats)
		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_user_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_user_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_user_netpoll_setup(struct net_device *dev,
				  struct netpoll_info *ni)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_user_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, conduit);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_user_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_user_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

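/* Offload a matchall classifier with a mirred action as hardware port
 * mirroring, where the mirror target must be another DSA user port.
 */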
static int
dsa_user_add_cls_matchall_mirred(struct net_device *dev,
				 struct tc_cls_matchall_offload *cls,
				 bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_user_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_user_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_user_add_cls_matchall_police(struct net_device *dev,
				 struct tc_cls_matchall_offload *cls,
				 bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_user_add_cls_matchall(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_user_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_user_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_user_del_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_user_setup_tc_cls_matchall(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_user_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_user_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_add_cls_flower(struct net_device *dev,
				   struct flow_cls_offload *cls,
				   bool ingress)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_user_del_cls_flower(struct net_device *dev,
				   struct flow_cls_offload *cls,
				   bool ingress)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_user_stats_cls_flower(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     bool ingress)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_user_setup_tc_cls_flower(struct net_device *dev,
					struct flow_cls_offload *cls,
					bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_user_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_user_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_user_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				      void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_user_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_user_block_cb_list);

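/* Bind a flow block to this port, using a separate callback for the ingress
 * and the egress qdisc binder type.
 */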
static int dsa_user_setup_tc_block(struct net_device *dev,
				   struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_user_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_user_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_user_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port,
				   void *type_data)
{
	struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port));

	if (!conduit->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data);
}

static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_user_setup_tc_block(dev, type_data);
	case TC_SETUP_FT:
		return dsa_user_setup_ft_block(ds, dp->index, type_data);
	default:
		break;
	}

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}

static int dsa_user_get_rxnfc(struct net_device *dev,
			      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_user_set_rxnfc(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_user_get_ts_info(struct net_device *dev,
				struct kernel_ethtool_ts_info *ts)
{
	struct dsa_user_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

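/* ndo_vlan_rx_add_vid, called when an 8021q upper is added to a port with
 * NETIF_F_HW_VLAN_CTAG_FILTER. Program the VID on the user port and on the
 * CPU port, then replicate the host addresses synced so far to the new VLAN.
 */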
static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				    u16 vid)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct netlink_ext_ack extack = {0};
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;
	struct dsa_vlan *v;
	int ret;

	/* User port... */
	ret = dsa_port_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "%s\n", extack._msg);
		return ret;
	}

	/* And CPU port... */
	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
	if (ret) {
		if (extack._msg)
			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
				   extack._msg);
		return ret;
	}

	if (!dsa_switch_supports_uc_filtering(ds) &&
	    !dsa_switch_supports_mc_filtering(ds))
		return 0;

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		ret = -ENOMEM;
		goto rollback;
	}

	netif_addr_lock_bh(dev);

	v->vid = vid;
	list_add_tail(&v->list, &dp->user_vlans);

	if (dsa_switch_supports_mc_filtering(ds)) {
		netdev_for_each_synced_mc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_MC_ADD,
							  ha->addr, vid);
		}
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		netdev_for_each_synced_uc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_UC_ADD,
							  ha->addr, vid);
		}
	}

	netif_addr_unlock_bh(dev);

	dsa_flush_workqueue();

	return 0;

rollback:
	dsa_port_host_vlan_del(dp, &vlan);
	dsa_port_vlan_del(dp, &vlan);

	return ret;
}

static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;
	struct dsa_vlan *v;
	int err;

	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	err = dsa_port_host_vlan_del(dp, &vlan);
	if (err)
		return err;

	if (!dsa_switch_supports_uc_filtering(ds) &&
	    !dsa_switch_supports_mc_filtering(ds))
		return 0;

	netif_addr_lock_bh(dev);

	v = dsa_vlan_find(&dp->user_vlans, &vlan);
	if (!v) {
		netif_addr_unlock_bh(dev);
		return -ENOENT;
	}

	list_del(&v->list);
	kfree(v);

	if (dsa_switch_supports_mc_filtering(ds)) {
		netdev_for_each_synced_mc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_MC_DEL,
							  ha->addr, vid);
		}
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		netdev_for_each_synced_uc_addr(ha, dev) {
			dsa_user_schedule_standalone_work(dev, DSA_UC_DEL,
							  ha->addr, vid);
		}
	}

	netif_addr_unlock_bh(dev);

	dsa_flush_workqueue();

	return 0;
}

static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_user_vlan_rx_add_vid(arg, proto, vid);
}

static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);

	return dsa_user_vlan_rx_kill_vid(arg, proto, vid);
}

1890 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1891 * filtering is enabled. The baseline is that only ports that offload a
1892 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1893 * but there are exceptions for quirky hardware.
1894 *
1895 * If ds->vlan_filtering_is_global = true, then standalone ports which share
1896 * the same switch with other ports that offload a VLAN-aware bridge are also
1897 * inevitably VLAN-aware.
1898 *
1899 * To summarize, a DSA switch port offloads:
1900 *
1901 * - If standalone (this includes software bridge, software LAG):
1902 * - if ds->needs_standalone_vlan_filtering = true, OR if
1903 * (ds->vlan_filtering_is_global = true AND there are bridges spanning
1904 * this switch chip which have vlan_filtering=1)
1905 * - the 8021q upper VLANs
1906 * - else (standalone VLAN filtering is not needed, VLAN filtering is not
1907 * global, or it is, but no port is under a VLAN-aware bridge):
1908 * - no VLAN (any 8021q upper is a software VLAN)
1909 *
1910 * - If under a vlan_filtering=0 bridge which it offload:
1911 * - if ds->configure_vlan_while_not_filtering = true (default):
1912 * - the bridge VLANs. These VLANs are committed to hardware but inactive.
1913 * - else (deprecated):
1914 * - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1915 * enabled, so this behavior is broken and discouraged.
1916 *
1917 * - If under a vlan_filtering=1 bridge which it offload:
1918 * - the bridge VLANs
1919 * - the 8021q upper VLANs
1920 */
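/* Editor's worked example (hypothetical setup): on a switch with
 * ds->vlan_filtering_is_global = true, creating an 8021q upper with
 * "ip link add link lan0 name lan0.100 type vlan id 100" while another
 * port of the same chip offloads a vlan_filtering=1 bridge causes VID 100
 * to be committed to hardware on lan0; on a chip where none of the above
 * exceptions apply, the same command results in a software VLAN only.
 */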
int dsa_user_manage_vlan_filtering(struct net_device *user,
				   bool vlan_filtering)
{
	int err;

	if (vlan_filtering) {
		user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

		err = vlan_for_each(user, dsa_user_restore_vlan, user);
		if (err) {
			vlan_for_each(user, dsa_user_clear_vlan, user);
			user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
			return err;
		}
	} else {
		err = vlan_for_each(user, dsa_user_clear_vlan, user);
		if (err)
			return err;

		user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	return 0;
}

struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}

/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *user;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (!dsa_port_bridge_same(dp, other_dp))
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			user = other_dp->user;

			if (min_mtu > user->mtu)
				min_mtu = user->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = user;
			hw_port->old_mtu = user->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
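/* Editor's worked example (hypothetical topology): with
 * mtu_enforcement_ingress set, if lan0 (MTU 1500) and lan1 (MTU 9000)
 * offload the same bridge and lan0 is raised to MTU 9000, the function
 * first tries to bring every bridge port to 9000; if any port rejects
 * that, all ports are instead aligned to the smallest MTU found, and any
 * port that already changed is rolled back by dsa_hw_port_list_set_mtu().
 */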

int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_port *cpu_dp = dp->cpu_dp;
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int largest_mtu = 0;
	int new_conduit_mtu;
	int old_conduit_mtu;
	int mtu_limit;
	int overhead;
	int cpu_mtu;
	int err;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_tree_for_each_user_port(other_dp, ds->dst) {
		int user_mtu;

		/* During probe, this function will be called for each user
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!other_dp->user)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (dp == other_dp)
			user_mtu = new_mtu;
		else
			user_mtu = other_dp->user->mtu;

		if (largest_mtu < user_mtu)
			largest_mtu = user_mtu;
	}

	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
	mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
	old_conduit_mtu = conduit->mtu;
	new_conduit_mtu = largest_mtu + overhead;
	if (new_conduit_mtu > mtu_limit)
		return -ERANGE;

	/* If the conduit MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_conduit_mtu != old_conduit_mtu) {
		err = dev_set_mtu(conduit, new_conduit_mtu);
		if (err < 0)
			goto out_conduit_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches, so emit a notifier which updates them.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
		if (err)
			goto out_cpu_failed;
	}

	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
	if (err)
		goto out_port_failed;

	WRITE_ONCE(dev->mtu, new_mtu);

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_conduit_mtu != old_conduit_mtu)
		dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead);
out_cpu_failed:
	if (new_conduit_mtu != old_conduit_mtu)
		dev_set_mtu(conduit, old_conduit_mtu);
out_conduit_failed:
	return err;
}
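/* Editor's worked example of the arithmetic above (numbers illustrative):
 * with a tagging protocol whose overhead is 8 bytes and a largest user
 * port MTU of 1500, the conduit must carry 1500 + 8 = 1508 byte frames,
 * so new_conduit_mtu = 1508; the request fails with -ERANGE if 1508
 * exceeds min(conduit->max_mtu, dev->max_mtu + 8).
 */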

static int __maybe_unused
dsa_user_dcbnl_set_apptrust(struct net_device *dev, u8 *sel, int nsel)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_set_apptrust)
		return -EOPNOTSUPP;

	return ds->ops->port_set_apptrust(ds, port, sel, nsel);
}

static int __maybe_unused
dsa_user_dcbnl_get_apptrust(struct net_device *dev, u8 *sel, int *nsel)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_get_apptrust)
		return -EOPNOTSUPP;

	return ds->ops->port_get_apptrust(ds, port, sel, nsel);
}

static int __maybe_unused
dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}
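/* Editor's usage note: this path is reached from user space via the
 * iproute2 "dcb" tool, e.g. "dcb app add dev lan0 default-prio 3"
 * (interface name illustrative). When several default-prio entries
 * exist, the __fls() above picks the highest configured priority, in
 * line with dcbnl APP table semantics.
 */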

/* Update the DSCP prio entries on all user ports of the switch in case
 * the switch supports global DSCP prio instead of per port DSCP prios.
 */
static int dsa_user_dcbnl_ieee_global_dscp_setdel(struct net_device *dev,
						  struct dcb_app *app, bool del)
{
	int (*setdel)(struct net_device *dev, struct dcb_app *app);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	int err, restore_err;

	if (del)
		setdel = dcb_ieee_delapp;
	else
		setdel = dcb_ieee_setapp;

	dsa_switch_for_each_user_port(other_dp, ds) {
		struct net_device *user = other_dp->user;

		if (!user || user == dev)
			continue;

		err = setdel(user, app);
		if (err)
			goto err_try_to_restore;
	}

	return 0;

err_try_to_restore:

	/* Revert logic to restore previous state of app entries */
	if (!del)
		setdel = dcb_ieee_delapp;
	else
		setdel = dcb_ieee_setapp;

	dsa_switch_for_each_user_port_continue_reverse(other_dp, ds) {
		struct net_device *user = other_dp->user;

		if (!user || user == dev)
			continue;

		restore_err = setdel(user, app);
		if (restore_err)
			netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
	}

	return err;
}

static int __maybe_unused
dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_add_dscp_prio)
		return -EOPNOTSUPP;

	if (dscp >= 64) {
		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
			   dscp);
		return -EINVAL;
	}

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = __fls(mask);

	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
	if (err) {
		dcb_ieee_delapp(dev, app);
		return err;
	}

	if (!ds->dscp_prio_mapping_is_global)
		return 0;

	err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, false);
	if (err) {
		if (ds->ops->port_del_dscp_prio)
			ds->ops->port_del_dscp_prio(ds, port, dscp, new_prio);
		dcb_ieee_delapp(dev, app);
		return err;
	}

	return 0;
}
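/* Editor's usage note: a DSCP APP entry is typically installed with e.g.
 * "dcb app add dev lan0 dscp-prio 24:4" (interface name illustrative),
 * which maps DSCP 24 to priority 4 and reaches
 * dsa_user_dcbnl_add_dscp_prio() through the ieee_setapp dcbnl hook
 * below.
 */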

static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev,
						     struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_user_dcbnl_set_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_user_dcbnl_add_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

static int __maybe_unused
dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned long mask, new_prio;
	int err, port = dp->index;

	if (!ds->ops->port_set_default_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	mask = dcb_ieee_getapp_mask(dev, app);
	new_prio = mask ? __fls(mask) : 0;

	err = ds->ops->port_set_default_prio(ds, port, new_prio);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused
dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err, port = dp->index;
	u8 dscp = app->protocol;

	if (!ds->ops->port_del_dscp_prio)
		return -EOPNOTSUPP;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
	if (err) {
		dcb_ieee_setapp(dev, app);
		return err;
	}

	if (!ds->dscp_prio_mapping_is_global)
		return 0;

	err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, true);
	if (err) {
		if (ds->ops->port_add_dscp_prio)
			ds->ops->port_add_dscp_prio(ds, port, dscp,
						    app->priority);
		dcb_ieee_setapp(dev, app);
		return err;
	}

	return 0;
}

static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev,
						     struct dcb_app *app)
{
	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		switch (app->protocol) {
		case 0:
			return dsa_user_dcbnl_del_default_prio(dev, app);
		default:
			return -EOPNOTSUPP;
		}
		break;
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return dsa_user_dcbnl_del_dscp_prio(dev, app);
	default:
		return -EOPNOTSUPP;
	}
}

/* Pre-populate the DCB application priority table with the priorities
 * configured during switch setup, which we read from hardware here.
 */
static int dsa_user_dcbnl_init(struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_get_default_prio) {
		int prio = ds->ops->port_get_default_prio(ds, port);
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			.protocol = 0,
			.priority = prio,
		};

		if (prio < 0)
			return prio;

		err = dcb_ieee_setapp(dev, &app);
		if (err)
			return err;
	}

	if (ds->ops->port_get_dscp_prio) {
		int protocol;

		for (protocol = 0; protocol < 64; protocol++) {
			struct dcb_app app = {
				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
				.protocol = protocol,
			};
			int prio;

			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
			if (prio == -EOPNOTSUPP)
				continue;
			if (prio < 0)
				return prio;

			app.priority = prio;

			err = dcb_ieee_setapp(dev, &app);
			if (err)
				return err;
		}
	}

	return 0;
}
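/* Editor's usage note: the table populated above can be inspected from
 * user space with "dcb app show dev lan0" (interface name illustrative),
 * which lists the default-prio and dscp-prio entries read back from
 * hardware when the user port was created.
 */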

static const struct ethtool_ops dsa_user_ethtool_ops = {
	.get_drvinfo = dsa_user_get_drvinfo,
	.get_regs_len = dsa_user_get_regs_len,
	.get_regs = dsa_user_get_regs,
	.nway_reset = dsa_user_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = dsa_user_get_eeprom_len,
	.get_eeprom = dsa_user_get_eeprom,
	.set_eeprom = dsa_user_set_eeprom,
	.get_strings = dsa_user_get_strings,
	.get_ethtool_stats = dsa_user_get_ethtool_stats,
	.get_sset_count = dsa_user_get_sset_count,
	.get_eth_phy_stats = dsa_user_get_eth_phy_stats,
	.get_eth_mac_stats = dsa_user_get_eth_mac_stats,
	.get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
	.get_rmon_stats = dsa_user_get_rmon_stats,
	.set_wol = dsa_user_set_wol,
	.get_wol = dsa_user_get_wol,
	.set_eee = dsa_user_set_eee,
	.get_eee = dsa_user_get_eee,
	.get_link_ksettings = dsa_user_get_link_ksettings,
	.set_link_ksettings = dsa_user_set_link_ksettings,
	.get_pause_stats = dsa_user_get_pause_stats,
	.get_pauseparam = dsa_user_get_pauseparam,
	.set_pauseparam = dsa_user_set_pauseparam,
	.get_rxnfc = dsa_user_get_rxnfc,
	.set_rxnfc = dsa_user_set_rxnfc,
	.get_ts_info = dsa_user_get_ts_info,
	.self_test = dsa_user_net_selftest,
	.get_mm = dsa_user_get_mm,
	.set_mm = dsa_user_set_mm,
	.get_mm_stats = dsa_user_get_mm_stats,
};

static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
	.ieee_setapp = dsa_user_dcbnl_ieee_setapp,
	.ieee_delapp = dsa_user_dcbnl_ieee_delapp,
	.dcbnl_setapptrust = dsa_user_dcbnl_set_apptrust,
	.dcbnl_getapptrust = dsa_user_dcbnl_get_apptrust,
};

static void dsa_user_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *s)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_stats64)
		ds->ops->get_stats64(ds, dp->index, s);
	else
		dev_get_tstats64(dev, s);
}

static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
				      struct net_device_path *path)
{
	struct dsa_port *dp = dsa_user_to_port(ctx->dev);
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_port *cpu_dp = dp->cpu_dp;

	path->dev = ctx->dev;
	path->type = DEV_PATH_DSA;
	path->dsa.proto = cpu_dp->tag_ops->proto;
	path->dsa.port = dp->index;
	ctx->dev = conduit;

	return 0;
}
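/* Editor's note: ndo_fill_forward_path is walked by
 * dev_fill_forward_path() (e.g. from the netfilter flowtable offload
 * code) to resolve the real transmit device; the hook above records the
 * DSA hop (tagging protocol and port index) and continues the resolution
 * at the conduit.
 */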

static const struct net_device_ops dsa_user_netdev_ops = {
	.ndo_open = dsa_user_open,
	.ndo_stop = dsa_user_close,
	.ndo_start_xmit = dsa_user_xmit,
	.ndo_change_rx_flags = dsa_user_change_rx_flags,
	.ndo_set_rx_mode = dsa_user_set_rx_mode,
	.ndo_set_mac_address = dsa_user_set_mac_address,
	.ndo_fdb_dump = dsa_user_fdb_dump,
	.ndo_eth_ioctl = dsa_user_ioctl,
	.ndo_get_iflink = dsa_user_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = dsa_user_netpoll_setup,
	.ndo_netpoll_cleanup = dsa_user_netpoll_cleanup,
	.ndo_poll_controller = dsa_user_poll_controller,
#endif
	.ndo_setup_tc = dsa_user_setup_tc,
	.ndo_get_stats64 = dsa_user_get_stats64,
	.ndo_vlan_rx_add_vid = dsa_user_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid,
	.ndo_change_mtu = dsa_user_change_mtu,
	.ndo_fill_forward_path = dsa_user_fill_forward_path,
};

static const struct device_type dsa_type = {
	.name = "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_user_phylink_fixed_state(struct phylink_config *config,
					 struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* user device setup *******************************************************/
static int dsa_user_phy_connect(struct net_device *user_dev, int addr,
				u32 flags)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);
	struct dsa_switch *ds = dp->ds;

	user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr);
	if (!user_dev->phydev) {
		netdev_err(user_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	user_dev->phydev->dev_flags |= flags;

	return phylink_connect_phy(dp->pl, user_dev->phydev);
}

static int dsa_user_phy_setup(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int ret;

	dp->pl_config.dev = &user_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	ret = dsa_port_phylink_create(dp);
	if (ret)
		return ret;

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->user_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags);
	}
	if (ret) {
		netdev_err(user_dev, "failed to connect to PHY: %pe\n",
			   ERR_PTR(ret));
		dsa_port_phylink_destroy(dp);
	}

	return ret;
}

void dsa_user_setup_tagger(struct net_device *user)
{
	struct dsa_port *dp = dsa_user_to_port(user);
	struct net_device *conduit = dsa_port_to_conduit(dp);
	struct dsa_user_priv *p = netdev_priv(user);
	const struct dsa_port *cpu_dp = dp->cpu_dp;
	const struct dsa_switch *ds = dp->ds;

	user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
	user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
	/* Try to save one extra realloc later in the TX path (in the conduit)
	 * by also inheriting the conduit's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	user->needed_headroom += conduit->needed_headroom;
	user->needed_tailroom += conduit->needed_tailroom;

	p->xmit = cpu_dp->tag_ops->xmit;

	user->features = conduit->vlan_features | NETIF_F_HW_TC;
	user->hw_features |= NETIF_F_HW_TC;
	if (user->needed_tailroom)
		user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
	if (ds->needs_standalone_vlan_filtering)
		user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	user->lltx = true;
}
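/* Editor's worked example (illustrative numbers): with a tagger that
 * needs 8 bytes of headroom and a conduit that itself declares 2 bytes,
 * the user port advertises needed_headroom = 10, so well-behaved callers
 * allocate enough skb headroom up front for both the switch tag and the
 * conduit, avoiding a reallocation in the TX hot path.
 */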

int dsa_user_suspend(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);

	if (!netif_running(user_dev))
		return 0;

	netif_device_detach(user_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_user_resume(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);

	if (!netif_running(user_dev))
		return 0;

	netif_device_attach(user_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_user_create(struct dsa_port *port)
{
	struct net_device *conduit = dsa_port_to_conduit(port);
	struct dsa_switch *ds = port->ds;
	struct net_device *user_dev;
	struct dsa_user_priv *p;
	const char *name;
	int assign_type;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	if (port->name) {
		name = port->name;
		assign_type = NET_NAME_PREDICTABLE;
	} else {
		name = "eth%d";
		assign_type = NET_NAME_ENUM;
	}

	user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name,
				    assign_type, ether_setup,
				    ds->num_tx_queues, 1);
	if (user_dev == NULL)
		return -ENOMEM;

	user_dev->rtnl_link_ops = &dsa_link_ops;
	user_dev->ethtool_ops = &dsa_user_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
	user_dev->dcbnl_ops = &dsa_user_dcbnl_ops;
#endif
	if (!is_zero_ether_addr(port->mac))
		eth_hw_addr_set(user_dev, port->mac);
	else
		eth_hw_addr_inherit(user_dev, conduit);
	user_dev->priv_flags |= IFF_NO_QUEUE;
	if (dsa_switch_supports_uc_filtering(ds))
		user_dev->priv_flags |= IFF_UNICAST_FLT;
	user_dev->netdev_ops = &dsa_user_netdev_ops;
	if (ds->ops->port_max_mtu)
		user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	SET_NETDEV_DEVTYPE(user_dev, &dsa_type);

	SET_NETDEV_DEV(user_dev, port->ds->dev);
	SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port);
	user_dev->dev.of_node = port->dn;
	user_dev->vlan_features = conduit->vlan_features;

	p = netdev_priv(user_dev);
	user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	ret = gro_cells_init(&p->gcells, user_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	port->user = user_dev;
	dsa_user_setup_tagger(user_dev);

	netif_carrier_off(user_dev);

	ret = dsa_user_phy_setup(user_dev);
	if (ret) {
		netdev_err(user_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	rtnl_lock();

	ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN);
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	ret = register_netdevice(user_dev);
	if (ret) {
		netdev_err(conduit, "error %d registering interface %s\n",
			   ret, user_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	if (IS_ENABLED(CONFIG_DCB)) {
		ret = dsa_user_dcbnl_init(user_dev);
		if (ret) {
			netdev_err(user_dev,
				   "failed to initialize DCB: %pe\n",
				   ERR_PTR(ret));
			rtnl_unlock();
			goto out_unregister;
		}
	}

	ret = netdev_upper_dev_link(conduit, user_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(user_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	dsa_port_phylink_destroy(p->dp);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_netdev(user_dev);
	port->user = NULL;
	return ret;
}

void dsa_user_destroy(struct net_device *user_dev)
{
	struct net_device *conduit = dsa_user_to_conduit(user_dev);
	struct dsa_port *dp = dsa_user_to_port(user_dev);
	struct dsa_user_priv *p = netdev_priv(user_dev);

	netif_carrier_off(user_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(conduit, user_dev);
	unregister_netdevice(user_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_port_phylink_destroy(dp);
	gro_cells_destroy(&p->gcells);
	free_netdev(user_dev);
}

int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
			    struct netlink_ext_ack *extack)
{
	struct net_device *old_conduit = dsa_user_to_conduit(dev);
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct net_device *upper;
	struct list_head *iter;
	int err;

	if (conduit == old_conduit)
		return 0;

	if (!ds->ops->port_change_conduit) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support changing DSA conduit");
		return -EOPNOTSUPP;
	}

	if (!netdev_uses_dsa(conduit)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Interface not eligible as DSA conduit");
		return -EOPNOTSUPP;
	}

	netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
		if (dsa_user_dev_check(upper))
			continue;
		if (netif_is_bridge_master(upper))
			continue;
		NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers");
		return -EOPNOTSUPP;
	}

	/* Since we allow live-changing the DSA conduit, and we auto-open the
	 * DSA conduit when the user port opens, we need to ensure that the
	 * new DSA conduit is open too.
	 */
	if (dev->flags & IFF_UP) {
		err = dev_open(conduit, extack);
		if (err)
			return err;
	}

	netdev_upper_dev_unlink(old_conduit, dev);

	err = netdev_upper_dev_link(conduit, dev, extack);
	if (err)
		goto out_revert_old_conduit_unlink;

	err = dsa_port_change_conduit(dp, conduit, extack);
	if (err)
		goto out_revert_conduit_link;

	/* Update the MTU of the new CPU port through cross-chip notifiers */
	err = dsa_user_change_mtu(dev, dev->mtu);
	if (err && err != -EOPNOTSUPP) {
		netdev_warn(dev,
			    "nonfatal error updating MTU with new conduit: %pe\n",
			    ERR_PTR(err));
	}

	return 0;

out_revert_conduit_link:
	netdev_upper_dev_unlink(conduit, dev);
out_revert_old_conduit_unlink:
	netdev_upper_dev_link(old_conduit, dev, NULL);
	return err;
}

bool dsa_user_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_user_netdev_ops;
}
EXPORT_SYMBOL_GPL(dsa_user_dev_check);

static int dsa_user_changeupper(struct net_device *dev,
				struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!dsa_user_dev_check(dev))
		return err;

	dp = dsa_user_to_port(dev);
	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_lag_join(dp, info->upper_dev,
						info->upper_info, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_lag_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	} else if (is_hsr_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_hsr_join(dp, info->upper_dev, extack);
			if (err == -EOPNOTSUPP) {
				NL_SET_ERR_MSG_WEAK_MOD(extack,
							"Offloading not supported");
				err = 0;
			}
			err = notifier_from_errno(err);
		} else {
			dsa_port_hsr_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_user_prechangeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp;

	if (!dsa_user_dev_check(dev))
		return NOTIFY_DONE;

	dp = dsa_user_to_port(dev);

	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
		dsa_port_pre_bridge_leave(dp, info->upper_dev);
	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
		dsa_port_pre_lag_leave(dp, info->upper_dev);
	/* dsa_port_pre_hsr_leave is not yet necessary since hsr devices
	 * cannot yet be meaningfully placed under a bridge
	 */

	return NOTIFY_DONE;
}

static int
dsa_user_lag_changeupper(struct net_device *dev,
			 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!netif_is_lag_master(dev))
		return err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_user_dev_check(lower))
			continue;

		dp = dsa_user_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_user_changeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

/* Same as dsa_user_lag_changeupper() except that it calls
 * dsa_user_prechangeupper()
 */
static int
dsa_user_lag_prechangeupper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct net_device *lower;
	struct list_head *iter;
	int err = NOTIFY_DONE;
	struct dsa_port *dp;

	if (!netif_is_lag_master(dev))
		return err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!dsa_user_dev_check(lower))
			continue;

		dp = dsa_user_to_port(lower);
		if (!dp->lag)
			/* Software LAG */
			continue;

		err = dsa_user_prechangeupper(lower, info);
		if (notifier_to_errno(err))
			break;
	}

	return err;
}

static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *user, *br;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	user = vlan_dev_real_dev(dev);
	if (!dsa_user_dev_check(user))
		return NOTIFY_DONE;

	dp = dsa_user_to_port(user);
	br = dsa_port_bridge_dev_get(dp);
	if (!br)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(br) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot make VLAN device join VLAN-aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

static int
dsa_user_check_8021q_upper(struct net_device *dev,
			   struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_user_to_port(dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device,
	 * respectively the VID, is not found. Returning 0 means the VID is
	 * already configured on the bridge, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}

static int
dsa_user_prechangeupper_sanity_check(struct net_device *dev,
				     struct netdev_notifier_changeupper_info *info)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	if (!dsa_user_dev_check(dev))
		return dsa_prevent_bridging_8021q_upper(dev, info);

	dp = dsa_user_to_port(dev);
	ds = dp->ds;

	if (ds->ops->port_prechangeupper) {
		err = ds->ops->port_prechangeupper(ds, dp->index, info);
		if (err)
			return notifier_from_errno(err);
	}

	if (is_vlan_dev(info->upper_dev))
		return dsa_user_check_8021q_upper(dev, info);

	return NOTIFY_DONE;
}

/* To be eligible as a DSA conduit, a LAG must have all lower interfaces be
 * eligible DSA conduits. Additionally, all LAG slaves must be DSA conduits
 * of switches in the same switch tree.
 */
static int dsa_lag_conduit_validate(struct net_device *lag_dev,
				    struct netlink_ext_ack *extack)
{
	struct net_device *lower1, *lower2;
	struct list_head *iter1, *iter2;

	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
			if (!netdev_uses_dsa(lower1) ||
			    !netdev_uses_dsa(lower2)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "All LAG ports must be eligible as DSA conduits");
				return notifier_from_errno(-EINVAL);
			}

			if (lower1 == lower2)
				continue;

			if (!dsa_port_tree_same(lower1->dsa_ptr,
						lower2->dsa_ptr)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "LAG contains DSA conduits of disjoint switch trees");
				return notifier_from_errno(-EINVAL);
			}
		}
	}

	return NOTIFY_DONE;
}

static int
dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit,
					struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);

	if (!netdev_uses_dsa(conduit))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	/* Allow DSA switch uppers */
	if (dsa_user_dev_check(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow bridge uppers of DSA conduits, subject to further
	 * restrictions in dsa_bridge_prechangelower_sanity_check()
	 */
	if (netif_is_bridge_master(info->upper_dev))
		return NOTIFY_DONE;

	/* Allow LAG uppers, subject to further restrictions in
	 * dsa_lag_conduit_prechangelower_sanity_check()
	 */
	if (netif_is_lag_master(info->upper_dev))
		return dsa_lag_conduit_validate(info->upper_dev, extack);

	NL_SET_ERR_MSG_MOD(extack,
			   "DSA conduit cannot join unknown upper interfaces");
	return notifier_from_errno(-EBUSY);
}

static int
dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev,
					    struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
	struct net_device *lag_dev = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	if (!netdev_uses_dsa(dev)) {
		NL_SET_ERR_MSG(extack,
			       "Only DSA conduits can join a LAG DSA conduit");
		return notifier_from_errno(-EINVAL);
	}

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
			NL_SET_ERR_MSG(extack,
				       "Interface is DSA conduit for a different switch tree than this LAG");
			return notifier_from_errno(-EINVAL);
		}

		break;
	}

	return NOTIFY_DONE;
}

/* Don't allow bridging of DSA conduits, since the bridge layer rx_handler
 * prevents the DSA fake ethertype handler from being invoked, so we don't
 * get the chance to strip off and parse the DSA switch tag protocol header
 * (the bridge layer just returns RX_HANDLER_CONSUMED, stopping RX processing
 * for these frames).
 * The only case where that would not be an issue is when bridging can
 * already be offloaded, such as when the DSA conduit is itself a DSA or
 * plain switchdev port, and is bridged only with other ports from the same
 * hardware device.
 */
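/* Editor's illustration (hypothetical device names): with eth0 acting as
 * a DSA conduit, "ip link set eth0 master br0" is rejected by the check
 * below unless every other br0 port shares eth0's parent device, since
 * software bridging of the conduit would consume tagged frames before
 * DSA could parse them.
 */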
static int
dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
				       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *br = info->upper_dev;
	struct netlink_ext_ack *extack;
	struct net_device *lower;
	struct list_head *iter;

	if (!netif_is_bridge_master(br))
		return NOTIFY_DONE;

	if (!info->linking)
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	netdev_for_each_lower_dev(br, lower, iter) {
		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
			continue;

		if (!netdev_port_same_parent_id(lower, new_lower)) {
			NL_SET_ERR_MSG(extack,
				       "Cannot do software bridging with a DSA conduit");
			return notifier_from_errno(-EINVAL);
		}
	}

	return NOTIFY_DONE;
}

static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst,
						    struct net_device *lag_dev)
{
	struct net_device *new_conduit = dsa_tree_find_first_conduit(dst);
	struct dsa_port *dp;
	int err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp) != lag_dev)
			continue;

		err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
		if (err) {
			netdev_err(dp->user,
				   "failed to restore conduit to %s: %pe\n",
				   new_conduit->name, ERR_PTR(err));
		}
	}
}

static int dsa_conduit_lag_join(struct net_device *conduit,
				struct net_device *lag_dev,
				struct netdev_lag_upper_info *uinfo,
				struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;
	int err;

	err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack);
	if (err)
		return err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp) != conduit)
			continue;

		err = dsa_user_change_conduit(dp->user, lag_dev, extack);
		if (err)
			goto restore;
	}

	return 0;

restore:
	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
		if (dsa_port_to_conduit(dp) != lag_dev)
			continue;

		err = dsa_user_change_conduit(dp->user, conduit, NULL);
		if (err) {
			netdev_err(dp->user,
				   "failed to restore conduit to %s: %pe\n",
				   conduit->name, ERR_PTR(err));
		}
	}

	dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);

	return err;
}

static void dsa_conduit_lag_leave(struct net_device *conduit,
				  struct net_device *lag_dev)
{
	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *new_cpu_dp = NULL;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netdev_uses_dsa(lower)) {
			new_cpu_dp = lower->dsa_ptr;
			break;
		}
	}

	if (new_cpu_dp) {
		/* Update the CPU port of the user ports still under the LAG
		 * so that dsa_port_to_conduit() continues to work properly
		 */
		dsa_tree_for_each_user_port(dp, dst)
			if (dsa_port_to_conduit(dp) == lag_dev)
				dp->cpu_dp = new_cpu_dp;

		/* Update the index of the virtual CPU port to match the lowest
		 * physical CPU port
		 */
		lag_dev->dsa_ptr = new_cpu_dp;
		wmb();
	} else {
		/* If the LAG DSA conduit has no ports left, migrate back all
		 * user ports to the first physical CPU port
		 */
		dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev);
	}

	/* This DSA conduit has left its LAG in any case, so let
	 * the CPU port leave the hardware LAG as well
	 */
	dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
}

static int dsa_conduit_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;

	if (!netdev_uses_dsa(dev))
		return err;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_lag_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_conduit_lag_join(dev, info->upper_dev,
						   info->upper_info, extack);
			err = notifier_from_errno(err);
		} else {
			dsa_conduit_lag_leave(dev, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_user_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		int err;

		err = dsa_user_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_conduit_prechangeupper_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_lag_conduit_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_bridge_prechangelower_sanity_check(dev, info);
		if (notifier_to_errno(err))
			return err;

		err = dsa_user_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_user_lag_prechangeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGEUPPER: {
		int err;

		err = dsa_user_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_user_lag_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		err = dsa_conduit_changeupper(dev, ptr);
		if (notifier_to_errno(err))
			return err;

		break;
	}
	case NETDEV_CHANGELOWERSTATE: {
		struct netdev_notifier_changelowerstate_info *info = ptr;
		struct dsa_port *dp;
		int err = 0;

		if (dsa_user_dev_check(dev)) {
			dp = dsa_user_to_port(dev);

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		/* Mirror LAG port events on DSA conduits that are in
		 * a LAG towards their respective switch CPU ports
		 */
		if (netdev_uses_dsa(dev)) {
			dp = dev->dsa_ptr;

			err = dsa_port_lag_change(dp, info->lower_state_info);
		}

		return notifier_from_errno(err);
	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track the state of the conduit port.
		 * A DSA driver may require the conduit port (and indirectly
		 * the tagger) to be available for some special operation.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the conduit port is UP */
			dsa_tree_conduit_oper_state_change(dst, dev,
							   netif_oper_up(dev));

			/* Track when the conduit port is ready and can accept
			 * packets.
			 * A NETDEV_UP event is not enough to flag a port as
			 * ready. We also have to wait for linkwatch_do_dev to
			 * dev_activate and emit a NETDEV_CHANGE event.
			 * We check whether a conduit port is ready by checking
			 * that the dev has a qdisc assigned and that it is not
			 * the noop qdisc.
			 */
			dsa_tree_conduit_admin_state_change(dst, dev,
							    !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_conduit_admin_state_change(dst, dev, false);

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			if (dp->cpu_dp != cpu_dp)
				continue;

			list_add(&dp->user->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 switchdev_work->orig_dev, &info.info, NULL);
}

static void dsa_user_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	const unsigned char *addr = switchdev_work->addr;
	struct net_device *dev = switchdev_work->dev;
	u16 vid = switchdev_work->vid;
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_user_to_port(dev);
	ds = dp->ds;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_add(dp, addr, vid);
		else
			err = dsa_port_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_del(dp, addr, vid);
		else
			err = dsa_port_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(switchdev_work);
}

static bool dsa_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	const struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch_tree *dst = dp->ds->dst;

	if (netif_is_bridge_master(foreign_dev))
		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);

	if (netif_is_bridge_port(foreign_dev))
		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);

	/* Everything else is foreign */
	return true;
}

static int dsa_user_fdb_event(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_user_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}
3689
/* Called under rcu_read_lock() */
static int dsa_user_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_user_dev_check,
							   dsa_foreign_dev_check,
							   dsa_user_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

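/* Process-context counterpart of dsa_user_switchdev_event(): port object
 * add/del (and attribute set, which is handled on both chains) may sleep,
 * so they go through the blocking notifier.
 */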
static int dsa_user_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

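/* dsa_user_nb is only used locally, while the two switchdev notifier blocks
 * have external linkage because the rest of DSA also uses them, e.g. to
 * replay switchdev events on bridge join.
 */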
static struct notifier_block dsa_user_nb __read_mostly = {
	.notifier_call	= dsa_user_netdevice_event,
};

struct notifier_block dsa_user_switchdev_notifier = {
	.notifier_call = dsa_user_switchdev_event,
};

struct notifier_block dsa_user_switchdev_blocking_notifier = {
	.notifier_call = dsa_user_switchdev_blocking_event,
};

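/* Registers the netdevice notifier plus the atomic and blocking switchdev
 * notifiers, unwinding in reverse order on failure.
 */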
int dsa_user_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_user_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_user_nb);
	return err;
}

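/* Mirrors dsa_user_register_notifier() in reverse; errors can only be
 * logged since the function returns void.
 */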
void dsa_user_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_user_nb);
	if (err)
		pr_err("DSA: failed to unregister user notifier (%d)\n", err);
}