1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/dsa/user.c - user device handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 */
6
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/selftests.h>
19 #include <net/tc_act/tc_mirred.h>
20 #include <linux/if_bridge.h>
21 #include <linux/if_hsr.h>
22 #include <net/dcbnl.h>
23 #include <linux/netpoll.h>
24 #include <linux/string.h>
25
26 #include "conduit.h"
27 #include "dsa.h"
28 #include "netlink.h"
29 #include "port.h"
30 #include "switch.h"
31 #include "tag.h"
32 #include "user.h"
33
34 struct dsa_switchdev_event_work {
35 struct net_device *dev;
36 struct net_device *orig_dev;
37 struct work_struct work;
38 unsigned long event;
39 /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
40 * SWITCHDEV_FDB_DEL_TO_DEVICE
41 */
42 unsigned char addr[ETH_ALEN];
43 u16 vid;
44 bool host_addr;
45 };
46
47 enum dsa_standalone_event {
48 DSA_UC_ADD,
49 DSA_UC_DEL,
50 DSA_MC_ADD,
51 DSA_MC_DEL,
52 };
53
54 struct dsa_standalone_event_work {
55 struct work_struct work;
56 struct net_device *dev;
57 enum dsa_standalone_event event;
58 unsigned char addr[ETH_ALEN];
59 u16 vid;
60 };
61
62 struct dsa_host_vlan_rx_filtering_ctx {
63 struct net_device *dev;
64 const unsigned char *addr;
65 enum dsa_standalone_event event;
66 };
67
static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
69 {
70 return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
71 ds->fdb_isolation && !ds->vlan_filtering_is_global &&
72 !ds->needs_standalone_vlan_filtering;
73 }
74
static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
76 {
77 return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
78 ds->fdb_isolation && !ds->vlan_filtering_is_global &&
79 !ds->needs_standalone_vlan_filtering;
80 }
81
static void dsa_user_standalone_event_work(struct work_struct *work)
83 {
84 struct dsa_standalone_event_work *standalone_work =
85 container_of(work, struct dsa_standalone_event_work, work);
86 const unsigned char *addr = standalone_work->addr;
87 struct net_device *dev = standalone_work->dev;
88 struct dsa_port *dp = dsa_user_to_port(dev);
89 struct switchdev_obj_port_mdb mdb;
90 struct dsa_switch *ds = dp->ds;
91 u16 vid = standalone_work->vid;
92 int err;
93
94 switch (standalone_work->event) {
95 case DSA_UC_ADD:
96 err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
97 if (err) {
98 dev_err(ds->dev,
99 "port %d failed to add %pM vid %d to fdb: %d\n",
100 dp->index, addr, vid, err);
101 break;
102 }
103 break;
104
105 case DSA_UC_DEL:
106 err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
107 if (err) {
108 dev_err(ds->dev,
109 "port %d failed to delete %pM vid %d from fdb: %d\n",
110 dp->index, addr, vid, err);
111 }
112
113 break;
114 case DSA_MC_ADD:
115 ether_addr_copy(mdb.addr, addr);
116 mdb.vid = vid;
117
118 err = dsa_port_standalone_host_mdb_add(dp, &mdb);
119 if (err) {
120 dev_err(ds->dev,
121 "port %d failed to add %pM vid %d to mdb: %d\n",
122 dp->index, addr, vid, err);
123 break;
124 }
125 break;
126 case DSA_MC_DEL:
127 ether_addr_copy(mdb.addr, addr);
128 mdb.vid = vid;
129
130 err = dsa_port_standalone_host_mdb_del(dp, &mdb);
131 if (err) {
132 dev_err(ds->dev,
133 "port %d failed to delete %pM vid %d from mdb: %d\n",
134 dp->index, addr, vid, err);
135 }
136
137 break;
138 }
139
140 kfree(standalone_work);
141 }
142
static int dsa_user_schedule_standalone_work(struct net_device *dev,
144 enum dsa_standalone_event event,
145 const unsigned char *addr,
146 u16 vid)
147 {
148 struct dsa_standalone_event_work *standalone_work;
149
150 standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
151 if (!standalone_work)
152 return -ENOMEM;
153
154 INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
155 standalone_work->event = event;
156 standalone_work->dev = dev;
157
158 ether_addr_copy(standalone_work->addr, addr);
159 standalone_work->vid = vid;
160
161 dsa_schedule_work(&standalone_work->work);
162
163 return 0;
164 }
165
static int dsa_user_host_vlan_rx_filtering(void *arg, int vid)
167 {
168 struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
169
170 return dsa_user_schedule_standalone_work(ctx->dev, ctx->event,
171 ctx->addr, vid);
172 }
173
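/* Call @cb for VID 0 and for each VLAN currently uploaded to the port's RX
 * filter (dp->user_vlans). Caller must hold dev->addr_list_lock.
 */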
static int dsa_user_vlan_for_each(struct net_device *dev,
175 int (*cb)(void *arg, int vid), void *arg)
176 {
177 struct dsa_port *dp = dsa_user_to_port(dev);
178 struct dsa_vlan *v;
179 int err;
180
181 lockdep_assert_held(&dev->addr_list_lock);
182
183 err = cb(arg, 0);
184 if (err)
185 return err;
186
187 list_for_each_entry(v, &dp->user_vlans, list) {
188 err = cb(arg, v->vid);
189 if (err)
190 return err;
191 }
192
193 return 0;
194 }
195
static int dsa_user_sync_uc(struct net_device *dev,
197 const unsigned char *addr)
198 {
199 struct net_device *conduit = dsa_user_to_conduit(dev);
200 struct dsa_port *dp = dsa_user_to_port(dev);
201 struct dsa_host_vlan_rx_filtering_ctx ctx = {
202 .dev = dev,
203 .addr = addr,
204 .event = DSA_UC_ADD,
205 };
206
207 dev_uc_add(conduit, addr);
208
209 if (!dsa_switch_supports_uc_filtering(dp->ds))
210 return 0;
211
212 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
213 &ctx);
214 }
215
static int dsa_user_unsync_uc(struct net_device *dev,
217 const unsigned char *addr)
218 {
219 struct net_device *conduit = dsa_user_to_conduit(dev);
220 struct dsa_port *dp = dsa_user_to_port(dev);
221 struct dsa_host_vlan_rx_filtering_ctx ctx = {
222 .dev = dev,
223 .addr = addr,
224 .event = DSA_UC_DEL,
225 };
226
227 dev_uc_del(conduit, addr);
228
229 if (!dsa_switch_supports_uc_filtering(dp->ds))
230 return 0;
231
232 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
233 &ctx);
234 }
235
static int dsa_user_sync_mc(struct net_device *dev,
237 const unsigned char *addr)
238 {
239 struct net_device *conduit = dsa_user_to_conduit(dev);
240 struct dsa_port *dp = dsa_user_to_port(dev);
241 struct dsa_host_vlan_rx_filtering_ctx ctx = {
242 .dev = dev,
243 .addr = addr,
244 .event = DSA_MC_ADD,
245 };
246
247 dev_mc_add(conduit, addr);
248
249 if (!dsa_switch_supports_mc_filtering(dp->ds))
250 return 0;
251
252 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
253 &ctx);
254 }
255
static int dsa_user_unsync_mc(struct net_device *dev,
257 const unsigned char *addr)
258 {
259 struct net_device *conduit = dsa_user_to_conduit(dev);
260 struct dsa_port *dp = dsa_user_to_port(dev);
261 struct dsa_host_vlan_rx_filtering_ctx ctx = {
262 .dev = dev,
263 .addr = addr,
264 .event = DSA_MC_DEL,
265 };
266
267 dev_mc_del(conduit, addr);
268
269 if (!dsa_switch_supports_mc_filtering(dp->ds))
270 return 0;
271
272 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
273 &ctx);
274 }
275
void dsa_user_sync_ha(struct net_device *dev)
277 {
278 struct dsa_port *dp = dsa_user_to_port(dev);
279 struct dsa_switch *ds = dp->ds;
280 struct netdev_hw_addr *ha;
281
282 netif_addr_lock_bh(dev);
283
284 netdev_for_each_synced_mc_addr(ha, dev)
285 dsa_user_sync_mc(dev, ha->addr);
286
287 netdev_for_each_synced_uc_addr(ha, dev)
288 dsa_user_sync_uc(dev, ha->addr);
289
290 netif_addr_unlock_bh(dev);
291
292 if (dsa_switch_supports_uc_filtering(ds) ||
293 dsa_switch_supports_mc_filtering(ds))
294 dsa_flush_workqueue();
295 }
296
void dsa_user_unsync_ha(struct net_device *dev)
298 {
299 struct dsa_port *dp = dsa_user_to_port(dev);
300 struct dsa_switch *ds = dp->ds;
301 struct netdev_hw_addr *ha;
302
303 netif_addr_lock_bh(dev);
304
305 netdev_for_each_synced_uc_addr(ha, dev)
306 dsa_user_unsync_uc(dev, ha->addr);
307
308 netdev_for_each_synced_mc_addr(ha, dev)
309 dsa_user_unsync_mc(dev, ha->addr);
310
311 netif_addr_unlock_bh(dev);
312
313 if (dsa_switch_supports_uc_filtering(ds) ||
314 dsa_switch_supports_mc_filtering(ds))
315 dsa_flush_workqueue();
316 }
317
318 /* user mii_bus handling ***************************************************/
static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg)
320 {
321 struct dsa_switch *ds = bus->priv;
322
323 if (ds->phys_mii_mask & (1 << addr))
324 return ds->ops->phy_read(ds, addr, reg);
325
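/* Behave as if no PHY responded at this address */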
326 return 0xffff;
327 }
328
static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
330 {
331 struct dsa_switch *ds = bus->priv;
332
333 if (ds->phys_mii_mask & (1 << addr))
334 return ds->ops->phy_write(ds, addr, reg, val);
335
336 return 0;
337 }
338
void dsa_user_mii_bus_init(struct dsa_switch *ds)
340 {
341 ds->user_mii_bus->priv = (void *)ds;
342 ds->user_mii_bus->name = "dsa user smi";
343 ds->user_mii_bus->read = dsa_user_phy_read;
344 ds->user_mii_bus->write = dsa_user_phy_write;
345 snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
346 ds->dst->index, ds->index);
347 ds->user_mii_bus->parent = ds->dev;
348 ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
349 }
350
351
352 /* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
354 {
355 return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
356 }
357
int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr)
359 {
360 struct net_device *conduit = dsa_user_to_conduit(dev);
361 struct dsa_port *dp = dsa_user_to_port(dev);
362 struct dsa_switch *ds = dp->ds;
363 int err;
364
365 if (dsa_switch_supports_uc_filtering(ds)) {
366 err = dsa_port_standalone_host_fdb_add(dp, addr, 0);
367 if (err)
368 goto out;
369 }
370
371 if (!ether_addr_equal(addr, conduit->dev_addr)) {
372 err = dev_uc_add(conduit, addr);
373 if (err < 0)
374 goto del_host_addr;
375 }
376
377 return 0;
378
379 del_host_addr:
380 if (dsa_switch_supports_uc_filtering(ds))
381 dsa_port_standalone_host_fdb_del(dp, addr, 0);
382 out:
383 return err;
384 }
385
void dsa_user_host_uc_uninstall(struct net_device *dev)
387 {
388 struct net_device *conduit = dsa_user_to_conduit(dev);
389 struct dsa_port *dp = dsa_user_to_port(dev);
390 struct dsa_switch *ds = dp->ds;
391
392 if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
393 dev_uc_del(conduit, dev->dev_addr);
394
395 if (dsa_switch_supports_uc_filtering(ds))
396 dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
397 }
398
static int dsa_user_open(struct net_device *dev)
400 {
401 struct net_device *conduit = dsa_user_to_conduit(dev);
402 struct dsa_port *dp = dsa_user_to_port(dev);
403 int err;
404
405 err = dev_open(conduit, NULL);
406 if (err < 0) {
407 netdev_err(dev, "failed to open conduit %s\n", conduit->name);
408 goto out;
409 }
410
411 err = dsa_user_host_uc_install(dev, dev->dev_addr);
412 if (err)
413 goto out;
414
415 err = dsa_port_enable_rt(dp, dev->phydev);
416 if (err)
417 goto out_del_host_uc;
418
419 return 0;
420
421 out_del_host_uc:
422 dsa_user_host_uc_uninstall(dev);
423 out:
424 return err;
425 }
426
static int dsa_user_close(struct net_device *dev)
428 {
429 struct dsa_port *dp = dsa_user_to_port(dev);
430
431 dsa_port_disable_rt(dp);
432
433 dsa_user_host_uc_uninstall(dev);
434
435 return 0;
436 }
437
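/* IFF_PROMISC requires flooding of both unknown unicast and unknown multicast
 * towards the CPU port, while IFF_ALLMULTI only requires unknown multicast.
 */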
static void dsa_user_manage_host_flood(struct net_device *dev)
439 {
440 bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
441 struct dsa_port *dp = dsa_user_to_port(dev);
442 bool uc = dev->flags & IFF_PROMISC;
443
444 dsa_port_set_host_flood(dp, uc, mc);
445 }
446
static void dsa_user_change_rx_flags(struct net_device *dev, int change)
448 {
449 struct net_device *conduit = dsa_user_to_conduit(dev);
450 struct dsa_port *dp = dsa_user_to_port(dev);
451 struct dsa_switch *ds = dp->ds;
452
453 if (change & IFF_ALLMULTI)
454 dev_set_allmulti(conduit,
455 dev->flags & IFF_ALLMULTI ? 1 : -1);
456 if (change & IFF_PROMISC)
457 dev_set_promiscuity(conduit,
458 dev->flags & IFF_PROMISC ? 1 : -1);
459
460 if (dsa_switch_supports_uc_filtering(ds) &&
461 dsa_switch_supports_mc_filtering(ds))
462 dsa_user_manage_host_flood(dev);
463 }
464
static void dsa_user_set_rx_mode(struct net_device *dev)
466 {
467 __dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc);
468 __dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc);
469 }
470
static int dsa_user_set_mac_address(struct net_device *dev, void *a)
472 {
473 struct dsa_port *dp = dsa_user_to_port(dev);
474 struct dsa_switch *ds = dp->ds;
475 struct sockaddr *addr = a;
476 int err;
477
478 if (!is_valid_ether_addr(addr->sa_data))
479 return -EADDRNOTAVAIL;
480
481 if (ds->ops->port_set_mac_address) {
482 err = ds->ops->port_set_mac_address(ds, dp->index,
483 addr->sa_data);
484 if (err)
485 return err;
486 }
487
488 /* If the port is down, the address isn't synced yet to hardware or
489 * to the DSA conduit, so there is nothing to change.
490 */
491 if (!(dev->flags & IFF_UP))
492 goto out_change_dev_addr;
493
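/* Install the new address before removing the old one, so the old address
 * keeps working if installing the new one fails.
 */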
494 err = dsa_user_host_uc_install(dev, addr->sa_data);
495 if (err)
496 return err;
497
498 dsa_user_host_uc_uninstall(dev);
499
500 out_change_dev_addr:
501 eth_hw_addr_set(dev, addr->sa_data);
502
503 return 0;
504 }
505
506 struct dsa_user_dump_ctx {
507 struct net_device *dev;
508 struct sk_buff *skb;
509 struct netlink_callback *cb;
510 int idx;
511 };
512
513 static int
dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
515 bool is_static, void *data)
516 {
517 struct dsa_user_dump_ctx *dump = data;
518 struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
519 u32 portid = NETLINK_CB(dump->cb->skb).portid;
520 u32 seq = dump->cb->nlh->nlmsg_seq;
521 struct nlmsghdr *nlh;
522 struct ndmsg *ndm;
523
524 if (dump->idx < ctx->fdb_idx)
525 goto skip;
526
527 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
528 sizeof(*ndm), NLM_F_MULTI);
529 if (!nlh)
530 return -EMSGSIZE;
531
532 ndm = nlmsg_data(nlh);
533 ndm->ndm_family = AF_BRIDGE;
534 ndm->ndm_pad1 = 0;
535 ndm->ndm_pad2 = 0;
536 ndm->ndm_flags = NTF_SELF;
537 ndm->ndm_type = 0;
538 ndm->ndm_ifindex = dump->dev->ifindex;
539 ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
540
541 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
542 goto nla_put_failure;
543
544 if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
545 goto nla_put_failure;
546
547 nlmsg_end(dump->skb, nlh);
548
549 skip:
550 dump->idx++;
551 return 0;
552
553 nla_put_failure:
554 nlmsg_cancel(dump->skb, nlh);
555 return -EMSGSIZE;
556 }
557
558 static int
dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
560 struct net_device *dev, struct net_device *filter_dev,
561 int *idx)
562 {
563 struct dsa_port *dp = dsa_user_to_port(dev);
564 struct dsa_user_dump_ctx dump = {
565 .dev = dev,
566 .skb = skb,
567 .cb = cb,
568 .idx = *idx,
569 };
570 int err;
571
572 err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump);
573 *idx = dump.idx;
574
575 return err;
576 }
577
static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
579 {
580 struct dsa_user_priv *p = netdev_priv(dev);
581
582 return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
583 }
584
static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx,
586 const struct switchdev_attr *attr,
587 struct netlink_ext_ack *extack)
588 {
589 struct dsa_port *dp = dsa_user_to_port(dev);
590 int ret;
591
592 if (ctx && ctx != dp)
593 return 0;
594
595 switch (attr->id) {
596 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
597 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
598 return -EOPNOTSUPP;
599
600 ret = dsa_port_set_state(dp, attr->u.stp_state, true);
601 break;
602 case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
603 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
604 return -EOPNOTSUPP;
605
606 ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
607 break;
608 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
609 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
610 return -EOPNOTSUPP;
611
612 ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
613 extack);
614 break;
615 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
616 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
617 return -EOPNOTSUPP;
618
619 ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
620 break;
621 case SWITCHDEV_ATTR_ID_BRIDGE_MST:
622 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
623 return -EOPNOTSUPP;
624
625 ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
626 break;
627 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
628 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
629 return -EOPNOTSUPP;
630
631 ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
632 extack);
633 break;
634 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
635 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
636 return -EOPNOTSUPP;
637
638 ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
639 break;
640 case SWITCHDEV_ATTR_ID_VLAN_MSTI:
641 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
642 return -EOPNOTSUPP;
643
644 ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
645 break;
646 default:
647 ret = -EOPNOTSUPP;
648 break;
649 }
650
651 return ret;
652 }
653
654 /* Must be called under rcu_read_lock() */
655 static int
dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
657 const struct switchdev_obj_port_vlan *vlan)
658 {
659 struct net_device *upper_dev;
660 struct list_head *iter;
661
662 netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
663 u16 vid;
664
665 if (!is_vlan_dev(upper_dev))
666 continue;
667
668 vid = vlan_dev_vlan_id(upper_dev);
669 if (vid == vlan->vid)
670 return -EBUSY;
671 }
672
673 return 0;
674 }
675
static int dsa_user_vlan_add(struct net_device *dev,
677 const struct switchdev_obj *obj,
678 struct netlink_ext_ack *extack)
679 {
680 struct dsa_port *dp = dsa_user_to_port(dev);
681 struct switchdev_obj_port_vlan *vlan;
682 int err;
683
684 if (dsa_port_skip_vlan_configuration(dp)) {
685 NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
686 return 0;
687 }
688
689 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
690
691 /* Deny adding a bridge VLAN when there is already an 802.1Q upper with
692 * the same VID.
693 */
694 if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
695 rcu_read_lock();
696 err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan);
697 rcu_read_unlock();
698 if (err) {
699 NL_SET_ERR_MSG_MOD(extack,
700 "Port already has a VLAN upper with this VID");
701 return err;
702 }
703 }
704
705 return dsa_port_vlan_add(dp, vlan, extack);
706 }
707
708 /* Offload a VLAN installed on the bridge or on a foreign interface by
709 * installing it as a VLAN towards the CPU port.
710 */
static int dsa_user_host_vlan_add(struct net_device *dev,
712 const struct switchdev_obj *obj,
713 struct netlink_ext_ack *extack)
714 {
715 struct dsa_port *dp = dsa_user_to_port(dev);
716 struct switchdev_obj_port_vlan vlan;
717
718 /* Do nothing if this is a software bridge */
719 if (!dp->bridge)
720 return -EOPNOTSUPP;
721
722 if (dsa_port_skip_vlan_configuration(dp)) {
723 NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
724 return 0;
725 }
726
727 vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
728
729 /* Even though drivers often handle CPU membership in special ways,
730 * it doesn't make sense to program a PVID, so clear this flag.
731 */
732 vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
733
734 return dsa_port_host_vlan_add(dp, &vlan, extack);
735 }
736
static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx,
738 const struct switchdev_obj *obj,
739 struct netlink_ext_ack *extack)
740 {
741 struct dsa_port *dp = dsa_user_to_port(dev);
742 int err;
743
744 if (ctx && ctx != dp)
745 return 0;
746
747 switch (obj->id) {
748 case SWITCHDEV_OBJ_ID_PORT_MDB:
749 if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
750 return -EOPNOTSUPP;
751
752 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
753 break;
754 case SWITCHDEV_OBJ_ID_HOST_MDB:
755 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
756 return -EOPNOTSUPP;
757
758 err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
759 break;
760 case SWITCHDEV_OBJ_ID_PORT_VLAN:
761 if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
762 err = dsa_user_vlan_add(dev, obj, extack);
763 else
764 err = dsa_user_host_vlan_add(dev, obj, extack);
765 break;
766 case SWITCHDEV_OBJ_ID_MRP:
767 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
768 return -EOPNOTSUPP;
769
770 err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
771 break;
772 case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
773 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
774 return -EOPNOTSUPP;
775
776 err = dsa_port_mrp_add_ring_role(dp,
777 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
778 break;
779 default:
780 err = -EOPNOTSUPP;
781 break;
782 }
783
784 return err;
785 }
786
static int dsa_user_vlan_del(struct net_device *dev,
788 const struct switchdev_obj *obj)
789 {
790 struct dsa_port *dp = dsa_user_to_port(dev);
791 struct switchdev_obj_port_vlan *vlan;
792
793 if (dsa_port_skip_vlan_configuration(dp))
794 return 0;
795
796 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
797
798 return dsa_port_vlan_del(dp, vlan);
799 }
800
static int dsa_user_host_vlan_del(struct net_device *dev,
802 const struct switchdev_obj *obj)
803 {
804 struct dsa_port *dp = dsa_user_to_port(dev);
805 struct switchdev_obj_port_vlan *vlan;
806
807 /* Do nothing if this is a software bridge */
808 if (!dp->bridge)
809 return -EOPNOTSUPP;
810
811 if (dsa_port_skip_vlan_configuration(dp))
812 return 0;
813
814 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
815
816 return dsa_port_host_vlan_del(dp, vlan);
817 }
818
static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
820 const struct switchdev_obj *obj)
821 {
822 struct dsa_port *dp = dsa_user_to_port(dev);
823 int err;
824
825 if (ctx && ctx != dp)
826 return 0;
827
828 switch (obj->id) {
829 case SWITCHDEV_OBJ_ID_PORT_MDB:
830 if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
831 return -EOPNOTSUPP;
832
833 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
834 break;
835 case SWITCHDEV_OBJ_ID_HOST_MDB:
836 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
837 return -EOPNOTSUPP;
838
839 err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
840 break;
841 case SWITCHDEV_OBJ_ID_PORT_VLAN:
842 if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
843 err = dsa_user_vlan_del(dev, obj);
844 else
845 err = dsa_user_host_vlan_del(dev, obj);
846 break;
847 case SWITCHDEV_OBJ_ID_MRP:
848 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
849 return -EOPNOTSUPP;
850
851 err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
852 break;
853 case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
854 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
855 return -EOPNOTSUPP;
856
857 err = dsa_port_mrp_del_ring_role(dp,
858 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
859 break;
860 default:
861 err = -EOPNOTSUPP;
862 break;
863 }
864
865 return err;
866 }
867
static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
869 struct sk_buff *skb)
870 {
871 #ifdef CONFIG_NET_POLL_CONTROLLER
872 struct dsa_user_priv *p = netdev_priv(dev);
873
874 return netpoll_send_skb(p->netpoll, skb);
875 #else
876 BUG();
877 return NETDEV_TX_OK;
878 #endif
879 }
880
static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
882 struct sk_buff *skb)
883 {
884 struct dsa_switch *ds = p->dp->ds;
885
886 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NOBPF))
887 return;
888
889 if (!ds->ops->port_txtstamp)
890 return;
891
892 ds->ops->port_txtstamp(ds, p->dp->index, skb);
893 }
894
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
896 {
/* An SKB sent through netpoll still needs to be mangled with the
 * protocol-specific tag to be successfully transmitted
 */
900 if (unlikely(netpoll_tx_running(dev)))
901 return dsa_user_netpoll_send_skb(dev, skb);
902
903 /* Queue the SKB for transmission on the parent interface, but
904 * do not modify its EtherType
905 */
906 skb->dev = dsa_user_to_conduit(dev);
907 dev_queue_xmit(skb);
908
909 return NETDEV_TX_OK;
910 }
911 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
912
static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
914 {
915 struct dsa_user_priv *p = netdev_priv(dev);
916 struct sk_buff *nskb;
917
918 dev_sw_netstats_tx_add(dev, 1, skb->len);
919
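/* The TX timestamping code keeps per-packet state in skb->cb, so start
 * from a clean control block.
 */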
920 memset(skb->cb, 0, sizeof(skb->cb));
921
922 /* Handle tx timestamp if any */
923 dsa_skb_tx_timestamp(p, skb);
924
925 if (skb_ensure_writable_head_tail(skb, dev)) {
926 dev_kfree_skb_any(skb);
927 return NETDEV_TX_OK;
928 }
929
930 /* needed_tailroom should still be 'warm' in the cache line from
931 * skb_ensure_writable_head_tail(), which has also ensured that
932 * padding is safe.
933 */
934 if (dev->needed_tailroom)
935 eth_skb_pad(skb);
936
937 /* Transmit function may have to reallocate the original SKB,
938 * in which case it must have freed it. Only free it here on error.
939 */
940 nskb = p->xmit(skb, dev);
941 if (!nskb) {
942 kfree_skb(skb);
943 return NETDEV_TX_OK;
944 }
945
946 return dsa_enqueue_skb(nskb, dev);
947 }
948
949 /* ethtool operations *******************************************************/
950
static void dsa_user_get_drvinfo(struct net_device *dev,
952 struct ethtool_drvinfo *drvinfo)
953 {
954 strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
955 strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
956 strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
957 }
958
static int dsa_user_get_regs_len(struct net_device *dev)
960 {
961 struct dsa_port *dp = dsa_user_to_port(dev);
962 struct dsa_switch *ds = dp->ds;
963
964 if (ds->ops->get_regs_len)
965 return ds->ops->get_regs_len(ds, dp->index);
966
967 return -EOPNOTSUPP;
968 }
969
970 static void
dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
972 {
973 struct dsa_port *dp = dsa_user_to_port(dev);
974 struct dsa_switch *ds = dp->ds;
975
976 if (ds->ops->get_regs)
977 ds->ops->get_regs(ds, dp->index, regs, _p);
978 }
979
static int dsa_user_nway_reset(struct net_device *dev)
981 {
982 struct dsa_port *dp = dsa_user_to_port(dev);
983
984 return phylink_ethtool_nway_reset(dp->pl);
985 }
986
static int dsa_user_get_eeprom_len(struct net_device *dev)
988 {
989 struct dsa_port *dp = dsa_user_to_port(dev);
990 struct dsa_switch *ds = dp->ds;
991
992 if (ds->cd && ds->cd->eeprom_len)
993 return ds->cd->eeprom_len;
994
995 if (ds->ops->get_eeprom_len)
996 return ds->ops->get_eeprom_len(ds);
997
998 return 0;
999 }
1000
static int dsa_user_get_eeprom(struct net_device *dev,
1002 struct ethtool_eeprom *eeprom, u8 *data)
1003 {
1004 struct dsa_port *dp = dsa_user_to_port(dev);
1005 struct dsa_switch *ds = dp->ds;
1006
1007 if (ds->ops->get_eeprom)
1008 return ds->ops->get_eeprom(ds, eeprom, data);
1009
1010 return -EOPNOTSUPP;
1011 }
1012
static int dsa_user_set_eeprom(struct net_device *dev,
1014 struct ethtool_eeprom *eeprom, u8 *data)
1015 {
1016 struct dsa_port *dp = dsa_user_to_port(dev);
1017 struct dsa_switch *ds = dp->ds;
1018
1019 if (ds->ops->set_eeprom)
1020 return ds->ops->set_eeprom(ds, eeprom, data);
1021
1022 return -EOPNOTSUPP;
1023 }
1024
static void dsa_user_get_strings(struct net_device *dev,
1026 uint32_t stringset, uint8_t *data)
1027 {
1028 struct dsa_port *dp = dsa_user_to_port(dev);
1029 struct dsa_switch *ds = dp->ds;
1030
1031 if (stringset == ETH_SS_STATS) {
1032 ethtool_puts(&data, "tx_packets");
1033 ethtool_puts(&data, "tx_bytes");
1034 ethtool_puts(&data, "rx_packets");
1035 ethtool_puts(&data, "rx_bytes");
1036 if (ds->ops->get_strings)
1037 ds->ops->get_strings(ds, dp->index, stringset, data);
1038 } else if (stringset == ETH_SS_TEST) {
1039 net_selftest_get_strings(data);
1040 }
1041
1042 }
1043
static void dsa_user_get_ethtool_stats(struct net_device *dev,
1045 struct ethtool_stats *stats,
1046 uint64_t *data)
1047 {
1048 struct dsa_port *dp = dsa_user_to_port(dev);
1049 struct dsa_switch *ds = dp->ds;
1050 struct pcpu_sw_netstats *s;
1051 unsigned int start;
1052 int i;
1053
1054 for_each_possible_cpu(i) {
1055 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
1056
1057 s = per_cpu_ptr(dev->tstats, i);
1058 do {
1059 start = u64_stats_fetch_begin(&s->syncp);
1060 tx_packets = u64_stats_read(&s->tx_packets);
1061 tx_bytes = u64_stats_read(&s->tx_bytes);
1062 rx_packets = u64_stats_read(&s->rx_packets);
1063 rx_bytes = u64_stats_read(&s->rx_bytes);
1064 } while (u64_stats_fetch_retry(&s->syncp, start));
1065 data[0] += tx_packets;
1066 data[1] += tx_bytes;
1067 data[2] += rx_packets;
1068 data[3] += rx_bytes;
1069 }
1070 if (ds->ops->get_ethtool_stats)
1071 ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
1072 }
1073
static int dsa_user_get_sset_count(struct net_device *dev, int sset)
1075 {
1076 struct dsa_port *dp = dsa_user_to_port(dev);
1077 struct dsa_switch *ds = dp->ds;
1078
1079 if (sset == ETH_SS_STATS) {
1080 int count = 0;
1081
1082 if (ds->ops->get_sset_count) {
1083 count = ds->ops->get_sset_count(ds, dp->index, sset);
1084 if (count < 0)
1085 return count;
1086 }
1087
1088 return count + 4;
1089 } else if (sset == ETH_SS_TEST) {
1090 return net_selftest_get_count();
1091 }
1092
1093 return -EOPNOTSUPP;
1094 }
1095
static void dsa_user_get_eth_phy_stats(struct net_device *dev,
1097 struct ethtool_eth_phy_stats *phy_stats)
1098 {
1099 struct dsa_port *dp = dsa_user_to_port(dev);
1100 struct dsa_switch *ds = dp->ds;
1101
1102 if (ds->ops->get_eth_phy_stats)
1103 ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
1104 }
1105
static void dsa_user_get_eth_mac_stats(struct net_device *dev,
1107 struct ethtool_eth_mac_stats *mac_stats)
1108 {
1109 struct dsa_port *dp = dsa_user_to_port(dev);
1110 struct dsa_switch *ds = dp->ds;
1111
1112 if (ds->ops->get_eth_mac_stats)
1113 ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
1114 }
1115
1116 static void
dsa_user_get_eth_ctrl_stats(struct net_device *dev,
1118 struct ethtool_eth_ctrl_stats *ctrl_stats)
1119 {
1120 struct dsa_port *dp = dsa_user_to_port(dev);
1121 struct dsa_switch *ds = dp->ds;
1122
1123 if (ds->ops->get_eth_ctrl_stats)
1124 ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
1125 }
1126
1127 static void
dsa_user_get_rmon_stats(struct net_device *dev,
1129 struct ethtool_rmon_stats *rmon_stats,
1130 const struct ethtool_rmon_hist_range **ranges)
1131 {
1132 struct dsa_port *dp = dsa_user_to_port(dev);
1133 struct dsa_switch *ds = dp->ds;
1134
1135 if (ds->ops->get_rmon_stats)
1136 ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
1137 }
1138
static void dsa_user_get_ts_stats(struct net_device *dev,
1140 struct ethtool_ts_stats *ts_stats)
1141 {
1142 struct dsa_port *dp = dsa_user_to_port(dev);
1143 struct dsa_switch *ds = dp->ds;
1144
1145 if (ds->ops->get_ts_stats)
1146 ds->ops->get_ts_stats(ds, dp->index, ts_stats);
1147 }
1148
static void dsa_user_net_selftest(struct net_device *ndev,
1150 struct ethtool_test *etest, u64 *buf)
1151 {
1152 struct dsa_port *dp = dsa_user_to_port(ndev);
1153 struct dsa_switch *ds = dp->ds;
1154
1155 if (ds->ops->self_test) {
1156 ds->ops->self_test(ds, dp->index, etest, buf);
1157 return;
1158 }
1159
1160 net_selftest(ndev, etest, buf);
1161 }
1162
static int dsa_user_get_mm(struct net_device *dev,
1164 struct ethtool_mm_state *state)
1165 {
1166 struct dsa_port *dp = dsa_user_to_port(dev);
1167 struct dsa_switch *ds = dp->ds;
1168
1169 if (!ds->ops->get_mm)
1170 return -EOPNOTSUPP;
1171
1172 return ds->ops->get_mm(ds, dp->index, state);
1173 }
1174
static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
1176 struct netlink_ext_ack *extack)
1177 {
1178 struct dsa_port *dp = dsa_user_to_port(dev);
1179 struct dsa_switch *ds = dp->ds;
1180
1181 if (!ds->ops->set_mm)
1182 return -EOPNOTSUPP;
1183
1184 return ds->ops->set_mm(ds, dp->index, cfg, extack);
1185 }
1186
static void dsa_user_get_mm_stats(struct net_device *dev,
1188 struct ethtool_mm_stats *stats)
1189 {
1190 struct dsa_port *dp = dsa_user_to_port(dev);
1191 struct dsa_switch *ds = dp->ds;
1192
1193 if (ds->ops->get_mm_stats)
1194 ds->ops->get_mm_stats(ds, dp->index, stats);
1195 }
1196
static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1198 {
1199 struct dsa_port *dp = dsa_user_to_port(dev);
1200 struct dsa_switch *ds = dp->ds;
1201
1202 phylink_ethtool_get_wol(dp->pl, w);
1203
1204 if (ds->ops->get_wol)
1205 ds->ops->get_wol(ds, dp->index, w);
1206 }
1207
static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1209 {
1210 struct dsa_port *dp = dsa_user_to_port(dev);
1211 struct dsa_switch *ds = dp->ds;
1212 int ret = -EOPNOTSUPP;
1213
1214 phylink_ethtool_set_wol(dp->pl, w);
1215
1216 if (ds->ops->set_wol)
1217 ret = ds->ops->set_wol(ds, dp->index, w);
1218
1219 return ret;
1220 }
1221
static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
1223 {
1224 struct dsa_port *dp = dsa_user_to_port(dev);
1225 struct dsa_switch *ds = dp->ds;
1226 int ret;
1227
1228 /* Check whether the switch supports EEE */
1229 if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
1230 return -EOPNOTSUPP;
1231
1232 /* If the port is using phylink managed EEE, then an unimplemented
1233 * set_mac_eee() is permissible.
1234 */
1235 if (!phylink_mac_implements_lpi(ds->phylink_mac_ops)) {
1236 /* Port's PHY and MAC both need to be EEE capable */
1237 if (!dev->phydev)
1238 return -ENODEV;
1239
1240 if (!ds->ops->set_mac_eee)
1241 return -EOPNOTSUPP;
1242
1243 ret = ds->ops->set_mac_eee(ds, dp->index, e);
1244 if (ret)
1245 return ret;
1246 } else if (ds->ops->set_mac_eee) {
1247 ret = ds->ops->set_mac_eee(ds, dp->index, e);
1248 if (ret)
1249 return ret;
1250 }
1251
1252 return phylink_ethtool_set_eee(dp->pl, e);
1253 }
1254
static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
1256 {
1257 struct dsa_port *dp = dsa_user_to_port(dev);
1258 struct dsa_switch *ds = dp->ds;
1259
1260 /* Check whether the switch supports EEE */
1261 if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
1262 return -EOPNOTSUPP;
1263
1264 /* Port's PHY and MAC both need to be EEE capable */
1265 if (!dev->phydev)
1266 return -ENODEV;
1267
1268 return phylink_ethtool_get_eee(dp->pl, e);
1269 }
1270
static int dsa_user_get_link_ksettings(struct net_device *dev,
1272 struct ethtool_link_ksettings *cmd)
1273 {
1274 struct dsa_port *dp = dsa_user_to_port(dev);
1275
1276 return phylink_ethtool_ksettings_get(dp->pl, cmd);
1277 }
1278
static int dsa_user_set_link_ksettings(struct net_device *dev,
1280 const struct ethtool_link_ksettings *cmd)
1281 {
1282 struct dsa_port *dp = dsa_user_to_port(dev);
1283
1284 return phylink_ethtool_ksettings_set(dp->pl, cmd);
1285 }
1286
static void dsa_user_get_pause_stats(struct net_device *dev,
1288 struct ethtool_pause_stats *pause_stats)
1289 {
1290 struct dsa_port *dp = dsa_user_to_port(dev);
1291 struct dsa_switch *ds = dp->ds;
1292
1293 if (ds->ops->get_pause_stats)
1294 ds->ops->get_pause_stats(ds, dp->index, pause_stats);
1295 }
1296
static void dsa_user_get_pauseparam(struct net_device *dev,
1298 struct ethtool_pauseparam *pause)
1299 {
1300 struct dsa_port *dp = dsa_user_to_port(dev);
1301
1302 phylink_ethtool_get_pauseparam(dp->pl, pause);
1303 }
1304
static int dsa_user_set_pauseparam(struct net_device *dev,
1306 struct ethtool_pauseparam *pause)
1307 {
1308 struct dsa_port *dp = dsa_user_to_port(dev);
1309
1310 return phylink_ethtool_set_pauseparam(dp->pl, pause);
1311 }
1312
1313 #ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_user_netpoll_setup(struct net_device *dev)
1315 {
1316 struct net_device *conduit = dsa_user_to_conduit(dev);
1317 struct dsa_user_priv *p = netdev_priv(dev);
1318 struct netpoll *netpoll;
1319 int err = 0;
1320
1321 netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
1322 if (!netpoll)
1323 return -ENOMEM;
1324
1325 err = __netpoll_setup(netpoll, conduit);
1326 if (err) {
1327 kfree(netpoll);
1328 goto out;
1329 }
1330
1331 p->netpoll = netpoll;
1332 out:
1333 return err;
1334 }
1335
static void dsa_user_netpoll_cleanup(struct net_device *dev)
1337 {
1338 struct dsa_user_priv *p = netdev_priv(dev);
1339 struct netpoll *netpoll = p->netpoll;
1340
1341 if (!netpoll)
1342 return;
1343
1344 p->netpoll = NULL;
1345
1346 __netpoll_free(netpoll);
1347 }
1348
static void dsa_user_poll_controller(struct net_device *dev)
1350 {
1351 }
1352 #endif
1353
1354 static struct dsa_mall_tc_entry *
dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
1356 {
1357 struct dsa_user_priv *p = netdev_priv(dev);
1358 struct dsa_mall_tc_entry *mall_tc_entry;
1359
1360 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
1361 if (mall_tc_entry->cookie == cookie)
1362 return mall_tc_entry;
1363
1364 return NULL;
1365 }
1366
1367 static int
dsa_user_add_cls_matchall_mirred(struct net_device *dev,
1369 struct tc_cls_matchall_offload *cls,
1370 bool ingress, bool ingress_target)
1371 {
1372 struct netlink_ext_ack *extack = cls->common.extack;
1373 struct dsa_port *dp = dsa_user_to_port(dev);
1374 struct dsa_user_priv *p = netdev_priv(dev);
1375 struct dsa_mall_mirror_tc_entry *mirror;
1376 struct dsa_mall_tc_entry *mall_tc_entry;
1377 struct dsa_switch *ds = dp->ds;
1378 struct flow_action_entry *act;
1379 struct dsa_port *to_dp;
1380 int err;
1381
1382 if (cls->common.protocol != htons(ETH_P_ALL)) {
1383 NL_SET_ERR_MSG_MOD(extack,
1384 "Can only offload \"protocol all\" matchall filter");
1385 return -EOPNOTSUPP;
1386 }
1387
1388 if (!ds->ops->port_mirror_add) {
1389 NL_SET_ERR_MSG_MOD(extack,
1390 "Switch does not support mirroring operation");
1391 return -EOPNOTSUPP;
1392 }
1393
1394 if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
1395 return -EOPNOTSUPP;
1396
1397 act = &cls->rule->action.entries[0];
1398
1399 if (!act->dev)
1400 return -EINVAL;
1401
1402 if (dsa_user_dev_check(act->dev)) {
1403 if (ingress_target) {
1404 /* We can only fulfill this using software assist */
1405 if (cls->common.skip_sw) {
1406 NL_SET_ERR_MSG_MOD(extack,
1407 "Can only mirred to ingress of DSA user port if filter also runs in software");
1408 return -EOPNOTSUPP;
1409 }
1410 to_dp = dp->cpu_dp;
1411 } else {
1412 to_dp = dsa_user_to_port(act->dev);
1413 }
1414 } else {
1415 /* Handle mirroring to foreign target ports as a mirror towards
1416 * the CPU. The software tc rule will take the packets from
1417 * there.
1418 */
1419 if (cls->common.skip_sw) {
1420 NL_SET_ERR_MSG_MOD(extack,
1421 "Can only mirred to CPU if filter also runs in software");
1422 return -EOPNOTSUPP;
1423 }
1424 to_dp = dp->cpu_dp;
1425 }
1426
1427 if (dp->ds != to_dp->ds) {
1428 NL_SET_ERR_MSG_MOD(extack,
1429 "Cross-chip mirroring not implemented");
1430 return -EOPNOTSUPP;
1431 }
1432
1433 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1434 if (!mall_tc_entry)
1435 return -ENOMEM;
1436
1437 mall_tc_entry->cookie = cls->cookie;
1438 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
1439 mirror = &mall_tc_entry->mirror;
1440 mirror->to_local_port = to_dp->index;
1441 mirror->ingress = ingress;
1442
1443 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
1444 if (err) {
1445 kfree(mall_tc_entry);
1446 return err;
1447 }
1448
1449 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1450
1451 return err;
1452 }
1453
1454 static int
dsa_user_add_cls_matchall_police(struct net_device *dev,
1456 struct tc_cls_matchall_offload *cls,
1457 bool ingress)
1458 {
1459 struct netlink_ext_ack *extack = cls->common.extack;
1460 struct dsa_port *dp = dsa_user_to_port(dev);
1461 struct dsa_user_priv *p = netdev_priv(dev);
1462 struct dsa_mall_policer_tc_entry *policer;
1463 struct dsa_mall_tc_entry *mall_tc_entry;
1464 struct dsa_switch *ds = dp->ds;
1465 struct flow_action_entry *act;
1466 int err;
1467
1468 if (!ds->ops->port_policer_add) {
1469 NL_SET_ERR_MSG_MOD(extack,
1470 "Policing offload not implemented");
1471 return -EOPNOTSUPP;
1472 }
1473
1474 if (!ingress) {
1475 NL_SET_ERR_MSG_MOD(extack,
1476 "Only supported on ingress qdisc");
1477 return -EOPNOTSUPP;
1478 }
1479
1480 if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
1481 return -EOPNOTSUPP;
1482
1483 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1484 if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1485 NL_SET_ERR_MSG_MOD(extack,
1486 "Only one port policer allowed");
1487 return -EEXIST;
1488 }
1489 }
1490
1491 act = &cls->rule->action.entries[0];
1492
1493 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1494 if (!mall_tc_entry)
1495 return -ENOMEM;
1496
1497 mall_tc_entry->cookie = cls->cookie;
1498 mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1499 policer = &mall_tc_entry->policer;
1500 policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1501 policer->burst = act->police.burst;
1502
1503 err = ds->ops->port_policer_add(ds, dp->index, policer);
1504 if (err) {
1505 kfree(mall_tc_entry);
1506 return err;
1507 }
1508
1509 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1510
1511 return err;
1512 }
1513
static int dsa_user_add_cls_matchall(struct net_device *dev,
1515 struct tc_cls_matchall_offload *cls,
1516 bool ingress)
1517 {
1518 const struct flow_action *action = &cls->rule->action;
1519 struct netlink_ext_ack *extack = cls->common.extack;
1520
1521 if (!flow_offload_has_one_action(action)) {
1522 NL_SET_ERR_MSG_MOD(extack,
1523 "Cannot offload matchall filter with more than one action");
1524 return -EOPNOTSUPP;
1525 }
1526
1527 switch (action->entries[0].id) {
1528 case FLOW_ACTION_MIRRED:
1529 return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
1530 false);
1531 case FLOW_ACTION_MIRRED_INGRESS:
1532 return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
1533 true);
1534 case FLOW_ACTION_POLICE:
1535 return dsa_user_add_cls_matchall_police(dev, cls, ingress);
1536 default:
1537 NL_SET_ERR_MSG_MOD(extack, "Unknown action");
1538 break;
1539 }
1540
1541 return -EOPNOTSUPP;
1542 }
1543
static void dsa_user_del_cls_matchall(struct net_device *dev,
1545 struct tc_cls_matchall_offload *cls)
1546 {
1547 struct dsa_port *dp = dsa_user_to_port(dev);
1548 struct dsa_mall_tc_entry *mall_tc_entry;
1549 struct dsa_switch *ds = dp->ds;
1550
1551 mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie);
1552 if (!mall_tc_entry)
1553 return;
1554
1555 list_del(&mall_tc_entry->list);
1556
1557 switch (mall_tc_entry->type) {
1558 case DSA_PORT_MALL_MIRROR:
1559 if (ds->ops->port_mirror_del)
1560 ds->ops->port_mirror_del(ds, dp->index,
1561 &mall_tc_entry->mirror);
1562 break;
1563 case DSA_PORT_MALL_POLICER:
1564 if (ds->ops->port_policer_del)
1565 ds->ops->port_policer_del(ds, dp->index);
1566 break;
1567 default:
1568 WARN_ON(1);
1569 }
1570
1571 kfree(mall_tc_entry);
1572 }
1573
static int dsa_user_setup_tc_cls_matchall(struct net_device *dev,
1575 struct tc_cls_matchall_offload *cls,
1576 bool ingress)
1577 {
1578 if (cls->common.chain_index)
1579 return -EOPNOTSUPP;
1580
1581 switch (cls->command) {
1582 case TC_CLSMATCHALL_REPLACE:
1583 return dsa_user_add_cls_matchall(dev, cls, ingress);
1584 case TC_CLSMATCHALL_DESTROY:
1585 dsa_user_del_cls_matchall(dev, cls);
1586 return 0;
1587 default:
1588 return -EOPNOTSUPP;
1589 }
1590 }
1591
static int dsa_user_add_cls_flower(struct net_device *dev,
1593 struct flow_cls_offload *cls,
1594 bool ingress)
1595 {
1596 struct dsa_port *dp = dsa_user_to_port(dev);
1597 struct dsa_switch *ds = dp->ds;
1598 int port = dp->index;
1599
1600 if (!ds->ops->cls_flower_add)
1601 return -EOPNOTSUPP;
1602
1603 return ds->ops->cls_flower_add(ds, port, cls, ingress);
1604 }
1605
static int dsa_user_del_cls_flower(struct net_device *dev,
1607 struct flow_cls_offload *cls,
1608 bool ingress)
1609 {
1610 struct dsa_port *dp = dsa_user_to_port(dev);
1611 struct dsa_switch *ds = dp->ds;
1612 int port = dp->index;
1613
1614 if (!ds->ops->cls_flower_del)
1615 return -EOPNOTSUPP;
1616
1617 return ds->ops->cls_flower_del(ds, port, cls, ingress);
1618 }
1619
static int dsa_user_stats_cls_flower(struct net_device *dev,
1621 struct flow_cls_offload *cls,
1622 bool ingress)
1623 {
1624 struct dsa_port *dp = dsa_user_to_port(dev);
1625 struct dsa_switch *ds = dp->ds;
1626 int port = dp->index;
1627
1628 if (!ds->ops->cls_flower_stats)
1629 return -EOPNOTSUPP;
1630
1631 return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1632 }
1633
static int dsa_user_setup_tc_cls_flower(struct net_device *dev,
1635 struct flow_cls_offload *cls,
1636 bool ingress)
1637 {
1638 switch (cls->command) {
1639 case FLOW_CLS_REPLACE:
1640 return dsa_user_add_cls_flower(dev, cls, ingress);
1641 case FLOW_CLS_DESTROY:
1642 return dsa_user_del_cls_flower(dev, cls, ingress);
1643 case FLOW_CLS_STATS:
1644 return dsa_user_stats_cls_flower(dev, cls, ingress);
1645 default:
1646 return -EOPNOTSUPP;
1647 }
1648 }
1649
static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1651 void *cb_priv, bool ingress)
1652 {
1653 struct net_device *dev = cb_priv;
1654
1655 if (!tc_can_offload(dev))
1656 return -EOPNOTSUPP;
1657
1658 switch (type) {
1659 case TC_SETUP_CLSMATCHALL:
1660 return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress);
1661 case TC_SETUP_CLSFLOWER:
1662 return dsa_user_setup_tc_cls_flower(dev, type_data, ingress);
1663 default:
1664 return -EOPNOTSUPP;
1665 }
1666 }
1667
static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type,
1669 void *type_data, void *cb_priv)
1670 {
1671 return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true);
1672 }
1673
static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type,
1675 void *type_data, void *cb_priv)
1676 {
1677 return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false);
1678 }
1679
1680 static LIST_HEAD(dsa_user_block_cb_list);
1681
static int dsa_user_setup_tc_block(struct net_device *dev,
1683 struct flow_block_offload *f)
1684 {
1685 struct flow_block_cb *block_cb;
1686 flow_setup_cb_t *cb;
1687
1688 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1689 cb = dsa_user_setup_tc_block_cb_ig;
1690 else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1691 cb = dsa_user_setup_tc_block_cb_eg;
1692 else
1693 return -EOPNOTSUPP;
1694
1695 f->driver_block_list = &dsa_user_block_cb_list;
1696
1697 switch (f->command) {
1698 case FLOW_BLOCK_BIND:
1699 if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
1700 return -EBUSY;
1701
1702 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1703 if (IS_ERR(block_cb))
1704 return PTR_ERR(block_cb);
1705
1706 flow_block_cb_add(block_cb, f);
1707 list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);
1708 return 0;
1709 case FLOW_BLOCK_UNBIND:
1710 block_cb = flow_block_cb_lookup(f->block, cb, dev);
1711 if (!block_cb)
1712 return -ENOENT;
1713
1714 flow_block_cb_remove(block_cb, f);
1715 list_del(&block_cb->driver_list);
1716 return 0;
1717 default:
1718 return -EOPNOTSUPP;
1719 }
1720 }
1721
static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port,
1723 void *type_data)
1724 {
1725 struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port));
1726
1727 if (!conduit->netdev_ops->ndo_setup_tc)
1728 return -EOPNOTSUPP;
1729
1730 return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data);
1731 }
1732
static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type,
1734 void *type_data)
1735 {
1736 struct dsa_port *dp = dsa_user_to_port(dev);
1737 struct dsa_switch *ds = dp->ds;
1738
1739 switch (type) {
1740 case TC_SETUP_BLOCK:
1741 return dsa_user_setup_tc_block(dev, type_data);
1742 case TC_SETUP_FT:
1743 return dsa_user_setup_ft_block(ds, dp->index, type_data);
1744 default:
1745 break;
1746 }
1747
1748 if (!ds->ops->port_setup_tc)
1749 return -EOPNOTSUPP;
1750
1751 return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1752 }
1753
static int dsa_user_get_rxnfc(struct net_device *dev,
1755 struct ethtool_rxnfc *nfc, u32 *rule_locs)
1756 {
1757 struct dsa_port *dp = dsa_user_to_port(dev);
1758 struct dsa_switch *ds = dp->ds;
1759
1760 if (!ds->ops->get_rxnfc)
1761 return -EOPNOTSUPP;
1762
1763 return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1764 }
1765
static int dsa_user_set_rxnfc(struct net_device *dev,
1767 struct ethtool_rxnfc *nfc)
1768 {
1769 struct dsa_port *dp = dsa_user_to_port(dev);
1770 struct dsa_switch *ds = dp->ds;
1771
1772 if (!ds->ops->set_rxnfc)
1773 return -EOPNOTSUPP;
1774
1775 return ds->ops->set_rxnfc(ds, dp->index, nfc);
1776 }
1777
static int dsa_user_get_ts_info(struct net_device *dev,
1779 struct kernel_ethtool_ts_info *ts)
1780 {
1781 struct dsa_user_priv *p = netdev_priv(dev);
1782 struct dsa_switch *ds = p->dp->ds;
1783
1784 if (!ds->ops->get_ts_info)
1785 return -EOPNOTSUPP;
1786
1787 return ds->ops->get_ts_info(ds, p->dp->index, ts);
1788 }
1789
static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1791 u16 vid)
1792 {
1793 struct dsa_port *dp = dsa_user_to_port(dev);
1794 struct switchdev_obj_port_vlan vlan = {
1795 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1796 .vid = vid,
1797 /* This API only allows programming tagged, non-PVID VIDs */
1798 .flags = 0,
1799 };
1800 struct netlink_ext_ack extack = {0};
1801 struct dsa_switch *ds = dp->ds;
1802 struct netdev_hw_addr *ha;
1803 struct dsa_vlan *v;
1804 int ret;
1805
1806 /* User port... */
1807 ret = dsa_port_vlan_add(dp, &vlan, &extack);
1808 if (ret) {
1809 if (extack._msg)
1810 netdev_err(dev, "%s\n", extack._msg);
1811 return ret;
1812 }
1813
1814 /* And CPU port... */
1815 ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
1816 if (ret) {
1817 if (extack._msg)
1818 netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1819 extack._msg);
1820 return ret;
1821 }
1822
1823 if (!dsa_switch_supports_uc_filtering(ds) &&
1824 !dsa_switch_supports_mc_filtering(ds))
1825 return 0;
1826
1827 v = kzalloc(sizeof(*v), GFP_KERNEL);
1828 if (!v) {
1829 ret = -ENOMEM;
1830 goto rollback;
1831 }
1832
1833 netif_addr_lock_bh(dev);
1834
1835 v->vid = vid;
1836 list_add_tail(&v->list, &dp->user_vlans);
1837
1838 if (dsa_switch_supports_mc_filtering(ds)) {
1839 netdev_for_each_synced_mc_addr(ha, dev) {
1840 dsa_user_schedule_standalone_work(dev, DSA_MC_ADD,
1841 ha->addr, vid);
1842 }
1843 }
1844
1845 if (dsa_switch_supports_uc_filtering(ds)) {
1846 netdev_for_each_synced_uc_addr(ha, dev) {
1847 dsa_user_schedule_standalone_work(dev, DSA_UC_ADD,
1848 ha->addr, vid);
1849 }
1850 }
1851
1852 netif_addr_unlock_bh(dev);
1853
1854 dsa_flush_workqueue();
1855
1856 return 0;
1857
1858 rollback:
1859 dsa_port_host_vlan_del(dp, &vlan);
1860 dsa_port_vlan_del(dp, &vlan);
1861
1862 return ret;
1863 }
1864
1865 static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1866 u16 vid)
1867 {
1868 struct dsa_port *dp = dsa_user_to_port(dev);
1869 struct switchdev_obj_port_vlan vlan = {
1870 .vid = vid,
1871 /* This API only allows programming tagged, non-PVID VIDs */
1872 .flags = 0,
1873 };
1874 struct dsa_switch *ds = dp->ds;
1875 struct netdev_hw_addr *ha;
1876 struct dsa_vlan *v;
1877 int err;
1878
1879 err = dsa_port_vlan_del(dp, &vlan);
1880 if (err)
1881 return err;
1882
1883 err = dsa_port_host_vlan_del(dp, &vlan);
1884 if (err)
1885 return err;
1886
1887 if (!dsa_switch_supports_uc_filtering(ds) &&
1888 !dsa_switch_supports_mc_filtering(ds))
1889 return 0;
1890
1891 netif_addr_lock_bh(dev);
1892
1893 v = dsa_vlan_find(&dp->user_vlans, &vlan);
1894 if (!v) {
1895 netif_addr_unlock_bh(dev);
1896 return -ENOENT;
1897 }
1898
1899 list_del(&v->list);
1900 kfree(v);
1901
1902 if (dsa_switch_supports_mc_filtering(ds)) {
1903 netdev_for_each_synced_mc_addr(ha, dev) {
1904 dsa_user_schedule_standalone_work(dev, DSA_MC_DEL,
1905 ha->addr, vid);
1906 }
1907 }
1908
1909 if (dsa_switch_supports_uc_filtering(ds)) {
1910 netdev_for_each_synced_uc_addr(ha, dev) {
1911 dsa_user_schedule_standalone_work(dev, DSA_UC_DEL,
1912 ha->addr, vid);
1913 }
1914 }
1915
1916 netif_addr_unlock_bh(dev);
1917
1918 dsa_flush_workqueue();
1919
1920 return 0;
1921 }
1922
1923 static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg)
1924 {
1925 __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1926
1927 return dsa_user_vlan_rx_add_vid(arg, proto, vid);
1928 }
1929
1930 static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg)
1931 {
1932 __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1933
1934 return dsa_user_vlan_rx_kill_vid(arg, proto, vid);
1935 }
1936
1937 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1938 * filtering is enabled. The baseline is that only ports that offload a
1939 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1940 * but there are exceptions for quirky hardware.
1941 *
1942 * If ds->vlan_filtering_is_global = true, then standalone ports which share
1943 * the same switch with other ports that offload a VLAN-aware bridge are also
1944 * inevitably VLAN-aware.
1945 *
1946 * To summarize, a DSA switch port offloads:
1947 *
1948 * - If standalone (this includes software bridge, software LAG):
1949 * - if ds->needs_standalone_vlan_filtering = true, OR if
1950 * (ds->vlan_filtering_is_global = true AND there are bridges spanning
1951 * this switch chip which have vlan_filtering=1)
1952 * - the 8021q upper VLANs
1953 * - else (standalone VLAN filtering is not needed, VLAN filtering is not
1954 * global, or it is, but no port is under a VLAN-aware bridge):
1955 * - no VLAN (any 8021q upper is a software VLAN)
1956 *
1957 * - If under a vlan_filtering=0 bridge which it offloads:
1958 * - if ds->configure_vlan_while_not_filtering = true (default):
1959 * - the bridge VLANs. These VLANs are committed to hardware but inactive.
1960 * - else (deprecated):
1961 * - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1962 * enabled, so this behavior is broken and discouraged.
1963 *
1964 * - If under a vlan_filtering=1 bridge which it offloads:
1965 * - the bridge VLANs
1966 * - the 8021q upper VLANs
1967 */
1968 int dsa_user_manage_vlan_filtering(struct net_device *user,
1969 bool vlan_filtering)
1970 {
1971 int err;
1972
1973 if (vlan_filtering) {
1974 user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1975
1976 err = vlan_for_each(user, dsa_user_restore_vlan, user);
1977 if (err) {
1978 vlan_for_each(user, dsa_user_clear_vlan, user);
1979 user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1980 return err;
1981 }
1982 } else {
1983 err = vlan_for_each(user, dsa_user_clear_vlan, user);
1984 if (err)
1985 return err;
1986
1987 user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1988 }
1989
1990 return 0;
1991 }
1992
1993 struct dsa_hw_port {
1994 struct list_head list;
1995 struct net_device *dev;
1996 int old_mtu;
1997 };
1998
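/* Apply @mtu to every port in @hw_port_list, restoring the old MTU of the
 * already-changed ports if any dev_set_mtu() call fails.
 */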
1999 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
2000 {
2001 const struct dsa_hw_port *p;
2002 int err;
2003
2004 list_for_each_entry(p, hw_port_list, list) {
2005 if (p->dev->mtu == mtu)
2006 continue;
2007
2008 err = dev_set_mtu(p->dev, mtu);
2009 if (err)
2010 goto rollback;
2011 }
2012
2013 return 0;
2014
2015 rollback:
2016 list_for_each_entry_continue_reverse(p, hw_port_list, list) {
2017 if (p->dev->mtu == p->old_mtu)
2018 continue;
2019
2020 if (dev_set_mtu(p->dev, p->old_mtu))
2021 netdev_err(p->dev, "Failed to restore MTU\n");
2022 }
2023
2024 return err;
2025 }
2026
2027 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
2028 {
2029 struct dsa_hw_port *p, *n;
2030
2031 list_for_each_entry_safe(p, n, hw_port_list, list)
2032 kfree(p);
2033 }
2034
2035 /* Make the hardware datapath to/from @dev limited to a common MTU */
2036 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
2037 {
2038 struct list_head hw_port_list;
2039 struct dsa_switch_tree *dst;
2040 int min_mtu = ETH_MAX_MTU;
2041 struct dsa_port *other_dp;
2042 int err;
2043
2044 if (!dp->ds->mtu_enforcement_ingress)
2045 return;
2046
2047 if (!dp->bridge)
2048 return;
2049
2050 INIT_LIST_HEAD(&hw_port_list);
2051
2052 /* Populate the list of ports that are part of the same bridge
2053 * as the newly added/modified port
2054 */
2055 list_for_each_entry(dst, &dsa_tree_list, list) {
2056 list_for_each_entry(other_dp, &dst->ports, list) {
2057 struct dsa_hw_port *hw_port;
2058 struct net_device *user;
2059
2060 if (other_dp->type != DSA_PORT_TYPE_USER)
2061 continue;
2062
2063 if (!dsa_port_bridge_same(dp, other_dp))
2064 continue;
2065
2066 if (!other_dp->ds->mtu_enforcement_ingress)
2067 continue;
2068
2069 user = other_dp->user;
2070
2071 if (min_mtu > user->mtu)
2072 min_mtu = user->mtu;
2073
2074 hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
2075 if (!hw_port)
2076 goto out;
2077
2078 hw_port->dev = user;
2079 hw_port->old_mtu = user->mtu;
2080
2081 list_add(&hw_port->list, &hw_port_list);
2082 }
2083 }
2084
2085 /* Attempt to configure the entire hardware bridge to the newly added
2086 * interface's MTU first, regardless of whether the intention of the
2087 * user was to raise or lower it.
2088 */
2089 err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
2090 if (!err)
2091 goto out;
2092
2093 /* Clearly that didn't work out so well, so just set the minimum MTU on
2094 * all hardware bridge ports now. If this fails too, then all ports will
2095 * still have their old MTU rolled back anyway.
2096 */
2097 dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
2098
2099 out:
2100 dsa_hw_port_list_free(&hw_port_list);
2101 }
2102
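/* Change the MTU of a user port. The conduit and CPU port must be able to
 * carry the largest user port MTU plus the tagging protocol overhead, so
 * propagate the new maximum to them before applying the setting to the
 * user port itself.
 */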
2103 int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
2104 {
2105 struct net_device *conduit = dsa_user_to_conduit(dev);
2106 struct dsa_port *dp = dsa_user_to_port(dev);
2107 struct dsa_port *cpu_dp = dp->cpu_dp;
2108 struct dsa_switch *ds = dp->ds;
2109 struct dsa_port *other_dp;
2110 int largest_mtu = 0;
2111 int new_conduit_mtu;
2112 int old_conduit_mtu;
2113 int mtu_limit;
2114 int overhead;
2115 int cpu_mtu;
2116 int err;
2117
2118 if (!ds->ops->port_change_mtu)
2119 return -EOPNOTSUPP;
2120
2121 dsa_tree_for_each_user_port(other_dp, ds->dst) {
2122 int user_mtu;
2123
2124 /* During probe, this function will be called for each user
2125 * device, even though not all of them have been allocated yet.
2126 * That's ok, it doesn't change what the maximum is, so ignore it.
2127 */
2128 if (!other_dp->user)
2129 continue;
2130
2131 /* Pretend that we already applied the setting, which we
2132 * actually haven't (still haven't done all integrity checks)
2133 */
2134 if (dp == other_dp)
2135 user_mtu = new_mtu;
2136 else
2137 user_mtu = other_dp->user->mtu;
2138
2139 if (largest_mtu < user_mtu)
2140 largest_mtu = user_mtu;
2141 }
2142
2143 overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
2144 mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
2145 old_conduit_mtu = conduit->mtu;
2146 new_conduit_mtu = largest_mtu + overhead;
2147 if (new_conduit_mtu > mtu_limit)
2148 return -ERANGE;
2149
2150 /* If the conduit MTU isn't over the limit, there's no need to check the CPU
2151 * MTU, since that surely isn't either.
2152 */
2153 cpu_mtu = largest_mtu;
2154
2155 /* Start applying stuff */
2156 if (new_conduit_mtu != old_conduit_mtu) {
2157 err = dev_set_mtu(conduit, new_conduit_mtu);
2158 if (err < 0)
2159 goto out_conduit_failed;
2160
2161 /* We only need to propagate the MTU of the CPU port to
2162 * upstream switches, so emit a notifier which updates them.
2163 */
2164 err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
2165 if (err)
2166 goto out_cpu_failed;
2167 }
2168
2169 err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
2170 if (err)
2171 goto out_port_failed;
2172
2173 WRITE_ONCE(dev->mtu, new_mtu);
2174
2175 dsa_bridge_mtu_normalization(dp);
2176
2177 return 0;
2178
2179 out_port_failed:
2180 if (new_conduit_mtu != old_conduit_mtu)
2181 dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead);
2182 out_cpu_failed:
2183 if (new_conduit_mtu != old_conduit_mtu)
2184 dev_set_mtu(conduit, old_conduit_mtu);
2185 out_conduit_failed:
2186 return err;
2187 }
2188
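/* dcbnl apptrust hooks: delegate the app selector trust order (e.g. DSCP
 * vs PCP) directly to the switch driver, which knows what its hardware can
 * classify on.
 */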
2189 static int __maybe_unused
2190 dsa_user_dcbnl_set_apptrust(struct net_device *dev, u8 *sel, int nsel)
2191 {
2192 struct dsa_port *dp = dsa_user_to_port(dev);
2193 struct dsa_switch *ds = dp->ds;
2194 int port = dp->index;
2195
2196 if (!ds->ops->port_set_apptrust)
2197 return -EOPNOTSUPP;
2198
2199 return ds->ops->port_set_apptrust(ds, port, sel, nsel);
2200 }
2201
2202 static int __maybe_unused
2203 dsa_user_dcbnl_get_apptrust(struct net_device *dev, u8 *sel, int *nsel)
2204 {
2205 struct dsa_port *dp = dsa_user_to_port(dev);
2206 struct dsa_switch *ds = dp->ds;
2207 int port = dp->index;
2208
2209 if (!ds->ops->port_get_apptrust)
2210 return -EOPNOTSUPP;
2211
2212 return ds->ops->port_get_apptrust(ds, port, sel, nsel);
2213 }
2214
2215 static int __maybe_unused
2216 dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
2217 {
2218 struct dsa_port *dp = dsa_user_to_port(dev);
2219 struct dsa_switch *ds = dp->ds;
2220 unsigned long mask, new_prio;
2221 int err, port = dp->index;
2222
2223 if (!ds->ops->port_set_default_prio)
2224 return -EOPNOTSUPP;
2225
2226 err = dcb_ieee_setapp(dev, app);
2227 if (err)
2228 return err;
2229
2230 mask = dcb_ieee_getapp_mask(dev, app);
2231 new_prio = __fls(mask);
2232
2233 err = ds->ops->port_set_default_prio(ds, port, new_prio);
2234 if (err) {
2235 dcb_ieee_delapp(dev, app);
2236 return err;
2237 }
2238
2239 return 0;
2240 }
2241
2242 /* Update the DSCP prio entries on all user ports of the switch if the
2243 * switch supports a global DSCP prio mapping instead of per-port DSCP prios.
2244 */
2245 static int dsa_user_dcbnl_ieee_global_dscp_setdel(struct net_device *dev,
2246 struct dcb_app *app, bool del)
2247 {
2248 int (*setdel)(struct net_device *dev, struct dcb_app *app);
2249 struct dsa_port *dp = dsa_user_to_port(dev);
2250 struct dsa_switch *ds = dp->ds;
2251 struct dsa_port *other_dp;
2252 int err, restore_err;
2253
2254 if (del)
2255 setdel = dcb_ieee_delapp;
2256 else
2257 setdel = dcb_ieee_setapp;
2258
2259 dsa_switch_for_each_user_port(other_dp, ds) {
2260 struct net_device *user = other_dp->user;
2261
2262 if (!user || user == dev)
2263 continue;
2264
2265 err = setdel(user, app);
2266 if (err)
2267 goto err_try_to_restore;
2268 }
2269
2270 return 0;
2271
2272 err_try_to_restore:
2273
2274 /* Revert logic to restore previous state of app entries */
2275 if (!del)
2276 setdel = dcb_ieee_delapp;
2277 else
2278 setdel = dcb_ieee_setapp;
2279
2280 dsa_switch_for_each_user_port_continue_reverse(other_dp, ds) {
2281 struct net_device *user = other_dp->user;
2282
2283 if (!user || user == dev)
2284 continue;
2285
2286 restore_err = setdel(user, app);
2287 if (restore_err)
2288 netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
2289 }
2290
2291 return err;
2292 }
2293
2294 static int __maybe_unused
2295 dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
2296 {
2297 struct dsa_port *dp = dsa_user_to_port(dev);
2298 struct dsa_switch *ds = dp->ds;
2299 unsigned long mask, new_prio;
2300 int err, port = dp->index;
2301 u8 dscp = app->protocol;
2302
2303 if (!ds->ops->port_add_dscp_prio)
2304 return -EOPNOTSUPP;
2305
2306 if (dscp >= 64) {
2307 netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
2308 dscp);
2309 return -EINVAL;
2310 }
2311
2312 err = dcb_ieee_setapp(dev, app);
2313 if (err)
2314 return err;
2315
2316 mask = dcb_ieee_getapp_mask(dev, app);
2317 new_prio = __fls(mask);
2318
2319 err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
2320 if (err) {
2321 dcb_ieee_delapp(dev, app);
2322 return err;
2323 }
2324
2325 if (!ds->dscp_prio_mapping_is_global)
2326 return 0;
2327
2328 err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, false);
2329 if (err) {
2330 if (ds->ops->port_del_dscp_prio)
2331 ds->ops->port_del_dscp_prio(ds, port, dscp, new_prio);
2332 dcb_ieee_delapp(dev, app);
2333 return err;
2334 }
2335
2336 return 0;
2337 }
2338
2339 static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev,
2340 struct dcb_app *app)
2341 {
2342 switch (app->selector) {
2343 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2344 switch (app->protocol) {
2345 case 0:
2346 return dsa_user_dcbnl_set_default_prio(dev, app);
2347 default:
2348 return -EOPNOTSUPP;
2349 }
2350 break;
2351 case IEEE_8021QAZ_APP_SEL_DSCP:
2352 return dsa_user_dcbnl_add_dscp_prio(dev, app);
2353 default:
2354 return -EOPNOTSUPP;
2355 }
2356 }
2357
2358 static int __maybe_unused
2359 dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
2360 {
2361 struct dsa_port *dp = dsa_user_to_port(dev);
2362 struct dsa_switch *ds = dp->ds;
2363 unsigned long mask, new_prio;
2364 int err, port = dp->index;
2365
2366 if (!ds->ops->port_set_default_prio)
2367 return -EOPNOTSUPP;
2368
2369 err = dcb_ieee_delapp(dev, app);
2370 if (err)
2371 return err;
2372
2373 mask = dcb_ieee_getapp_mask(dev, app);
2374 new_prio = mask ? __fls(mask) : 0;
2375
2376 err = ds->ops->port_set_default_prio(ds, port, new_prio);
2377 if (err) {
2378 dcb_ieee_setapp(dev, app);
2379 return err;
2380 }
2381
2382 return 0;
2383 }
2384
2385 static int __maybe_unused
2386 dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
2387 {
2388 struct dsa_port *dp = dsa_user_to_port(dev);
2389 struct dsa_switch *ds = dp->ds;
2390 int err, port = dp->index;
2391 u8 dscp = app->protocol;
2392
2393 if (!ds->ops->port_del_dscp_prio)
2394 return -EOPNOTSUPP;
2395
2396 err = dcb_ieee_delapp(dev, app);
2397 if (err)
2398 return err;
2399
2400 err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
2401 if (err) {
2402 dcb_ieee_setapp(dev, app);
2403 return err;
2404 }
2405
2406 if (!ds->dscp_prio_mapping_is_global)
2407 return 0;
2408
2409 err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, true);
2410 if (err) {
2411 if (ds->ops->port_add_dscp_prio)
2412 ds->ops->port_add_dscp_prio(ds, port, dscp,
2413 app->priority);
2414 dcb_ieee_setapp(dev, app);
2415 return err;
2416 }
2417
2418 return 0;
2419 }
2420
2421 static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev,
2422 struct dcb_app *app)
2423 {
2424 switch (app->selector) {
2425 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2426 switch (app->protocol) {
2427 case 0:
2428 return dsa_user_dcbnl_del_default_prio(dev, app);
2429 default:
2430 return -EOPNOTSUPP;
2431 }
2432 break;
2433 case IEEE_8021QAZ_APP_SEL_DSCP:
2434 return dsa_user_dcbnl_del_dscp_prio(dev, app);
2435 default:
2436 return -EOPNOTSUPP;
2437 }
2438 }
2439
2440 /* Pre-populate the DCB application priority table with the priorities
2441 * configured during switch setup, which we read from hardware here.
2442 */
2443 static int dsa_user_dcbnl_init(struct net_device *dev)
2444 {
2445 struct dsa_port *dp = dsa_user_to_port(dev);
2446 struct dsa_switch *ds = dp->ds;
2447 int port = dp->index;
2448 int err;
2449
2450 if (ds->ops->port_get_default_prio) {
2451 int prio = ds->ops->port_get_default_prio(ds, port);
2452 struct dcb_app app = {
2453 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
2454 .protocol = 0,
2455 .priority = prio,
2456 };
2457
2458 if (prio < 0)
2459 return prio;
2460
2461 err = dcb_ieee_setapp(dev, &app);
2462 if (err)
2463 return err;
2464 }
2465
2466 if (ds->ops->port_get_dscp_prio) {
2467 int protocol;
2468
2469 for (protocol = 0; protocol < 64; protocol++) {
2470 struct dcb_app app = {
2471 .selector = IEEE_8021QAZ_APP_SEL_DSCP,
2472 .protocol = protocol,
2473 };
2474 int prio;
2475
2476 prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
2477 if (prio == -EOPNOTSUPP)
2478 continue;
2479 if (prio < 0)
2480 return prio;
2481
2482 app.priority = prio;
2483
2484 err = dcb_ieee_setapp(dev, &app);
2485 if (err)
2486 return err;
2487 }
2488 }
2489
2490 return 0;
2491 }
2492
2493 static const struct ethtool_ops dsa_user_ethtool_ops = {
2494 .get_drvinfo = dsa_user_get_drvinfo,
2495 .get_regs_len = dsa_user_get_regs_len,
2496 .get_regs = dsa_user_get_regs,
2497 .nway_reset = dsa_user_nway_reset,
2498 .get_link = ethtool_op_get_link,
2499 .get_eeprom_len = dsa_user_get_eeprom_len,
2500 .get_eeprom = dsa_user_get_eeprom,
2501 .set_eeprom = dsa_user_set_eeprom,
2502 .get_strings = dsa_user_get_strings,
2503 .get_ethtool_stats = dsa_user_get_ethtool_stats,
2504 .get_sset_count = dsa_user_get_sset_count,
2505 .get_eth_phy_stats = dsa_user_get_eth_phy_stats,
2506 .get_eth_mac_stats = dsa_user_get_eth_mac_stats,
2507 .get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
2508 .get_rmon_stats = dsa_user_get_rmon_stats,
2509 .get_ts_stats = dsa_user_get_ts_stats,
2510 .set_wol = dsa_user_set_wol,
2511 .get_wol = dsa_user_get_wol,
2512 .set_eee = dsa_user_set_eee,
2513 .get_eee = dsa_user_get_eee,
2514 .get_link_ksettings = dsa_user_get_link_ksettings,
2515 .set_link_ksettings = dsa_user_set_link_ksettings,
2516 .get_pause_stats = dsa_user_get_pause_stats,
2517 .get_pauseparam = dsa_user_get_pauseparam,
2518 .set_pauseparam = dsa_user_set_pauseparam,
2519 .get_rxnfc = dsa_user_get_rxnfc,
2520 .set_rxnfc = dsa_user_set_rxnfc,
2521 .get_ts_info = dsa_user_get_ts_info,
2522 .self_test = dsa_user_net_selftest,
2523 .get_mm = dsa_user_get_mm,
2524 .set_mm = dsa_user_set_mm,
2525 .get_mm_stats = dsa_user_get_mm_stats,
2526 };
2527
2528 static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
2529 .ieee_setapp = dsa_user_dcbnl_ieee_setapp,
2530 .ieee_delapp = dsa_user_dcbnl_ieee_delapp,
2531 .dcbnl_setapptrust = dsa_user_dcbnl_set_apptrust,
2532 .dcbnl_getapptrust = dsa_user_dcbnl_get_apptrust,
2533 };
2534
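/* Prefer hardware counters from the switch driver when available, and fall
 * back to the software per-CPU tstats otherwise.
 */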
2535 static void dsa_user_get_stats64(struct net_device *dev,
2536 struct rtnl_link_stats64 *s)
2537 {
2538 struct dsa_port *dp = dsa_user_to_port(dev);
2539 struct dsa_switch *ds = dp->ds;
2540
2541 if (ds->ops->get_stats64)
2542 ds->ops->get_stats64(ds, dp->index, s);
2543 else
2544 dev_get_tstats64(dev, s);
2545 }
2546
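/* Describe one segment of the transmit path (used e.g. by flowtable offload
 * path resolution): a DSA user port transmits through its conduit, using the
 * CPU port's tagging protocol and its own port index.
 */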
2547 static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
2548 struct net_device_path *path)
2549 {
2550 struct dsa_port *dp = dsa_user_to_port(ctx->dev);
2551 struct net_device *conduit = dsa_port_to_conduit(dp);
2552 struct dsa_port *cpu_dp = dp->cpu_dp;
2553
2554 path->dev = ctx->dev;
2555 path->type = DEV_PATH_DSA;
2556 path->dsa.proto = cpu_dp->tag_ops->proto;
2557 path->dsa.port = dp->index;
2558 ctx->dev = conduit;
2559
2560 return 0;
2561 }
2562
2563 static int dsa_user_hwtstamp_get(struct net_device *dev,
2564 struct kernel_hwtstamp_config *cfg)
2565 {
2566 struct dsa_port *dp = dsa_user_to_port(dev);
2567 struct dsa_switch *ds = dp->ds;
2568
2569 if (!ds->ops->port_hwtstamp_get)
2570 return -EOPNOTSUPP;
2571
2572 return ds->ops->port_hwtstamp_get(ds, dp->index, cfg);
2573 }
2574
2575 static int dsa_user_hwtstamp_set(struct net_device *dev,
2576 struct kernel_hwtstamp_config *cfg,
2577 struct netlink_ext_ack *extack)
2578 {
2579 struct dsa_port *dp = dsa_user_to_port(dev);
2580 struct dsa_switch *ds = dp->ds;
2581
2582 if (!ds->ops->port_hwtstamp_set)
2583 return -EOPNOTSUPP;
2584
2585 return ds->ops->port_hwtstamp_set(ds, dp->index, cfg, extack);
2586 }
2587
2588 static const struct net_device_ops dsa_user_netdev_ops = {
2589 .ndo_open = dsa_user_open,
2590 .ndo_stop = dsa_user_close,
2591 .ndo_start_xmit = dsa_user_xmit,
2592 .ndo_change_rx_flags = dsa_user_change_rx_flags,
2593 .ndo_set_rx_mode = dsa_user_set_rx_mode,
2594 .ndo_set_mac_address = dsa_user_set_mac_address,
2595 .ndo_fdb_dump = dsa_user_fdb_dump,
2596 .ndo_eth_ioctl = dsa_user_ioctl,
2597 .ndo_get_iflink = dsa_user_get_iflink,
2598 #ifdef CONFIG_NET_POLL_CONTROLLER
2599 .ndo_netpoll_setup = dsa_user_netpoll_setup,
2600 .ndo_netpoll_cleanup = dsa_user_netpoll_cleanup,
2601 .ndo_poll_controller = dsa_user_poll_controller,
2602 #endif
2603 .ndo_setup_tc = dsa_user_setup_tc,
2604 .ndo_get_stats64 = dsa_user_get_stats64,
2605 .ndo_vlan_rx_add_vid = dsa_user_vlan_rx_add_vid,
2606 .ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid,
2607 .ndo_change_mtu = dsa_user_change_mtu,
2608 .ndo_fill_forward_path = dsa_user_fill_forward_path,
2609 .ndo_hwtstamp_get = dsa_user_hwtstamp_get,
2610 .ndo_hwtstamp_set = dsa_user_hwtstamp_set,
2611 };
2612
2613 static const struct device_type dsa_type = {
2614 .name = "dsa",
2615 };
2616
2617 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2618 {
2619 const struct dsa_port *dp = dsa_to_port(ds, port);
2620
2621 if (dp->pl)
2622 phylink_mac_change(dp->pl, up);
2623 }
2624 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2625
2626 static void dsa_user_phylink_fixed_state(struct phylink_config *config,
2627 struct phylink_link_state *state)
2628 {
2629 struct dsa_port *dp = dsa_phylink_to_port(config);
2630 struct dsa_switch *ds = dp->ds;
2631
2632 /* No need to check that this operation is valid, the callback would
2633 * not be called if it was not.
2634 */
2635 ds->ops->phylink_fixed_state(ds, dp->index, state);
2636 }
2637
2638 /* user device setup *******************************************************/
2639 static int dsa_user_phy_connect(struct net_device *user_dev, int addr,
2640 u32 flags)
2641 {
2642 struct dsa_port *dp = dsa_user_to_port(user_dev);
2643 struct dsa_switch *ds = dp->ds;
2644
2645 user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr);
2646 if (!user_dev->phydev) {
2647 netdev_err(user_dev, "no phy at %d\n", addr);
2648 return -ENODEV;
2649 }
2650
2651 user_dev->phydev->dev_flags |= flags;
2652
2653 return phylink_connect_phy(dp->pl, user_dev->phydev);
2654 }
2655
2656 static int dsa_user_phy_setup(struct net_device *user_dev)
2657 {
2658 struct dsa_port *dp = dsa_user_to_port(user_dev);
2659 struct device_node *port_dn = dp->dn;
2660 struct dsa_switch *ds = dp->ds;
2661 u32 phy_flags = 0;
2662 int ret;
2663
2664 dp->pl_config.dev = &user_dev->dev;
2665 dp->pl_config.type = PHYLINK_NETDEV;
2666
2667 /* The get_fixed_state callback takes precedence over polling the
2668 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
2669 * this if the switch provides such a callback.
2670 */
2671 if (ds->ops->phylink_fixed_state) {
2672 dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state;
2673 dp->pl_config.poll_fixed_state = true;
2674 }
2675
2676 ret = dsa_port_phylink_create(dp);
2677 if (ret)
2678 return ret;
2679
2680 if (ds->ops->get_phy_flags)
2681 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2682
2683 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2684 if (ret == -ENODEV && ds->user_mii_bus) {
2685 /* We could not connect to a designated PHY or SFP, so try to
2686 * use the switch internal MDIO bus instead
2687 */
2688 ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags);
2689 }
2690 if (ret) {
2691 netdev_err(user_dev, "failed to connect to PHY: %pe\n",
2692 ERR_PTR(ret));
2693 dsa_port_phylink_destroy(dp);
2694 }
2695
2696 return ret;
2697 }
2698
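/* Inherit headroom/tailroom and feature flags from the tagging protocol and
 * the conduit so that the tagger's xmit hook can usually prepend or append
 * its tag without having to reallocate the skb.
 */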
2699 void dsa_user_setup_tagger(struct net_device *user)
2700 {
2701 struct dsa_port *dp = dsa_user_to_port(user);
2702 struct net_device *conduit = dsa_port_to_conduit(dp);
2703 struct dsa_user_priv *p = netdev_priv(user);
2704 const struct dsa_port *cpu_dp = dp->cpu_dp;
2705 const struct dsa_switch *ds = dp->ds;
2706
2707 user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2708 user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2709 /* Try to save one extra realloc later in the TX path (in the conduit)
2710 * by also inheriting the conduit's needed headroom and tailroom.
2711 * The 8021q driver also does this.
2712 */
2713 user->needed_headroom += conduit->needed_headroom;
2714 user->needed_tailroom += conduit->needed_tailroom;
2715
2716 p->xmit = cpu_dp->tag_ops->xmit;
2717
2718 user->features = conduit->vlan_features | NETIF_F_HW_TC;
2719 user->hw_features |= NETIF_F_HW_TC;
2720 if (user->needed_tailroom)
2721 user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2722 if (ds->needs_standalone_vlan_filtering)
2723 user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2724
2725 user->lltx = true;
2726 }
2727
2728 int dsa_user_suspend(struct net_device *user_dev)
2729 {
2730 struct dsa_port *dp = dsa_user_to_port(user_dev);
2731
2732 if (!netif_running(user_dev))
2733 return 0;
2734
2735 netif_device_detach(user_dev);
2736
2737 rtnl_lock();
2738 phylink_stop(dp->pl);
2739 rtnl_unlock();
2740
2741 return 0;
2742 }
2743
2744 int dsa_user_resume(struct net_device *user_dev)
2745 {
2746 struct dsa_port *dp = dsa_user_to_port(user_dev);
2747
2748 if (!netif_running(user_dev))
2749 return 0;
2750
2751 netif_device_attach(user_dev);
2752
2753 rtnl_lock();
2754 phylink_start(dp->pl);
2755 rtnl_unlock();
2756
2757 return 0;
2758 }
2759
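/* Create and register the user net_device for @port, connect it to its PHY
 * through phylink, and link it as an upper of the conduit. Errors unwind in
 * reverse order of setup.
 */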
2760 int dsa_user_create(struct dsa_port *port)
2761 {
2762 struct net_device *conduit = dsa_port_to_conduit(port);
2763 struct dsa_switch *ds = port->ds;
2764 struct net_device *user_dev;
2765 struct dsa_user_priv *p;
2766 const char *name;
2767 int assign_type;
2768 int ret;
2769
2770 if (!ds->num_tx_queues)
2771 ds->num_tx_queues = 1;
2772
2773 if (port->name) {
2774 name = port->name;
2775 assign_type = NET_NAME_PREDICTABLE;
2776 } else {
2777 name = "eth%d";
2778 assign_type = NET_NAME_ENUM;
2779 }
2780
2781 user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name,
2782 assign_type, ether_setup,
2783 ds->num_tx_queues, 1);
2784 if (user_dev == NULL)
2785 return -ENOMEM;
2786
2787 user_dev->rtnl_link_ops = &dsa_link_ops;
2788 user_dev->ethtool_ops = &dsa_user_ethtool_ops;
2789 #if IS_ENABLED(CONFIG_DCB)
2790 user_dev->dcbnl_ops = &dsa_user_dcbnl_ops;
2791 #endif
2792 if (!is_zero_ether_addr(port->mac))
2793 eth_hw_addr_set(user_dev, port->mac);
2794 else
2795 eth_hw_addr_inherit(user_dev, conduit);
2796 user_dev->priv_flags |= IFF_NO_QUEUE;
2797 if (dsa_switch_supports_uc_filtering(ds))
2798 user_dev->priv_flags |= IFF_UNICAST_FLT;
2799 user_dev->netdev_ops = &dsa_user_netdev_ops;
2800 if (ds->ops->port_max_mtu)
2801 user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2802 SET_NETDEV_DEVTYPE(user_dev, &dsa_type);
2803
2804 SET_NETDEV_DEV(user_dev, port->ds->dev);
2805 SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port);
2806 user_dev->dev.of_node = port->dn;
2807 user_dev->vlan_features = conduit->vlan_features;
2808
2809 p = netdev_priv(user_dev);
2810 user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
2811
2812 ret = gro_cells_init(&p->gcells, user_dev);
2813 if (ret)
2814 goto out_free;
2815
2816 p->dp = port;
2817 INIT_LIST_HEAD(&p->mall_tc_list);
2818 port->user = user_dev;
2819 dsa_user_setup_tagger(user_dev);
2820
2821 netif_carrier_off(user_dev);
2822
2823 ret = dsa_user_phy_setup(user_dev);
2824 if (ret) {
2825 netdev_err(user_dev,
2826 "error %d setting up PHY for tree %d, switch %d, port %d\n",
2827 ret, ds->dst->index, ds->index, port->index);
2828 goto out_gcells;
2829 }
2830
2831 rtnl_lock();
2832
2833 ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN);
2834 if (ret && ret != -EOPNOTSUPP)
2835 dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2836 ret, ETH_DATA_LEN, port->index);
2837
2838 ret = register_netdevice(user_dev);
2839 if (ret) {
2840 netdev_err(conduit, "error %d registering interface %s\n",
2841 ret, user_dev->name);
2842 rtnl_unlock();
2843 goto out_phy;
2844 }
2845
2846 if (IS_ENABLED(CONFIG_DCB)) {
2847 ret = dsa_user_dcbnl_init(user_dev);
2848 if (ret) {
2849 netdev_err(user_dev,
2850 "failed to initialize DCB: %pe\n",
2851 ERR_PTR(ret));
2852 rtnl_unlock();
2853 goto out_unregister;
2854 }
2855 }
2856
2857 ret = netdev_upper_dev_link(conduit, user_dev, NULL);
2858
2859 rtnl_unlock();
2860
2861 if (ret)
2862 goto out_unregister;
2863
2864 return 0;
2865
2866 out_unregister:
2867 unregister_netdev(user_dev);
2868 out_phy:
2869 rtnl_lock();
2870 phylink_disconnect_phy(p->dp->pl);
2871 rtnl_unlock();
2872 dsa_port_phylink_destroy(p->dp);
2873 out_gcells:
2874 gro_cells_destroy(&p->gcells);
2875 out_free:
2876 free_netdev(user_dev);
2877 port->user = NULL;
2878 return ret;
2879 }
2880
2881 void dsa_user_destroy(struct net_device *user_dev)
2882 {
2883 struct net_device *conduit = dsa_user_to_conduit(user_dev);
2884 struct dsa_port *dp = dsa_user_to_port(user_dev);
2885 struct dsa_user_priv *p = netdev_priv(user_dev);
2886
2887 netif_carrier_off(user_dev);
2888 rtnl_lock();
2889 netdev_upper_dev_unlink(conduit, user_dev);
2890 unregister_netdevice(user_dev);
2891 phylink_disconnect_phy(dp->pl);
2892 rtnl_unlock();
2893
2894 dsa_port_phylink_destroy(dp);
2895 gro_cells_destroy(&p->gcells);
2896 free_netdev(user_dev);
2897 }
2898
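/* Live-migrate a user port to a new conduit (CPU-facing) interface, e.g.
 * when its current conduit joins or leaves a LAG. The rollback path relinks
 * the old conduit if anything fails.
 */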
2899 int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
2900 struct netlink_ext_ack *extack)
2901 {
2902 struct net_device *old_conduit = dsa_user_to_conduit(dev);
2903 struct dsa_port *dp = dsa_user_to_port(dev);
2904 struct dsa_switch *ds = dp->ds;
2905 struct net_device *upper;
2906 struct list_head *iter;
2907 int err;
2908
2909 if (conduit == old_conduit)
2910 return 0;
2911
2912 if (!ds->ops->port_change_conduit) {
2913 NL_SET_ERR_MSG_MOD(extack,
2914 "Driver does not support changing DSA conduit");
2915 return -EOPNOTSUPP;
2916 }
2917
2918 if (!netdev_uses_dsa(conduit)) {
2919 NL_SET_ERR_MSG_MOD(extack,
2920 "Interface not eligible as DSA conduit");
2921 return -EOPNOTSUPP;
2922 }
2923
2924 netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
2925 if (dsa_user_dev_check(upper))
2926 continue;
2927 if (netif_is_bridge_master(upper))
2928 continue;
2929 NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers");
2930 return -EOPNOTSUPP;
2931 }
2932
2933 /* Since we allow live-changing the DSA conduit, and we auto-open the
2934 * DSA conduit when the user port is opened, we need to ensure that the
2935 * new DSA conduit is open too.
2936 */
2937 if (dev->flags & IFF_UP) {
2938 err = dev_open(conduit, extack);
2939 if (err)
2940 return err;
2941 }
2942
2943 netdev_upper_dev_unlink(old_conduit, dev);
2944
2945 err = netdev_upper_dev_link(conduit, dev, extack);
2946 if (err)
2947 goto out_revert_old_conduit_unlink;
2948
2949 err = dsa_port_change_conduit(dp, conduit, extack);
2950 if (err)
2951 goto out_revert_conduit_link;
2952
2953 /* Update the MTU of the new CPU port through cross-chip notifiers */
2954 err = dsa_user_change_mtu(dev, dev->mtu);
2955 if (err && err != -EOPNOTSUPP) {
2956 netdev_warn(dev,
2957 "nonfatal error updating MTU with new conduit: %pe\n",
2958 ERR_PTR(err));
2959 }
2960
2961 return 0;
2962
2963 out_revert_conduit_link:
2964 netdev_upper_dev_unlink(conduit, dev);
2965 out_revert_old_conduit_unlink:
2966 netdev_upper_dev_link(old_conduit, dev, NULL);
2967 return err;
2968 }
2969
2970 bool dsa_user_dev_check(const struct net_device *dev)
2971 {
2972 return dev->netdev_ops == &dsa_user_netdev_ops;
2973 }
2974 EXPORT_SYMBOL_GPL(dsa_user_dev_check);
2975
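/* NETDEV_CHANGEUPPER handler for user ports: offload joining or leaving
 * bridge, LAG and HSR uppers. -EOPNOTSUPP from the driver is not fatal; the
 * upper simply remains unoffloaded (software fallback).
 */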
2976 static int dsa_user_changeupper(struct net_device *dev,
2977 struct netdev_notifier_changeupper_info *info)
2978 {
2979 struct netlink_ext_ack *extack;
2980 int err = NOTIFY_DONE;
2981 struct dsa_port *dp;
2982
2983 if (!dsa_user_dev_check(dev))
2984 return err;
2985
2986 dp = dsa_user_to_port(dev);
2987 extack = netdev_notifier_info_to_extack(&info->info);
2988
2989 if (netif_is_bridge_master(info->upper_dev)) {
2990 if (info->linking) {
2991 err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2992 if (!err)
2993 dsa_bridge_mtu_normalization(dp);
2994 if (err == -EOPNOTSUPP) {
2995 NL_SET_ERR_MSG_WEAK_MOD(extack,
2996 "Offloading not supported");
2997 err = 0;
2998 }
2999 err = notifier_from_errno(err);
3000 } else {
3001 dsa_port_bridge_leave(dp, info->upper_dev);
3002 err = NOTIFY_OK;
3003 }
3004 } else if (netif_is_lag_master(info->upper_dev)) {
3005 if (info->linking) {
3006 err = dsa_port_lag_join(dp, info->upper_dev,
3007 info->upper_info, extack);
3008 if (err == -EOPNOTSUPP) {
3009 NL_SET_ERR_MSG_WEAK_MOD(extack,
3010 "Offloading not supported");
3011 err = 0;
3012 }
3013 err = notifier_from_errno(err);
3014 } else {
3015 dsa_port_lag_leave(dp, info->upper_dev);
3016 err = NOTIFY_OK;
3017 }
3018 } else if (is_hsr_master(info->upper_dev)) {
3019 if (info->linking) {
3020 err = dsa_port_hsr_join(dp, info->upper_dev, extack);
3021 if (err == -EOPNOTSUPP) {
3022 NL_SET_ERR_MSG_WEAK_MOD(extack,
3023 "Offloading not supported");
3024 err = 0;
3025 }
3026 err = notifier_from_errno(err);
3027 } else {
3028 dsa_port_hsr_leave(dp, info->upper_dev);
3029 err = NOTIFY_OK;
3030 }
3031 }
3032
3033 return err;
3034 }
3035
3036 static int dsa_user_prechangeupper(struct net_device *dev,
3037 struct netdev_notifier_changeupper_info *info)
3038 {
3039 struct dsa_port *dp;
3040
3041 if (!dsa_user_dev_check(dev))
3042 return NOTIFY_DONE;
3043
3044 dp = dsa_user_to_port(dev);
3045
3046 if (netif_is_bridge_master(info->upper_dev) && !info->linking)
3047 dsa_port_pre_bridge_leave(dp, info->upper_dev);
3048 else if (netif_is_lag_master(info->upper_dev) && !info->linking)
3049 dsa_port_pre_lag_leave(dp, info->upper_dev);
3050 /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices cannot
3051 * be meaningfully placed under a bridge yet
3052 */
3053
3054 return NOTIFY_DONE;
3055 }
3056
3057 static int
3058 dsa_user_lag_changeupper(struct net_device *dev,
3059 struct netdev_notifier_changeupper_info *info)
3060 {
3061 struct net_device *lower;
3062 struct list_head *iter;
3063 int err = NOTIFY_DONE;
3064 struct dsa_port *dp;
3065
3066 if (!netif_is_lag_master(dev))
3067 return err;
3068
3069 netdev_for_each_lower_dev(dev, lower, iter) {
3070 if (!dsa_user_dev_check(lower))
3071 continue;
3072
3073 dp = dsa_user_to_port(lower);
3074 if (!dp->lag)
3075 /* Software LAG */
3076 continue;
3077
3078 err = dsa_user_changeupper(lower, info);
3079 if (notifier_to_errno(err))
3080 break;
3081 }
3082
3083 return err;
3084 }
3085
3086 /* Same as dsa_user_lag_changeupper() except that it calls
3087 * dsa_user_prechangeupper()
3088 */
3089 static int
3090 dsa_user_lag_prechangeupper(struct net_device *dev,
3091 struct netdev_notifier_changeupper_info *info)
3092 {
3093 struct net_device *lower;
3094 struct list_head *iter;
3095 int err = NOTIFY_DONE;
3096 struct dsa_port *dp;
3097
3098 if (!netif_is_lag_master(dev))
3099 return err;
3100
3101 netdev_for_each_lower_dev(dev, lower, iter) {
3102 if (!dsa_user_dev_check(lower))
3103 continue;
3104
3105 dp = dsa_user_to_port(lower);
3106 if (!dp->lag)
3107 /* Software LAG */
3108 continue;
3109
3110 err = dsa_user_prechangeupper(lower, info);
3111 if (notifier_to_errno(err))
3112 break;
3113 }
3114
3115 return err;
3116 }
3117
3118 static int
3119 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
3120 struct netdev_notifier_changeupper_info *info)
3121 {
3122 struct netlink_ext_ack *ext_ack;
3123 struct net_device *user, *br;
3124 struct dsa_port *dp;
3125
3126 ext_ack = netdev_notifier_info_to_extack(&info->info);
3127
3128 if (!is_vlan_dev(dev))
3129 return NOTIFY_DONE;
3130
3131 user = vlan_dev_real_dev(dev);
3132 if (!dsa_user_dev_check(user))
3133 return NOTIFY_DONE;
3134
3135 dp = dsa_user_to_port(user);
3136 br = dsa_port_bridge_dev_get(dp);
3137 if (!br)
3138 return NOTIFY_DONE;
3139
3140 /* Deny enslaving a VLAN device into a VLAN-aware bridge */
3141 if (br_vlan_enabled(br) &&
3142 netif_is_bridge_master(info->upper_dev) && info->linking) {
3143 NL_SET_ERR_MSG_MOD(ext_ack,
3144 "Cannot make VLAN device join VLAN-aware bridge");
3145 return notifier_from_errno(-EINVAL);
3146 }
3147
3148 return NOTIFY_DONE;
3149 }
3150
3151 static int
3152 dsa_user_check_8021q_upper(struct net_device *dev,
3153 struct netdev_notifier_changeupper_info *info)
3154 {
3155 struct dsa_port *dp = dsa_user_to_port(dev);
3156 struct net_device *br = dsa_port_bridge_dev_get(dp);
3157 struct bridge_vlan_info br_info;
3158 struct netlink_ext_ack *extack;
3159 int err = NOTIFY_DONE;
3160 u16 vid;
3161
3162 if (!br || !br_vlan_enabled(br))
3163 return NOTIFY_DONE;
3164
3165 extack = netdev_notifier_info_to_extack(&info->info);
3166 vid = vlan_dev_vlan_id(info->upper_dev);
3167
3168 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
3169 * device or the VID, respectively, is not found. A return
3170 * value of 0 means success, which is a failure for us here.
3171 */
3172 err = br_vlan_get_info(br, vid, &br_info);
3173 if (err == 0) {
3174 NL_SET_ERR_MSG_MOD(extack,
3175 "This VLAN is already configured by the bridge");
3176 return notifier_from_errno(-EBUSY);
3177 }
3178
3179 return NOTIFY_DONE;
3180 }
3181
3182 static int
3183 dsa_user_prechangeupper_sanity_check(struct net_device *dev,
3184 struct netdev_notifier_changeupper_info *info)
3185 {
3186 struct dsa_switch *ds;
3187 struct dsa_port *dp;
3188 int err;
3189
3190 if (!dsa_user_dev_check(dev))
3191 return dsa_prevent_bridging_8021q_upper(dev, info);
3192
3193 dp = dsa_user_to_port(dev);
3194 ds = dp->ds;
3195
3196 if (ds->ops->port_prechangeupper) {
3197 err = ds->ops->port_prechangeupper(ds, dp->index, info);
3198 if (err)
3199 return notifier_from_errno(err);
3200 }
3201
3202 if (is_vlan_dev(info->upper_dev))
3203 return dsa_user_check_8021q_upper(dev, info);
3204
3205 return NOTIFY_DONE;
3206 }
3207
3208 /* To be eligible as a DSA conduit, a LAG must have all lower interfaces be
3209 * eligible DSA conduits. Additionally, all LAG slaves must be DSA conduits of
3210 * switches in the same switch tree.
3211 */
3212 static int dsa_lag_conduit_validate(struct net_device *lag_dev,
3213 struct netlink_ext_ack *extack)
3214 {
3215 struct net_device *lower1, *lower2;
3216 struct list_head *iter1, *iter2;
3217
3218 netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
3219 netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
3220 if (!netdev_uses_dsa(lower1) ||
3221 !netdev_uses_dsa(lower2)) {
3222 NL_SET_ERR_MSG_MOD(extack,
3223 "All LAG ports must be eligible as DSA conduits");
3224 return notifier_from_errno(-EINVAL);
3225 }
3226
3227 if (lower1 == lower2)
3228 continue;
3229
3230 if (!dsa_port_tree_same(lower1->dsa_ptr,
3231 lower2->dsa_ptr)) {
3232 NL_SET_ERR_MSG_MOD(extack,
3233 "LAG contains DSA conduits of disjoint switch trees");
3234 return notifier_from_errno(-EINVAL);
3235 }
3236 }
3237 }
3238
3239 return NOTIFY_DONE;
3240 }
3241
3242 static int
3243 dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit,
3244 struct netdev_notifier_changeupper_info *info)
3245 {
3246 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3247
3248 if (!netdev_uses_dsa(conduit))
3249 return NOTIFY_DONE;
3250
3251 if (!info->linking)
3252 return NOTIFY_DONE;
3253
3254 /* Allow DSA switch uppers */
3255 if (dsa_user_dev_check(info->upper_dev))
3256 return NOTIFY_DONE;
3257
3258 /* Allow bridge uppers of DSA conduits, subject to further
3259 * restrictions in dsa_bridge_prechangelower_sanity_check()
3260 */
3261 if (netif_is_bridge_master(info->upper_dev))
3262 return NOTIFY_DONE;
3263
3264 /* Allow LAG uppers, subject to further restrictions in
3265 * dsa_lag_conduit_prechangelower_sanity_check()
3266 */
3267 if (netif_is_lag_master(info->upper_dev))
3268 return dsa_lag_conduit_validate(info->upper_dev, extack);
3269
3270 NL_SET_ERR_MSG_MOD(extack,
3271 "DSA conduit cannot join unknown upper interfaces");
3272 return notifier_from_errno(-EBUSY);
3273 }
3274
3275 static int
3276 dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev,
3277 struct netdev_notifier_changeupper_info *info)
3278 {
3279 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3280 struct net_device *lag_dev = info->upper_dev;
3281 struct net_device *lower;
3282 struct list_head *iter;
3283
3284 if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
3285 return NOTIFY_DONE;
3286
3287 if (!info->linking)
3288 return NOTIFY_DONE;
3289
3290 if (!netdev_uses_dsa(dev)) {
3291 NL_SET_ERR_MSG(extack,
3292 "Only DSA conduits can join a LAG DSA conduit");
3293 return notifier_from_errno(-EINVAL);
3294 }
3295
3296 netdev_for_each_lower_dev(lag_dev, lower, iter) {
3297 if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
3298 NL_SET_ERR_MSG(extack,
3299 "Interface is DSA conduit for a different switch tree than this LAG");
3300 return notifier_from_errno(-EINVAL);
3301 }
3302
3303 break;
3304 }
3305
3306 return NOTIFY_DONE;
3307 }
3308
3309 /* Don't allow bridging of DSA conduits, since the bridge layer rx_handler
3310 * prevents the DSA fake ethertype handler from being invoked, so we don't get the
3311 * chance to strip off and parse the DSA switch tag protocol header (the bridge
3312 * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
3313 * frames).
3314 * The only case where that would not be an issue is when bridging can already
3315 * be offloaded, such as when the DSA conduit is itself a DSA or plain switchdev
3316 * port, and is bridged only with other ports from the same hardware device.
3317 */
3318 static int
3319 dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
3320 struct netdev_notifier_changeupper_info *info)
3321 {
3322 struct net_device *br = info->upper_dev;
3323 struct netlink_ext_ack *extack;
3324 struct net_device *lower;
3325 struct list_head *iter;
3326
3327 if (!netif_is_bridge_master(br))
3328 return NOTIFY_DONE;
3329
3330 if (!info->linking)
3331 return NOTIFY_DONE;
3332
3333 extack = netdev_notifier_info_to_extack(&info->info);
3334
3335 netdev_for_each_lower_dev(br, lower, iter) {
3336 if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
3337 continue;
3338
3339 if (!netdev_port_same_parent_id(lower, new_lower)) {
3340 NL_SET_ERR_MSG(extack,
3341 "Cannot do software bridging with a DSA conduit");
3342 return notifier_from_errno(-EINVAL);
3343 }
3344 }
3345
3346 return NOTIFY_DONE;
3347 }
3348
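/* The LAG conduit has no DSA-capable lowers left, so move every user port
 * that was using it back to the first physical conduit in the tree.
 */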
3349 static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst,
3350 struct net_device *lag_dev)
3351 {
3352 struct net_device *new_conduit = dsa_tree_find_first_conduit(dst);
3353 struct dsa_port *dp;
3354 int err;
3355
3356 dsa_tree_for_each_user_port(dp, dst) {
3357 if (dsa_port_to_conduit(dp) != lag_dev)
3358 continue;
3359
3360 err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
3361 if (err) {
3362 netdev_err(dp->user,
3363 "failed to restore conduit to %s: %pe\n",
3364 new_conduit->name, ERR_PTR(err));
3365 }
3366 }
3367 }
3368
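/* The conduit joined a LAG: set up the hardware LAG on the CPU port and
 * repoint all user ports that were using this conduit to the LAG device,
 * rolling back on failure.
 */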
3369 static int dsa_conduit_lag_join(struct net_device *conduit,
3370 struct net_device *lag_dev,
3371 struct netdev_lag_upper_info *uinfo,
3372 struct netlink_ext_ack *extack)
3373 {
3374 struct dsa_port *cpu_dp = conduit->dsa_ptr;
3375 struct dsa_switch_tree *dst = cpu_dp->dst;
3376 struct dsa_port *dp;
3377 int err;
3378
3379 err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack);
3380 if (err)
3381 return err;
3382
3383 dsa_tree_for_each_user_port(dp, dst) {
3384 if (dsa_port_to_conduit(dp) != conduit)
3385 continue;
3386
3387 err = dsa_user_change_conduit(dp->user, lag_dev, extack);
3388 if (err)
3389 goto restore;
3390 }
3391
3392 return 0;
3393
3394 restore:
3395 dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
3396 if (dsa_port_to_conduit(dp) != lag_dev)
3397 continue;
3398
3399 err = dsa_user_change_conduit(dp->user, conduit, NULL);
3400 if (err) {
3401 netdev_err(dp->user,
3402 "failed to restore conduit to %s: %pe\n",
3403 conduit->name, ERR_PTR(err));
3404 }
3405 }
3406
3407 dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
3408
3409 return err;
3410 }
3411
3412 static void dsa_conduit_lag_leave(struct net_device *conduit,
3413 struct net_device *lag_dev)
3414 {
3415 struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
3416 struct dsa_switch_tree *dst = cpu_dp->dst;
3417 struct dsa_port *new_cpu_dp = NULL;
3418 struct net_device *lower;
3419 struct list_head *iter;
3420
3421 netdev_for_each_lower_dev(lag_dev, lower, iter) {
3422 if (netdev_uses_dsa(lower)) {
3423 new_cpu_dp = lower->dsa_ptr;
3424 break;
3425 }
3426 }
3427
3428 if (new_cpu_dp) {
3429 /* Update the CPU port of the user ports still under the LAG
3430 * so that dsa_port_to_conduit() continues to work properly
3431 */
3432 dsa_tree_for_each_user_port(dp, dst)
3433 if (dsa_port_to_conduit(dp) == lag_dev)
3434 dp->cpu_dp = new_cpu_dp;
3435
3436 /* Update the index of the virtual CPU port to match the lowest
3437 * physical CPU port
3438 */
3439 lag_dev->dsa_ptr = new_cpu_dp;
3440 wmb();
3441 } else {
3442 /* If the LAG DSA conduit has no ports left, migrate back all
3443 * user ports to the first physical CPU port
3444 */
3445 dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev);
3446 }
3447
3448 /* This DSA conduit has left its LAG in any case, so let
3449 * the CPU port leave the hardware LAG as well
3450 */
3451 dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
3452 }
3453
3454 static int dsa_conduit_changeupper(struct net_device *dev,
3455 struct netdev_notifier_changeupper_info *info)
3456 {
3457 struct netlink_ext_ack *extack;
3458 int err = NOTIFY_DONE;
3459
3460 if (!netdev_uses_dsa(dev))
3461 return err;
3462
3463 extack = netdev_notifier_info_to_extack(&info->info);
3464
3465 if (netif_is_lag_master(info->upper_dev)) {
3466 if (info->linking) {
3467 err = dsa_conduit_lag_join(dev, info->upper_dev,
3468 info->upper_info, extack);
3469 err = notifier_from_errno(err);
3470 } else {
3471 dsa_conduit_lag_leave(dev, info->upper_dev);
3472 err = NOTIFY_OK;
3473 }
3474 }
3475
3476 return err;
3477 }
3478
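/* netdev notifier: sanity-check and offload upper device changes for both
 * user ports and conduits, and track the conduit's administrative and
 * operational state on behalf of the tagging path.
 */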
3479 static int dsa_user_netdevice_event(struct notifier_block *nb,
3480 unsigned long event, void *ptr)
3481 {
3482 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3483
3484 switch (event) {
3485 case NETDEV_PRECHANGEUPPER: {
3486 struct netdev_notifier_changeupper_info *info = ptr;
3487 int err;
3488
3489 err = dsa_user_prechangeupper_sanity_check(dev, info);
3490 if (notifier_to_errno(err))
3491 return err;
3492
3493 err = dsa_conduit_prechangeupper_sanity_check(dev, info);
3494 if (notifier_to_errno(err))
3495 return err;
3496
3497 err = dsa_lag_conduit_prechangelower_sanity_check(dev, info);
3498 if (notifier_to_errno(err))
3499 return err;
3500
3501 err = dsa_bridge_prechangelower_sanity_check(dev, info);
3502 if (notifier_to_errno(err))
3503 return err;
3504
3505 err = dsa_user_prechangeupper(dev, ptr);
3506 if (notifier_to_errno(err))
3507 return err;
3508
3509 err = dsa_user_lag_prechangeupper(dev, ptr);
3510 if (notifier_to_errno(err))
3511 return err;
3512
3513 break;
3514 }
3515 case NETDEV_CHANGEUPPER: {
3516 int err;
3517
3518 err = dsa_user_changeupper(dev, ptr);
3519 if (notifier_to_errno(err))
3520 return err;
3521
3522 err = dsa_user_lag_changeupper(dev, ptr);
3523 if (notifier_to_errno(err))
3524 return err;
3525
3526 err = dsa_conduit_changeupper(dev, ptr);
3527 if (notifier_to_errno(err))
3528 return err;
3529
3530 break;
3531 }
3532 case NETDEV_CHANGELOWERSTATE: {
3533 struct netdev_notifier_changelowerstate_info *info = ptr;
3534 struct dsa_port *dp;
3535 int err = 0;
3536
3537 if (dsa_user_dev_check(dev)) {
3538 dp = dsa_user_to_port(dev);
3539
3540 err = dsa_port_lag_change(dp, info->lower_state_info);
3541 }
3542
3543 /* Mirror LAG port events on DSA conduits that are in
3544 * a LAG towards their respective switch CPU ports
3545 */
3546 if (netdev_uses_dsa(dev)) {
3547 dp = dev->dsa_ptr;
3548
3549 err = dsa_port_lag_change(dp, info->lower_state_info);
3550 }
3551
3552 return notifier_from_errno(err);
3553 }
3554 case NETDEV_CHANGE:
3555 case NETDEV_UP: {
3556 /* Track state of conduit port.
3557 * The DSA driver may require the conduit port (and indirectly
3558 * the tagger) to be available for some special operations.
3559 */
3560 if (netdev_uses_dsa(dev)) {
3561 struct dsa_port *cpu_dp = dev->dsa_ptr;
3562 struct dsa_switch_tree *dst = cpu_dp->ds->dst;
3563
3564 /* Track when the conduit port is UP */
3565 dsa_tree_conduit_oper_state_change(dst, dev,
3566 netif_oper_up(dev));
3567
3568 /* Track when the conduit port is ready and can accept
3569 * packets.
3570 * The NETDEV_UP event is not enough to flag a port as ready.
3571 * We also have to wait for linkwatch_do_dev() to run
3572 * dev_activate() and emit a NETDEV_CHANGE event.
3573 * We check whether a conduit port is ready by checking whether
3574 * the device has a qdisc assigned which is not the noop qdisc.
3575 */
3576 dsa_tree_conduit_admin_state_change(dst, dev,
3577 !qdisc_tx_is_noop(dev));
3578
3579 return NOTIFY_OK;
3580 }
3581
3582 return NOTIFY_DONE;
3583 }
3584 case NETDEV_GOING_DOWN: {
3585 struct dsa_port *dp, *cpu_dp;
3586 struct dsa_switch_tree *dst;
3587 LIST_HEAD(close_list);
3588
3589 if (!netdev_uses_dsa(dev))
3590 return NOTIFY_DONE;
3591
3592 cpu_dp = dev->dsa_ptr;
3593 dst = cpu_dp->ds->dst;
3594
3595 dsa_tree_conduit_admin_state_change(dst, dev, false);
3596
3597 list_for_each_entry(dp, &dst->ports, list) {
3598 if (!dsa_port_is_user(dp))
3599 continue;
3600
3601 if (dp->cpu_dp != cpu_dp)
3602 continue;
3603
3604 list_add(&dp->user->close_list, &close_list);
3605 }
3606
3607 netif_close_many(&close_list, true);
3608
3609 return NOTIFY_OK;
3610 }
3611 default:
3612 break;
3613 }
3614
3615 return NOTIFY_DONE;
3616 }
3617
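/* Tell the bridge that this FDB entry has been committed to hardware, so it
 * gets marked as offloaded (e.g. shown as "offload" in the bridge fdb dump).
 */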
3618 static void
3619 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
3620 {
3621 struct switchdev_notifier_fdb_info info = {};
3622
3623 info.addr = switchdev_work->addr;
3624 info.vid = switchdev_work->vid;
3625 info.offloaded = true;
3626 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
3627 switchdev_work->orig_dev, &info.info, NULL);
3628 }
3629
3630 static void dsa_user_switchdev_event_work(struct work_struct *work)
3631 {
3632 struct dsa_switchdev_event_work *switchdev_work =
3633 container_of(work, struct dsa_switchdev_event_work, work);
3634 const unsigned char *addr = switchdev_work->addr;
3635 struct net_device *dev = switchdev_work->dev;
3636 u16 vid = switchdev_work->vid;
3637 struct dsa_switch *ds;
3638 struct dsa_port *dp;
3639 int err;
3640
3641 dp = dsa_user_to_port(dev);
3642 ds = dp->ds;
3643
3644 switch (switchdev_work->event) {
3645 case SWITCHDEV_FDB_ADD_TO_DEVICE:
3646 if (switchdev_work->host_addr)
3647 err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
3648 else if (dp->lag)
3649 err = dsa_port_lag_fdb_add(dp, addr, vid);
3650 else
3651 err = dsa_port_fdb_add(dp, addr, vid);
3652 if (err) {
3653 dev_err(ds->dev,
3654 "port %d failed to add %pM vid %d to fdb: %d\n",
3655 dp->index, addr, vid, err);
3656 break;
3657 }
3658 dsa_fdb_offload_notify(switchdev_work);
3659 break;
3660
3661 case SWITCHDEV_FDB_DEL_TO_DEVICE:
3662 if (switchdev_work->host_addr)
3663 err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
3664 else if (dp->lag)
3665 err = dsa_port_lag_fdb_del(dp, addr, vid);
3666 else
3667 err = dsa_port_fdb_del(dp, addr, vid);
3668 if (err) {
3669 dev_err(ds->dev,
3670 "port %d failed to delete %pM vid %d from fdb: %d\n",
3671 dp->index, addr, vid, err);
3672 }
3673
3674 break;
3675 }
3676
3677 kfree(switchdev_work);
3678 }
3679
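/* A device is "foreign" if this switch tree cannot offload it: a bridge or a
 * bridge port which is not offloaded by the tree, or any unrelated interface.
 * FDB entries pointing towards foreign devices are handled as host addresses.
 */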
static bool dsa_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	const struct dsa_port *dp = dsa_user_to_port(dev);
	struct dsa_switch_tree *dst = dp->ds->dst;

	if (netif_is_bridge_master(foreign_dev))
		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);

	if (netif_is_bridge_port(foreign_dev))
		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);

	/* Everything else is foreign */
	return true;
}

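/* Runs in atomic context (under rcu_read_lock() from the switchdev notifier
 * chain), so it only validates the event and defers the actual hardware
 * programming to dsa_user_switchdev_event_work().
 */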
static int dsa_user_fdb_event(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_user_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_user_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_user_dev_check,
							   dsa_foreign_dev_check,
							   dsa_user_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

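/* Blocking (process context) counterpart of the atomic notifier above,
 * handling port object additions/deletions and attribute changes, possibly on
 * behalf of foreign interfaces bridged with DSA user ports.
 */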
static int dsa_user_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

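/* Notifier blocks through which DSA observes netdevice events (conduit and
 * user port tracking) and switchdev events (FDB, port objects, attributes).
 */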
static struct notifier_block dsa_user_nb __read_mostly = {
	.notifier_call = dsa_user_netdevice_event,
};

struct notifier_block dsa_user_switchdev_notifier = {
	.notifier_call = dsa_user_switchdev_event,
};

struct notifier_block dsa_user_switchdev_blocking_notifier = {
	.notifier_call = dsa_user_switchdev_blocking_event,
};

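/* Register the netdevice and switchdev notifiers used by DSA user ports,
 * unwinding in reverse order if any registration fails.
 */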
int dsa_user_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_user_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_user_nb);
	return err;
}

void dsa_user_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_user_nb);
	if (err)
		pr_err("DSA: failed to unregister user notifier (%d)\n", err);
}