1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/dsa/user.c - user device handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 */
6
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/selftests.h>
19 #include <net/tc_act/tc_mirred.h>
20 #include <linux/if_bridge.h>
21 #include <linux/if_hsr.h>
22 #include <net/dcbnl.h>
23 #include <linux/netpoll.h>
24 #include <linux/string.h>
25
26 #include "conduit.h"
27 #include "dsa.h"
28 #include "netlink.h"
29 #include "port.h"
30 #include "switch.h"
31 #include "tag.h"
32 #include "user.h"
33
34 struct dsa_switchdev_event_work {
35 struct net_device *dev;
36 struct net_device *orig_dev;
37 struct work_struct work;
38 unsigned long event;
39 /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
40 * SWITCHDEV_FDB_DEL_TO_DEVICE
41 */
42 unsigned char addr[ETH_ALEN];
43 u16 vid;
44 bool host_addr;
45 };
46
47 enum dsa_standalone_event {
48 DSA_UC_ADD,
49 DSA_UC_DEL,
50 DSA_MC_ADD,
51 DSA_MC_DEL,
52 };
53
54 struct dsa_standalone_event_work {
55 struct work_struct work;
56 struct net_device *dev;
57 enum dsa_standalone_event event;
58 unsigned char addr[ETH_ALEN];
59 u16 vid;
60 };
61
62 struct dsa_host_vlan_rx_filtering_ctx {
63 struct net_device *dev;
64 const unsigned char *addr;
65 enum dsa_standalone_event event;
66 };
67
68 static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
69 {
70 return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
71 ds->fdb_isolation && !ds->vlan_filtering_is_global &&
72 !ds->needs_standalone_vlan_filtering;
73 }
74
75 static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
76 {
77 return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
78 ds->fdb_isolation && !ds->vlan_filtering_is_global &&
79 !ds->needs_standalone_vlan_filtering;
80 }
81
82 static void dsa_user_standalone_event_work(struct work_struct *work)
83 {
84 struct dsa_standalone_event_work *standalone_work =
85 container_of(work, struct dsa_standalone_event_work, work);
86 const unsigned char *addr = standalone_work->addr;
87 struct net_device *dev = standalone_work->dev;
88 struct dsa_port *dp = dsa_user_to_port(dev);
89 struct switchdev_obj_port_mdb mdb;
90 struct dsa_switch *ds = dp->ds;
91 u16 vid = standalone_work->vid;
92 int err;
93
94 switch (standalone_work->event) {
95 case DSA_UC_ADD:
96 err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
97 if (err) {
98 dev_err(ds->dev,
99 "port %d failed to add %pM vid %d to fdb: %d\n",
100 dp->index, addr, vid, err);
101 break;
102 }
103 break;
104
105 case DSA_UC_DEL:
106 err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
107 if (err) {
108 dev_err(ds->dev,
109 "port %d failed to delete %pM vid %d from fdb: %d\n",
110 dp->index, addr, vid, err);
111 }
112
113 break;
114 case DSA_MC_ADD:
115 ether_addr_copy(mdb.addr, addr);
116 mdb.vid = vid;
117
118 err = dsa_port_standalone_host_mdb_add(dp, &mdb);
119 if (err) {
120 dev_err(ds->dev,
121 "port %d failed to add %pM vid %d to mdb: %d\n",
122 dp->index, addr, vid, err);
123 break;
124 }
125 break;
126 case DSA_MC_DEL:
127 ether_addr_copy(mdb.addr, addr);
128 mdb.vid = vid;
129
130 err = dsa_port_standalone_host_mdb_del(dp, &mdb);
131 if (err) {
132 dev_err(ds->dev,
133 "port %d failed to delete %pM vid %d from mdb: %d\n",
134 dp->index, addr, vid, err);
135 }
136
137 break;
138 }
139
140 kfree(standalone_work);
141 }
142
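/* The callers run in atomic context (under the netdev address list lock),
 * while programming the hardware FDB/MDB may sleep, so the actual work is
 * deferred to the DSA ordered workqueue.
 */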
143 static int dsa_user_schedule_standalone_work(struct net_device *dev,
144 enum dsa_standalone_event event,
145 const unsigned char *addr,
146 u16 vid)
147 {
148 struct dsa_standalone_event_work *standalone_work;
149
150 standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
151 if (!standalone_work)
152 return -ENOMEM;
153
154 INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
155 standalone_work->event = event;
156 standalone_work->dev = dev;
157
158 ether_addr_copy(standalone_work->addr, addr);
159 standalone_work->vid = vid;
160
161 dsa_schedule_work(&standalone_work->work);
162
163 return 0;
164 }
165
166 static int dsa_user_host_vlan_rx_filtering(void *arg, int vid)
167 {
168 struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
169
170 return dsa_user_schedule_standalone_work(ctx->dev, ctx->event,
171 ctx->addr, vid);
172 }
173
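/* Call @cb for VID 0 and for each VLAN added on this user port through
 * .ndo_vlan_rx_add_vid(); dev->addr_list_lock protects the dp->user_vlans
 * list.
 */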
174 static int dsa_user_vlan_for_each(struct net_device *dev,
175 int (*cb)(void *arg, int vid), void *arg)
176 {
177 struct dsa_port *dp = dsa_user_to_port(dev);
178 struct dsa_vlan *v;
179 int err;
180
181 lockdep_assert_held(&dev->addr_list_lock);
182
183 err = cb(arg, 0);
184 if (err)
185 return err;
186
187 list_for_each_entry(v, &dp->user_vlans, list) {
188 err = cb(arg, v->vid);
189 if (err)
190 return err;
191 }
192
193 return 0;
194 }
195
196 static int dsa_user_sync_uc(struct net_device *dev,
197 const unsigned char *addr)
198 {
199 struct net_device *conduit = dsa_user_to_conduit(dev);
200 struct dsa_port *dp = dsa_user_to_port(dev);
201 struct dsa_host_vlan_rx_filtering_ctx ctx = {
202 .dev = dev,
203 .addr = addr,
204 .event = DSA_UC_ADD,
205 };
206
207 dev_uc_add(conduit, addr);
208
209 if (!dsa_switch_supports_uc_filtering(dp->ds))
210 return 0;
211
212 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
213 &ctx);
214 }
215
216 static int dsa_user_unsync_uc(struct net_device *dev,
217 const unsigned char *addr)
218 {
219 struct net_device *conduit = dsa_user_to_conduit(dev);
220 struct dsa_port *dp = dsa_user_to_port(dev);
221 struct dsa_host_vlan_rx_filtering_ctx ctx = {
222 .dev = dev,
223 .addr = addr,
224 .event = DSA_UC_DEL,
225 };
226
227 dev_uc_del(conduit, addr);
228
229 if (!dsa_switch_supports_uc_filtering(dp->ds))
230 return 0;
231
232 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
233 &ctx);
234 }
235
236 static int dsa_user_sync_mc(struct net_device *dev,
237 const unsigned char *addr)
238 {
239 struct net_device *conduit = dsa_user_to_conduit(dev);
240 struct dsa_port *dp = dsa_user_to_port(dev);
241 struct dsa_host_vlan_rx_filtering_ctx ctx = {
242 .dev = dev,
243 .addr = addr,
244 .event = DSA_MC_ADD,
245 };
246
247 dev_mc_add(conduit, addr);
248
249 if (!dsa_switch_supports_mc_filtering(dp->ds))
250 return 0;
251
252 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
253 &ctx);
254 }
255
256 static int dsa_user_unsync_mc(struct net_device *dev,
257 const unsigned char *addr)
258 {
259 struct net_device *conduit = dsa_user_to_conduit(dev);
260 struct dsa_port *dp = dsa_user_to_port(dev);
261 struct dsa_host_vlan_rx_filtering_ctx ctx = {
262 .dev = dev,
263 .addr = addr,
264 .event = DSA_MC_DEL,
265 };
266
267 dev_mc_del(conduit, addr);
268
269 if (!dsa_switch_supports_mc_filtering(dp->ds))
270 return 0;
271
272 return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
273 &ctx);
274 }
275
276 void dsa_user_sync_ha(struct net_device *dev)
277 {
278 struct dsa_port *dp = dsa_user_to_port(dev);
279 struct dsa_switch *ds = dp->ds;
280 struct netdev_hw_addr *ha;
281
282 netif_addr_lock_bh(dev);
283
284 netdev_for_each_synced_mc_addr(ha, dev)
285 dsa_user_sync_mc(dev, ha->addr);
286
287 netdev_for_each_synced_uc_addr(ha, dev)
288 dsa_user_sync_uc(dev, ha->addr);
289
290 netif_addr_unlock_bh(dev);
291
292 if (dsa_switch_supports_uc_filtering(ds) ||
293 dsa_switch_supports_mc_filtering(ds))
294 dsa_flush_workqueue();
295 }
296
297 void dsa_user_unsync_ha(struct net_device *dev)
298 {
299 struct dsa_port *dp = dsa_user_to_port(dev);
300 struct dsa_switch *ds = dp->ds;
301 struct netdev_hw_addr *ha;
302
303 netif_addr_lock_bh(dev);
304
305 netdev_for_each_synced_uc_addr(ha, dev)
306 dsa_user_unsync_uc(dev, ha->addr);
307
308 netdev_for_each_synced_mc_addr(ha, dev)
309 dsa_user_unsync_mc(dev, ha->addr);
310
311 netif_addr_unlock_bh(dev);
312
313 if (dsa_switch_supports_uc_filtering(ds) ||
314 dsa_switch_supports_mc_filtering(ds))
315 dsa_flush_workqueue();
316 }
317
318 /* user mii_bus handling ***************************************************/
319 static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg)
320 {
321 struct dsa_switch *ds = bus->priv;
322
323 if (ds->phys_mii_mask & (1 << addr))
324 return ds->ops->phy_read(ds, addr, reg);
325
326 return 0xffff;
327 }
328
329 static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
330 {
331 struct dsa_switch *ds = bus->priv;
332
333 if (ds->phys_mii_mask & (1 << addr))
334 return ds->ops->phy_write(ds, addr, reg, val);
335
336 return 0;
337 }
338
339 void dsa_user_mii_bus_init(struct dsa_switch *ds)
340 {
341 ds->user_mii_bus->priv = (void *)ds;
342 ds->user_mii_bus->name = "dsa user smi";
343 ds->user_mii_bus->read = dsa_user_phy_read;
344 ds->user_mii_bus->write = dsa_user_phy_write;
345 snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
346 ds->dst->index, ds->index);
347 ds->user_mii_bus->parent = ds->dev;
348 ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
349 }
350
351
352 /* user device handling ****************************************************/
353 static int dsa_user_get_iflink(const struct net_device *dev)
354 {
355 return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
356 }
357
358 int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr)
359 {
360 struct net_device *conduit = dsa_user_to_conduit(dev);
361 struct dsa_port *dp = dsa_user_to_port(dev);
362 struct dsa_switch *ds = dp->ds;
363 int err;
364
365 if (dsa_switch_supports_uc_filtering(ds)) {
366 err = dsa_port_standalone_host_fdb_add(dp, addr, 0);
367 if (err)
368 goto out;
369 }
370
371 if (!ether_addr_equal(addr, conduit->dev_addr)) {
372 err = dev_uc_add(conduit, addr);
373 if (err < 0)
374 goto del_host_addr;
375 }
376
377 return 0;
378
379 del_host_addr:
380 if (dsa_switch_supports_uc_filtering(ds))
381 dsa_port_standalone_host_fdb_del(dp, addr, 0);
382 out:
383 return err;
384 }
385
386 void dsa_user_host_uc_uninstall(struct net_device *dev)
387 {
388 struct net_device *conduit = dsa_user_to_conduit(dev);
389 struct dsa_port *dp = dsa_user_to_port(dev);
390 struct dsa_switch *ds = dp->ds;
391
392 if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
393 dev_uc_del(conduit, dev->dev_addr);
394
395 if (dsa_switch_supports_uc_filtering(ds))
396 dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
397 }
398
399 static int dsa_user_open(struct net_device *dev)
400 {
401 struct net_device *conduit = dsa_user_to_conduit(dev);
402 struct dsa_port *dp = dsa_user_to_port(dev);
403 int err;
404
405 err = dev_open(conduit, NULL);
406 if (err < 0) {
407 netdev_err(dev, "failed to open conduit %s\n", conduit->name);
408 goto out;
409 }
410
411 err = dsa_user_host_uc_install(dev, dev->dev_addr);
412 if (err)
413 goto out;
414
415 err = dsa_port_enable_rt(dp, dev->phydev);
416 if (err)
417 goto out_del_host_uc;
418
419 return 0;
420
421 out_del_host_uc:
422 dsa_user_host_uc_uninstall(dev);
423 out:
424 return err;
425 }
426
427 static int dsa_user_close(struct net_device *dev)
428 {
429 struct dsa_port *dp = dsa_user_to_port(dev);
430
431 dsa_port_disable_rt(dp);
432
433 dsa_user_host_uc_uninstall(dev);
434
435 return 0;
436 }
437
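/* Match host flooding to the standalone port's flags: IFF_PROMISC turns on
 * flooding of unknown unicast towards the CPU port, while IFF_PROMISC or
 * IFF_ALLMULTI turns on flooding of unknown multicast.
 */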
438 static void dsa_user_manage_host_flood(struct net_device *dev)
439 {
440 bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
441 struct dsa_port *dp = dsa_user_to_port(dev);
442 bool uc = dev->flags & IFF_PROMISC;
443
444 dsa_port_set_host_flood(dp, uc, mc);
445 }
446
447 static void dsa_user_change_rx_flags(struct net_device *dev, int change)
448 {
449 struct net_device *conduit = dsa_user_to_conduit(dev);
450 struct dsa_port *dp = dsa_user_to_port(dev);
451 struct dsa_switch *ds = dp->ds;
452
453 if (change & IFF_ALLMULTI)
454 dev_set_allmulti(conduit,
455 dev->flags & IFF_ALLMULTI ? 1 : -1);
456 if (change & IFF_PROMISC)
457 dev_set_promiscuity(conduit,
458 dev->flags & IFF_PROMISC ? 1 : -1);
459
460 if (dsa_switch_supports_uc_filtering(ds) &&
461 dsa_switch_supports_mc_filtering(ds))
462 dsa_user_manage_host_flood(dev);
463 }
464
465 static void dsa_user_set_rx_mode(struct net_device *dev)
466 {
467 __dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc);
468 __dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc);
469 }
470
471 static int dsa_user_set_mac_address(struct net_device *dev, void *a)
472 {
473 struct dsa_port *dp = dsa_user_to_port(dev);
474 struct dsa_switch *ds = dp->ds;
475 struct sockaddr *addr = a;
476 int err;
477
478 if (!is_valid_ether_addr(addr->sa_data))
479 return -EADDRNOTAVAIL;
480
481 if (ds->ops->port_set_mac_address) {
482 err = ds->ops->port_set_mac_address(ds, dp->index,
483 addr->sa_data);
484 if (err)
485 return err;
486 }
487
488 /* If the port is down, the address isn't synced yet to hardware or
489 * to the DSA conduit, so there is nothing to change.
490 */
491 if (!(dev->flags & IFF_UP))
492 goto out_change_dev_addr;
493
494 err = dsa_user_host_uc_install(dev, addr->sa_data);
495 if (err)
496 return err;
497
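/* The new address was installed above; only now is the old one (still in
 * dev->dev_addr at this point) removed, so the port is never left without
 * a usable host address.
 */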
498 dsa_user_host_uc_uninstall(dev);
499
500 out_change_dev_addr:
501 eth_hw_addr_set(dev, addr->sa_data);
502
503 return 0;
504 }
505
506 struct dsa_user_dump_ctx {
507 struct net_device *dev;
508 struct sk_buff *skb;
509 struct netlink_callback *cb;
510 int idx;
511 };
512
513 static int
514 dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
515 bool is_static, void *data)
516 {
517 struct dsa_user_dump_ctx *dump = data;
518 struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
519 u32 portid = NETLINK_CB(dump->cb->skb).portid;
520 u32 seq = dump->cb->nlh->nlmsg_seq;
521 struct nlmsghdr *nlh;
522 struct ndmsg *ndm;
523
524 if (dump->idx < ctx->fdb_idx)
525 goto skip;
526
527 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
528 sizeof(*ndm), NLM_F_MULTI);
529 if (!nlh)
530 return -EMSGSIZE;
531
532 ndm = nlmsg_data(nlh);
533 ndm->ndm_family = AF_BRIDGE;
534 ndm->ndm_pad1 = 0;
535 ndm->ndm_pad2 = 0;
536 ndm->ndm_flags = NTF_SELF;
537 ndm->ndm_type = 0;
538 ndm->ndm_ifindex = dump->dev->ifindex;
539 ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
540
541 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
542 goto nla_put_failure;
543
544 if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
545 goto nla_put_failure;
546
547 nlmsg_end(dump->skb, nlh);
548
549 skip:
550 dump->idx++;
551 return 0;
552
553 nla_put_failure:
554 nlmsg_cancel(dump->skb, nlh);
555 return -EMSGSIZE;
556 }
557
558 static int
559 dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
560 struct net_device *dev, struct net_device *filter_dev,
561 int *idx)
562 {
563 struct dsa_port *dp = dsa_user_to_port(dev);
564 struct dsa_user_dump_ctx dump = {
565 .dev = dev,
566 .skb = skb,
567 .cb = cb,
568 .idx = *idx,
569 };
570 int err;
571
572 err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump);
573 *idx = dump.idx;
574
575 return err;
576 }
577
578 static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
579 {
580 struct dsa_user_priv *p = netdev_priv(dev);
581 struct dsa_switch *ds = p->dp->ds;
582 int port = p->dp->index;
583
584 /* Pass through to switch driver if it supports timestamping */
585 switch (cmd) {
586 case SIOCGHWTSTAMP:
587 if (ds->ops->port_hwtstamp_get)
588 return ds->ops->port_hwtstamp_get(ds, port, ifr);
589 break;
590 case SIOCSHWTSTAMP:
591 if (ds->ops->port_hwtstamp_set)
592 return ds->ops->port_hwtstamp_set(ds, port, ifr);
593 break;
594 }
595
596 return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
597 }
598
599 static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx,
600 const struct switchdev_attr *attr,
601 struct netlink_ext_ack *extack)
602 {
603 struct dsa_port *dp = dsa_user_to_port(dev);
604 int ret;
605
606 if (ctx && ctx != dp)
607 return 0;
608
609 switch (attr->id) {
610 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
611 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
612 return -EOPNOTSUPP;
613
614 ret = dsa_port_set_state(dp, attr->u.stp_state, true);
615 break;
616 case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
617 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
618 return -EOPNOTSUPP;
619
620 ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
621 break;
622 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
623 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
624 return -EOPNOTSUPP;
625
626 ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
627 extack);
628 break;
629 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
630 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
631 return -EOPNOTSUPP;
632
633 ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
634 break;
635 case SWITCHDEV_ATTR_ID_BRIDGE_MST:
636 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
637 return -EOPNOTSUPP;
638
639 ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
640 break;
641 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
642 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
643 return -EOPNOTSUPP;
644
645 ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
646 extack);
647 break;
648 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
649 if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
650 return -EOPNOTSUPP;
651
652 ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
653 break;
654 case SWITCHDEV_ATTR_ID_VLAN_MSTI:
655 if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
656 return -EOPNOTSUPP;
657
658 ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
659 break;
660 default:
661 ret = -EOPNOTSUPP;
662 break;
663 }
664
665 return ret;
666 }
667
668 /* Must be called under rcu_read_lock() */
669 static int
670 dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
671 const struct switchdev_obj_port_vlan *vlan)
672 {
673 struct net_device *upper_dev;
674 struct list_head *iter;
675
676 netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
677 u16 vid;
678
679 if (!is_vlan_dev(upper_dev))
680 continue;
681
682 vid = vlan_dev_vlan_id(upper_dev);
683 if (vid == vlan->vid)
684 return -EBUSY;
685 }
686
687 return 0;
688 }
689
690 static int dsa_user_vlan_add(struct net_device *dev,
691 const struct switchdev_obj *obj,
692 struct netlink_ext_ack *extack)
693 {
694 struct dsa_port *dp = dsa_user_to_port(dev);
695 struct switchdev_obj_port_vlan *vlan;
696 int err;
697
698 if (dsa_port_skip_vlan_configuration(dp)) {
699 NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
700 return 0;
701 }
702
703 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
704
705 /* Deny adding a bridge VLAN when there is already an 802.1Q upper with
706 * the same VID.
707 */
708 if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
709 rcu_read_lock();
710 err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan);
711 rcu_read_unlock();
712 if (err) {
713 NL_SET_ERR_MSG_MOD(extack,
714 "Port already has a VLAN upper with this VID");
715 return err;
716 }
717 }
718
719 return dsa_port_vlan_add(dp, vlan, extack);
720 }
721
722 /* Offload a VLAN installed on the bridge or on a foreign interface by
723 * installing it as a VLAN towards the CPU port.
724 */
725 static int dsa_user_host_vlan_add(struct net_device *dev,
726 const struct switchdev_obj *obj,
727 struct netlink_ext_ack *extack)
728 {
729 struct dsa_port *dp = dsa_user_to_port(dev);
730 struct switchdev_obj_port_vlan vlan;
731
732 /* Do nothing if this is a software bridge */
733 if (!dp->bridge)
734 return -EOPNOTSUPP;
735
736 if (dsa_port_skip_vlan_configuration(dp)) {
737 NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
738 return 0;
739 }
740
741 vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
742
743 /* Even though drivers often handle CPU membership in special ways,
744 * it doesn't make sense to program a PVID, so clear this flag.
745 */
746 vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
747
748 return dsa_port_host_vlan_add(dp, &vlan, extack);
749 }
750
751 static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx,
752 const struct switchdev_obj *obj,
753 struct netlink_ext_ack *extack)
754 {
755 struct dsa_port *dp = dsa_user_to_port(dev);
756 int err;
757
758 if (ctx && ctx != dp)
759 return 0;
760
761 switch (obj->id) {
762 case SWITCHDEV_OBJ_ID_PORT_MDB:
763 if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
764 return -EOPNOTSUPP;
765
766 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
767 break;
768 case SWITCHDEV_OBJ_ID_HOST_MDB:
769 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
770 return -EOPNOTSUPP;
771
772 err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
773 break;
774 case SWITCHDEV_OBJ_ID_PORT_VLAN:
775 if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
776 err = dsa_user_vlan_add(dev, obj, extack);
777 else
778 err = dsa_user_host_vlan_add(dev, obj, extack);
779 break;
780 case SWITCHDEV_OBJ_ID_MRP:
781 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
782 return -EOPNOTSUPP;
783
784 err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
785 break;
786 case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
787 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
788 return -EOPNOTSUPP;
789
790 err = dsa_port_mrp_add_ring_role(dp,
791 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
792 break;
793 default:
794 err = -EOPNOTSUPP;
795 break;
796 }
797
798 return err;
799 }
800
801 static int dsa_user_vlan_del(struct net_device *dev,
802 const struct switchdev_obj *obj)
803 {
804 struct dsa_port *dp = dsa_user_to_port(dev);
805 struct switchdev_obj_port_vlan *vlan;
806
807 if (dsa_port_skip_vlan_configuration(dp))
808 return 0;
809
810 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
811
812 return dsa_port_vlan_del(dp, vlan);
813 }
814
815 static int dsa_user_host_vlan_del(struct net_device *dev,
816 const struct switchdev_obj *obj)
817 {
818 struct dsa_port *dp = dsa_user_to_port(dev);
819 struct switchdev_obj_port_vlan *vlan;
820
821 /* Do nothing if this is a software bridge */
822 if (!dp->bridge)
823 return -EOPNOTSUPP;
824
825 if (dsa_port_skip_vlan_configuration(dp))
826 return 0;
827
828 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
829
830 return dsa_port_host_vlan_del(dp, vlan);
831 }
832
833 static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
834 const struct switchdev_obj *obj)
835 {
836 struct dsa_port *dp = dsa_user_to_port(dev);
837 int err;
838
839 if (ctx && ctx != dp)
840 return 0;
841
842 switch (obj->id) {
843 case SWITCHDEV_OBJ_ID_PORT_MDB:
844 if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
845 return -EOPNOTSUPP;
846
847 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
848 break;
849 case SWITCHDEV_OBJ_ID_HOST_MDB:
850 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
851 return -EOPNOTSUPP;
852
853 err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
854 break;
855 case SWITCHDEV_OBJ_ID_PORT_VLAN:
856 if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
857 err = dsa_user_vlan_del(dev, obj);
858 else
859 err = dsa_user_host_vlan_del(dev, obj);
860 break;
861 case SWITCHDEV_OBJ_ID_MRP:
862 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
863 return -EOPNOTSUPP;
864
865 err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
866 break;
867 case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
868 if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
869 return -EOPNOTSUPP;
870
871 err = dsa_port_mrp_del_ring_role(dp,
872 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
873 break;
874 default:
875 err = -EOPNOTSUPP;
876 break;
877 }
878
879 return err;
880 }
881
882 static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
883 struct sk_buff *skb)
884 {
885 #ifdef CONFIG_NET_POLL_CONTROLLER
886 struct dsa_user_priv *p = netdev_priv(dev);
887
888 return netpoll_send_skb(p->netpoll, skb);
889 #else
890 BUG();
891 return NETDEV_TX_OK;
892 #endif
893 }
894
895 static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
896 struct sk_buff *skb)
897 {
898 struct dsa_switch *ds = p->dp->ds;
899
900 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NOBPF))
901 return;
902
903 if (!ds->ops->port_txtstamp)
904 return;
905
906 ds->ops->port_txtstamp(ds, p->dp->index, skb);
907 }
908
909 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
910 {
911 /* SKB for netpoll still needs to be mangled with the protocol-specific
912 * tag to be successfully transmitted
913 */
914 if (unlikely(netpoll_tx_running(dev)))
915 return dsa_user_netpoll_send_skb(dev, skb);
916
917 /* Queue the SKB for transmission on the parent interface, but
918 * do not modify its EtherType
919 */
920 skb->dev = dsa_user_to_conduit(dev);
921 dev_queue_xmit(skb);
922
923 return NETDEV_TX_OK;
924 }
925 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
926
927 static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
928 {
929 struct dsa_user_priv *p = netdev_priv(dev);
930 struct sk_buff *nskb;
931
932 dev_sw_netstats_tx_add(dev, 1, skb->len);
933
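/* Zero the control block before handing the skb to the TX timestamping code
 * and the tagger; DSA keeps per-packet state there (see DSA_SKB_CB()).
 */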
934 memset(skb->cb, 0, sizeof(skb->cb));
935
936 /* Handle tx timestamp if any */
937 dsa_skb_tx_timestamp(p, skb);
938
939 if (skb_ensure_writable_head_tail(skb, dev)) {
940 dev_kfree_skb_any(skb);
941 return NETDEV_TX_OK;
942 }
943
944 /* needed_tailroom should still be 'warm' in the cache line from
945 * skb_ensure_writable_head_tail(), which has also ensured that
946 * padding is safe.
947 */
948 if (dev->needed_tailroom)
949 eth_skb_pad(skb);
950
951 /* Transmit function may have to reallocate the original SKB,
952 * in which case it must have freed it. Only free it here on error.
953 */
954 nskb = p->xmit(skb, dev);
955 if (!nskb) {
956 kfree_skb(skb);
957 return NETDEV_TX_OK;
958 }
959
960 return dsa_enqueue_skb(nskb, dev);
961 }
962
963 /* ethtool operations *******************************************************/
964
965 static void dsa_user_get_drvinfo(struct net_device *dev,
966 struct ethtool_drvinfo *drvinfo)
967 {
968 strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
969 strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
970 strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
971 }
972
973 static int dsa_user_get_regs_len(struct net_device *dev)
974 {
975 struct dsa_port *dp = dsa_user_to_port(dev);
976 struct dsa_switch *ds = dp->ds;
977
978 if (ds->ops->get_regs_len)
979 return ds->ops->get_regs_len(ds, dp->index);
980
981 return -EOPNOTSUPP;
982 }
983
984 static void
985 dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
986 {
987 struct dsa_port *dp = dsa_user_to_port(dev);
988 struct dsa_switch *ds = dp->ds;
989
990 if (ds->ops->get_regs)
991 ds->ops->get_regs(ds, dp->index, regs, _p);
992 }
993
994 static int dsa_user_nway_reset(struct net_device *dev)
995 {
996 struct dsa_port *dp = dsa_user_to_port(dev);
997
998 return phylink_ethtool_nway_reset(dp->pl);
999 }
1000
1001 static int dsa_user_get_eeprom_len(struct net_device *dev)
1002 {
1003 struct dsa_port *dp = dsa_user_to_port(dev);
1004 struct dsa_switch *ds = dp->ds;
1005
1006 if (ds->cd && ds->cd->eeprom_len)
1007 return ds->cd->eeprom_len;
1008
1009 if (ds->ops->get_eeprom_len)
1010 return ds->ops->get_eeprom_len(ds);
1011
1012 return 0;
1013 }
1014
1015 static int dsa_user_get_eeprom(struct net_device *dev,
1016 struct ethtool_eeprom *eeprom, u8 *data)
1017 {
1018 struct dsa_port *dp = dsa_user_to_port(dev);
1019 struct dsa_switch *ds = dp->ds;
1020
1021 if (ds->ops->get_eeprom)
1022 return ds->ops->get_eeprom(ds, eeprom, data);
1023
1024 return -EOPNOTSUPP;
1025 }
1026
1027 static int dsa_user_set_eeprom(struct net_device *dev,
1028 struct ethtool_eeprom *eeprom, u8 *data)
1029 {
1030 struct dsa_port *dp = dsa_user_to_port(dev);
1031 struct dsa_switch *ds = dp->ds;
1032
1033 if (ds->ops->set_eeprom)
1034 return ds->ops->set_eeprom(ds, eeprom, data);
1035
1036 return -EOPNOTSUPP;
1037 }
1038
1039 static void dsa_user_get_strings(struct net_device *dev,
1040 uint32_t stringset, uint8_t *data)
1041 {
1042 struct dsa_port *dp = dsa_user_to_port(dev);
1043 struct dsa_switch *ds = dp->ds;
1044
1045 if (stringset == ETH_SS_STATS) {
1046 ethtool_puts(&data, "tx_packets");
1047 ethtool_puts(&data, "tx_bytes");
1048 ethtool_puts(&data, "rx_packets");
1049 ethtool_puts(&data, "rx_bytes");
1050 if (ds->ops->get_strings)
1051 ds->ops->get_strings(ds, dp->index, stringset, data);
1052 } else if (stringset == ETH_SS_TEST) {
1053 net_selftest_get_strings(data);
1054 }
1055
1056 }
1057
1058 static void dsa_user_get_ethtool_stats(struct net_device *dev,
1059 struct ethtool_stats *stats,
1060 uint64_t *data)
1061 {
1062 struct dsa_port *dp = dsa_user_to_port(dev);
1063 struct dsa_switch *ds = dp->ds;
1064 struct pcpu_sw_netstats *s;
1065 unsigned int start;
1066 int i;
1067
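/* Slots 0-3 hold the software per-CPU counters advertised in
 * dsa_user_get_strings(); driver-provided counters follow at data + 4.
 */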
1068 for_each_possible_cpu(i) {
1069 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
1070
1071 s = per_cpu_ptr(dev->tstats, i);
1072 do {
1073 start = u64_stats_fetch_begin(&s->syncp);
1074 tx_packets = u64_stats_read(&s->tx_packets);
1075 tx_bytes = u64_stats_read(&s->tx_bytes);
1076 rx_packets = u64_stats_read(&s->rx_packets);
1077 rx_bytes = u64_stats_read(&s->rx_bytes);
1078 } while (u64_stats_fetch_retry(&s->syncp, start));
1079 data[0] += tx_packets;
1080 data[1] += tx_bytes;
1081 data[2] += rx_packets;
1082 data[3] += rx_bytes;
1083 }
1084 if (ds->ops->get_ethtool_stats)
1085 ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
1086 }
1087
1088 static int dsa_user_get_sset_count(struct net_device *dev, int sset)
1089 {
1090 struct dsa_port *dp = dsa_user_to_port(dev);
1091 struct dsa_switch *ds = dp->ds;
1092
1093 if (sset == ETH_SS_STATS) {
1094 int count = 0;
1095
1096 if (ds->ops->get_sset_count) {
1097 count = ds->ops->get_sset_count(ds, dp->index, sset);
1098 if (count < 0)
1099 return count;
1100 }
1101
1102 return count + 4;
1103 } else if (sset == ETH_SS_TEST) {
1104 return net_selftest_get_count();
1105 }
1106
1107 return -EOPNOTSUPP;
1108 }
1109
1110 static void dsa_user_get_eth_phy_stats(struct net_device *dev,
1111 struct ethtool_eth_phy_stats *phy_stats)
1112 {
1113 struct dsa_port *dp = dsa_user_to_port(dev);
1114 struct dsa_switch *ds = dp->ds;
1115
1116 if (ds->ops->get_eth_phy_stats)
1117 ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
1118 }
1119
1120 static void dsa_user_get_eth_mac_stats(struct net_device *dev,
1121 struct ethtool_eth_mac_stats *mac_stats)
1122 {
1123 struct dsa_port *dp = dsa_user_to_port(dev);
1124 struct dsa_switch *ds = dp->ds;
1125
1126 if (ds->ops->get_eth_mac_stats)
1127 ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
1128 }
1129
1130 static void
1131 dsa_user_get_eth_ctrl_stats(struct net_device *dev,
1132 struct ethtool_eth_ctrl_stats *ctrl_stats)
1133 {
1134 struct dsa_port *dp = dsa_user_to_port(dev);
1135 struct dsa_switch *ds = dp->ds;
1136
1137 if (ds->ops->get_eth_ctrl_stats)
1138 ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
1139 }
1140
1141 static void
1142 dsa_user_get_rmon_stats(struct net_device *dev,
1143 struct ethtool_rmon_stats *rmon_stats,
1144 const struct ethtool_rmon_hist_range **ranges)
1145 {
1146 struct dsa_port *dp = dsa_user_to_port(dev);
1147 struct dsa_switch *ds = dp->ds;
1148
1149 if (ds->ops->get_rmon_stats)
1150 ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
1151 }
1152
1153 static void dsa_user_get_ts_stats(struct net_device *dev,
1154 struct ethtool_ts_stats *ts_stats)
1155 {
1156 struct dsa_port *dp = dsa_user_to_port(dev);
1157 struct dsa_switch *ds = dp->ds;
1158
1159 if (ds->ops->get_ts_stats)
1160 ds->ops->get_ts_stats(ds, dp->index, ts_stats);
1161 }
1162
1163 static void dsa_user_net_selftest(struct net_device *ndev,
1164 struct ethtool_test *etest, u64 *buf)
1165 {
1166 struct dsa_port *dp = dsa_user_to_port(ndev);
1167 struct dsa_switch *ds = dp->ds;
1168
1169 if (ds->ops->self_test) {
1170 ds->ops->self_test(ds, dp->index, etest, buf);
1171 return;
1172 }
1173
1174 net_selftest(ndev, etest, buf);
1175 }
1176
1177 static int dsa_user_get_mm(struct net_device *dev,
1178 struct ethtool_mm_state *state)
1179 {
1180 struct dsa_port *dp = dsa_user_to_port(dev);
1181 struct dsa_switch *ds = dp->ds;
1182
1183 if (!ds->ops->get_mm)
1184 return -EOPNOTSUPP;
1185
1186 return ds->ops->get_mm(ds, dp->index, state);
1187 }
1188
1189 static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
1190 struct netlink_ext_ack *extack)
1191 {
1192 struct dsa_port *dp = dsa_user_to_port(dev);
1193 struct dsa_switch *ds = dp->ds;
1194
1195 if (!ds->ops->set_mm)
1196 return -EOPNOTSUPP;
1197
1198 return ds->ops->set_mm(ds, dp->index, cfg, extack);
1199 }
1200
1201 static void dsa_user_get_mm_stats(struct net_device *dev,
1202 struct ethtool_mm_stats *stats)
1203 {
1204 struct dsa_port *dp = dsa_user_to_port(dev);
1205 struct dsa_switch *ds = dp->ds;
1206
1207 if (ds->ops->get_mm_stats)
1208 ds->ops->get_mm_stats(ds, dp->index, stats);
1209 }
1210
1211 static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1212 {
1213 struct dsa_port *dp = dsa_user_to_port(dev);
1214 struct dsa_switch *ds = dp->ds;
1215
1216 phylink_ethtool_get_wol(dp->pl, w);
1217
1218 if (ds->ops->get_wol)
1219 ds->ops->get_wol(ds, dp->index, w);
1220 }
1221
1222 static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
1223 {
1224 struct dsa_port *dp = dsa_user_to_port(dev);
1225 struct dsa_switch *ds = dp->ds;
1226 int ret = -EOPNOTSUPP;
1227
1228 phylink_ethtool_set_wol(dp->pl, w);
1229
1230 if (ds->ops->set_wol)
1231 ret = ds->ops->set_wol(ds, dp->index, w);
1232
1233 return ret;
1234 }
1235
1236 static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
1237 {
1238 struct dsa_port *dp = dsa_user_to_port(dev);
1239 struct dsa_switch *ds = dp->ds;
1240 int ret;
1241
1242 /* Check whether the switch supports EEE */
1243 if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
1244 return -EOPNOTSUPP;
1245
1246 /* If the port is using phylink managed EEE, then an unimplemented
1247 * set_mac_eee() is permissible.
1248 */
1249 if (!phylink_mac_implements_lpi(ds->phylink_mac_ops)) {
1250 /* Port's PHY and MAC both need to be EEE capable */
1251 if (!dev->phydev)
1252 return -ENODEV;
1253
1254 if (!ds->ops->set_mac_eee)
1255 return -EOPNOTSUPP;
1256
1257 ret = ds->ops->set_mac_eee(ds, dp->index, e);
1258 if (ret)
1259 return ret;
1260 } else if (ds->ops->set_mac_eee) {
1261 ret = ds->ops->set_mac_eee(ds, dp->index, e);
1262 if (ret)
1263 return ret;
1264 }
1265
1266 return phylink_ethtool_set_eee(dp->pl, e);
1267 }
1268
1269 static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
1270 {
1271 struct dsa_port *dp = dsa_user_to_port(dev);
1272 struct dsa_switch *ds = dp->ds;
1273
1274 /* Check whether the switch supports EEE */
1275 if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
1276 return -EOPNOTSUPP;
1277
1278 /* Port's PHY and MAC both need to be EEE capable */
1279 if (!dev->phydev)
1280 return -ENODEV;
1281
1282 return phylink_ethtool_get_eee(dp->pl, e);
1283 }
1284
1285 static int dsa_user_get_link_ksettings(struct net_device *dev,
1286 struct ethtool_link_ksettings *cmd)
1287 {
1288 struct dsa_port *dp = dsa_user_to_port(dev);
1289
1290 return phylink_ethtool_ksettings_get(dp->pl, cmd);
1291 }
1292
1293 static int dsa_user_set_link_ksettings(struct net_device *dev,
1294 const struct ethtool_link_ksettings *cmd)
1295 {
1296 struct dsa_port *dp = dsa_user_to_port(dev);
1297
1298 return phylink_ethtool_ksettings_set(dp->pl, cmd);
1299 }
1300
1301 static void dsa_user_get_pause_stats(struct net_device *dev,
1302 struct ethtool_pause_stats *pause_stats)
1303 {
1304 struct dsa_port *dp = dsa_user_to_port(dev);
1305 struct dsa_switch *ds = dp->ds;
1306
1307 if (ds->ops->get_pause_stats)
1308 ds->ops->get_pause_stats(ds, dp->index, pause_stats);
1309 }
1310
1311 static void dsa_user_get_pauseparam(struct net_device *dev,
1312 struct ethtool_pauseparam *pause)
1313 {
1314 struct dsa_port *dp = dsa_user_to_port(dev);
1315
1316 phylink_ethtool_get_pauseparam(dp->pl, pause);
1317 }
1318
1319 static int dsa_user_set_pauseparam(struct net_device *dev,
1320 struct ethtool_pauseparam *pause)
1321 {
1322 struct dsa_port *dp = dsa_user_to_port(dev);
1323
1324 return phylink_ethtool_set_pauseparam(dp->pl, pause);
1325 }
1326
1327 #ifdef CONFIG_NET_POLL_CONTROLLER
1328 static int dsa_user_netpoll_setup(struct net_device *dev)
1329 {
1330 struct net_device *conduit = dsa_user_to_conduit(dev);
1331 struct dsa_user_priv *p = netdev_priv(dev);
1332 struct netpoll *netpoll;
1333 int err = 0;
1334
1335 netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
1336 if (!netpoll)
1337 return -ENOMEM;
1338
1339 err = __netpoll_setup(netpoll, conduit);
1340 if (err) {
1341 kfree(netpoll);
1342 goto out;
1343 }
1344
1345 p->netpoll = netpoll;
1346 out:
1347 return err;
1348 }
1349
1350 static void dsa_user_netpoll_cleanup(struct net_device *dev)
1351 {
1352 struct dsa_user_priv *p = netdev_priv(dev);
1353 struct netpoll *netpoll = p->netpoll;
1354
1355 if (!netpoll)
1356 return;
1357
1358 p->netpoll = NULL;
1359
1360 __netpoll_free(netpoll);
1361 }
1362
1363 static void dsa_user_poll_controller(struct net_device *dev)
1364 {
1365 }
1366 #endif
1367
1368 static struct dsa_mall_tc_entry *
1369 dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
1370 {
1371 struct dsa_user_priv *p = netdev_priv(dev);
1372 struct dsa_mall_tc_entry *mall_tc_entry;
1373
1374 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
1375 if (mall_tc_entry->cookie == cookie)
1376 return mall_tc_entry;
1377
1378 return NULL;
1379 }
1380
1381 static int
1382 dsa_user_add_cls_matchall_mirred(struct net_device *dev,
1383 struct tc_cls_matchall_offload *cls,
1384 bool ingress, bool ingress_target)
1385 {
1386 struct netlink_ext_ack *extack = cls->common.extack;
1387 struct dsa_port *dp = dsa_user_to_port(dev);
1388 struct dsa_user_priv *p = netdev_priv(dev);
1389 struct dsa_mall_mirror_tc_entry *mirror;
1390 struct dsa_mall_tc_entry *mall_tc_entry;
1391 struct dsa_switch *ds = dp->ds;
1392 struct flow_action_entry *act;
1393 struct dsa_port *to_dp;
1394 int err;
1395
1396 if (cls->common.protocol != htons(ETH_P_ALL)) {
1397 NL_SET_ERR_MSG_MOD(extack,
1398 "Can only offload \"protocol all\" matchall filter");
1399 return -EOPNOTSUPP;
1400 }
1401
1402 if (!ds->ops->port_mirror_add) {
1403 NL_SET_ERR_MSG_MOD(extack,
1404 "Switch does not support mirroring operation");
1405 return -EOPNOTSUPP;
1406 }
1407
1408 if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
1409 return -EOPNOTSUPP;
1410
1411 act = &cls->rule->action.entries[0];
1412
1413 if (!act->dev)
1414 return -EINVAL;
1415
1416 if (dsa_user_dev_check(act->dev)) {
1417 if (ingress_target) {
1418 /* We can only fulfill this using software assist */
1419 if (cls->common.skip_sw) {
1420 NL_SET_ERR_MSG_MOD(extack,
1421 "Can only mirred to ingress of DSA user port if filter also runs in software");
1422 return -EOPNOTSUPP;
1423 }
1424 to_dp = dp->cpu_dp;
1425 } else {
1426 to_dp = dsa_user_to_port(act->dev);
1427 }
1428 } else {
1429 /* Handle mirroring to foreign target ports as a mirror towards
1430 * the CPU. The software tc rule will take the packets from
1431 * there.
1432 */
1433 if (cls->common.skip_sw) {
1434 NL_SET_ERR_MSG_MOD(extack,
1435 "Can only mirred to CPU if filter also runs in software");
1436 return -EOPNOTSUPP;
1437 }
1438 to_dp = dp->cpu_dp;
1439 }
1440
1441 if (dp->ds != to_dp->ds) {
1442 NL_SET_ERR_MSG_MOD(extack,
1443 "Cross-chip mirroring not implemented");
1444 return -EOPNOTSUPP;
1445 }
1446
1447 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1448 if (!mall_tc_entry)
1449 return -ENOMEM;
1450
1451 mall_tc_entry->cookie = cls->cookie;
1452 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
1453 mirror = &mall_tc_entry->mirror;
1454 mirror->to_local_port = to_dp->index;
1455 mirror->ingress = ingress;
1456
1457 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
1458 if (err) {
1459 kfree(mall_tc_entry);
1460 return err;
1461 }
1462
1463 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1464
1465 return err;
1466 }
1467
1468 static int
1469 dsa_user_add_cls_matchall_police(struct net_device *dev,
1470 struct tc_cls_matchall_offload *cls,
1471 bool ingress)
1472 {
1473 struct netlink_ext_ack *extack = cls->common.extack;
1474 struct dsa_port *dp = dsa_user_to_port(dev);
1475 struct dsa_user_priv *p = netdev_priv(dev);
1476 struct dsa_mall_policer_tc_entry *policer;
1477 struct dsa_mall_tc_entry *mall_tc_entry;
1478 struct dsa_switch *ds = dp->ds;
1479 struct flow_action_entry *act;
1480 int err;
1481
1482 if (!ds->ops->port_policer_add) {
1483 NL_SET_ERR_MSG_MOD(extack,
1484 "Policing offload not implemented");
1485 return -EOPNOTSUPP;
1486 }
1487
1488 if (!ingress) {
1489 NL_SET_ERR_MSG_MOD(extack,
1490 "Only supported on ingress qdisc");
1491 return -EOPNOTSUPP;
1492 }
1493
1494 if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
1495 return -EOPNOTSUPP;
1496
1497 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1498 if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1499 NL_SET_ERR_MSG_MOD(extack,
1500 "Only one port policer allowed");
1501 return -EEXIST;
1502 }
1503 }
1504
1505 act = &cls->rule->action.entries[0];
1506
1507 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1508 if (!mall_tc_entry)
1509 return -ENOMEM;
1510
1511 mall_tc_entry->cookie = cls->cookie;
1512 mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1513 policer = &mall_tc_entry->policer;
1514 policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1515 policer->burst = act->police.burst;
1516
1517 err = ds->ops->port_policer_add(ds, dp->index, policer);
1518 if (err) {
1519 kfree(mall_tc_entry);
1520 return err;
1521 }
1522
1523 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1524
1525 return err;
1526 }
1527
1528 static int dsa_user_add_cls_matchall(struct net_device *dev,
1529 struct tc_cls_matchall_offload *cls,
1530 bool ingress)
1531 {
1532 const struct flow_action *action = &cls->rule->action;
1533 struct netlink_ext_ack *extack = cls->common.extack;
1534
1535 if (!flow_offload_has_one_action(action)) {
1536 NL_SET_ERR_MSG_MOD(extack,
1537 "Cannot offload matchall filter with more than one action");
1538 return -EOPNOTSUPP;
1539 }
1540
1541 switch (action->entries[0].id) {
1542 case FLOW_ACTION_MIRRED:
1543 return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
1544 false);
1545 case FLOW_ACTION_MIRRED_INGRESS:
1546 return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
1547 true);
1548 case FLOW_ACTION_POLICE:
1549 return dsa_user_add_cls_matchall_police(dev, cls, ingress);
1550 default:
1551 NL_SET_ERR_MSG_MOD(extack, "Unknown action");
1552 break;
1553 }
1554
1555 return -EOPNOTSUPP;
1556 }
1557
1558 static void dsa_user_del_cls_matchall(struct net_device *dev,
1559 struct tc_cls_matchall_offload *cls)
1560 {
1561 struct dsa_port *dp = dsa_user_to_port(dev);
1562 struct dsa_mall_tc_entry *mall_tc_entry;
1563 struct dsa_switch *ds = dp->ds;
1564
1565 mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie);
1566 if (!mall_tc_entry)
1567 return;
1568
1569 list_del(&mall_tc_entry->list);
1570
1571 switch (mall_tc_entry->type) {
1572 case DSA_PORT_MALL_MIRROR:
1573 if (ds->ops->port_mirror_del)
1574 ds->ops->port_mirror_del(ds, dp->index,
1575 &mall_tc_entry->mirror);
1576 break;
1577 case DSA_PORT_MALL_POLICER:
1578 if (ds->ops->port_policer_del)
1579 ds->ops->port_policer_del(ds, dp->index);
1580 break;
1581 default:
1582 WARN_ON(1);
1583 }
1584
1585 kfree(mall_tc_entry);
1586 }
1587
1588 static int dsa_user_setup_tc_cls_matchall(struct net_device *dev,
1589 struct tc_cls_matchall_offload *cls,
1590 bool ingress)
1591 {
1592 if (cls->common.chain_index)
1593 return -EOPNOTSUPP;
1594
1595 switch (cls->command) {
1596 case TC_CLSMATCHALL_REPLACE:
1597 return dsa_user_add_cls_matchall(dev, cls, ingress);
1598 case TC_CLSMATCHALL_DESTROY:
1599 dsa_user_del_cls_matchall(dev, cls);
1600 return 0;
1601 default:
1602 return -EOPNOTSUPP;
1603 }
1604 }
1605
1606 static int dsa_user_add_cls_flower(struct net_device *dev,
1607 struct flow_cls_offload *cls,
1608 bool ingress)
1609 {
1610 struct dsa_port *dp = dsa_user_to_port(dev);
1611 struct dsa_switch *ds = dp->ds;
1612 int port = dp->index;
1613
1614 if (!ds->ops->cls_flower_add)
1615 return -EOPNOTSUPP;
1616
1617 return ds->ops->cls_flower_add(ds, port, cls, ingress);
1618 }
1619
1620 static int dsa_user_del_cls_flower(struct net_device *dev,
1621 struct flow_cls_offload *cls,
1622 bool ingress)
1623 {
1624 struct dsa_port *dp = dsa_user_to_port(dev);
1625 struct dsa_switch *ds = dp->ds;
1626 int port = dp->index;
1627
1628 if (!ds->ops->cls_flower_del)
1629 return -EOPNOTSUPP;
1630
1631 return ds->ops->cls_flower_del(ds, port, cls, ingress);
1632 }
1633
1634 static int dsa_user_stats_cls_flower(struct net_device *dev,
1635 struct flow_cls_offload *cls,
1636 bool ingress)
1637 {
1638 struct dsa_port *dp = dsa_user_to_port(dev);
1639 struct dsa_switch *ds = dp->ds;
1640 int port = dp->index;
1641
1642 if (!ds->ops->cls_flower_stats)
1643 return -EOPNOTSUPP;
1644
1645 return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1646 }
1647
1648 static int dsa_user_setup_tc_cls_flower(struct net_device *dev,
1649 struct flow_cls_offload *cls,
1650 bool ingress)
1651 {
1652 switch (cls->command) {
1653 case FLOW_CLS_REPLACE:
1654 return dsa_user_add_cls_flower(dev, cls, ingress);
1655 case FLOW_CLS_DESTROY:
1656 return dsa_user_del_cls_flower(dev, cls, ingress);
1657 case FLOW_CLS_STATS:
1658 return dsa_user_stats_cls_flower(dev, cls, ingress);
1659 default:
1660 return -EOPNOTSUPP;
1661 }
1662 }
1663
1664 static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1665 void *cb_priv, bool ingress)
1666 {
1667 struct net_device *dev = cb_priv;
1668
1669 if (!tc_can_offload(dev))
1670 return -EOPNOTSUPP;
1671
1672 switch (type) {
1673 case TC_SETUP_CLSMATCHALL:
1674 return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress);
1675 case TC_SETUP_CLSFLOWER:
1676 return dsa_user_setup_tc_cls_flower(dev, type_data, ingress);
1677 default:
1678 return -EOPNOTSUPP;
1679 }
1680 }
1681
1682 static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type,
1683 void *type_data, void *cb_priv)
1684 {
1685 return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true);
1686 }
1687
1688 static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type,
1689 void *type_data, void *cb_priv)
1690 {
1691 return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false);
1692 }
1693
1694 static LIST_HEAD(dsa_user_block_cb_list);
1695
1696 static int dsa_user_setup_tc_block(struct net_device *dev,
1697 struct flow_block_offload *f)
1698 {
1699 struct flow_block_cb *block_cb;
1700 flow_setup_cb_t *cb;
1701
1702 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1703 cb = dsa_user_setup_tc_block_cb_ig;
1704 else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1705 cb = dsa_user_setup_tc_block_cb_eg;
1706 else
1707 return -EOPNOTSUPP;
1708
1709 f->driver_block_list = &dsa_user_block_cb_list;
1710
1711 switch (f->command) {
1712 case FLOW_BLOCK_BIND:
1713 if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
1714 return -EBUSY;
1715
1716 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1717 if (IS_ERR(block_cb))
1718 return PTR_ERR(block_cb);
1719
1720 flow_block_cb_add(block_cb, f);
1721 list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);
1722 return 0;
1723 case FLOW_BLOCK_UNBIND:
1724 block_cb = flow_block_cb_lookup(f->block, cb, dev);
1725 if (!block_cb)
1726 return -ENOENT;
1727
1728 flow_block_cb_remove(block_cb, f);
1729 list_del(&block_cb->driver_list);
1730 return 0;
1731 default:
1732 return -EOPNOTSUPP;
1733 }
1734 }
1735
1736 static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port,
1737 void *type_data)
1738 {
1739 struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port));
1740
1741 if (!conduit->netdev_ops->ndo_setup_tc)
1742 return -EOPNOTSUPP;
1743
1744 return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data);
1745 }
1746
1747 static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type,
1748 void *type_data)
1749 {
1750 struct dsa_port *dp = dsa_user_to_port(dev);
1751 struct dsa_switch *ds = dp->ds;
1752
1753 switch (type) {
1754 case TC_SETUP_BLOCK:
1755 return dsa_user_setup_tc_block(dev, type_data);
1756 case TC_SETUP_FT:
1757 return dsa_user_setup_ft_block(ds, dp->index, type_data);
1758 default:
1759 break;
1760 }
1761
1762 if (!ds->ops->port_setup_tc)
1763 return -EOPNOTSUPP;
1764
1765 return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1766 }
1767
1768 static int dsa_user_get_rxnfc(struct net_device *dev,
1769 struct ethtool_rxnfc *nfc, u32 *rule_locs)
1770 {
1771 struct dsa_port *dp = dsa_user_to_port(dev);
1772 struct dsa_switch *ds = dp->ds;
1773
1774 if (!ds->ops->get_rxnfc)
1775 return -EOPNOTSUPP;
1776
1777 return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1778 }
1779
1780 static int dsa_user_set_rxnfc(struct net_device *dev,
1781 struct ethtool_rxnfc *nfc)
1782 {
1783 struct dsa_port *dp = dsa_user_to_port(dev);
1784 struct dsa_switch *ds = dp->ds;
1785
1786 if (!ds->ops->set_rxnfc)
1787 return -EOPNOTSUPP;
1788
1789 return ds->ops->set_rxnfc(ds, dp->index, nfc);
1790 }
1791
1792 static int dsa_user_get_ts_info(struct net_device *dev,
1793 struct kernel_ethtool_ts_info *ts)
1794 {
1795 struct dsa_user_priv *p = netdev_priv(dev);
1796 struct dsa_switch *ds = p->dp->ds;
1797
1798 if (!ds->ops->get_ts_info)
1799 return -EOPNOTSUPP;
1800
1801 return ds->ops->get_ts_info(ds, p->dp->index, ts);
1802 }
1803
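/* .ndo_vlan_rx_add_vid - program a VLAN coming from an 8021q upper on both
 * the user port and the CPU port, then replicate the standalone host
 * addresses on the new VID if the switch filters FDB/MDB entries per VLAN.
 */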
1804 static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1805 u16 vid)
1806 {
1807 struct dsa_port *dp = dsa_user_to_port(dev);
1808 struct switchdev_obj_port_vlan vlan = {
1809 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1810 .vid = vid,
1811 /* This API only allows programming tagged, non-PVID VIDs */
1812 .flags = 0,
1813 };
1814 struct netlink_ext_ack extack = {0};
1815 struct dsa_switch *ds = dp->ds;
1816 struct netdev_hw_addr *ha;
1817 struct dsa_vlan *v;
1818 int ret;
1819
1820 /* User port... */
1821 ret = dsa_port_vlan_add(dp, &vlan, &extack);
1822 if (ret) {
1823 if (extack._msg)
1824 netdev_err(dev, "%s\n", extack._msg);
1825 return ret;
1826 }
1827
1828 /* And CPU port... */
1829 ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
1830 if (ret) {
1831 if (extack._msg)
1832 netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1833 extack._msg);
1834 return ret;
1835 }
1836
1837 if (!dsa_switch_supports_uc_filtering(ds) &&
1838 !dsa_switch_supports_mc_filtering(ds))
1839 return 0;
1840
1841 v = kzalloc(sizeof(*v), GFP_KERNEL);
1842 if (!v) {
1843 ret = -ENOMEM;
1844 goto rollback;
1845 }
1846
1847 netif_addr_lock_bh(dev);
1848
1849 v->vid = vid;
1850 list_add_tail(&v->list, &dp->user_vlans);
1851
1852 if (dsa_switch_supports_mc_filtering(ds)) {
1853 netdev_for_each_synced_mc_addr(ha, dev) {
1854 dsa_user_schedule_standalone_work(dev, DSA_MC_ADD,
1855 ha->addr, vid);
1856 }
1857 }
1858
1859 if (dsa_switch_supports_uc_filtering(ds)) {
1860 netdev_for_each_synced_uc_addr(ha, dev) {
1861 dsa_user_schedule_standalone_work(dev, DSA_UC_ADD,
1862 ha->addr, vid);
1863 }
1864 }
1865
1866 netif_addr_unlock_bh(dev);
1867
1868 dsa_flush_workqueue();
1869
1870 return 0;
1871
1872 rollback:
1873 dsa_port_host_vlan_del(dp, &vlan);
1874 dsa_port_vlan_del(dp, &vlan);
1875
1876 return ret;
1877 }
1878
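/* Remove an 8021q upper's VID from hardware: delete it from the user
 * port and the CPU port, and drop the standalone host addresses that
 * were installed in this VLAN, if the switch filters on them.
 */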
1879 static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1880 u16 vid)
1881 {
1882 struct dsa_port *dp = dsa_user_to_port(dev);
1883 struct switchdev_obj_port_vlan vlan = {
1884 .vid = vid,
1885 /* This API only allows programming tagged, non-PVID VIDs */
1886 .flags = 0,
1887 };
1888 struct dsa_switch *ds = dp->ds;
1889 struct netdev_hw_addr *ha;
1890 struct dsa_vlan *v;
1891 int err;
1892
1893 err = dsa_port_vlan_del(dp, &vlan);
1894 if (err)
1895 return err;
1896
1897 err = dsa_port_host_vlan_del(dp, &vlan);
1898 if (err)
1899 return err;
1900
1901 if (!dsa_switch_supports_uc_filtering(ds) &&
1902 !dsa_switch_supports_mc_filtering(ds))
1903 return 0;
1904
1905 netif_addr_lock_bh(dev);
1906
1907 v = dsa_vlan_find(&dp->user_vlans, &vlan);
1908 if (!v) {
1909 netif_addr_unlock_bh(dev);
1910 return -ENOENT;
1911 }
1912
1913 list_del(&v->list);
1914 kfree(v);
1915
1916 if (dsa_switch_supports_mc_filtering(ds)) {
1917 netdev_for_each_synced_mc_addr(ha, dev) {
1918 dsa_user_schedule_standalone_work(dev, DSA_MC_DEL,
1919 ha->addr, vid);
1920 }
1921 }
1922
1923 if (dsa_switch_supports_uc_filtering(ds)) {
1924 netdev_for_each_synced_uc_addr(ha, dev) {
1925 dsa_user_schedule_standalone_work(dev, DSA_UC_DEL,
1926 ha->addr, vid);
1927 }
1928 }
1929
1930 netif_addr_unlock_bh(dev);
1931
1932 dsa_flush_workqueue();
1933
1934 return 0;
1935 }
1936
1937 static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg)
1938 {
1939 __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1940
1941 return dsa_user_vlan_rx_add_vid(arg, proto, vid);
1942 }
1943
1944 static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg)
1945 {
1946 __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1947
1948 return dsa_user_vlan_rx_kill_vid(arg, proto, vid);
1949 }
1950
1951 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1952 * filtering is enabled. The baseline is that only ports that offload a
1953 * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1954 * but there are exceptions for quirky hardware.
1955 *
1956 * If ds->vlan_filtering_is_global = true, then standalone ports which share
1957 * the same switch with other ports that offload a VLAN-aware bridge are also
1958 * inevitably VLAN-aware.
1959 *
1960 * To summarize, a DSA switch port offloads:
1961 *
1962 * - If standalone (this includes software bridge, software LAG):
1963 * - if ds->needs_standalone_vlan_filtering = true, OR if
1964 * (ds->vlan_filtering_is_global = true AND there are bridges spanning
1965 * this switch chip which have vlan_filtering=1)
1966 * - the 8021q upper VLANs
1967 * - else (standalone VLAN filtering is not needed, VLAN filtering is not
1968 * global, or it is, but no port is under a VLAN-aware bridge):
1969 * - no VLAN (any 8021q upper is a software VLAN)
1970 *
1971 * - If under a vlan_filtering=0 bridge which it offloads:
1972 * - if ds->configure_vlan_while_not_filtering = true (default):
1973 * - the bridge VLANs. These VLANs are committed to hardware but inactive.
1974 * - else (deprecated):
1975 * - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1976 * enabled, so this behavior is broken and discouraged.
1977 *
1978 * - If under a vlan_filtering=1 bridge which it offloads:
1979 * - the bridge VLANs
1980 * - the 8021q upper VLANs
1981 */
1982 int dsa_user_manage_vlan_filtering(struct net_device *user,
1983 bool vlan_filtering)
1984 {
1985 int err;
1986
1987 if (vlan_filtering) {
1988 user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1989
1990 err = vlan_for_each(user, dsa_user_restore_vlan, user);
1991 if (err) {
1992 vlan_for_each(user, dsa_user_clear_vlan, user);
1993 user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1994 return err;
1995 }
1996 } else {
1997 err = vlan_for_each(user, dsa_user_clear_vlan, user);
1998 if (err)
1999 return err;
2000
2001 user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2002 }
2003
2004 return 0;
2005 }
2006
2007 struct dsa_hw_port {
2008 struct list_head list;
2009 struct net_device *dev;
2010 int old_mtu;
2011 };
2012
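/* Apply @mtu to every port in @hw_port_list. On failure, attempt to
 * restore the ports that were already changed to their old MTU, and
 * return the error.
 */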
2013 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
2014 {
2015 const struct dsa_hw_port *p;
2016 int err;
2017
2018 list_for_each_entry(p, hw_port_list, list) {
2019 if (p->dev->mtu == mtu)
2020 continue;
2021
2022 err = dev_set_mtu(p->dev, mtu);
2023 if (err)
2024 goto rollback;
2025 }
2026
2027 return 0;
2028
2029 rollback:
2030 list_for_each_entry_continue_reverse(p, hw_port_list, list) {
2031 if (p->dev->mtu == p->old_mtu)
2032 continue;
2033
2034 if (dev_set_mtu(p->dev, p->old_mtu))
2035 netdev_err(p->dev, "Failed to restore MTU\n");
2036 }
2037
2038 return err;
2039 }
2040
2041 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
2042 {
2043 struct dsa_hw_port *p, *n;
2044
2045 list_for_each_entry_safe(p, n, hw_port_list, list)
2046 kfree(p);
2047 }
2048
2049 /* Make the hardware datapath to/from @dev limited to a common MTU */
2050 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
2051 {
2052 struct list_head hw_port_list;
2053 struct dsa_switch_tree *dst;
2054 int min_mtu = ETH_MAX_MTU;
2055 struct dsa_port *other_dp;
2056 int err;
2057
2058 if (!dp->ds->mtu_enforcement_ingress)
2059 return;
2060
2061 if (!dp->bridge)
2062 return;
2063
2064 INIT_LIST_HEAD(&hw_port_list);
2065
2066 /* Populate the list of ports that are part of the same bridge
2067 * as the newly added/modified port
2068 */
2069 list_for_each_entry(dst, &dsa_tree_list, list) {
2070 list_for_each_entry(other_dp, &dst->ports, list) {
2071 struct dsa_hw_port *hw_port;
2072 struct net_device *user;
2073
2074 if (other_dp->type != DSA_PORT_TYPE_USER)
2075 continue;
2076
2077 if (!dsa_port_bridge_same(dp, other_dp))
2078 continue;
2079
2080 if (!other_dp->ds->mtu_enforcement_ingress)
2081 continue;
2082
2083 user = other_dp->user;
2084
2085 if (min_mtu > user->mtu)
2086 min_mtu = user->mtu;
2087
2088 hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
2089 if (!hw_port)
2090 goto out;
2091
2092 hw_port->dev = user;
2093 hw_port->old_mtu = user->mtu;
2094
2095 list_add(&hw_port->list, &hw_port_list);
2096 }
2097 }
2098
2099 /* Attempt to configure the entire hardware bridge to the newly added
2100 * interface's MTU first, regardless of whether the intention of the
2101 * user was to raise or lower it.
2102 */
2103 err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
2104 if (!err)
2105 goto out;
2106
2107 /* Clearly that didn't work out so well, so just set the minimum MTU on
2108 * all hardware bridge ports now. If this fails too, then all ports will
2109 * still have their old MTU rolled back anyway.
2110 */
2111 dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
2112
2113 out:
2114 dsa_hw_port_list_free(&hw_port_list);
2115 }
2116
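/* Changing the MTU of a user port may also require growing the conduit
 * MTU, since the conduit must accommodate the largest user port MTU
 * plus the tagging protocol overhead. The CPU port MTU is propagated to
 * upstream switches through a cross-chip notifier. Partial failures are
 * rolled back.
 */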
2117 int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
2118 {
2119 struct net_device *conduit = dsa_user_to_conduit(dev);
2120 struct dsa_port *dp = dsa_user_to_port(dev);
2121 struct dsa_port *cpu_dp = dp->cpu_dp;
2122 struct dsa_switch *ds = dp->ds;
2123 struct dsa_port *other_dp;
2124 int largest_mtu = 0;
2125 int new_conduit_mtu;
2126 int old_conduit_mtu;
2127 int mtu_limit;
2128 int overhead;
2129 int cpu_mtu;
2130 int err;
2131
2132 if (!ds->ops->port_change_mtu)
2133 return -EOPNOTSUPP;
2134
2135 dsa_tree_for_each_user_port(other_dp, ds->dst) {
2136 int user_mtu;
2137
2138 /* During probe, this function will be called for each user
2139 * device, while not all of them have been allocated. That's
2140 * ok, it doesn't change what the maximum is, so ignore it.
2141 */
2142 if (!other_dp->user)
2143 continue;
2144
2145 /* Pretend that we already applied the setting, which we
2146 * actually haven't (still haven't done all integrity checks)
2147 */
2148 if (dp == other_dp)
2149 user_mtu = new_mtu;
2150 else
2151 user_mtu = other_dp->user->mtu;
2152
2153 if (largest_mtu < user_mtu)
2154 largest_mtu = user_mtu;
2155 }
2156
2157 overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
2158 mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
2159 old_conduit_mtu = conduit->mtu;
2160 new_conduit_mtu = largest_mtu + overhead;
2161 if (new_conduit_mtu > mtu_limit)
2162 return -ERANGE;
2163
2164 /* If the conduit MTU isn't over limit, there's no need to check the CPU
2165 * MTU, since that surely isn't either.
2166 */
2167 cpu_mtu = largest_mtu;
2168
2169 /* Start applying stuff */
2170 if (new_conduit_mtu != old_conduit_mtu) {
2171 err = dev_set_mtu(conduit, new_conduit_mtu);
2172 if (err < 0)
2173 goto out_conduit_failed;
2174
2175 /* We only need to propagate the MTU of the CPU port to
2176 * upstream switches, so emit a notifier which updates them.
2177 */
2178 err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
2179 if (err)
2180 goto out_cpu_failed;
2181 }
2182
2183 err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
2184 if (err)
2185 goto out_port_failed;
2186
2187 WRITE_ONCE(dev->mtu, new_mtu);
2188
2189 dsa_bridge_mtu_normalization(dp);
2190
2191 return 0;
2192
2193 out_port_failed:
2194 if (new_conduit_mtu != old_conduit_mtu)
2195 dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead);
2196 out_cpu_failed:
2197 if (new_conduit_mtu != old_conduit_mtu)
2198 dev_set_mtu(conduit, old_conduit_mtu);
2199 out_conduit_failed:
2200 return err;
2201 }
2202
2203 static int __maybe_unused
2204 dsa_user_dcbnl_set_apptrust(struct net_device *dev, u8 *sel, int nsel)
2205 {
2206 struct dsa_port *dp = dsa_user_to_port(dev);
2207 struct dsa_switch *ds = dp->ds;
2208 int port = dp->index;
2209
2210 if (!ds->ops->port_set_apptrust)
2211 return -EOPNOTSUPP;
2212
2213 return ds->ops->port_set_apptrust(ds, port, sel, nsel);
2214 }
2215
2216 static int __maybe_unused
2217 dsa_user_dcbnl_get_apptrust(struct net_device *dev, u8 *sel, int *nsel)
2218 {
2219 struct dsa_port *dp = dsa_user_to_port(dev);
2220 struct dsa_switch *ds = dp->ds;
2221 int port = dp->index;
2222
2223 if (!ds->ops->port_get_apptrust)
2224 return -EOPNOTSUPP;
2225
2226 return ds->ops->port_get_apptrust(ds, port, sel, nsel);
2227 }
2228
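/* The port's default priority is derived from the ETHERTYPE/protocol-0
 * APP entries: after adding the new entry, the highest configured
 * priority wins.
 */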
2229 static int __maybe_unused
2230 dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
2231 {
2232 struct dsa_port *dp = dsa_user_to_port(dev);
2233 struct dsa_switch *ds = dp->ds;
2234 unsigned long mask, new_prio;
2235 int err, port = dp->index;
2236
2237 if (!ds->ops->port_set_default_prio)
2238 return -EOPNOTSUPP;
2239
2240 err = dcb_ieee_setapp(dev, app);
2241 if (err)
2242 return err;
2243
2244 mask = dcb_ieee_getapp_mask(dev, app);
2245 new_prio = __fls(mask);
2246
2247 err = ds->ops->port_set_default_prio(ds, port, new_prio);
2248 if (err) {
2249 dcb_ieee_delapp(dev, app);
2250 return err;
2251 }
2252
2253 return 0;
2254 }
2255
2256 /* Update the DSCP prio entries on all user ports of the switch in case
2257 * the switch supports global DSCP prio instead of per port DSCP prios.
2258 */
2259 static int dsa_user_dcbnl_ieee_global_dscp_setdel(struct net_device *dev,
2260 struct dcb_app *app, bool del)
2261 {
2262 int (*setdel)(struct net_device *dev, struct dcb_app *app);
2263 struct dsa_port *dp = dsa_user_to_port(dev);
2264 struct dsa_switch *ds = dp->ds;
2265 struct dsa_port *other_dp;
2266 int err, restore_err;
2267
2268 if (del)
2269 setdel = dcb_ieee_delapp;
2270 else
2271 setdel = dcb_ieee_setapp;
2272
2273 dsa_switch_for_each_user_port(other_dp, ds) {
2274 struct net_device *user = other_dp->user;
2275
2276 if (!user || user == dev)
2277 continue;
2278
2279 err = setdel(user, app);
2280 if (err)
2281 goto err_try_to_restore;
2282 }
2283
2284 return 0;
2285
2286 err_try_to_restore:
2287
2288 /* Revert logic to restore previous state of app entries */
2289 if (!del)
2290 setdel = dcb_ieee_delapp;
2291 else
2292 setdel = dcb_ieee_setapp;
2293
2294 dsa_switch_for_each_user_port_continue_reverse(other_dp, ds) {
2295 struct net_device *user = other_dp->user;
2296
2297 if (!user || user == dev)
2298 continue;
2299
2300 restore_err = setdel(user, app);
2301 if (restore_err)
2302 netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
2303 }
2304
2305 return err;
2306 }
2307
2308 static int __maybe_unused
2309 dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
2310 {
2311 struct dsa_port *dp = dsa_user_to_port(dev);
2312 struct dsa_switch *ds = dp->ds;
2313 unsigned long mask, new_prio;
2314 int err, port = dp->index;
2315 u8 dscp = app->protocol;
2316
2317 if (!ds->ops->port_add_dscp_prio)
2318 return -EOPNOTSUPP;
2319
2320 if (dscp >= 64) {
2321 netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
2322 dscp);
2323 return -EINVAL;
2324 }
2325
2326 err = dcb_ieee_setapp(dev, app);
2327 if (err)
2328 return err;
2329
2330 mask = dcb_ieee_getapp_mask(dev, app);
2331 new_prio = __fls(mask);
2332
2333 err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
2334 if (err) {
2335 dcb_ieee_delapp(dev, app);
2336 return err;
2337 }
2338
2339 if (!ds->dscp_prio_mapping_is_global)
2340 return 0;
2341
2342 err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, false);
2343 if (err) {
2344 if (ds->ops->port_del_dscp_prio)
2345 ds->ops->port_del_dscp_prio(ds, port, dscp, new_prio);
2346 dcb_ieee_delapp(dev, app);
2347 return err;
2348 }
2349
2350 return 0;
2351 }
2352
2353 static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev,
2354 struct dcb_app *app)
2355 {
2356 switch (app->selector) {
2357 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2358 switch (app->protocol) {
2359 case 0:
2360 return dsa_user_dcbnl_set_default_prio(dev, app);
2361 default:
2362 return -EOPNOTSUPP;
2363 }
2364 break;
2365 case IEEE_8021QAZ_APP_SEL_DSCP:
2366 return dsa_user_dcbnl_add_dscp_prio(dev, app);
2367 default:
2368 return -EOPNOTSUPP;
2369 }
2370 }
2371
2372 static int __maybe_unused
2373 dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
2374 {
2375 struct dsa_port *dp = dsa_user_to_port(dev);
2376 struct dsa_switch *ds = dp->ds;
2377 unsigned long mask, new_prio;
2378 int err, port = dp->index;
2379
2380 if (!ds->ops->port_set_default_prio)
2381 return -EOPNOTSUPP;
2382
2383 err = dcb_ieee_delapp(dev, app);
2384 if (err)
2385 return err;
2386
2387 mask = dcb_ieee_getapp_mask(dev, app);
2388 new_prio = mask ? __fls(mask) : 0;
2389
2390 err = ds->ops->port_set_default_prio(ds, port, new_prio);
2391 if (err) {
2392 dcb_ieee_setapp(dev, app);
2393 return err;
2394 }
2395
2396 return 0;
2397 }
2398
2399 static int __maybe_unused
2400 dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
2401 {
2402 struct dsa_port *dp = dsa_user_to_port(dev);
2403 struct dsa_switch *ds = dp->ds;
2404 int err, port = dp->index;
2405 u8 dscp = app->protocol;
2406
2407 if (!ds->ops->port_del_dscp_prio)
2408 return -EOPNOTSUPP;
2409
2410 err = dcb_ieee_delapp(dev, app);
2411 if (err)
2412 return err;
2413
2414 err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
2415 if (err) {
2416 dcb_ieee_setapp(dev, app);
2417 return err;
2418 }
2419
2420 if (!ds->dscp_prio_mapping_is_global)
2421 return 0;
2422
2423 err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, true);
2424 if (err) {
2425 if (ds->ops->port_add_dscp_prio)
2426 ds->ops->port_add_dscp_prio(ds, port, dscp,
2427 app->priority);
2428 dcb_ieee_setapp(dev, app);
2429 return err;
2430 }
2431
2432 return 0;
2433 }
2434
2435 static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev,
2436 struct dcb_app *app)
2437 {
2438 switch (app->selector) {
2439 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2440 switch (app->protocol) {
2441 case 0:
2442 return dsa_user_dcbnl_del_default_prio(dev, app);
2443 default:
2444 return -EOPNOTSUPP;
2445 }
2446 break;
2447 case IEEE_8021QAZ_APP_SEL_DSCP:
2448 return dsa_user_dcbnl_del_dscp_prio(dev, app);
2449 default:
2450 return -EOPNOTSUPP;
2451 }
2452 }
2453
2454 /* Pre-populate the DCB application priority table with the priorities
2455 * configured during switch setup, which we read from hardware here.
2456 */
2457 static int dsa_user_dcbnl_init(struct net_device *dev)
2458 {
2459 struct dsa_port *dp = dsa_user_to_port(dev);
2460 struct dsa_switch *ds = dp->ds;
2461 int port = dp->index;
2462 int err;
2463
2464 if (ds->ops->port_get_default_prio) {
2465 int prio = ds->ops->port_get_default_prio(ds, port);
2466 struct dcb_app app = {
2467 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
2468 .protocol = 0,
2469 .priority = prio,
2470 };
2471
2472 if (prio < 0)
2473 return prio;
2474
2475 err = dcb_ieee_setapp(dev, &app);
2476 if (err)
2477 return err;
2478 }
2479
2480 if (ds->ops->port_get_dscp_prio) {
2481 int protocol;
2482
2483 for (protocol = 0; protocol < 64; protocol++) {
2484 struct dcb_app app = {
2485 .selector = IEEE_8021QAZ_APP_SEL_DSCP,
2486 .protocol = protocol,
2487 };
2488 int prio;
2489
2490 prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
2491 if (prio == -EOPNOTSUPP)
2492 continue;
2493 if (prio < 0)
2494 return prio;
2495
2496 app.priority = prio;
2497
2498 err = dcb_ieee_setapp(dev, &app);
2499 if (err)
2500 return err;
2501 }
2502 }
2503
2504 return 0;
2505 }
2506
2507 static const struct ethtool_ops dsa_user_ethtool_ops = {
2508 .get_drvinfo = dsa_user_get_drvinfo,
2509 .get_regs_len = dsa_user_get_regs_len,
2510 .get_regs = dsa_user_get_regs,
2511 .nway_reset = dsa_user_nway_reset,
2512 .get_link = ethtool_op_get_link,
2513 .get_eeprom_len = dsa_user_get_eeprom_len,
2514 .get_eeprom = dsa_user_get_eeprom,
2515 .set_eeprom = dsa_user_set_eeprom,
2516 .get_strings = dsa_user_get_strings,
2517 .get_ethtool_stats = dsa_user_get_ethtool_stats,
2518 .get_sset_count = dsa_user_get_sset_count,
2519 .get_eth_phy_stats = dsa_user_get_eth_phy_stats,
2520 .get_eth_mac_stats = dsa_user_get_eth_mac_stats,
2521 .get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
2522 .get_rmon_stats = dsa_user_get_rmon_stats,
2523 .get_ts_stats = dsa_user_get_ts_stats,
2524 .set_wol = dsa_user_set_wol,
2525 .get_wol = dsa_user_get_wol,
2526 .set_eee = dsa_user_set_eee,
2527 .get_eee = dsa_user_get_eee,
2528 .get_link_ksettings = dsa_user_get_link_ksettings,
2529 .set_link_ksettings = dsa_user_set_link_ksettings,
2530 .get_pause_stats = dsa_user_get_pause_stats,
2531 .get_pauseparam = dsa_user_get_pauseparam,
2532 .set_pauseparam = dsa_user_set_pauseparam,
2533 .get_rxnfc = dsa_user_get_rxnfc,
2534 .set_rxnfc = dsa_user_set_rxnfc,
2535 .get_ts_info = dsa_user_get_ts_info,
2536 .self_test = dsa_user_net_selftest,
2537 .get_mm = dsa_user_get_mm,
2538 .set_mm = dsa_user_set_mm,
2539 .get_mm_stats = dsa_user_get_mm_stats,
2540 };
2541
2542 static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
2543 .ieee_setapp = dsa_user_dcbnl_ieee_setapp,
2544 .ieee_delapp = dsa_user_dcbnl_ieee_delapp,
2545 .dcbnl_setapptrust = dsa_user_dcbnl_set_apptrust,
2546 .dcbnl_getapptrust = dsa_user_dcbnl_get_apptrust,
2547 };
2548
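/* Report hardware counters from the switch driver when available,
 * otherwise fall back to the software per-CPU tstats.
 */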
2549 static void dsa_user_get_stats64(struct net_device *dev,
2550 struct rtnl_link_stats64 *s)
2551 {
2552 struct dsa_port *dp = dsa_user_to_port(dev);
2553 struct dsa_switch *ds = dp->ds;
2554
2555 if (ds->ops->get_stats64)
2556 ds->ops->get_stats64(ds, dp->index, s);
2557 else
2558 dev_get_tstats64(dev, s);
2559 }
2560
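/* Describe this device's position in the forwarding path for flowtable
 * offload: record the tagging protocol and port index, then let path
 * resolution continue on the conduit interface.
 */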
2561 static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
2562 struct net_device_path *path)
2563 {
2564 struct dsa_port *dp = dsa_user_to_port(ctx->dev);
2565 struct net_device *conduit = dsa_port_to_conduit(dp);
2566 struct dsa_port *cpu_dp = dp->cpu_dp;
2567
2568 path->dev = ctx->dev;
2569 path->type = DEV_PATH_DSA;
2570 path->dsa.proto = cpu_dp->tag_ops->proto;
2571 path->dsa.port = dp->index;
2572 ctx->dev = conduit;
2573
2574 return 0;
2575 }
2576
2577 static const struct net_device_ops dsa_user_netdev_ops = {
2578 .ndo_open = dsa_user_open,
2579 .ndo_stop = dsa_user_close,
2580 .ndo_start_xmit = dsa_user_xmit,
2581 .ndo_change_rx_flags = dsa_user_change_rx_flags,
2582 .ndo_set_rx_mode = dsa_user_set_rx_mode,
2583 .ndo_set_mac_address = dsa_user_set_mac_address,
2584 .ndo_fdb_dump = dsa_user_fdb_dump,
2585 .ndo_eth_ioctl = dsa_user_ioctl,
2586 .ndo_get_iflink = dsa_user_get_iflink,
2587 #ifdef CONFIG_NET_POLL_CONTROLLER
2588 .ndo_netpoll_setup = dsa_user_netpoll_setup,
2589 .ndo_netpoll_cleanup = dsa_user_netpoll_cleanup,
2590 .ndo_poll_controller = dsa_user_poll_controller,
2591 #endif
2592 .ndo_setup_tc = dsa_user_setup_tc,
2593 .ndo_get_stats64 = dsa_user_get_stats64,
2594 .ndo_vlan_rx_add_vid = dsa_user_vlan_rx_add_vid,
2595 .ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid,
2596 .ndo_change_mtu = dsa_user_change_mtu,
2597 .ndo_fill_forward_path = dsa_user_fill_forward_path,
2598 };
2599
2600 static const struct device_type dsa_type = {
2601 .name = "dsa",
2602 };
2603
2604 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2605 {
2606 const struct dsa_port *dp = dsa_to_port(ds, port);
2607
2608 if (dp->pl)
2609 phylink_mac_change(dp->pl, up);
2610 }
2611 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2612
2613 static void dsa_user_phylink_fixed_state(struct phylink_config *config,
2614 struct phylink_link_state *state)
2615 {
2616 struct dsa_port *dp = dsa_phylink_to_port(config);
2617 struct dsa_switch *ds = dp->ds;
2618
2619 /* No need to check that this operation is valid; the callback would
2620 * not be called if it were not.
2621 */
2622 ds->ops->phylink_fixed_state(ds, dp->index, state);
2623 }
2624
2625 /* user device setup *******************************************************/
2626 static int dsa_user_phy_connect(struct net_device *user_dev, int addr,
2627 u32 flags)
2628 {
2629 struct dsa_port *dp = dsa_user_to_port(user_dev);
2630 struct dsa_switch *ds = dp->ds;
2631
2632 user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr);
2633 if (!user_dev->phydev) {
2634 netdev_err(user_dev, "no phy at %d\n", addr);
2635 return -ENODEV;
2636 }
2637
2638 user_dev->phydev->dev_flags |= flags;
2639
2640 return phylink_connect_phy(dp->pl, user_dev->phydev);
2641 }
2642
2643 static int dsa_user_phy_setup(struct net_device *user_dev)
2644 {
2645 struct dsa_port *dp = dsa_user_to_port(user_dev);
2646 struct device_node *port_dn = dp->dn;
2647 struct dsa_switch *ds = dp->ds;
2648 u32 phy_flags = 0;
2649 int ret;
2650
2651 dp->pl_config.dev = &user_dev->dev;
2652 dp->pl_config.type = PHYLINK_NETDEV;
2653
2654 /* The get_fixed_state callback takes precedence over polling the
2655 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
2656 * this if the switch provides such a callback.
2657 */
2658 if (ds->ops->phylink_fixed_state) {
2659 dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state;
2660 dp->pl_config.poll_fixed_state = true;
2661 }
2662
2663 ret = dsa_port_phylink_create(dp);
2664 if (ret)
2665 return ret;
2666
2667 if (ds->ops->get_phy_flags)
2668 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2669
2670 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2671 if (ret == -ENODEV && ds->user_mii_bus) {
2672 /* We could not connect to a designated PHY or SFP, so try to
2673 * use the switch internal MDIO bus instead
2674 */
2675 ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags);
2676 }
2677 if (ret) {
2678 netdev_err(user_dev, "failed to connect to PHY: %pe\n",
2679 ERR_PTR(ret));
2680 dsa_port_phylink_destroy(dp);
2681 }
2682
2683 return ret;
2684 }
2685
2686 void dsa_user_setup_tagger(struct net_device *user)
2687 {
2688 struct dsa_port *dp = dsa_user_to_port(user);
2689 struct net_device *conduit = dsa_port_to_conduit(dp);
2690 struct dsa_user_priv *p = netdev_priv(user);
2691 const struct dsa_port *cpu_dp = dp->cpu_dp;
2692 const struct dsa_switch *ds = dp->ds;
2693
2694 user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2695 user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2696 /* Try to save one extra realloc later in the TX path (in the conduit)
2697 * by also inheriting the conduit's needed headroom and tailroom.
2698 * The 8021q driver also does this.
2699 */
2700 user->needed_headroom += conduit->needed_headroom;
2701 user->needed_tailroom += conduit->needed_tailroom;
2702
2703 p->xmit = cpu_dp->tag_ops->xmit;
2704
2705 user->features = conduit->vlan_features | NETIF_F_HW_TC;
2706 user->hw_features |= NETIF_F_HW_TC;
2707 if (user->needed_tailroom)
2708 user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2709 if (ds->needs_standalone_vlan_filtering)
2710 user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2711
2712 user->lltx = true;
2713 }
2714
2715 int dsa_user_suspend(struct net_device *user_dev)
2716 {
2717 struct dsa_port *dp = dsa_user_to_port(user_dev);
2718
2719 if (!netif_running(user_dev))
2720 return 0;
2721
2722 netif_device_detach(user_dev);
2723
2724 rtnl_lock();
2725 phylink_stop(dp->pl);
2726 rtnl_unlock();
2727
2728 return 0;
2729 }
2730
2731 int dsa_user_resume(struct net_device *user_dev)
2732 {
2733 struct dsa_port *dp = dsa_user_to_port(user_dev);
2734
2735 if (!netif_running(user_dev))
2736 return 0;
2737
2738 netif_device_attach(user_dev);
2739
2740 rtnl_lock();
2741 phylink_start(dp->pl);
2742 rtnl_unlock();
2743
2744 return 0;
2745 }
2746
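/* Allocate and register the user netdevice for @port: inherit the MAC
 * address from the conduit unless one was provided, set up the tagger,
 * PHY/phylink, an initial MTU of ETH_DATA_LEN, DCB defaults, and link
 * the new device as an upper of the conduit. All steps are unwound on
 * error.
 */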
2747 int dsa_user_create(struct dsa_port *port)
2748 {
2749 struct net_device *conduit = dsa_port_to_conduit(port);
2750 struct dsa_switch *ds = port->ds;
2751 struct net_device *user_dev;
2752 struct dsa_user_priv *p;
2753 const char *name;
2754 int assign_type;
2755 int ret;
2756
2757 if (!ds->num_tx_queues)
2758 ds->num_tx_queues = 1;
2759
2760 if (port->name) {
2761 name = port->name;
2762 assign_type = NET_NAME_PREDICTABLE;
2763 } else {
2764 name = "eth%d";
2765 assign_type = NET_NAME_ENUM;
2766 }
2767
2768 user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name,
2769 assign_type, ether_setup,
2770 ds->num_tx_queues, 1);
2771 if (user_dev == NULL)
2772 return -ENOMEM;
2773
2774 user_dev->rtnl_link_ops = &dsa_link_ops;
2775 user_dev->ethtool_ops = &dsa_user_ethtool_ops;
2776 #if IS_ENABLED(CONFIG_DCB)
2777 user_dev->dcbnl_ops = &dsa_user_dcbnl_ops;
2778 #endif
2779 if (!is_zero_ether_addr(port->mac))
2780 eth_hw_addr_set(user_dev, port->mac);
2781 else
2782 eth_hw_addr_inherit(user_dev, conduit);
2783 user_dev->priv_flags |= IFF_NO_QUEUE;
2784 if (dsa_switch_supports_uc_filtering(ds))
2785 user_dev->priv_flags |= IFF_UNICAST_FLT;
2786 user_dev->netdev_ops = &dsa_user_netdev_ops;
2787 if (ds->ops->port_max_mtu)
2788 user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2789 SET_NETDEV_DEVTYPE(user_dev, &dsa_type);
2790
2791 SET_NETDEV_DEV(user_dev, port->ds->dev);
2792 SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port);
2793 user_dev->dev.of_node = port->dn;
2794 user_dev->vlan_features = conduit->vlan_features;
2795
2796 p = netdev_priv(user_dev);
2797 user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
2798
2799 ret = gro_cells_init(&p->gcells, user_dev);
2800 if (ret)
2801 goto out_free;
2802
2803 p->dp = port;
2804 INIT_LIST_HEAD(&p->mall_tc_list);
2805 port->user = user_dev;
2806 dsa_user_setup_tagger(user_dev);
2807
2808 netif_carrier_off(user_dev);
2809
2810 ret = dsa_user_phy_setup(user_dev);
2811 if (ret) {
2812 netdev_err(user_dev,
2813 "error %d setting up PHY for tree %d, switch %d, port %d\n",
2814 ret, ds->dst->index, ds->index, port->index);
2815 goto out_gcells;
2816 }
2817
2818 rtnl_lock();
2819
2820 ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN);
2821 if (ret && ret != -EOPNOTSUPP)
2822 dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2823 ret, ETH_DATA_LEN, port->index);
2824
2825 ret = register_netdevice(user_dev);
2826 if (ret) {
2827 netdev_err(conduit, "error %d registering interface %s\n",
2828 ret, user_dev->name);
2829 rtnl_unlock();
2830 goto out_phy;
2831 }
2832
2833 if (IS_ENABLED(CONFIG_DCB)) {
2834 ret = dsa_user_dcbnl_init(user_dev);
2835 if (ret) {
2836 netdev_err(user_dev,
2837 "failed to initialize DCB: %pe\n",
2838 ERR_PTR(ret));
2839 rtnl_unlock();
2840 goto out_unregister;
2841 }
2842 }
2843
2844 ret = netdev_upper_dev_link(conduit, user_dev, NULL);
2845
2846 rtnl_unlock();
2847
2848 if (ret)
2849 goto out_unregister;
2850
2851 return 0;
2852
2853 out_unregister:
2854 unregister_netdev(user_dev);
2855 out_phy:
2856 rtnl_lock();
2857 phylink_disconnect_phy(p->dp->pl);
2858 rtnl_unlock();
2859 dsa_port_phylink_destroy(p->dp);
2860 out_gcells:
2861 gro_cells_destroy(&p->gcells);
2862 out_free:
2863 free_netdev(user_dev);
2864 port->user = NULL;
2865 return ret;
2866 }
2867
2868 void dsa_user_destroy(struct net_device *user_dev)
2869 {
2870 struct net_device *conduit = dsa_user_to_conduit(user_dev);
2871 struct dsa_port *dp = dsa_user_to_port(user_dev);
2872 struct dsa_user_priv *p = netdev_priv(user_dev);
2873
2874 netif_carrier_off(user_dev);
2875 rtnl_lock();
2876 netdev_upper_dev_unlink(conduit, user_dev);
2877 unregister_netdevice(user_dev);
2878 phylink_disconnect_phy(dp->pl);
2879 rtnl_unlock();
2880
2881 dsa_port_phylink_destroy(dp);
2882 gro_cells_destroy(&p->gcells);
2883 free_netdev(user_dev);
2884 }
2885
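/* Live-migrate a user port to a different DSA conduit: validate that
 * the candidate is an eligible conduit with no unknown uppers, open it
 * if the user port is up, re-link the upper/lower relationship, notify
 * the switch driver, and refresh the MTU through the new CPU port.
 * Failures are reverted back to the old conduit.
 */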
2886 int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
2887 struct netlink_ext_ack *extack)
2888 {
2889 struct net_device *old_conduit = dsa_user_to_conduit(dev);
2890 struct dsa_port *dp = dsa_user_to_port(dev);
2891 struct dsa_switch *ds = dp->ds;
2892 struct net_device *upper;
2893 struct list_head *iter;
2894 int err;
2895
2896 if (conduit == old_conduit)
2897 return 0;
2898
2899 if (!ds->ops->port_change_conduit) {
2900 NL_SET_ERR_MSG_MOD(extack,
2901 "Driver does not support changing DSA conduit");
2902 return -EOPNOTSUPP;
2903 }
2904
2905 if (!netdev_uses_dsa(conduit)) {
2906 NL_SET_ERR_MSG_MOD(extack,
2907 "Interface not eligible as DSA conduit");
2908 return -EOPNOTSUPP;
2909 }
2910
2911 netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
2912 if (dsa_user_dev_check(upper))
2913 continue;
2914 if (netif_is_bridge_master(upper))
2915 continue;
2916 NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers");
2917 return -EOPNOTSUPP;
2918 }
2919
2920 /* Since we allow live-changing the DSA conduit, and we auto-open the
2921 * DSA conduit when the user port opens, we need to ensure that the
2922 * new DSA conduit is open too.
2923 */
2924 if (dev->flags & IFF_UP) {
2925 err = dev_open(conduit, extack);
2926 if (err)
2927 return err;
2928 }
2929
2930 netdev_upper_dev_unlink(old_conduit, dev);
2931
2932 err = netdev_upper_dev_link(conduit, dev, extack);
2933 if (err)
2934 goto out_revert_old_conduit_unlink;
2935
2936 err = dsa_port_change_conduit(dp, conduit, extack);
2937 if (err)
2938 goto out_revert_conduit_link;
2939
2940 /* Update the MTU of the new CPU port through cross-chip notifiers */
2941 err = dsa_user_change_mtu(dev, dev->mtu);
2942 if (err && err != -EOPNOTSUPP) {
2943 netdev_warn(dev,
2944 "nonfatal error updating MTU with new conduit: %pe\n",
2945 ERR_PTR(err));
2946 }
2947
2948 return 0;
2949
2950 out_revert_conduit_link:
2951 netdev_upper_dev_unlink(conduit, dev);
2952 out_revert_old_conduit_unlink:
2953 netdev_upper_dev_link(old_conduit, dev, NULL);
2954 return err;
2955 }
2956
2957 bool dsa_user_dev_check(const struct net_device *dev)
2958 {
2959 return dev->netdev_ops == &dsa_user_netdev_ops;
2960 }
2961 EXPORT_SYMBOL_GPL(dsa_user_dev_check);
2962
2963 static int dsa_user_changeupper(struct net_device *dev,
2964 struct netdev_notifier_changeupper_info *info)
2965 {
2966 struct netlink_ext_ack *extack;
2967 int err = NOTIFY_DONE;
2968 struct dsa_port *dp;
2969
2970 if (!dsa_user_dev_check(dev))
2971 return err;
2972
2973 dp = dsa_user_to_port(dev);
2974 extack = netdev_notifier_info_to_extack(&info->info);
2975
2976 if (netif_is_bridge_master(info->upper_dev)) {
2977 if (info->linking) {
2978 err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2979 if (!err)
2980 dsa_bridge_mtu_normalization(dp);
2981 if (err == -EOPNOTSUPP) {
2982 NL_SET_ERR_MSG_WEAK_MOD(extack,
2983 "Offloading not supported");
2984 err = 0;
2985 }
2986 err = notifier_from_errno(err);
2987 } else {
2988 dsa_port_bridge_leave(dp, info->upper_dev);
2989 err = NOTIFY_OK;
2990 }
2991 } else if (netif_is_lag_master(info->upper_dev)) {
2992 if (info->linking) {
2993 err = dsa_port_lag_join(dp, info->upper_dev,
2994 info->upper_info, extack);
2995 if (err == -EOPNOTSUPP) {
2996 NL_SET_ERR_MSG_WEAK_MOD(extack,
2997 "Offloading not supported");
2998 err = 0;
2999 }
3000 err = notifier_from_errno(err);
3001 } else {
3002 dsa_port_lag_leave(dp, info->upper_dev);
3003 err = NOTIFY_OK;
3004 }
3005 } else if (is_hsr_master(info->upper_dev)) {
3006 if (info->linking) {
3007 err = dsa_port_hsr_join(dp, info->upper_dev, extack);
3008 if (err == -EOPNOTSUPP) {
3009 NL_SET_ERR_MSG_WEAK_MOD(extack,
3010 "Offloading not supported");
3011 err = 0;
3012 }
3013 err = notifier_from_errno(err);
3014 } else {
3015 dsa_port_hsr_leave(dp, info->upper_dev);
3016 err = NOTIFY_OK;
3017 }
3018 }
3019
3020 return err;
3021 }
3022
3023 static int dsa_user_prechangeupper(struct net_device *dev,
3024 struct netdev_notifier_changeupper_info *info)
3025 {
3026 struct dsa_port *dp;
3027
3028 if (!dsa_user_dev_check(dev))
3029 return NOTIFY_DONE;
3030
3031 dp = dsa_user_to_port(dev);
3032
3033 if (netif_is_bridge_master(info->upper_dev) && !info->linking)
3034 dsa_port_pre_bridge_leave(dp, info->upper_dev);
3035 else if (netif_is_lag_master(info->upper_dev) && !info->linking)
3036 dsa_port_pre_lag_leave(dp, info->upper_dev);
3037 /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices cannot
3038 * be meaningfully placed under a bridge yet
3039 */
3040
3041 return NOTIFY_DONE;
3042 }
3043
3044 static int
3045 dsa_user_lag_changeupper(struct net_device *dev,
3046 struct netdev_notifier_changeupper_info *info)
3047 {
3048 struct net_device *lower;
3049 struct list_head *iter;
3050 int err = NOTIFY_DONE;
3051 struct dsa_port *dp;
3052
3053 if (!netif_is_lag_master(dev))
3054 return err;
3055
3056 netdev_for_each_lower_dev(dev, lower, iter) {
3057 if (!dsa_user_dev_check(lower))
3058 continue;
3059
3060 dp = dsa_user_to_port(lower);
3061 if (!dp->lag)
3062 /* Software LAG */
3063 continue;
3064
3065 err = dsa_user_changeupper(lower, info);
3066 if (notifier_to_errno(err))
3067 break;
3068 }
3069
3070 return err;
3071 }
3072
3073 /* Same as dsa_user_lag_changeupper() except that it calls
3074 * dsa_user_prechangeupper()
3075 */
3076 static int
3077 dsa_user_lag_prechangeupper(struct net_device *dev,
3078 struct netdev_notifier_changeupper_info *info)
3079 {
3080 struct net_device *lower;
3081 struct list_head *iter;
3082 int err = NOTIFY_DONE;
3083 struct dsa_port *dp;
3084
3085 if (!netif_is_lag_master(dev))
3086 return err;
3087
3088 netdev_for_each_lower_dev(dev, lower, iter) {
3089 if (!dsa_user_dev_check(lower))
3090 continue;
3091
3092 dp = dsa_user_to_port(lower);
3093 if (!dp->lag)
3094 /* Software LAG */
3095 continue;
3096
3097 err = dsa_user_prechangeupper(lower, info);
3098 if (notifier_to_errno(err))
3099 break;
3100 }
3101
3102 return err;
3103 }
3104
3105 static int
3106 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
3107 struct netdev_notifier_changeupper_info *info)
3108 {
3109 struct netlink_ext_ack *ext_ack;
3110 struct net_device *user, *br;
3111 struct dsa_port *dp;
3112
3113 ext_ack = netdev_notifier_info_to_extack(&info->info);
3114
3115 if (!is_vlan_dev(dev))
3116 return NOTIFY_DONE;
3117
3118 user = vlan_dev_real_dev(dev);
3119 if (!dsa_user_dev_check(user))
3120 return NOTIFY_DONE;
3121
3122 dp = dsa_user_to_port(user);
3123 br = dsa_port_bridge_dev_get(dp);
3124 if (!br)
3125 return NOTIFY_DONE;
3126
3127 /* Deny enslaving a VLAN device into a VLAN-aware bridge */
3128 if (br_vlan_enabled(br) &&
3129 netif_is_bridge_master(info->upper_dev) && info->linking) {
3130 NL_SET_ERR_MSG_MOD(ext_ack,
3131 "Cannot make VLAN device join VLAN-aware bridge");
3132 return notifier_from_errno(-EINVAL);
3133 }
3134
3135 return NOTIFY_DONE;
3136 }
3137
3138 static int
3139 dsa_user_check_8021q_upper(struct net_device *dev,
3140 struct netdev_notifier_changeupper_info *info)
3141 {
3142 struct dsa_port *dp = dsa_user_to_port(dev);
3143 struct net_device *br = dsa_port_bridge_dev_get(dp);
3144 struct bridge_vlan_info br_info;
3145 struct netlink_ext_ack *extack;
3146 int err = NOTIFY_DONE;
3147 u16 vid;
3148
3149 if (!br || !br_vlan_enabled(br))
3150 return NOTIFY_DONE;
3151
3152 extack = netdev_notifier_info_to_extack(&info->info);
3153 vid = vlan_dev_vlan_id(info->upper_dev);
3154
3155 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
3156 * device, respectively the VID, is not found. Returning
3157 * 0 means success, which is a failure for us here.
3158 */
3159 err = br_vlan_get_info(br, vid, &br_info);
3160 if (err == 0) {
3161 NL_SET_ERR_MSG_MOD(extack,
3162 "This VLAN is already configured by the bridge");
3163 return notifier_from_errno(-EBUSY);
3164 }
3165
3166 return NOTIFY_DONE;
3167 }
3168
3169 static int
3170 dsa_user_prechangeupper_sanity_check(struct net_device *dev,
3171 struct netdev_notifier_changeupper_info *info)
3172 {
3173 struct dsa_switch *ds;
3174 struct dsa_port *dp;
3175 int err;
3176
3177 if (!dsa_user_dev_check(dev))
3178 return dsa_prevent_bridging_8021q_upper(dev, info);
3179
3180 dp = dsa_user_to_port(dev);
3181 ds = dp->ds;
3182
3183 if (ds->ops->port_prechangeupper) {
3184 err = ds->ops->port_prechangeupper(ds, dp->index, info);
3185 if (err)
3186 return notifier_from_errno(err);
3187 }
3188
3189 if (is_vlan_dev(info->upper_dev))
3190 return dsa_user_check_8021q_upper(dev, info);
3191
3192 return NOTIFY_DONE;
3193 }
3194
3195 /* To be eligible as a DSA conduit, a LAG must have all lower interfaces be
3196 * eligible DSA conduits. Additionally, all LAG slaves must be DSA conduits of
3197 * switches in the same switch tree.
3198 */
3199 static int dsa_lag_conduit_validate(struct net_device *lag_dev,
3200 struct netlink_ext_ack *extack)
3201 {
3202 struct net_device *lower1, *lower2;
3203 struct list_head *iter1, *iter2;
3204
3205 netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
3206 netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
3207 if (!netdev_uses_dsa(lower1) ||
3208 !netdev_uses_dsa(lower2)) {
3209 NL_SET_ERR_MSG_MOD(extack,
3210 "All LAG ports must be eligible as DSA conduits");
3211 return notifier_from_errno(-EINVAL);
3212 }
3213
3214 if (lower1 == lower2)
3215 continue;
3216
3217 if (!dsa_port_tree_same(lower1->dsa_ptr,
3218 lower2->dsa_ptr)) {
3219 NL_SET_ERR_MSG_MOD(extack,
3220 "LAG contains DSA conduits of disjoint switch trees");
3221 return notifier_from_errno(-EINVAL);
3222 }
3223 }
3224 }
3225
3226 return NOTIFY_DONE;
3227 }
3228
3229 static int
3230 dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit,
3231 struct netdev_notifier_changeupper_info *info)
3232 {
3233 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3234
3235 if (!netdev_uses_dsa(conduit))
3236 return NOTIFY_DONE;
3237
3238 if (!info->linking)
3239 return NOTIFY_DONE;
3240
3241 /* Allow DSA switch uppers */
3242 if (dsa_user_dev_check(info->upper_dev))
3243 return NOTIFY_DONE;
3244
3245 /* Allow bridge uppers of DSA conduits, subject to further
3246 * restrictions in dsa_bridge_prechangelower_sanity_check()
3247 */
3248 if (netif_is_bridge_master(info->upper_dev))
3249 return NOTIFY_DONE;
3250
3251 /* Allow LAG uppers, subject to further restrictions in
3252 * dsa_lag_conduit_prechangelower_sanity_check()
3253 */
3254 if (netif_is_lag_master(info->upper_dev))
3255 return dsa_lag_conduit_validate(info->upper_dev, extack);
3256
3257 NL_SET_ERR_MSG_MOD(extack,
3258 "DSA conduit cannot join unknown upper interfaces");
3259 return notifier_from_errno(-EBUSY);
3260 }
3261
3262 static int
3263 dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev,
3264 struct netdev_notifier_changeupper_info *info)
3265 {
3266 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3267 struct net_device *lag_dev = info->upper_dev;
3268 struct net_device *lower;
3269 struct list_head *iter;
3270
3271 if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
3272 return NOTIFY_DONE;
3273
3274 if (!info->linking)
3275 return NOTIFY_DONE;
3276
3277 if (!netdev_uses_dsa(dev)) {
3278 NL_SET_ERR_MSG(extack,
3279 "Only DSA conduits can join a LAG DSA conduit");
3280 return notifier_from_errno(-EINVAL);
3281 }
3282
3283 netdev_for_each_lower_dev(lag_dev, lower, iter) {
3284 if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
3285 NL_SET_ERR_MSG(extack,
3286 "Interface is DSA conduit for a different switch tree than this LAG");
3287 return notifier_from_errno(-EINVAL);
3288 }
3289
3290 break;
3291 }
3292
3293 return NOTIFY_DONE;
3294 }
3295
3296 /* Don't allow bridging of DSA conduits, since the bridge layer rx_handler
3297 * prevents the DSA fake ethertype handler from being invoked, so we don't get the
3298 * chance to strip off and parse the DSA switch tag protocol header (the bridge
3299 * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
3300 * frames).
3301 * The only case where that would not be an issue is when bridging can already
3302 * be offloaded, such as when the DSA conduit is itself a DSA or plain switchdev
3303 * port, and is bridged only with other ports from the same hardware device.
3304 */
3305 static int
3306 dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
3307 struct netdev_notifier_changeupper_info *info)
3308 {
3309 struct net_device *br = info->upper_dev;
3310 struct netlink_ext_ack *extack;
3311 struct net_device *lower;
3312 struct list_head *iter;
3313
3314 if (!netif_is_bridge_master(br))
3315 return NOTIFY_DONE;
3316
3317 if (!info->linking)
3318 return NOTIFY_DONE;
3319
3320 extack = netdev_notifier_info_to_extack(&info->info);
3321
3322 netdev_for_each_lower_dev(br, lower, iter) {
3323 if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
3324 continue;
3325
3326 if (!netdev_port_same_parent_id(lower, new_lower)) {
3327 NL_SET_ERR_MSG(extack,
3328 "Cannot do software bridging with a DSA conduit");
3329 return notifier_from_errno(-EINVAL);
3330 }
3331 }
3332
3333 return NOTIFY_DONE;
3334 }
3335
3336 static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst,
3337 struct net_device *lag_dev)
3338 {
3339 struct net_device *new_conduit = dsa_tree_find_first_conduit(dst);
3340 struct dsa_port *dp;
3341 int err;
3342
3343 dsa_tree_for_each_user_port(dp, dst) {
3344 if (dsa_port_to_conduit(dp) != lag_dev)
3345 continue;
3346
3347 err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
3348 if (err) {
3349 netdev_err(dp->user,
3350 "failed to restore conduit to %s: %pe\n",
3351 new_conduit->name, ERR_PTR(err));
3352 }
3353 }
3354 }
3355
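/* The conduit has joined a LAG: set the LAG device up as a conduit on
 * the CPU port, then migrate every user port that was using @conduit
 * over to the LAG device, rolling everything back on failure.
 */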
3356 static int dsa_conduit_lag_join(struct net_device *conduit,
3357 struct net_device *lag_dev,
3358 struct netdev_lag_upper_info *uinfo,
3359 struct netlink_ext_ack *extack)
3360 {
3361 struct dsa_port *cpu_dp = conduit->dsa_ptr;
3362 struct dsa_switch_tree *dst = cpu_dp->dst;
3363 struct dsa_port *dp;
3364 int err;
3365
3366 err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack);
3367 if (err)
3368 return err;
3369
3370 dsa_tree_for_each_user_port(dp, dst) {
3371 if (dsa_port_to_conduit(dp) != conduit)
3372 continue;
3373
3374 err = dsa_user_change_conduit(dp->user, lag_dev, extack);
3375 if (err)
3376 goto restore;
3377 }
3378
3379 return 0;
3380
3381 restore:
3382 dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
3383 if (dsa_port_to_conduit(dp) != lag_dev)
3384 continue;
3385
3386 err = dsa_user_change_conduit(dp->user, conduit, NULL);
3387 if (err) {
3388 netdev_err(dp->user,
3389 "failed to restore conduit to %s: %pe\n",
3390 conduit->name, ERR_PTR(err));
3391 }
3392 }
3393
3394 dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
3395
3396 return err;
3397 }
3398
3399 static void dsa_conduit_lag_leave(struct net_device *conduit,
3400 struct net_device *lag_dev)
3401 {
3402 struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
3403 struct dsa_switch_tree *dst = cpu_dp->dst;
3404 struct dsa_port *new_cpu_dp = NULL;
3405 struct net_device *lower;
3406 struct list_head *iter;
3407
3408 netdev_for_each_lower_dev(lag_dev, lower, iter) {
3409 if (netdev_uses_dsa(lower)) {
3410 new_cpu_dp = lower->dsa_ptr;
3411 break;
3412 }
3413 }
3414
3415 if (new_cpu_dp) {
3416 /* Update the CPU port of the user ports still under the LAG
3417 * so that dsa_port_to_conduit() continues to work properly
3418 */
3419 dsa_tree_for_each_user_port(dp, dst)
3420 if (dsa_port_to_conduit(dp) == lag_dev)
3421 dp->cpu_dp = new_cpu_dp;
3422
3423 /* Update the index of the virtual CPU port to match the lowest
3424 * physical CPU port
3425 */
3426 lag_dev->dsa_ptr = new_cpu_dp;
3427 wmb();
3428 } else {
3429 /* If the LAG DSA conduit has no ports left, migrate back all
3430 * user ports to the first physical CPU port
3431 */
3432 dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev);
3433 }
3434
3435 /* This DSA conduit has left its LAG in any case, so let
3436 * the CPU port leave the hardware LAG as well
3437 */
3438 dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
3439 }
3440
3441 static int dsa_conduit_changeupper(struct net_device *dev,
3442 struct netdev_notifier_changeupper_info *info)
3443 {
3444 struct netlink_ext_ack *extack;
3445 int err = NOTIFY_DONE;
3446
3447 if (!netdev_uses_dsa(dev))
3448 return err;
3449
3450 extack = netdev_notifier_info_to_extack(&info->info);
3451
3452 if (netif_is_lag_master(info->upper_dev)) {
3453 if (info->linking) {
3454 err = dsa_conduit_lag_join(dev, info->upper_dev,
3455 info->upper_info, extack);
3456 err = notifier_from_errno(err);
3457 } else {
3458 dsa_conduit_lag_leave(dev, info->upper_dev);
3459 err = NOTIFY_OK;
3460 }
3461 }
3462
3463 return err;
3464 }
3465
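/* netdev notifier: run the PRECHANGEUPPER sanity checks and the
 * CHANGEUPPER handlers for user ports, LAGs and conduits, mirror LAG
 * lower state changes towards the CPU port, track the conduit's
 * administrative and operational state, and close user ports when their
 * conduit goes down.
 */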
3466 static int dsa_user_netdevice_event(struct notifier_block *nb,
3467 unsigned long event, void *ptr)
3468 {
3469 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3470
3471 switch (event) {
3472 case NETDEV_PRECHANGEUPPER: {
3473 struct netdev_notifier_changeupper_info *info = ptr;
3474 int err;
3475
3476 err = dsa_user_prechangeupper_sanity_check(dev, info);
3477 if (notifier_to_errno(err))
3478 return err;
3479
3480 err = dsa_conduit_prechangeupper_sanity_check(dev, info);
3481 if (notifier_to_errno(err))
3482 return err;
3483
3484 err = dsa_lag_conduit_prechangelower_sanity_check(dev, info);
3485 if (notifier_to_errno(err))
3486 return err;
3487
3488 err = dsa_bridge_prechangelower_sanity_check(dev, info);
3489 if (notifier_to_errno(err))
3490 return err;
3491
3492 err = dsa_user_prechangeupper(dev, ptr);
3493 if (notifier_to_errno(err))
3494 return err;
3495
3496 err = dsa_user_lag_prechangeupper(dev, ptr);
3497 if (notifier_to_errno(err))
3498 return err;
3499
3500 break;
3501 }
3502 case NETDEV_CHANGEUPPER: {
3503 int err;
3504
3505 err = dsa_user_changeupper(dev, ptr);
3506 if (notifier_to_errno(err))
3507 return err;
3508
3509 err = dsa_user_lag_changeupper(dev, ptr);
3510 if (notifier_to_errno(err))
3511 return err;
3512
3513 err = dsa_conduit_changeupper(dev, ptr);
3514 if (notifier_to_errno(err))
3515 return err;
3516
3517 break;
3518 }
3519 case NETDEV_CHANGELOWERSTATE: {
3520 struct netdev_notifier_changelowerstate_info *info = ptr;
3521 struct dsa_port *dp;
3522 int err = 0;
3523
3524 if (dsa_user_dev_check(dev)) {
3525 dp = dsa_user_to_port(dev);
3526
3527 err = dsa_port_lag_change(dp, info->lower_state_info);
3528 }
3529
3530 /* Mirror LAG port events on DSA conduits that are in
3531 * a LAG towards their respective switch CPU ports
3532 */
3533 if (netdev_uses_dsa(dev)) {
3534 dp = dev->dsa_ptr;
3535
3536 err = dsa_port_lag_change(dp, info->lower_state_info);
3537 }
3538
3539 return notifier_from_errno(err);
3540 }
3541 case NETDEV_CHANGE:
3542 case NETDEV_UP: {
3543 /* Track state of conduit port.
3544 * The DSA driver may require the conduit port (and indirectly
3545 * the tagger) to be available for some special operations.
3546 */
3547 if (netdev_uses_dsa(dev)) {
3548 struct dsa_port *cpu_dp = dev->dsa_ptr;
3549 struct dsa_switch_tree *dst = cpu_dp->ds->dst;
3550
3551 /* Track when the conduit port is UP */
3552 dsa_tree_conduit_oper_state_change(dst, dev,
3553 netif_oper_up(dev));
3554
3555 /* Track when the conduit port is ready and can accept
3556 * packets.
3557 * NETDEV_UP event is not enough to flag a port as ready.
3558 * We also have to wait for linkwatch_do_dev to dev_activate
3559 * and emit a NETDEV_CHANGE event.
3560 * We check whether a conduit port is ready by checking if it has
3561 * a qdisc assigned and that qdisc is not the noop qdisc.
3562 */
3563 dsa_tree_conduit_admin_state_change(dst, dev,
3564 !qdisc_tx_is_noop(dev));
3565
3566 return NOTIFY_OK;
3567 }
3568
3569 return NOTIFY_DONE;
3570 }
3571 case NETDEV_GOING_DOWN: {
3572 struct dsa_port *dp, *cpu_dp;
3573 struct dsa_switch_tree *dst;
3574 LIST_HEAD(close_list);
3575
3576 if (!netdev_uses_dsa(dev))
3577 return NOTIFY_DONE;
3578
3579 cpu_dp = dev->dsa_ptr;
3580 dst = cpu_dp->ds->dst;
3581
3582 dsa_tree_conduit_admin_state_change(dst, dev, false);
3583
3584 list_for_each_entry(dp, &dst->ports, list) {
3585 if (!dsa_port_is_user(dp))
3586 continue;
3587
3588 if (dp->cpu_dp != cpu_dp)
3589 continue;
3590
3591 list_add(&dp->user->close_list, &close_list);
3592 }
3593
3594 dev_close_many(&close_list, true);
3595
3596 return NOTIFY_OK;
3597 }
3598 default:
3599 break;
3600 }
3601
3602 return NOTIFY_DONE;
3603 }
3604
3605 static void
3606 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
3607 {
3608 struct switchdev_notifier_fdb_info info = {};
3609
3610 info.addr = switchdev_work->addr;
3611 info.vid = switchdev_work->vid;
3612 info.offloaded = true;
3613 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
3614 switchdev_work->orig_dev, &info.info, NULL);
3615 }
3616
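/* Deferred handler for SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE: program the
 * entry through the host, LAG or plain port FDB operations, and notify
 * the bridge that additions have been offloaded.
 */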
3617 static void dsa_user_switchdev_event_work(struct work_struct *work)
3618 {
3619 struct dsa_switchdev_event_work *switchdev_work =
3620 container_of(work, struct dsa_switchdev_event_work, work);
3621 const unsigned char *addr = switchdev_work->addr;
3622 struct net_device *dev = switchdev_work->dev;
3623 u16 vid = switchdev_work->vid;
3624 struct dsa_switch *ds;
3625 struct dsa_port *dp;
3626 int err;
3627
3628 dp = dsa_user_to_port(dev);
3629 ds = dp->ds;
3630
3631 switch (switchdev_work->event) {
3632 case SWITCHDEV_FDB_ADD_TO_DEVICE:
3633 if (switchdev_work->host_addr)
3634 err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
3635 else if (dp->lag)
3636 err = dsa_port_lag_fdb_add(dp, addr, vid);
3637 else
3638 err = dsa_port_fdb_add(dp, addr, vid);
3639 if (err) {
3640 dev_err(ds->dev,
3641 "port %d failed to add %pM vid %d to fdb: %d\n",
3642 dp->index, addr, vid, err);
3643 break;
3644 }
3645 dsa_fdb_offload_notify(switchdev_work);
3646 break;
3647
3648 case SWITCHDEV_FDB_DEL_TO_DEVICE:
3649 if (switchdev_work->host_addr)
3650 err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
3651 else if (dp->lag)
3652 err = dsa_port_lag_fdb_del(dp, addr, vid);
3653 else
3654 err = dsa_port_fdb_del(dp, addr, vid);
3655 if (err) {
3656 dev_err(ds->dev,
3657 "port %d failed to delete %pM vid %d from fdb: %d\n",
3658 dp->index, addr, vid, err);
3659 }
3660
3661 break;
3662 }
3663
3664 kfree(switchdev_work);
3665 }
3666
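/* An interface is foreign to @dev if its addresses must be treated as
 * host addresses: a bridge (or bridge port) is foreign unless this
 * switch tree offloads it, and any other kind of interface is always
 * foreign.
 */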
3667 static bool dsa_foreign_dev_check(const struct net_device *dev,
3668 const struct net_device *foreign_dev)
3669 {
3670 const struct dsa_port *dp = dsa_user_to_port(dev);
3671 struct dsa_switch_tree *dst = dp->ds->dst;
3672
3673 if (netif_is_bridge_master(foreign_dev))
3674 return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
3675
3676 if (netif_is_bridge_port(foreign_dev))
3677 return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
3678
3679 /* Everything else is foreign */
3680 return true;
3681 }
3682
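/* Classify an FDB add/del notification and, if the switch can handle it,
 * defer the actual hardware operation to dsa_user_switchdev_event_work().
 * Entries pointing at foreign interfaces, as well as local entries, are
 * installed as host addresses rather than on the user port itself.
 */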
static int dsa_user_fdb_event(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_user_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}

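/* Atomic switchdev notifier for DSA user ports: port attribute changes are
 * handled in place, while FDB add/del events are validated by
 * dsa_user_fdb_event() and then deferred to a workqueue.
 */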
/* Called under rcu_read_lock() */
static int dsa_user_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_user_dev_check,
							   dsa_foreign_dev_check,
							   dsa_user_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

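/* Blocking switchdev notifier: handles port object add/del and attribute
 * changes, possibly notified on foreign interfaces, in a context where
 * sleeping is allowed.
 */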
static int dsa_user_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_user_dev_check,
							    dsa_foreign_dev_check,
							    dsa_user_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_user_dev_check,
						     dsa_user_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

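/* Notifier blocks registered by dsa_user_register_notifier(): one for
 * netdevice events, one for atomic switchdev events and one for blocking
 * switchdev events.
 */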
static struct notifier_block dsa_user_nb __read_mostly = {
	.notifier_call = dsa_user_netdevice_event,
};

struct notifier_block dsa_user_switchdev_notifier = {
	.notifier_call = dsa_user_switchdev_event,
};

struct notifier_block dsa_user_switchdev_blocking_notifier = {
	.notifier_call = dsa_user_switchdev_blocking_event,
};

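/* Register all three notifiers, unwinding in reverse order on failure.
 * A minimal usage sketch, assuming this is called once during DSA core
 * initialization (the exact caller lives outside this file):
 *
 *	err = dsa_user_register_notifier();
 *	if (err)
 *		return err;
 *	...
 *	dsa_user_unregister_notifier();
 */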
int dsa_user_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_user_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_user_nb);
	return err;
}

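/* Unregister the notifiers in the reverse order of registration; failures
 * are only logged since there is nothing left to unwind at this point.
 */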
void dsa_user_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_user_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_user_nb);
	if (err)
		pr_err("DSA: failed to unregister user notifier (%d)\n", err);
}