1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2019-2021 NXP
3 *
4 * This is an umbrella module for all network switches that are
5 * register-compatible with Ocelot and that perform I/O to their host CPU
6 * through an NPI (Node Processor Interface) Ethernet port.
7 */
8 #include <uapi/linux/if_bridge.h>
9 #include <soc/mscc/ocelot_vcap.h>
10 #include <soc/mscc/ocelot_qsys.h>
11 #include <soc/mscc/ocelot_sys.h>
12 #include <soc/mscc/ocelot_dev.h>
13 #include <soc/mscc/ocelot_ana.h>
14 #include <soc/mscc/ocelot_ptp.h>
15 #include <soc/mscc/ocelot.h>
16 #include <linux/dsa/8021q.h>
17 #include <linux/dsa/ocelot.h>
18 #include <linux/platform_device.h>
19 #include <linux/ptp_classify.h>
20 #include <linux/module.h>
21 #include <linux/of_net.h>
22 #include <linux/pci.h>
23 #include <linux/of.h>
24 #include <net/pkt_sched.h>
25 #include <net/dsa.h>
26 #include "felix.h"
27
28 /* Translate the DSA database API into the ocelot switch library API,
29 * which uses VID 0 for all ports that aren't part of a bridge,
30 * and expects the bridge_dev to be NULL in that case.
31 */
32 static struct net_device *felix_classify_db(struct dsa_db db)
33 {
34 switch (db.type) {
35 case DSA_DB_PORT:
36 case DSA_DB_LAG:
37 return NULL;
38 case DSA_DB_BRIDGE:
39 return db.bridge.dev;
40 default:
41 return ERR_PTR(-EOPNOTSUPP);
42 }
43 }
44
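/* Translate the DSA conduit (host) interface into the CPU port through which
 * it is reached: the logical port number of the LAG if the conduit is a
 * bonding/team device, or the index of the single physical CPU port otherwise.
 */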
45 static int felix_cpu_port_for_conduit(struct dsa_switch *ds,
46 struct net_device *conduit)
47 {
48 struct ocelot *ocelot = ds->priv;
49 struct dsa_port *cpu_dp;
50 int lag;
51
52 if (netif_is_lag_master(conduit)) {
53 mutex_lock(&ocelot->fwd_domain_lock);
54 lag = ocelot_bond_get_id(ocelot, conduit);
55 mutex_unlock(&ocelot->fwd_domain_lock);
56
57 return lag;
58 }
59
60 cpu_dp = conduit->dsa_ptr;
61 return cpu_dp->index;
62 }
63
64 /**
65 * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after
66 * vlan_filtering change
67 * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed
68 * @vlan_filtering: Current bridge VLAN filtering setting
69 *
70 * Source port identification for tag_8021q is done using VCAP ES0 rules on the
71 * CPU port(s). The ES0 tag B (inner tag from the packet) can be configured as
72 * either:
73 * - push_inner_tag=0: the inner tag is never pushed into the frame
74 * (and we lose info about the classified VLAN). This is
75 * good when the classified VLAN is a discardable quantity
76 * for the software RX path: it is either set to
77 * OCELOT_STANDALONE_PVID, or to
78 * ocelot_vlan_unaware_pvid(bridge).
79 * - push_inner_tag=1: the inner tag is always pushed. This is good when the
80 * classified VLAN is not a discardable quantity (the port
81 * is under a VLAN-aware bridge, and software needs to
82 * continue processing the packet in the same VLAN as the
83 * hardware).
84 * The point is that what is good for a VLAN-unaware port is not good for a
85 * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in
86 * sync with the VLAN filtering state of the port.
87 */
88 static void
89 felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule,
90 bool vlan_filtering)
91 {
92 if (vlan_filtering)
93 outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG;
94 else
95 outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG;
96 }
97
98 /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
99 * the tagger can perform RX source port identification.
100 */
101 static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
102 int upstream, u16 vid,
103 bool vlan_filtering)
104 {
105 struct ocelot_vcap_filter *outer_tagging_rule;
106 struct ocelot *ocelot = ds->priv;
107 unsigned long cookie;
108 int key_length, err;
109
110 key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
111
112 outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
113 GFP_KERNEL);
114 if (!outer_tagging_rule)
115 return -ENOMEM;
116
117 cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
118
119 outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
120 outer_tagging_rule->prio = 1;
121 outer_tagging_rule->id.cookie = cookie;
122 outer_tagging_rule->id.tc_offload = false;
123 outer_tagging_rule->block_id = VCAP_ES0;
124 outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
125 outer_tagging_rule->lookup = 0;
126 outer_tagging_rule->ingress_port.value = port;
127 outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
128 outer_tagging_rule->egress_port.value = upstream;
129 outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
130 outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
131 outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
132 outer_tagging_rule->action.tag_a_vid_sel = 1;
133 outer_tagging_rule->action.vid_a_val = vid;
134 felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
135 outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q;
136 /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also
137 * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to
138 * the classified VID, which we need to see in the DSA tagger's receive
139 * path. Note: the inner tag is only visible in the packet when pushed
140 * (push_inner_tag == OCELOT_ES0_TAG).
141 */
142
143 err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
144 if (err)
145 kfree(outer_tagging_rule);
146
147 return err;
148 }
149
150 static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
151 int upstream, u16 vid)
152 {
153 struct ocelot_vcap_filter *outer_tagging_rule;
154 struct ocelot_vcap_block *block_vcap_es0;
155 struct ocelot *ocelot = ds->priv;
156 unsigned long cookie;
157
158 block_vcap_es0 = &ocelot->block[VCAP_ES0];
159 cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
160
161 outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
162 cookie, false);
163 if (!outer_tagging_rule)
164 return -ENOENT;
165
166 return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
167 }
168
169 /* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
170 * rules for steering those tagged packets towards the correct destination port
171 */
172 static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
173 u16 vid)
174 {
175 struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
176 unsigned long cpu_ports = dsa_cpu_ports(ds);
177 struct ocelot *ocelot = ds->priv;
178 unsigned long cookie;
179 int err;
180
181 untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
182 if (!untagging_rule)
183 return -ENOMEM;
184
185 redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
186 if (!redirect_rule) {
187 kfree(untagging_rule);
188 return -ENOMEM;
189 }
190
191 cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
192
193 untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
194 untagging_rule->ingress_port_mask = cpu_ports;
195 untagging_rule->vlan.vid.value = vid;
196 untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
197 untagging_rule->prio = 1;
198 untagging_rule->id.cookie = cookie;
199 untagging_rule->id.tc_offload = false;
200 untagging_rule->block_id = VCAP_IS1;
201 untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
202 untagging_rule->lookup = 0;
203 untagging_rule->action.vlan_pop_cnt_ena = true;
204 untagging_rule->action.vlan_pop_cnt = 1;
205 untagging_rule->action.pag_override_mask = 0xff;
206 untagging_rule->action.pag_val = port;
207
208 err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
209 if (err) {
210 kfree(untagging_rule);
211 kfree(redirect_rule);
212 return err;
213 }
214
215 cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
216
217 redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
218 redirect_rule->ingress_port_mask = cpu_ports;
219 redirect_rule->pag = port;
220 redirect_rule->prio = 1;
221 redirect_rule->id.cookie = cookie;
222 redirect_rule->id.tc_offload = false;
223 redirect_rule->block_id = VCAP_IS2;
224 redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
225 redirect_rule->lookup = 0;
226 redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
227 redirect_rule->action.port_mask = BIT(port);
228
229 err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
230 if (err) {
231 ocelot_vcap_filter_del(ocelot, untagging_rule);
232 kfree(redirect_rule);
233 return err;
234 }
235
236 return 0;
237 }
238
239 static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
240 {
241 struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
242 struct ocelot_vcap_block *block_vcap_is1;
243 struct ocelot_vcap_block *block_vcap_is2;
244 struct ocelot *ocelot = ds->priv;
245 unsigned long cookie;
246 int err;
247
248 block_vcap_is1 = &ocelot->block[VCAP_IS1];
249 block_vcap_is2 = &ocelot->block[VCAP_IS2];
250
251 cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
252 untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
253 cookie, false);
254 if (!untagging_rule)
255 return -ENOENT;
256
257 err = ocelot_vcap_filter_del(ocelot, untagging_rule);
258 if (err)
259 return err;
260
261 cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
262 redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
263 cookie, false);
264 if (!redirect_rule)
265 return -ENOENT;
266
267 return ocelot_vcap_filter_del(ocelot, redirect_rule);
268 }
269
270 static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
271 u16 flags)
272 {
273 struct dsa_port *dp = dsa_to_port(ds, port);
274 struct dsa_port *cpu_dp;
275 int err;
276
277 /* tag_8021q.c assumes we are implementing this via port VLAN
278 * membership, which we aren't. So we don't need to add any VCAP filter
279 * for the CPU port.
280 */
281 if (!dsa_port_is_user(dp))
282 return 0;
283
284 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
285 err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
286 dsa_port_is_vlan_filtering(dp));
287 if (err)
288 return err;
289 }
290
291 err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
292 if (err)
293 goto add_tx_failed;
294
295 return 0;
296
297 add_tx_failed:
298 dsa_switch_for_each_cpu_port(cpu_dp, ds)
299 felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
300
301 return err;
302 }
303
304 static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
305 {
306 struct dsa_port *dp = dsa_to_port(ds, port);
307 struct dsa_port *cpu_dp;
308 int err;
309
310 if (!dsa_port_is_user(dp))
311 return 0;
312
313 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
314 err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
315 if (err)
316 return err;
317 }
318
319 err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
320 if (err)
321 goto del_tx_failed;
322
323 return 0;
324
325 del_tx_failed:
326 dsa_switch_for_each_cpu_port(cpu_dp, ds)
327 felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
328 dsa_port_is_vlan_filtering(dp));
329
330 return err;
331 }
332
333 static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port,
334 bool vlan_filtering)
335 {
336 struct ocelot_vcap_filter *outer_tagging_rule;
337 struct ocelot_vcap_block *block_vcap_es0;
338 struct ocelot *ocelot = ds->priv;
339 struct dsa_port *cpu_dp;
340 unsigned long cookie;
341 int err;
342
343 block_vcap_es0 = &ocelot->block[VCAP_ES0];
344
345 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
346 cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port,
347 cpu_dp->index);
348
349 outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
350 cookie, false);
351
352 felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
353
354 err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule);
355 if (err)
356 return err;
357 }
358
359 return 0;
360 }
361
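/* Determine the CPU port towards which a trapped packet should be redirected,
 * based on the CPU port assigned to the first port in the trap's ingress port
 * mask. Returns -1 if the trap matches no ingress port.
 */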
362 static int felix_trap_get_cpu_port(struct dsa_switch *ds,
363 const struct ocelot_vcap_filter *trap)
364 {
365 struct dsa_port *dp;
366 int first_port;
367
368 if (WARN_ON(!trap->ingress_port_mask))
369 return -1;
370
371 first_port = __ffs(trap->ingress_port_mask);
372 dp = dsa_to_port(ds, first_port);
373
374 return dp->cpu_dp->index;
375 }
376
377 /* On switches with no extraction IRQ wired, trapped packets need to be
378 * replicated over Ethernet as well, otherwise we'd get no notification of
379 * their arrival when using the ocelot-8021q tagging protocol.
380 */
381 static int felix_update_trapping_destinations(struct dsa_switch *ds,
382 bool using_tag_8021q)
383 {
384 struct ocelot *ocelot = ds->priv;
385 struct felix *felix = ocelot_to_felix(ocelot);
386 struct ocelot_vcap_block *block_vcap_is2;
387 struct ocelot_vcap_filter *trap;
388 enum ocelot_mask_mode mask_mode;
389 unsigned long port_mask;
390 bool cpu_copy_ena;
391 int err;
392
393 if (!felix->info->quirk_no_xtr_irq)
394 return 0;
395
396 /* We are sure that "cpu" was found, otherwise
397 * dsa_tree_setup_default_cpu() would have failed earlier.
398 */
399 block_vcap_is2 = &ocelot->block[VCAP_IS2];
400
401 /* Make sure all traps are set up for that destination */
402 list_for_each_entry(trap, &block_vcap_is2->rules, list) {
403 if (!trap->is_trap)
404 continue;
405
406 /* Figure out the current trapping destination */
407 if (using_tag_8021q) {
408 /* Redirect to the tag_8021q CPU port. If timestamps
409 * are necessary, also copy trapped packets to the CPU
410 * port module.
411 */
412 mask_mode = OCELOT_MASK_MODE_REDIRECT;
413 port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
414 cpu_copy_ena = !!trap->take_ts;
415 } else {
416 /* Trap packets only to the CPU port module, which is
417 * redirected to the NPI port (the DSA CPU port)
418 */
419 mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
420 port_mask = 0;
421 cpu_copy_ena = true;
422 }
423
424 if (trap->action.mask_mode == mask_mode &&
425 trap->action.port_mask == port_mask &&
426 trap->action.cpu_copy_ena == cpu_copy_ena)
427 continue;
428
429 trap->action.mask_mode = mask_mode;
430 trap->action.port_mask = port_mask;
431 trap->action.cpu_copy_ena = cpu_copy_ena;
432
433 err = ocelot_vcap_filter_replace(ocelot, trap);
434 if (err)
435 return err;
436 }
437
438 return 0;
439 }
440
441 /* The CPU port module is connected to the Node Processor Interface (NPI). This
442 * is the mode through which frames can be injected from and extracted to an
443 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
444 * running Linux, and this forms a DSA setup together with the enetc or fman
445 * DSA conduit.
446 */
447 static void felix_npi_port_init(struct ocelot *ocelot, int port)
448 {
449 ocelot->npi = port;
450
451 ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
452 QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
453 QSYS_EXT_CPU_CFG);
454
455 /* NPI port Injection/Extraction configuration */
456 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
457 ocelot->npi_xtr_prefix);
458 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
459 ocelot->npi_inj_prefix);
460
461 /* Disable transmission of pause frames */
462 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
463 }
464
465 static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
466 {
467 /* Restore hardware defaults */
468 int unused_port = ocelot->num_phys_ports + 2;
469
470 ocelot->npi = -1;
471
472 ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
473 QSYS_EXT_CPU_CFG);
474
475 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
476 OCELOT_TAG_PREFIX_DISABLED);
477 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
478 OCELOT_TAG_PREFIX_DISABLED);
479
480 /* Enable transmission of pause frames */
481 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
482 }
483
484 static int felix_tag_npi_setup(struct dsa_switch *ds)
485 {
486 struct dsa_port *dp, *first_cpu_dp = NULL;
487 struct ocelot *ocelot = ds->priv;
488
489 dsa_switch_for_each_user_port(dp, ds) {
490 if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
491 dev_err(ds->dev, "Multiple NPI ports not supported\n");
492 return -EINVAL;
493 }
494
495 first_cpu_dp = dp->cpu_dp;
496 }
497
498 if (!first_cpu_dp)
499 return -EINVAL;
500
501 felix_npi_port_init(ocelot, first_cpu_dp->index);
502
503 return 0;
504 }
505
506 static void felix_tag_npi_teardown(struct dsa_switch *ds)
507 {
508 struct ocelot *ocelot = ds->priv;
509
510 felix_npi_port_deinit(ocelot, ocelot->npi);
511 }
512
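/* With the NPI-based taggers, host-directed traffic goes to the CPU port
 * module, which the ocelot library represents as an extra port with the index
 * ocelot->num_phys_ports.
 */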
513 static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
514 {
515 struct ocelot *ocelot = ds->priv;
516
517 return BIT(ocelot->num_phys_ports);
518 }
519
520 static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port,
521 struct net_device *conduit,
522 struct netlink_ext_ack *extack)
523 {
524 struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
525 struct ocelot *ocelot = ds->priv;
526
527 if (netif_is_lag_master(conduit)) {
528 NL_SET_ERR_MSG_MOD(extack,
529 "LAG DSA conduit only supported using ocelot-8021q");
530 return -EOPNOTSUPP;
531 }
532
533 /* Changing the NPI port breaks user ports still assigned to the old
534 * one, so only allow it while they're down, and don't allow them to
535 * come back up until they're all changed to the new one.
536 */
537 dsa_switch_for_each_user_port(other_dp, ds) {
538 struct net_device *user = other_dp->user;
539
540 if (other_dp != dp && (user->flags & IFF_UP) &&
541 dsa_port_to_conduit(other_dp) != conduit) {
542 NL_SET_ERR_MSG_MOD(extack,
543 "Cannot change while old conduit still has users");
544 return -EOPNOTSUPP;
545 }
546 }
547
548 felix_npi_port_deinit(ocelot, ocelot->npi);
549 felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit));
550
551 return 0;
552 }
553
554 /* Alternatively to using the NPI functionality, that same hardware MAC
555 * connected internally to the enetc or fman DSA conduit can be configured to
556 * use the software-defined tag_8021q frame format. As far as the hardware is
557 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
558 * module are now disconnected from it, but can still be accessed through
559 * register-based MMIO.
560 */
561 static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
562 .setup = felix_tag_npi_setup,
563 .teardown = felix_tag_npi_teardown,
564 .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
565 .change_conduit = felix_tag_npi_change_conduit,
566 };
567
568 static int felix_tag_8021q_setup(struct dsa_switch *ds)
569 {
570 struct ocelot *ocelot = ds->priv;
571 struct dsa_port *dp;
572 int err;
573
574 err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
575 if (err)
576 return err;
577
578 dsa_switch_for_each_cpu_port(dp, ds)
579 ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
580
581 dsa_switch_for_each_user_port(dp, ds)
582 ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
583 dp->cpu_dp->index);
584
585 dsa_switch_for_each_available_port(dp, ds)
586 /* This overwrites ocelot_init():
587 * Do not forward BPDU frames to the CPU port module,
588 * for 2 reasons:
589 * - When these packets are injected from the tag_8021q
590 * CPU port, we want them to go out, not loop back
591 * into the system.
592 * - STP traffic ingressing on a user port should go to
593 * the tag_8021q CPU port, not to the hardware CPU
594 * port module.
595 */
596 ocelot_write_gix(ocelot,
597 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
598 ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
599
600 /* The ownership of the CPU port module's queues might have just been
601 * transferred to the tag_8021q tagger from the NPI-based tagger.
602 * So there might still be all sorts of crap in the queues. On the
603 * other hand, the MMIO-based matching of PTP frames is very brittle,
604 * so we need to be careful that there are no extra frames to be
605 * dequeued over MMIO, since we would never know to discard them.
606 */
607 ocelot_lock_xtr_grp_bh(ocelot, 0);
608 ocelot_drain_cpu_queue(ocelot, 0);
609 ocelot_unlock_xtr_grp_bh(ocelot, 0);
610
611 /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info
612 * about whether the received packets were VLAN-tagged on the wire,
613 * since they are always tagged on egress towards the CPU port.
614 *
615 * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges,
616 * we must work around the fallout by untagging in software to make
617 * untagged reception work more or less as expected.
618 */
619 ds->untag_vlan_aware_bridge_pvid = true;
620
621 return 0;
622 }
623
624 static void felix_tag_8021q_teardown(struct dsa_switch *ds)
625 {
626 struct ocelot *ocelot = ds->priv;
627 struct dsa_port *dp;
628
629 dsa_switch_for_each_available_port(dp, ds)
630 /* Restore the logic from ocelot_init:
631 * do not forward BPDU frames to the front ports.
632 */
633 ocelot_write_gix(ocelot,
634 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
635 ANA_PORT_CPU_FWD_BPDU_CFG,
636 dp->index);
637
638 dsa_switch_for_each_user_port(dp, ds)
639 ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
640
641 dsa_switch_for_each_cpu_port(dp, ds)
642 ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
643
644 dsa_tag_8021q_unregister(ds);
645
646 ds->untag_vlan_aware_bridge_pvid = false;
647 }
648
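/* With the ocelot-8021q tagger, host-directed traffic goes to the physical
 * Ethernet ports used as DSA CPU ports, not to the CPU port module.
 */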
649 static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
650 {
651 return dsa_cpu_ports(ds);
652 }
653
654 static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port,
655 struct net_device *conduit,
656 struct netlink_ext_ack *extack)
657 {
658 int cpu = felix_cpu_port_for_conduit(ds, conduit);
659 struct ocelot *ocelot = ds->priv;
660
661 ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
662 ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
663
664 return felix_update_trapping_destinations(ds, true);
665 }
666
667 static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
668 .setup = felix_tag_8021q_setup,
669 .teardown = felix_tag_8021q_teardown,
670 .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
671 .change_conduit = felix_tag_8021q_change_conduit,
672 };
673
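/* Add or remove the ports in @mask from the flood destination sets for
 * unknown unicast (PGID_UC), unknown multicast (PGID_MC, PGID_MCIPV4,
 * PGID_MCIPV6) and broadcast (PGID_BC) traffic.
 */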
674 static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
675 bool uc, bool mc, bool bc)
676 {
677 struct ocelot *ocelot = ds->priv;
678 unsigned long val;
679
680 val = uc ? mask : 0;
681 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
682
683 val = mc ? mask : 0;
684 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
685 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
686 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
687
688 val = bc ? mask : 0;
689 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
690 }
691
692 static void
693 felix_migrate_host_flood(struct dsa_switch *ds,
694 const struct felix_tag_proto_ops *proto_ops,
695 const struct felix_tag_proto_ops *old_proto_ops)
696 {
697 struct ocelot *ocelot = ds->priv;
698 struct felix *felix = ocelot_to_felix(ocelot);
699 unsigned long mask;
700
701 if (old_proto_ops) {
702 mask = old_proto_ops->get_host_fwd_mask(ds);
703 felix_set_host_flood(ds, mask, false, false, false);
704 }
705
706 mask = proto_ops->get_host_fwd_mask(ds);
707 felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
708 !!felix->host_flood_mc_mask, true);
709 }
710
711 static int felix_migrate_mdbs(struct dsa_switch *ds,
712 const struct felix_tag_proto_ops *proto_ops,
713 const struct felix_tag_proto_ops *old_proto_ops)
714 {
715 struct ocelot *ocelot = ds->priv;
716 unsigned long from, to;
717
718 if (!old_proto_ops)
719 return 0;
720
721 from = old_proto_ops->get_host_fwd_mask(ds);
722 to = proto_ops->get_host_fwd_mask(ds);
723
724 return ocelot_migrate_mdbs(ocelot, from, to);
725 }
726
727 /* Configure the shared hardware resources for a transition between
728 * @old_proto_ops and @proto_ops.
729 * Manual migration is needed because as far as DSA is concerned, no change of
730 * the CPU port is taking place here, just of the tagging protocol.
731 */
732 static int
733 felix_tag_proto_setup_shared(struct dsa_switch *ds,
734 const struct felix_tag_proto_ops *proto_ops,
735 const struct felix_tag_proto_ops *old_proto_ops)
736 {
737 bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
738 int err;
739
740 err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
741 if (err)
742 return err;
743
744 felix_update_trapping_destinations(ds, using_tag_8021q);
745
746 felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
747
748 return 0;
749 }
750
751 /* This always leaves the switch in a consistent state, because although the
752 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
753 * or the restoration is guaranteed to work.
754 */
755 static int felix_change_tag_protocol(struct dsa_switch *ds,
756 enum dsa_tag_protocol proto)
757 {
758 const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
759 struct ocelot *ocelot = ds->priv;
760 struct felix *felix = ocelot_to_felix(ocelot);
761 int err;
762
763 switch (proto) {
764 case DSA_TAG_PROTO_SEVILLE:
765 case DSA_TAG_PROTO_OCELOT:
766 proto_ops = &felix_tag_npi_proto_ops;
767 break;
768 case DSA_TAG_PROTO_OCELOT_8021Q:
769 proto_ops = &felix_tag_8021q_proto_ops;
770 break;
771 default:
772 return -EPROTONOSUPPORT;
773 }
774
775 old_proto_ops = felix->tag_proto_ops;
776
777 if (proto_ops == old_proto_ops)
778 return 0;
779
780 err = proto_ops->setup(ds);
781 if (err)
782 goto setup_failed;
783
784 err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
785 if (err)
786 goto setup_shared_failed;
787
788 if (old_proto_ops)
789 old_proto_ops->teardown(ds);
790
791 felix->tag_proto_ops = proto_ops;
792 felix->tag_proto = proto;
793
794 return 0;
795
796 setup_shared_failed:
797 proto_ops->teardown(ds);
798 setup_failed:
799 return err;
800 }
801
802 static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
803 int port,
804 enum dsa_tag_protocol mp)
805 {
806 struct ocelot *ocelot = ds->priv;
807 struct felix *felix = ocelot_to_felix(ocelot);
808
809 return felix->tag_proto;
810 }
811
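/* Record whether unknown unicast and multicast traffic should be flooded
 * towards the host on behalf of this user port, then re-apply the flood
 * settings to the host forwarding mask of the current tagging protocol.
 */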
812 static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
813 bool uc, bool mc)
814 {
815 struct ocelot *ocelot = ds->priv;
816 struct felix *felix = ocelot_to_felix(ocelot);
817 unsigned long mask;
818
819 if (uc)
820 felix->host_flood_uc_mask |= BIT(port);
821 else
822 felix->host_flood_uc_mask &= ~BIT(port);
823
824 if (mc)
825 felix->host_flood_mc_mask |= BIT(port);
826 else
827 felix->host_flood_mc_mask &= ~BIT(port);
828
829 mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
830 felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
831 !!felix->host_flood_mc_mask, true);
832 }
833
834 static int felix_port_change_conduit(struct dsa_switch *ds, int port,
835 struct net_device *conduit,
836 struct netlink_ext_ack *extack)
837 {
838 struct ocelot *ocelot = ds->priv;
839 struct felix *felix = ocelot_to_felix(ocelot);
840
841 return felix->tag_proto_ops->change_conduit(ds, port, conduit, extack);
842 }
843
844 static int felix_set_ageing_time(struct dsa_switch *ds,
845 unsigned int ageing_time)
846 {
847 struct ocelot *ocelot = ds->priv;
848
849 ocelot_set_ageing_time(ocelot, ageing_time);
850
851 return 0;
852 }
853
854 static void felix_port_fast_age(struct dsa_switch *ds, int port)
855 {
856 struct ocelot *ocelot = ds->priv;
857 int err;
858
859 err = ocelot_mact_flush(ocelot, port);
860 if (err)
861 dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
862 port, ERR_PTR(err));
863 }
864
865 static int felix_fdb_dump(struct dsa_switch *ds, int port,
866 dsa_fdb_dump_cb_t *cb, void *data)
867 {
868 struct ocelot *ocelot = ds->priv;
869
870 return ocelot_fdb_dump(ocelot, port, cb, data);
871 }
872
873 static int felix_fdb_add(struct dsa_switch *ds, int port,
874 const unsigned char *addr, u16 vid,
875 struct dsa_db db)
876 {
877 struct net_device *bridge_dev = felix_classify_db(db);
878 struct dsa_port *dp = dsa_to_port(ds, port);
879 struct ocelot *ocelot = ds->priv;
880
881 if (IS_ERR(bridge_dev))
882 return PTR_ERR(bridge_dev);
883
884 if (dsa_port_is_cpu(dp) && !bridge_dev &&
885 dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
886 return 0;
887
888 if (dsa_port_is_cpu(dp))
889 port = PGID_CPU;
890
891 return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
892 }
893
894 static int felix_fdb_del(struct dsa_switch *ds, int port,
895 const unsigned char *addr, u16 vid,
896 struct dsa_db db)
897 {
898 struct net_device *bridge_dev = felix_classify_db(db);
899 struct dsa_port *dp = dsa_to_port(ds, port);
900 struct ocelot *ocelot = ds->priv;
901
902 if (IS_ERR(bridge_dev))
903 return PTR_ERR(bridge_dev);
904
905 if (dsa_port_is_cpu(dp) && !bridge_dev &&
906 dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
907 return 0;
908
909 if (dsa_port_is_cpu(dp))
910 port = PGID_CPU;
911
912 return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
913 }
914
915 static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
916 const unsigned char *addr, u16 vid,
917 struct dsa_db db)
918 {
919 struct net_device *bridge_dev = felix_classify_db(db);
920 struct ocelot *ocelot = ds->priv;
921
922 if (IS_ERR(bridge_dev))
923 return PTR_ERR(bridge_dev);
924
925 return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
926 }
927
928 static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
929 const unsigned char *addr, u16 vid,
930 struct dsa_db db)
931 {
932 struct net_device *bridge_dev = felix_classify_db(db);
933 struct ocelot *ocelot = ds->priv;
934
935 if (IS_ERR(bridge_dev))
936 return PTR_ERR(bridge_dev);
937
938 return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
939 }
940
941 static int felix_mdb_add(struct dsa_switch *ds, int port,
942 const struct switchdev_obj_port_mdb *mdb,
943 struct dsa_db db)
944 {
945 struct net_device *bridge_dev = felix_classify_db(db);
946 struct ocelot *ocelot = ds->priv;
947
948 if (IS_ERR(bridge_dev))
949 return PTR_ERR(bridge_dev);
950
951 if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
952 dsa_mdb_present_in_other_db(ds, port, mdb, db))
953 return 0;
954
955 if (port == ocelot->npi)
956 port = ocelot->num_phys_ports;
957
958 return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
959 }
960
961 static int felix_mdb_del(struct dsa_switch *ds, int port,
962 const struct switchdev_obj_port_mdb *mdb,
963 struct dsa_db db)
964 {
965 struct net_device *bridge_dev = felix_classify_db(db);
966 struct ocelot *ocelot = ds->priv;
967
968 if (IS_ERR(bridge_dev))
969 return PTR_ERR(bridge_dev);
970
971 if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
972 dsa_mdb_present_in_other_db(ds, port, mdb, db))
973 return 0;
974
975 if (port == ocelot->npi)
976 port = ocelot->num_phys_ports;
977
978 return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
979 }
980
981 static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
982 u8 state)
983 {
984 struct ocelot *ocelot = ds->priv;
985
986 return ocelot_bridge_stp_state_set(ocelot, port, state);
987 }
988
989 static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
990 struct switchdev_brport_flags val,
991 struct netlink_ext_ack *extack)
992 {
993 struct ocelot *ocelot = ds->priv;
994
995 return ocelot_port_pre_bridge_flags(ocelot, port, val);
996 }
997
998 static int felix_bridge_flags(struct dsa_switch *ds, int port,
999 struct switchdev_brport_flags val,
1000 struct netlink_ext_ack *extack)
1001 {
1002 struct ocelot *ocelot = ds->priv;
1003
1004 if (port == ocelot->npi)
1005 port = ocelot->num_phys_ports;
1006
1007 ocelot_port_bridge_flags(ocelot, port, val);
1008
1009 return 0;
1010 }
1011
1012 static int felix_bridge_join(struct dsa_switch *ds, int port,
1013 struct dsa_bridge bridge, bool *tx_fwd_offload,
1014 struct netlink_ext_ack *extack)
1015 {
1016 struct ocelot *ocelot = ds->priv;
1017
1018 return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
1019 extack);
1020 }
1021
1022 static void felix_bridge_leave(struct dsa_switch *ds, int port,
1023 struct dsa_bridge bridge)
1024 {
1025 struct ocelot *ocelot = ds->priv;
1026
1027 ocelot_port_bridge_leave(ocelot, port, bridge.dev);
1028 }
1029
1030 static int felix_lag_join(struct dsa_switch *ds, int port,
1031 struct dsa_lag lag,
1032 struct netdev_lag_upper_info *info,
1033 struct netlink_ext_ack *extack)
1034 {
1035 struct ocelot *ocelot = ds->priv;
1036 int err;
1037
1038 err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
1039 if (err)
1040 return err;
1041
1042 /* Update the logical LAG port that serves as tag_8021q CPU port */
1043 if (!dsa_is_cpu_port(ds, port))
1044 return 0;
1045
1046 return felix_port_change_conduit(ds, port, lag.dev, extack);
1047 }
1048
1049 static int felix_lag_leave(struct dsa_switch *ds, int port,
1050 struct dsa_lag lag)
1051 {
1052 struct ocelot *ocelot = ds->priv;
1053
1054 ocelot_port_lag_leave(ocelot, port, lag.dev);
1055
1056 /* Update the logical LAG port that serves as tag_8021q CPU port */
1057 if (!dsa_is_cpu_port(ds, port))
1058 return 0;
1059
1060 return felix_port_change_conduit(ds, port, lag.dev, NULL);
1061 }
1062
1063 static int felix_lag_change(struct dsa_switch *ds, int port)
1064 {
1065 struct dsa_port *dp = dsa_to_port(ds, port);
1066 struct ocelot *ocelot = ds->priv;
1067
1068 ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);
1069
1070 return 0;
1071 }
1072
1073 static int felix_vlan_prepare(struct dsa_switch *ds, int port,
1074 const struct switchdev_obj_port_vlan *vlan,
1075 struct netlink_ext_ack *extack)
1076 {
1077 struct ocelot *ocelot = ds->priv;
1078 u16 flags = vlan->flags;
1079
1080 /* Ocelot switches copy frames as-is to the CPU, so the flags:
1081 * egress-untagged or not, pvid or not, make no difference. This
1082 * behavior is already better than what DSA just tries to approximate
1083 * when it installs the VLAN with the same flags on the CPU port.
1084 * Just accept any configuration, and don't let ocelot deny installing
1085 * multiple native VLANs on the NPI port, because the switch doesn't
1086 * look at the port tag settings towards the NPI interface anyway.
1087 */
1088 if (port == ocelot->npi)
1089 return 0;
1090
1091 return ocelot_vlan_prepare(ocelot, port, vlan->vid,
1092 flags & BRIDGE_VLAN_INFO_PVID,
1093 flags & BRIDGE_VLAN_INFO_UNTAGGED,
1094 extack);
1095 }
1096
1097 static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
1098 struct netlink_ext_ack *extack)
1099 {
1100 struct ocelot *ocelot = ds->priv;
1101 bool using_tag_8021q;
1102 struct felix *felix;
1103 int err;
1104
1105 err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
1106 if (err)
1107 return err;
1108
1109 felix = ocelot_to_felix(ocelot);
1110 using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
1111 if (using_tag_8021q) {
1112 err = felix_update_tag_8021q_rx_rules(ds, port, enabled);
1113 if (err)
1114 return err;
1115 }
1116
1117 return 0;
1118 }
1119
1120 static int felix_vlan_add(struct dsa_switch *ds, int port,
1121 const struct switchdev_obj_port_vlan *vlan,
1122 struct netlink_ext_ack *extack)
1123 {
1124 struct ocelot *ocelot = ds->priv;
1125 u16 flags = vlan->flags;
1126 int err;
1127
1128 err = felix_vlan_prepare(ds, port, vlan, extack);
1129 if (err)
1130 return err;
1131
1132 return ocelot_vlan_add(ocelot, port, vlan->vid,
1133 flags & BRIDGE_VLAN_INFO_PVID,
1134 flags & BRIDGE_VLAN_INFO_UNTAGGED);
1135 }
1136
1137 static int felix_vlan_del(struct dsa_switch *ds, int port,
1138 const struct switchdev_obj_port_vlan *vlan)
1139 {
1140 struct ocelot *ocelot = ds->priv;
1141
1142 return ocelot_vlan_del(ocelot, port, vlan->vid);
1143 }
1144
1145 static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
1146 struct phylink_config *config)
1147 {
1148 struct ocelot *ocelot = ds->priv;
1149
1150 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1151 MAC_10 | MAC_100 | MAC_1000FD |
1152 MAC_2500FD;
1153
1154 __set_bit(ocelot->ports[port]->phy_mode,
1155 config->supported_interfaces);
1156 }
1157
1158 static void felix_phylink_mac_config(struct phylink_config *config,
1159 unsigned int mode,
1160 const struct phylink_link_state *state)
1161 {
1162 struct dsa_port *dp = dsa_phylink_to_port(config);
1163 struct ocelot *ocelot = dp->ds->priv;
1164 int port = dp->index;
1165 struct felix *felix;
1166
1167 felix = ocelot_to_felix(ocelot);
1168
1169 if (felix->info->phylink_mac_config)
1170 felix->info->phylink_mac_config(ocelot, port, mode, state);
1171 }
1172
1173 static struct phylink_pcs *
1174 felix_phylink_mac_select_pcs(struct phylink_config *config,
1175 phy_interface_t iface)
1176 {
1177 struct dsa_port *dp = dsa_phylink_to_port(config);
1178 struct ocelot *ocelot = dp->ds->priv;
1179 struct phylink_pcs *pcs = NULL;
1180 int port = dp->index;
1181 struct felix *felix;
1182
1183 felix = ocelot_to_felix(ocelot);
1184
1185 if (felix->pcs && felix->pcs[port])
1186 pcs = felix->pcs[port];
1187
1188 return pcs;
1189 }
1190
1191 static void felix_phylink_mac_link_down(struct phylink_config *config,
1192 unsigned int link_an_mode,
1193 phy_interface_t interface)
1194 {
1195 struct dsa_port *dp = dsa_phylink_to_port(config);
1196 struct ocelot *ocelot = dp->ds->priv;
1197 int port = dp->index;
1198 struct felix *felix;
1199
1200 felix = ocelot_to_felix(ocelot);
1201
1202 ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
1203 felix->info->quirks);
1204 }
1205
1206 static void felix_phylink_mac_link_up(struct phylink_config *config,
1207 struct phy_device *phydev,
1208 unsigned int link_an_mode,
1209 phy_interface_t interface,
1210 int speed, int duplex,
1211 bool tx_pause, bool rx_pause)
1212 {
1213 struct dsa_port *dp = dsa_phylink_to_port(config);
1214 struct ocelot *ocelot = dp->ds->priv;
1215 int port = dp->index;
1216 struct felix *felix;
1217
1218 felix = ocelot_to_felix(ocelot);
1219
1220 ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
1221 interface, speed, duplex, tx_pause, rx_pause,
1222 felix->info->quirks);
1223
1224 if (felix->info->port_sched_speed_set)
1225 felix->info->port_sched_speed_set(ocelot, port, speed);
1226 }
1227
1228 static int felix_port_enable(struct dsa_switch *ds, int port,
1229 struct phy_device *phydev)
1230 {
1231 struct dsa_port *dp = dsa_to_port(ds, port);
1232 struct ocelot *ocelot = ds->priv;
1233
1234 if (!dsa_port_is_user(dp))
1235 return 0;
1236
1237 if (ocelot->npi >= 0) {
1238 struct net_device *conduit = dsa_port_to_conduit(dp);
1239
1240 if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) {
1241 dev_err(ds->dev, "Multiple conduits are not allowed\n");
1242 return -EINVAL;
1243 }
1244 }
1245
1246 return 0;
1247 }
1248
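/* Enable QoS classification based on the PCP and DEI bits of the VLAN tag,
 * and program the 16 {DEI, PCP} combinations such that the QoS class tracks
 * the PCP value and the drop precedence tracks the DEI bit.
 */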
1249 static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
1250 {
1251 int i;
1252
1253 ocelot_rmw_gix(ocelot,
1254 ANA_PORT_QOS_CFG_QOS_PCP_ENA,
1255 ANA_PORT_QOS_CFG_QOS_PCP_ENA,
1256 ANA_PORT_QOS_CFG,
1257 port);
1258
1259 for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
1260 ocelot_rmw_ix(ocelot,
1261 (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
1262 ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
1263 ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
1264 ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
1265 ANA_PORT_PCP_DEI_MAP,
1266 port, i);
1267 }
1268 }
1269
1270 static void felix_get_stats64(struct dsa_switch *ds, int port,
1271 struct rtnl_link_stats64 *stats)
1272 {
1273 struct ocelot *ocelot = ds->priv;
1274
1275 ocelot_port_get_stats64(ocelot, port, stats);
1276 }
1277
1278 static void felix_get_pause_stats(struct dsa_switch *ds, int port,
1279 struct ethtool_pause_stats *pause_stats)
1280 {
1281 struct ocelot *ocelot = ds->priv;
1282
1283 ocelot_port_get_pause_stats(ocelot, port, pause_stats);
1284 }
1285
1286 static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
1287 struct ethtool_rmon_stats *rmon_stats,
1288 const struct ethtool_rmon_hist_range **ranges)
1289 {
1290 struct ocelot *ocelot = ds->priv;
1291
1292 ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
1293 }
1294
1295 static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
1296 struct ethtool_eth_ctrl_stats *ctrl_stats)
1297 {
1298 struct ocelot *ocelot = ds->priv;
1299
1300 ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
1301 }
1302
1303 static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
1304 struct ethtool_eth_mac_stats *mac_stats)
1305 {
1306 struct ocelot *ocelot = ds->priv;
1307
1308 ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
1309 }
1310
1311 static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
1312 struct ethtool_eth_phy_stats *phy_stats)
1313 {
1314 struct ocelot *ocelot = ds->priv;
1315
1316 ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
1317 }
1318
1319 static void felix_get_strings(struct dsa_switch *ds, int port,
1320 u32 stringset, u8 *data)
1321 {
1322 struct ocelot *ocelot = ds->priv;
1323
1324 return ocelot_get_strings(ocelot, port, stringset, data);
1325 }
1326
1327 static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
1328 {
1329 struct ocelot *ocelot = ds->priv;
1330
1331 ocelot_get_ethtool_stats(ocelot, port, data);
1332 }
1333
1334 static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
1335 {
1336 struct ocelot *ocelot = ds->priv;
1337
1338 return ocelot_get_sset_count(ocelot, port, sset);
1339 }
1340
1341 static int felix_get_ts_info(struct dsa_switch *ds, int port,
1342 struct kernel_ethtool_ts_info *info)
1343 {
1344 struct ocelot *ocelot = ds->priv;
1345
1346 return ocelot_get_ts_info(ocelot, port, info);
1347 }
1348
1349 static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
1350 [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
1351 [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
1352 [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
1353 [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
1354 [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
1355 [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
1356 };
1357
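/* Check the phy-mode requested through the device tree against the bitmap of
 * port modes that the per-SoC felix_info declares as supported on this port.
 */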
1358 static int felix_validate_phy_mode(struct felix *felix, int port,
1359 phy_interface_t phy_mode)
1360 {
1361 u32 modes = felix->info->port_modes[port];
1362
1363 if (felix_phy_match_table[phy_mode] & modes)
1364 return 0;
1365 return -EOPNOTSUPP;
1366 }
1367
1368 static int felix_parse_ports_node(struct felix *felix,
1369 struct device_node *ports_node,
1370 phy_interface_t *port_phy_modes)
1371 {
1372 struct device *dev = felix->ocelot.dev;
1373
1374 for_each_available_child_of_node_scoped(ports_node, child) {
1375 phy_interface_t phy_mode;
1376 u32 port;
1377 int err;
1378
1379 /* Get switch port number from DT */
1380 if (of_property_read_u32(child, "reg", &port) < 0) {
1381 dev_err(dev, "Port number not defined in device tree "
1382 "(property \"reg\")\n");
1383 return -ENODEV;
1384 }
1385
1386 /* Get PHY mode from DT */
1387 err = of_get_phy_mode(child, &phy_mode);
1388 if (err) {
1389 dev_err(dev, "Failed to read phy-mode or "
1390 "phy-interface-type property for port %d\n",
1391 port);
1392 return -ENODEV;
1393 }
1394
1395 err = felix_validate_phy_mode(felix, port, phy_mode);
1396 if (err < 0) {
1397 dev_info(dev, "Unsupported PHY mode %s on port %d\n",
1398 phy_modes(phy_mode), port);
1399
1400 /* Leave port_phy_modes[port] = 0, which is also
1401 * PHY_INTERFACE_MODE_NA. This will perform a
1402 * best-effort to bring up as many ports as possible.
1403 */
1404 continue;
1405 }
1406
1407 port_phy_modes[port] = phy_mode;
1408 }
1409
1410 return 0;
1411 }
1412
1413 static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
1414 {
1415 struct device *dev = felix->ocelot.dev;
1416 struct device_node *switch_node;
1417 struct device_node *ports_node;
1418 int err;
1419
1420 switch_node = dev->of_node;
1421
1422 ports_node = of_get_child_by_name(switch_node, "ports");
1423 if (!ports_node)
1424 ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
1425 if (!ports_node) {
1426 dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
1427 return -ENODEV;
1428 }
1429
1430 err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
1431 of_node_put(ports_node);
1432
1433 return err;
1434 }
1435
1436 static struct regmap *felix_request_regmap_by_name(struct felix *felix,
1437 const char *resource_name)
1438 {
1439 struct ocelot *ocelot = &felix->ocelot;
1440 struct resource res;
1441 int i;
1442
1443 /* In an MFD configuration, regmaps are registered directly to the
1444 * parent device before the child devices are probed, so there is no
1445 * need to initialize a new one.
1446 */
1447 if (!felix->info->resources)
1448 return dev_get_regmap(ocelot->dev->parent, resource_name);
1449
1450 for (i = 0; i < felix->info->num_resources; i++) {
1451 if (strcmp(resource_name, felix->info->resources[i].name))
1452 continue;
1453
1454 memcpy(&res, &felix->info->resources[i], sizeof(res));
1455 res.start += felix->switch_base;
1456 res.end += felix->switch_base;
1457
1458 return ocelot_regmap_init(ocelot, &res);
1459 }
1460
1461 return ERR_PTR(-ENOENT);
1462 }
1463
1464 static struct regmap *felix_request_regmap(struct felix *felix,
1465 enum ocelot_target target)
1466 {
1467 const char *resource_name = felix->info->resource_names[target];
1468
1469 /* If the driver didn't provide a resource name for the target,
1470 * the resource is optional.
1471 */
1472 if (!resource_name)
1473 return NULL;
1474
1475 return felix_request_regmap_by_name(felix, resource_name);
1476 }
1477
1478 static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
1479 {
1480 char resource_name[32];
1481
1482 sprintf(resource_name, "port%d", port);
1483
1484 return felix_request_regmap_by_name(felix, resource_name);
1485 }
1486
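/* Allocate the ocelot core structures: parse the port properties from the
 * device tree, map the register targets and per-port register regions into
 * regmaps, and let the per-SoC code allocate its MDIO bus, if it provides an
 * mdio_bus_alloc() method.
 */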
1487 static int felix_init_structs(struct felix *felix, int num_phys_ports)
1488 {
1489 struct ocelot *ocelot = &felix->ocelot;
1490 phy_interface_t *port_phy_modes;
1491 struct regmap *target;
1492 int port, i, err;
1493
1494 ocelot->num_phys_ports = num_phys_ports;
1495 ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
1496 sizeof(struct ocelot_port *), GFP_KERNEL);
1497 if (!ocelot->ports)
1498 return -ENOMEM;
1499
1500 ocelot->map = felix->info->map;
1501 ocelot->num_mact_rows = felix->info->num_mact_rows;
1502 ocelot->vcap = felix->info->vcap;
1503 ocelot->vcap_pol.base = felix->info->vcap_pol_base;
1504 ocelot->vcap_pol.max = felix->info->vcap_pol_max;
1505 ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
1506 ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
1507 ocelot->ops = felix->info->ops;
1508 ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
1509 ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
1510 ocelot->devlink = felix->ds->devlink;
1511
1512 port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
1513 GFP_KERNEL);
1514 if (!port_phy_modes)
1515 return -ENOMEM;
1516
1517 err = felix_parse_dt(felix, port_phy_modes);
1518 if (err) {
1519 kfree(port_phy_modes);
1520 return err;
1521 }
1522
1523 for (i = 0; i < TARGET_MAX; i++) {
1524 target = felix_request_regmap(felix, i);
1525 if (IS_ERR(target)) {
1526 dev_err(ocelot->dev,
1527 "Failed to map device memory space: %pe\n",
1528 target);
1529 kfree(port_phy_modes);
1530 return PTR_ERR(target);
1531 }
1532
1533 ocelot->targets[i] = target;
1534 }
1535
1536 err = ocelot_regfields_init(ocelot, felix->info->regfields);
1537 if (err) {
1538 dev_err(ocelot->dev, "failed to init reg fields map\n");
1539 kfree(port_phy_modes);
1540 return err;
1541 }
1542
1543 for (port = 0; port < num_phys_ports; port++) {
1544 struct ocelot_port *ocelot_port;
1545
1546 ocelot_port = devm_kzalloc(ocelot->dev,
1547 sizeof(struct ocelot_port),
1548 GFP_KERNEL);
1549 if (!ocelot_port) {
1550 dev_err(ocelot->dev,
1551 "failed to allocate port memory\n");
1552 kfree(port_phy_modes);
1553 return -ENOMEM;
1554 }
1555
1556 target = felix_request_port_regmap(felix, port);
1557 if (IS_ERR(target)) {
1558 dev_err(ocelot->dev,
1559 "Failed to map memory space for port %d: %pe\n",
1560 port, target);
1561 kfree(port_phy_modes);
1562 return PTR_ERR(target);
1563 }
1564
1565 ocelot_port->phy_mode = port_phy_modes[port];
1566 ocelot_port->ocelot = ocelot;
1567 ocelot_port->target = target;
1568 ocelot_port->index = port;
1569 ocelot->ports[port] = ocelot_port;
1570 }
1571
1572 kfree(port_phy_modes);
1573
1574 if (felix->info->mdio_bus_alloc) {
1575 err = felix->info->mdio_bus_alloc(ocelot);
1576 if (err < 0)
1577 return err;
1578 }
1579
1580 return 0;
1581 }
1582
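/* If @skb has a clone queued for two-step TX timestamping, unlink that clone
 * from the port's tx_skbs queue, since no timestamp will be delivered for it.
 * Used on the injection error path of the deferred xmit worker.
 */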
1583 static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
1584 struct sk_buff *skb)
1585 {
1586 struct ocelot_port *ocelot_port = ocelot->ports[port];
1587 struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
1588 struct sk_buff *skb_match = NULL, *skb_tmp;
1589 unsigned long flags;
1590
1591 if (!clone)
1592 return;
1593
1594 spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
1595
1596 skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
1597 if (skb != clone)
1598 continue;
1599 __skb_unlink(skb, &ocelot_port->tx_skbs);
1600 skb_match = skb;
1601 break;
1602 }
1603
1604 spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
1605
1606 WARN_ONCE(!skb_match,
1607 "Could not find skb clone in TX timestamping list\n");
1608 }
1609
1610 #define work_to_xmit_work(w) \
1611 container_of((w), struct felix_deferred_xmit_work, work)
1612
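/* Deferred transmit handler used by the ocelot-8021q tagger for frames that
 * carry a rewriter operation (PTP timestamping requests): they are injected
 * through the register-based interface of the CPU port module instead of
 * being sent over the DSA conduit.
 */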
1613 static void felix_port_deferred_xmit(struct kthread_work *work)
1614 {
1615 struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
1616 struct dsa_switch *ds = xmit_work->dp->ds;
1617 struct sk_buff *skb = xmit_work->skb;
1618 u32 rew_op = ocelot_ptp_rew_op(skb);
1619 struct ocelot *ocelot = ds->priv;
1620 int port = xmit_work->dp->index;
1621 int retries = 10;
1622
1623 ocelot_lock_inj_grp(ocelot, 0);
1624
1625 do {
1626 if (ocelot_can_inject(ocelot, 0))
1627 break;
1628
1629 cpu_relax();
1630 } while (--retries);
1631
1632 if (!retries) {
1633 ocelot_unlock_inj_grp(ocelot, 0);
1634 dev_err(ocelot->dev, "port %d failed to inject skb\n",
1635 port);
1636 ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
1637 kfree_skb(skb);
1638 return;
1639 }
1640
1641 ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
1642
1643 ocelot_unlock_inj_grp(ocelot, 0);
1644
1645 consume_skb(skb);
1646 kfree(xmit_work);
1647 }
1648
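/* Hook up tagger-specific state when DSA connects a tagging protocol
 * driver to this switch. Only the ocelot-8021q tagger needs anything:
 * the deferred xmit handler defined above.
 */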
1649 static int felix_connect_tag_protocol(struct dsa_switch *ds,
1650 enum dsa_tag_protocol proto)
1651 {
1652 struct ocelot_8021q_tagger_data *tagger_data;
1653
1654 switch (proto) {
1655 case DSA_TAG_PROTO_OCELOT_8021Q:
1656 tagger_data = ocelot_8021q_tagger_data(ds);
1657 tagger_data->xmit_work_fn = felix_port_deferred_xmit;
1658 return 0;
1659 case DSA_TAG_PROTO_OCELOT:
1660 case DSA_TAG_PROTO_SEVILLE:
1661 return 0;
1662 default:
1663 return -EPROTONOSUPPORT;
1664 }
1665 }
1666
1667 static int felix_setup(struct dsa_switch *ds)
1668 {
1669 struct ocelot *ocelot = ds->priv;
1670 struct felix *felix = ocelot_to_felix(ocelot);
1671 struct dsa_port *dp;
1672 int err;
1673
1674 err = felix_init_structs(felix, ds->num_ports);
1675 if (err)
1676 return err;
1677
1678 if (ocelot->targets[HSIO])
1679 ocelot_pll5_init(ocelot);
1680
1681 err = ocelot_init(ocelot);
1682 if (err)
1683 goto out_mdiobus_free;
1684
1685 if (ocelot->ptp) {
1686 err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
1687 if (err) {
1688 dev_err(ocelot->dev,
1689 "Timestamp initialization failed\n");
1690 ocelot->ptp = 0;
1691 }
1692 }
1693
1694 dsa_switch_for_each_available_port(dp, ds) {
1695 ocelot_init_port(ocelot, dp->index);
1696
1697 if (felix->info->configure_serdes)
1698 felix->info->configure_serdes(ocelot, dp->index,
1699 dp->dn);
1700
1701 /* Set the default QoS classification based on the PCP and
1702 * DEI bits of the VLAN tag.
1703 */
1704 felix_port_qos_map_init(ocelot, dp->index);
1705 }
1706
1707 if (felix->info->request_irq) {
1708 err = felix->info->request_irq(ocelot);
1709 if (err) {
1710 dev_err(ocelot->dev, "Failed to request IRQ: %pe\n",
1711 ERR_PTR(err));
1712 goto out_deinit_ports;
1713 }
1714 }
1715
1716 err = ocelot_devlink_sb_register(ocelot);
1717 if (err)
1718 goto out_deinit_ports;
1719
1720 /* The initial tag protocol is NPI, which won't fail during initial
1721 * setup, so there's no real point in checking for errors.
1722 */
1723 felix_change_tag_protocol(ds, felix->tag_proto);
1724
1725 ds->mtu_enforcement_ingress = true;
1726 ds->assisted_learning_on_cpu_port = true;
1727 ds->fdb_isolation = true;
1728 ds->max_num_bridges = ds->num_ports;
1729
1730 return 0;
1731
1732 out_deinit_ports:
1733 dsa_switch_for_each_available_port(dp, ds)
1734 ocelot_deinit_port(ocelot, dp->index);
1735
1736 ocelot_deinit_timestamp(ocelot);
1737 ocelot_deinit(ocelot);
1738
1739 out_mdiobus_free:
1740 if (felix->info->mdio_bus_free)
1741 felix->info->mdio_bus_free(ocelot);
1742
1743 return err;
1744 }
1745
1746 static void felix_teardown(struct dsa_switch *ds)
1747 {
1748 struct ocelot *ocelot = ds->priv;
1749 struct felix *felix = ocelot_to_felix(ocelot);
1750 struct dsa_port *dp;
1751
1752 rtnl_lock();
1753 if (felix->tag_proto_ops)
1754 felix->tag_proto_ops->teardown(ds);
1755 rtnl_unlock();
1756
1757 dsa_switch_for_each_available_port(dp, ds)
1758 ocelot_deinit_port(ocelot, dp->index);
1759
1760 ocelot_devlink_sb_unregister(ocelot);
1761 ocelot_deinit_timestamp(ocelot);
1762 ocelot_deinit(ocelot);
1763
1764 if (felix->info->mdio_bus_free)
1765 felix->info->mdio_bus_free(ocelot);
1766 }
1767
1768 static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
1769 struct ifreq *ifr)
1770 {
1771 struct ocelot *ocelot = ds->priv;
1772
1773 return ocelot_hwstamp_get(ocelot, port, ifr);
1774 }
1775
1776 static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
1777 struct ifreq *ifr)
1778 {
1779 struct ocelot *ocelot = ds->priv;
1780 struct felix *felix = ocelot_to_felix(ocelot);
1781 bool using_tag_8021q;
1782 int err;
1783
1784 err = ocelot_hwstamp_set(ocelot, port, ifr);
1785 if (err)
1786 return err;
1787
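/* Changing the timestamping configuration may install or remove PTP
 * traps; make sure trapped packets keep reaching the CPU through the
 * path that matches the tagging protocol currently in use.
 */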
1788 using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
1789
1790 return felix_update_trapping_destinations(ds, using_tag_8021q);
1791 }
1792
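/* On switches where the CPU port module has no extraction IRQ
 * (quirk_no_xtr_irq), frames trapped to the CPU queue must be polled
 * over MMIO. When the ocelot-8021q protocol is in use, piggyback that
 * polling onto the RX path of frames received over Ethernet.
 */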
1793 static bool felix_check_xtr_pkt(struct ocelot *ocelot)
1794 {
1795 struct felix *felix = ocelot_to_felix(ocelot);
1796 int err = 0, grp = 0;
1797
1798 if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
1799 return false;
1800
1801 if (!felix->info->quirk_no_xtr_irq)
1802 return false;
1803
1804 ocelot_lock_xtr_grp(ocelot, grp);
1805
1806 while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
1807 struct sk_buff *skb;
1808 unsigned int type;
1809
1810 err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
1811 if (err)
1812 goto out;
1813
1814 /* All PTP frames are trapped to the CPU port module, but
1815 * felix_rxtstamp() only gets called for event frames.
1816 * To avoid delivering duplicate general message frames to
1817 * the stack, run a second BPF classifier here and drop
1818 * those general messages.
1819 */
1820 __skb_push(skb, ETH_HLEN);
1821
1822 type = ptp_classify_raw(skb);
1823
1824 __skb_pull(skb, ETH_HLEN);
1825
1826 if (type == PTP_CLASS_NONE) {
1827 kfree_skb(skb);
1828 continue;
1829 }
1830
1831 netif_rx(skb);
1832 }
1833
1834 out:
1835 if (err < 0) {
1836 dev_err_ratelimited(ocelot->dev,
1837 "Error during packet extraction: %pe\n",
1838 ERR_PTR(err));
1839 ocelot_drain_cpu_queue(ocelot, 0);
1840 }
1841
1842 ocelot_unlock_xtr_grp(ocelot, grp);
1843
1844 return true;
1845 }
1846
1847 static bool felix_rxtstamp(struct dsa_switch *ds, int port,
1848 struct sk_buff *skb, unsigned int type)
1849 {
1850 u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
1851 struct skb_shared_hwtstamps *shhwtstamps;
1852 struct ocelot *ocelot = ds->priv;
1853 struct timespec64 ts;
1854 u32 tstamp_hi;
1855 u64 tstamp;
1856
1857 switch (type & PTP_CLASS_PMASK) {
1858 case PTP_CLASS_L2:
1859 if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
1860 return false;
1861 break;
1862 case PTP_CLASS_IPV4:
1863 case PTP_CLASS_IPV6:
1864 if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
1865 return false;
1866 break;
1867 }
1868
1869 /* If the "no XTR IRQ" workaround is in use, this frame was also
1870 * trapped to the CPU port module. Return true so DSA does not
1871 * deliver this copy, free it, and let felix_check_xtr_pkt() poll
1872 * its duplicate over MMIO and inject that one into the stack.
1873 */
1874 if (felix_check_xtr_pkt(ocelot)) {
1875 kfree_skb(skb);
1876 return true;
1877 }
1878
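/* The extraction header carries only the low 32 bits of the RX
 * timestamp. Reconstruct the full value from the current PTP time,
 * correcting for a possible rollover of the low word.
 */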
1879 ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
1880 tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
1881
1882 tstamp_hi = tstamp >> 32;
1883 if ((tstamp & 0xffffffff) < tstamp_lo)
1884 tstamp_hi--;
1885
1886 tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;
1887
1888 shhwtstamps = skb_hwtstamps(skb);
1889 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
1890 shhwtstamps->hwtstamp = tstamp;
1891 return false;
1892 }
1893
1894 static void felix_txtstamp(struct dsa_switch *ds, int port,
1895 struct sk_buff *skb)
1896 {
1897 struct ocelot *ocelot = ds->priv;
1898 struct sk_buff *clone = NULL;
1899
1900 if (!ocelot->ptp)
1901 return;
1902
1903 if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
1904 dev_err_ratelimited(ds->dev,
1905 "port %d delivering skb without TX timestamp\n",
1906 port);
1907 return;
1908 }
1909
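/* If a clone was created for a two-step timestamp request, stash it in
 * the skb control block so it can later be paired with the hardware
 * timestamp, or purged if the frame ends up being dropped.
 */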
1910 if (clone)
1911 OCELOT_SKB_CB(skb)->clone = clone;
1912 }
1913
1914 static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1915 {
1916 struct ocelot *ocelot = ds->priv;
1917 struct ocelot_port *ocelot_port = ocelot->ports[port];
1918
1919 ocelot_port_set_maxlen(ocelot, port, new_mtu);
1920
1921 mutex_lock(&ocelot->fwd_domain_lock);
1922
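/* With a taprio offload installed, the guard band durations depend on
 * the port's maximum frame length, so recompute them after an MTU
 * change.
 */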
1923 if (ocelot_port->taprio && ocelot->ops->tas_guard_bands_update)
1924 ocelot->ops->tas_guard_bands_update(ocelot, port);
1925
1926 mutex_unlock(&ocelot->fwd_domain_lock);
1927
1928 return 0;
1929 }
1930
1931 static int felix_get_max_mtu(struct dsa_switch *ds, int port)
1932 {
1933 struct ocelot *ocelot = ds->priv;
1934
1935 return ocelot_get_max_mtu(ocelot, port);
1936 }
1937
1938 static int felix_cls_flower_add(struct dsa_switch *ds, int port,
1939 struct flow_cls_offload *cls, bool ingress)
1940 {
1941 struct ocelot *ocelot = ds->priv;
1942 struct felix *felix = ocelot_to_felix(ocelot);
1943 bool using_tag_8021q;
1944 int err;
1945
1946 err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
1947 if (err)
1948 return err;
1949
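/* If the new filter traps to the CPU, its destination depends on the
 * tagging protocol in use, so refresh the trapping destinations.
 */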
1950 using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
1951
1952 return felix_update_trapping_destinations(ds, using_tag_8021q);
1953 }
1954
1955 static int felix_cls_flower_del(struct dsa_switch *ds, int port,
1956 struct flow_cls_offload *cls, bool ingress)
1957 {
1958 struct ocelot *ocelot = ds->priv;
1959
1960 return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
1961 }
1962
1963 static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
1964 struct flow_cls_offload *cls, bool ingress)
1965 {
1966 struct ocelot *ocelot = ds->priv;
1967
1968 return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
1969 }
1970
1971 static int felix_port_policer_add(struct dsa_switch *ds, int port,
1972 struct dsa_mall_policer_tc_entry *policer)
1973 {
1974 struct ocelot *ocelot = ds->priv;
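/* The ocelot policer rate is expressed in kilobits per second, so
 * convert the tc-police rate from bytes per second:
 * (B/s / 1000) * 8 = kbit/s.
 */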
1975 struct ocelot_policer pol = {
1976 .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
1977 .burst = policer->burst,
1978 };
1979
1980 return ocelot_port_policer_add(ocelot, port, &pol);
1981 }
1982
1983 static void felix_port_policer_del(struct dsa_switch *ds, int port)
1984 {
1985 struct ocelot *ocelot = ds->priv;
1986
1987 ocelot_port_policer_del(ocelot, port);
1988 }
1989
1990 static int felix_port_mirror_add(struct dsa_switch *ds, int port,
1991 struct dsa_mall_mirror_tc_entry *mirror,
1992 bool ingress, struct netlink_ext_ack *extack)
1993 {
1994 struct ocelot *ocelot = ds->priv;
1995
1996 return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
1997 ingress, extack);
1998 }
1999
2000 static void felix_port_mirror_del(struct dsa_switch *ds, int port,
2001 struct dsa_mall_mirror_tc_entry *mirror)
2002 {
2003 struct ocelot *ocelot = ds->priv;
2004
2005 ocelot_port_mirror_del(ocelot, port, mirror->ingress);
2006 }
2007
2008 static int felix_port_setup_tc(struct dsa_switch *ds, int port,
2009 enum tc_setup_type type,
2010 void *type_data)
2011 {
2012 struct ocelot *ocelot = ds->priv;
2013 struct felix *felix = ocelot_to_felix(ocelot);
2014
2015 if (felix->info->port_setup_tc)
2016 return felix->info->port_setup_tc(ds, port, type, type_data);
2017 else
2018 return -EOPNOTSUPP;
2019 }
2020
2021 static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
2022 u16 pool_index,
2023 struct devlink_sb_pool_info *pool_info)
2024 {
2025 struct ocelot *ocelot = ds->priv;
2026
2027 return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
2028 }
2029
2030 static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
2031 u16 pool_index, u32 size,
2032 enum devlink_sb_threshold_type threshold_type,
2033 struct netlink_ext_ack *extack)
2034 {
2035 struct ocelot *ocelot = ds->priv;
2036
2037 return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
2038 threshold_type, extack);
2039 }
2040
2041 static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
2042 unsigned int sb_index, u16 pool_index,
2043 u32 *p_threshold)
2044 {
2045 struct ocelot *ocelot = ds->priv;
2046
2047 return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
2048 p_threshold);
2049 }
2050
2051 static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
2052 unsigned int sb_index, u16 pool_index,
2053 u32 threshold, struct netlink_ext_ack *extack)
2054 {
2055 struct ocelot *ocelot = ds->priv;
2056
2057 return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
2058 threshold, extack);
2059 }
2060
2061 static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
2062 unsigned int sb_index, u16 tc_index,
2063 enum devlink_sb_pool_type pool_type,
2064 u16 *p_pool_index, u32 *p_threshold)
2065 {
2066 struct ocelot *ocelot = ds->priv;
2067
2068 return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
2069 pool_type, p_pool_index,
2070 p_threshold);
2071 }
2072
2073 static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
2074 unsigned int sb_index, u16 tc_index,
2075 enum devlink_sb_pool_type pool_type,
2076 u16 pool_index, u32 threshold,
2077 struct netlink_ext_ack *extack)
2078 {
2079 struct ocelot *ocelot = ds->priv;
2080
2081 return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
2082 pool_type, pool_index, threshold,
2083 extack);
2084 }
2085
2086 static int felix_sb_occ_snapshot(struct dsa_switch *ds,
2087 unsigned int sb_index)
2088 {
2089 struct ocelot *ocelot = ds->priv;
2090
2091 return ocelot_sb_occ_snapshot(ocelot, sb_index);
2092 }
2093
2094 static int felix_sb_occ_max_clear(struct dsa_switch *ds,
2095 unsigned int sb_index)
2096 {
2097 struct ocelot *ocelot = ds->priv;
2098
2099 return ocelot_sb_occ_max_clear(ocelot, sb_index);
2100 }
2101
2102 static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
2103 unsigned int sb_index, u16 pool_index,
2104 u32 *p_cur, u32 *p_max)
2105 {
2106 struct ocelot *ocelot = ds->priv;
2107
2108 return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
2109 p_cur, p_max);
2110 }
2111
2112 static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
2113 unsigned int sb_index, u16 tc_index,
2114 enum devlink_sb_pool_type pool_type,
2115 u32 *p_cur, u32 *p_max)
2116 {
2117 struct ocelot *ocelot = ds->priv;
2118
2119 return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
2120 pool_type, p_cur, p_max);
2121 }
2122
2123 static int felix_mrp_add(struct dsa_switch *ds, int port,
2124 const struct switchdev_obj_mrp *mrp)
2125 {
2126 struct ocelot *ocelot = ds->priv;
2127
2128 return ocelot_mrp_add(ocelot, port, mrp);
2129 }
2130
2131 static int felix_mrp_del(struct dsa_switch *ds, int port,
2132 const struct switchdev_obj_mrp *mrp)
2133 {
2134 struct ocelot *ocelot = ds->priv;
2135
2136 return ocelot_mrp_del(ocelot, port, mrp);
2137 }
2138
2139 static int
2140 felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
2141 const struct switchdev_obj_ring_role_mrp *mrp)
2142 {
2143 struct ocelot *ocelot = ds->priv;
2144
2145 return ocelot_mrp_add_ring_role(ocelot, port, mrp);
2146 }
2147
2148 static int
2149 felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
2150 const struct switchdev_obj_ring_role_mrp *mrp)
2151 {
2152 struct ocelot *ocelot = ds->priv;
2153
2154 return ocelot_mrp_del_ring_role(ocelot, port, mrp);
2155 }
2156
2157 static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
2158 {
2159 struct ocelot *ocelot = ds->priv;
2160
2161 return ocelot_port_get_default_prio(ocelot, port);
2162 }
2163
2164 static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
2165 u8 prio)
2166 {
2167 struct ocelot *ocelot = ds->priv;
2168
2169 return ocelot_port_set_default_prio(ocelot, port, prio);
2170 }
2171
2172 static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
2173 {
2174 struct ocelot *ocelot = ds->priv;
2175
2176 return ocelot_port_get_dscp_prio(ocelot, port, dscp);
2177 }
2178
2179 static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
2180 u8 prio)
2181 {
2182 struct ocelot *ocelot = ds->priv;
2183
2184 return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
2185 }
2186
2187 static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
2188 u8 prio)
2189 {
2190 struct ocelot *ocelot = ds->priv;
2191
2192 return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
2193 }
2194
2195 static int felix_get_mm(struct dsa_switch *ds, int port,
2196 struct ethtool_mm_state *state)
2197 {
2198 struct ocelot *ocelot = ds->priv;
2199
2200 return ocelot_port_get_mm(ocelot, port, state);
2201 }
2202
2203 static int felix_set_mm(struct dsa_switch *ds, int port,
2204 struct ethtool_mm_cfg *cfg,
2205 struct netlink_ext_ack *extack)
2206 {
2207 struct ocelot *ocelot = ds->priv;
2208
2209 return ocelot_port_set_mm(ocelot, port, cfg, extack);
2210 }
2211
2212 static void felix_get_mm_stats(struct dsa_switch *ds, int port,
2213 struct ethtool_mm_stats *stats)
2214 {
2215 struct ocelot *ocelot = ds->priv;
2216
2217 ocelot_port_get_mm_stats(ocelot, port, stats);
2218 }
2219
2220 static const struct phylink_mac_ops felix_phylink_mac_ops = {
2221 .mac_select_pcs = felix_phylink_mac_select_pcs,
2222 .mac_config = felix_phylink_mac_config,
2223 .mac_link_down = felix_phylink_mac_link_down,
2224 .mac_link_up = felix_phylink_mac_link_up,
2225 };
2226
2227 static const struct dsa_switch_ops felix_switch_ops = {
2228 .get_tag_protocol = felix_get_tag_protocol,
2229 .change_tag_protocol = felix_change_tag_protocol,
2230 .connect_tag_protocol = felix_connect_tag_protocol,
2231 .setup = felix_setup,
2232 .teardown = felix_teardown,
2233 .set_ageing_time = felix_set_ageing_time,
2234 .get_mm = felix_get_mm,
2235 .set_mm = felix_set_mm,
2236 .get_mm_stats = felix_get_mm_stats,
2237 .get_stats64 = felix_get_stats64,
2238 .get_pause_stats = felix_get_pause_stats,
2239 .get_rmon_stats = felix_get_rmon_stats,
2240 .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
2241 .get_eth_mac_stats = felix_get_eth_mac_stats,
2242 .get_eth_phy_stats = felix_get_eth_phy_stats,
2243 .get_strings = felix_get_strings,
2244 .get_ethtool_stats = felix_get_ethtool_stats,
2245 .get_sset_count = felix_get_sset_count,
2246 .get_ts_info = felix_get_ts_info,
2247 .phylink_get_caps = felix_phylink_get_caps,
2248 .port_enable = felix_port_enable,
2249 .port_fast_age = felix_port_fast_age,
2250 .port_fdb_dump = felix_fdb_dump,
2251 .port_fdb_add = felix_fdb_add,
2252 .port_fdb_del = felix_fdb_del,
2253 .lag_fdb_add = felix_lag_fdb_add,
2254 .lag_fdb_del = felix_lag_fdb_del,
2255 .port_mdb_add = felix_mdb_add,
2256 .port_mdb_del = felix_mdb_del,
2257 .port_pre_bridge_flags = felix_pre_bridge_flags,
2258 .port_bridge_flags = felix_bridge_flags,
2259 .port_bridge_join = felix_bridge_join,
2260 .port_bridge_leave = felix_bridge_leave,
2261 .port_lag_join = felix_lag_join,
2262 .port_lag_leave = felix_lag_leave,
2263 .port_lag_change = felix_lag_change,
2264 .port_stp_state_set = felix_bridge_stp_state_set,
2265 .port_vlan_filtering = felix_vlan_filtering,
2266 .port_vlan_add = felix_vlan_add,
2267 .port_vlan_del = felix_vlan_del,
2268 .port_hwtstamp_get = felix_hwtstamp_get,
2269 .port_hwtstamp_set = felix_hwtstamp_set,
2270 .port_rxtstamp = felix_rxtstamp,
2271 .port_txtstamp = felix_txtstamp,
2272 .port_change_mtu = felix_change_mtu,
2273 .port_max_mtu = felix_get_max_mtu,
2274 .port_policer_add = felix_port_policer_add,
2275 .port_policer_del = felix_port_policer_del,
2276 .port_mirror_add = felix_port_mirror_add,
2277 .port_mirror_del = felix_port_mirror_del,
2278 .cls_flower_add = felix_cls_flower_add,
2279 .cls_flower_del = felix_cls_flower_del,
2280 .cls_flower_stats = felix_cls_flower_stats,
2281 .port_setup_tc = felix_port_setup_tc,
2282 .devlink_sb_pool_get = felix_sb_pool_get,
2283 .devlink_sb_pool_set = felix_sb_pool_set,
2284 .devlink_sb_port_pool_get = felix_sb_port_pool_get,
2285 .devlink_sb_port_pool_set = felix_sb_port_pool_set,
2286 .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
2287 .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
2288 .devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
2289 .devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
2290 .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
2291 .devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
2292 .port_mrp_add = felix_mrp_add,
2293 .port_mrp_del = felix_mrp_del,
2294 .port_mrp_add_ring_role = felix_mrp_add_ring_role,
2295 .port_mrp_del_ring_role = felix_mrp_del_ring_role,
2296 .tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
2297 .tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
2298 .port_get_default_prio = felix_port_get_default_prio,
2299 .port_set_default_prio = felix_port_set_default_prio,
2300 .port_get_dscp_prio = felix_port_get_dscp_prio,
2301 .port_add_dscp_prio = felix_port_add_dscp_prio,
2302 .port_del_dscp_prio = felix_port_del_dscp_prio,
2303 .port_set_host_flood = felix_port_set_host_flood,
2304 .port_change_conduit = felix_port_change_conduit,
2305 };
2306
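/**
 * felix_register_switch - common registration path for Felix switches
 * @dev: probing device
 * @switch_base: physical base address of the switch register block
 * @num_flooding_pgids: number of PGIDs used for flooding
 * @ptp: set if the switch exposes a PTP hardware clock
 * @mm_supported: set if the switch supports the MAC Merge layer
 * @init_tag_proto: tagging protocol to start with
 * @info: per-SoC constant data
 */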
2307 int felix_register_switch(struct device *dev, resource_size_t switch_base,
2308 int num_flooding_pgids, bool ptp,
2309 bool mm_supported,
2310 enum dsa_tag_protocol init_tag_proto,
2311 const struct felix_info *info)
2312 {
2313 struct dsa_switch *ds;
2314 struct ocelot *ocelot;
2315 struct felix *felix;
2316 int err;
2317
2318 felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL);
2319 if (!felix)
2320 return -ENOMEM;
2321
2322 ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
2323 if (!ds)
2324 return -ENOMEM;
2325
2326 dev_set_drvdata(dev, felix);
2327
2328 ocelot = &felix->ocelot;
2329 ocelot->dev = dev;
2330 ocelot->num_flooding_pgids = num_flooding_pgids;
2331 ocelot->ptp = ptp;
2332 ocelot->mm_supported = mm_supported;
2333
2334 felix->info = info;
2335 felix->switch_base = switch_base;
2336 felix->ds = ds;
2337 felix->tag_proto = init_tag_proto;
2338
2339 ds->dev = dev;
2340 ds->num_ports = info->num_ports;
2341 ds->num_tx_queues = OCELOT_NUM_TC;
2342 ds->ops = &felix_switch_ops;
2343 ds->phylink_mac_ops = &felix_phylink_mac_ops;
2344 ds->priv = ocelot;
2345
2346 err = dsa_register_switch(ds);
2347 if (err)
2348 dev_err_probe(dev, err, "Failed to register DSA switch\n");
2349
2350 return err;
2351 }
2352 EXPORT_SYMBOL_GPL(felix_register_switch);
2353
2354 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
2355 {
2356 struct felix *felix = ocelot_to_felix(ocelot);
2357 struct dsa_switch *ds = felix->ds;
2358
2359 if (!dsa_is_user_port(ds, port))
2360 return NULL;
2361
2362 return dsa_to_port(ds, port)->user;
2363 }
2364 EXPORT_SYMBOL_GPL(felix_port_to_netdev);
2365
2366 int felix_netdev_to_port(struct net_device *dev)
2367 {
2368 struct dsa_port *dp;
2369
2370 dp = dsa_port_from_netdev(dev);
2371 if (IS_ERR(dp))
2372 return -EINVAL;
2373
2374 return dp->index;
2375 }
2376 EXPORT_SYMBOL_GPL(felix_netdev_to_port);
2377
2378 MODULE_DESCRIPTION("Felix DSA library");
2379 MODULE_LICENSE("GPL");
2380