// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = port;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}

static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
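	 *
	 * For the other ports, the tx VLAN is realized below with a pair of
	 * rules: an IS1 rule on the upstream (CPU-facing) port that pops the
	 * tag_8021q VLAN and assigns a PAG equal to the target port, and an
	 * IS2 rule keyed on that PAG which redirects the frame to the target
	 * port.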
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = port;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = port;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	return 0;
}

static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
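	 * (The lookup above finds the ES0 filter by its cookie, which
	 * rxvlan_add set to the port number.)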
	 */
	if (!outer_tagging_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return 0;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	return 0;
}

/* Alternatively to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
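 *
 * This is done in two stages: an IS1 rule matching on the PTP EtherType
 * (ETH_P_1588) on all user ports assigns a PAG of num_phys_ports, and an
 * IS2 rule keyed on that PAG performs the CPU copy and, depending on the
 * quirk, the redirect towards the tag_8021q CPU port.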
 */
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = dsa_user_ports(felix->ds);
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int cpu = -1, port, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			cpu = port;
			break;
		}
	}

	if (cpu < 0) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return -EINVAL;
	}

	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = ocelot->num_phys_ports;
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = ocelot->num_phys_ports;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = BIT(cpu);
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}

static int felix_teardown_mmio_filtering(struct felix *felix)
{
	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							   ocelot->num_phys_ports,
							   false);
	if (!tagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    ocelot->num_phys_ports,
							    false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	int port, err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_tag_8021q_unregister;

	return 0;

out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, port;

	err = felix_teardown_mmio_filtering(felix);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
			err);

	dsa_tag_8021q_unregister(ds);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
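	 *
	 * Concretely, the read-modify-writes below set the
	 * BIT(ocelot->num_phys_ports) bit (the CPU port module) in the
	 * PGID_UC, PGID_MC and PGID_BC flooding destination masks.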
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_add(ocelot, port, addr, vid);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_del(ocelot, port, addr, vid);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_add(ocelot, port, mdb);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_del(ocelot, port, mdb);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge, bool *tx_fwd_offload)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_join(ocelot, port, bridge.dev);

	return 0;
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct net_device *bond,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, bond, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct net_device *bond)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, bond);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int link_an_mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (felix->pcs && felix->pcs[port])
		phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     FELIX_MAC_QUIRKS);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   FELIX_MAC_QUIRKS);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_stats = felix->info->num_stats;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct felix_port *felix_port;

	if (!dsa_port_is_user(dp))
		return 0;

	felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
	if (!felix_port)
		return -ENOMEM;

	felix_port->xmit_worker = felix->xmit_worker;
	felix_port->xmit_work_fn = felix_port_deferred_xmit;

	dp->priv = felix_port;

	return 0;
}

static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct felix_port *felix_port = dp->priv;

	if (!felix_port)
		return;

	dp->priv = NULL;
	kfree(felix_port);
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port, err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
	if (IS_ERR(felix->xmit_worker)) {
		err = PTR_ERR(felix->xmit_worker);
		goto out_deinit_timestamp;
	}

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_init_port(ocelot, port);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, port);

		err = felix_port_setup_tagger_data(ds, port);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to set up tagger data: %pe\n",
				port, ERR_PTR(err));
			goto out_deinit_ports;
		}
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

out_deinit_ports:
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

out_deinit_timestamp:
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		felix_del_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_set(ocelot, port, ifr);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	if (ptp_type == PTP_CLASS_NONE)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
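		 * (ptp_classify_raw() below needs to see the Ethernet header,
		 * hence the temporary __skb_push()/__skb_pull() around it.)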
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return true;
}

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}