// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
#include <uapi/linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"

static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	outer_tagging_rule->id.cookie = port;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}

static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
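	 * For the other ports, the TX VLAN is implemented below with an IS1
	 * rule on the upstream port that pops the tag and assigns a per-port
	 * PAG, chained to an IS2 rule that redirects the frame to this port.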
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = port;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = port;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}

static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
						  port, vid, pvid, untagged);

	return 0;
}

static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot_vcap_block *block_vcap_es0;
	struct ocelot *ocelot = &felix->ocelot;

	block_vcap_es0 = &ocelot->block[VCAP_ES0];

	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
								 port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
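	 * (The ES0 rule was installed with this port number as its cookie,
	 * which is what the lookup by id above searched for.)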
	 */
	if (!outer_tagging_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
}

static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							     port, false);
	if (!untagging_rule)
		return 0;

	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    port, false);
	if (!redirect_rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
						  port, vid);

	return 0;
}

/* Alternatively to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}

/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
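 * The trap is keyed on EtherType ETH_P_1588 (0x88F7): an IS1 rule assigns a
 * PAG to PTP frames received on user ports, and an IS2 rule chained to that
 * PAG performs the actual trapping/redirecting.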
 */
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = dsa_user_ports(felix->ds);
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int cpu = -1, port, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_cpu_port(ds, port)) {
			cpu = port;
			break;
		}
	}

	if (cpu < 0) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return -EINVAL;
	}

	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = ocelot->num_phys_ports;
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = ocelot->num_phys_ports;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = BIT(cpu);
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}

static int felix_teardown_mmio_filtering(struct felix *felix)
{
	struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
	struct ocelot_vcap_block *block_vcap_is1;
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot *ocelot = &felix->ocelot;
	int err;

	block_vcap_is1 = &ocelot->block[VCAP_IS1];
	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
							   ocelot->num_phys_ports,
							   false);
	if (!tagging_rule)
		return -ENOENT;

	err = ocelot_vcap_filter_del(ocelot, tagging_rule);
	if (err)
		return err;

	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
							    ocelot->num_phys_ports,
							    false);
	if (!redirect_rule)
		return -ENOENT;

	return ocelot_vcap_filter_del(ocelot, redirect_rule);
}

static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	int port, err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
	if (err)
		return err;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_tag_8021q_unregister;

	return 0;

out_tag_8021q_unregister:
	dsa_tag_8021q_unregister(ds);
	return err;
}

static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, port;

	err = felix_teardown_mmio_filtering(felix);
	if (err)
		dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
			err);

	dsa_tag_8021q_unregister(ds);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* Restore the logic from ocelot_init:
		 * do not forward BPDU frames to the front ports.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
	}

	felix_8021q_cpu_port_deinit(ocelot, cpu);
}

/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}

static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}

static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}

static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;

	felix_npi_port_deinit(ocelot, cpu);
}

static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
				  enum dsa_tag_protocol proto)
{
	int err;

	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		err = felix_setup_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		err = felix_setup_tag_8021q(ds, cpu);
		break;
	default:
		err = -EPROTONOSUPPORT;
	}

	return err;
}

static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
				   enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_SEVILLE:
	case DSA_TAG_PROTO_OCELOT:
		felix_teardown_tag_npi(ds, cpu);
		break;
	case DSA_TAG_PROTO_OCELOT_8021Q:
		felix_teardown_tag_8021q(ds, cpu);
		break;
	default:
		break;
	}
}

/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
				     enum dsa_tag_protocol proto)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	enum dsa_tag_protocol old_proto = felix->tag_proto;
	int err;

	if (proto != DSA_TAG_PROTO_SEVILLE &&
	    proto != DSA_TAG_PROTO_OCELOT &&
	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return -EPROTONOSUPPORT;

	felix_del_tag_protocol(ds, cpu, old_proto);

	err = felix_set_tag_protocol(ds, cpu, proto);
	if (err) {
		felix_set_tag_protocol(ds, cpu, old_proto);
		return err;
	}

	felix->tag_proto = proto;

	return 0;
}

static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	return felix->tag_proto;
}

static int felix_set_ageing_time(struct dsa_switch *ds,
				 unsigned int ageing_time)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_set_ageing_time(ocelot, ageing_time);

	return 0;
}

static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_dump(ocelot, port, cb, data);
}

static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_add(ocelot, port, addr, vid);
}

static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_fdb_del(ocelot, port, addr, vid);
}

static int felix_mdb_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_add(ocelot, port, mdb);
}

static int felix_mdb_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mdb_del(ocelot, port, mdb);
}

static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_bridge_stp_state_set(ocelot, port, state);
}

static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_pre_bridge_flags(ocelot, port, val);
}

static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_flags(ocelot, port, val);

	return 0;
}

static int felix_bridge_join(struct dsa_switch *ds, int port,
			     struct net_device *br)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_join(ocelot, port, br);

	return 0;
}

static void felix_bridge_leave(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_bridge_leave(ocelot, port, br);
}

static int felix_lag_join(struct dsa_switch *ds, int port,
			  struct net_device *bond,
			  struct netdev_lag_upper_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_lag_join(ocelot, port, bond, info);
}

static int felix_lag_leave(struct dsa_switch *ds, int port,
			   struct net_device *bond)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_leave(ocelot, port, bond);

	return 0;
}

static int felix_lag_change(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;

	ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);

	return 0;
}

static int felix_vlan_prepare(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_vlan *vlan,
			      struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;

	/* Ocelot switches copy frames as-is to the CPU, so the flags:
	 * egress-untagged or not, pvid or not, make no difference. This
	 * behavior is already better than what DSA just tries to approximate
	 * when it installs the VLAN with the same flags on the CPU port.
	 * Just accept any configuration, and don't let ocelot deny installing
	 * multiple native VLANs on the NPI port, because the switch doesn't
	 * look at the port tag settings towards the NPI interface anyway.
	 */
	if (port == ocelot->npi)
		return 0;

	return ocelot_vlan_prepare(ocelot, port, vlan->vid,
				   flags & BRIDGE_VLAN_INFO_PVID,
				   flags & BRIDGE_VLAN_INFO_UNTAGGED,
				   extack);
}

static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
}

static int felix_vlan_add(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;
	u16 flags = vlan->flags;
	int err;

	err = felix_vlan_prepare(ds, port, vlan, extack);
	if (err)
		return err;

	return ocelot_vlan_add(ocelot, port, vlan->vid,
			       flags & BRIDGE_VLAN_INFO_PVID,
			       flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int felix_vlan_del(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_vlan_del(ocelot, port, vlan->vid);
}

static void felix_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->phylink_validate)
		felix->info->phylink_validate(ocelot, port, supported, state);
}

static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int link_an_mode,
				     const struct phylink_link_state *state)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (felix->pcs && felix->pcs[port])
		phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}

static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
				     FELIX_MAC_QUIRKS);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
				   interface, speed, duplex, tx_pause, rx_pause,
				   FELIX_MAC_QUIRKS);

	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}

static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}

static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_strings(ocelot, port, stringset, data);
}

static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_ethtool_stats(ocelot, port, data);
}

static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_sset_count(ocelot, port, sset);
}

static int felix_get_ts_info(struct dsa_switch *ds, int port,
			     struct ethtool_ts_info *info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_ts_info(ocelot, port, info);
}

static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports = num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_stats = felix->info->num_stats;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port = devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct felix_port *felix_port;

	if (!dsa_port_is_user(dp))
		return 0;

	felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
	if (!felix_port)
		return -ENOMEM;

	felix_port->xmit_worker = felix->xmit_worker;
	felix_port->xmit_work_fn = felix_port_deferred_xmit;

	dp->priv = felix_port;

	return 0;
}

static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct felix_port *felix_port = dp->priv;

	if (!felix_port)
		return;

	dp->priv = NULL;
	kfree(felix_port);
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port, err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
	if (IS_ERR(felix->xmit_worker)) {
		err = PTR_ERR(felix->xmit_worker);
		goto out_deinit_timestamp;
	}

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_init_port(ocelot, port);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, port);

		err = felix_port_setup_tagger_data(ds, port);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to set up tagger data: %pe\n",
				port, ERR_PTR(err));
			goto out_deinit_ports;
		}
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

out_deinit_ports:
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

out_deinit_timestamp:
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		felix_del_tag_protocol(ds, port, felix->tag_proto);
		break;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		felix_port_teardown_tagger_data(ds, port);
		ocelot_deinit_port(ocelot, port);
	}

	kthread_destroy_worker(felix->xmit_worker);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_set(ocelot, port, ifr);
}

static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err, grp = 0;

	if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	if (ptp_type == PTP_CLASS_NONE)
		return false;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0)
		ocelot_drain_cpu_queue(ocelot, 0);

	return true;
}

static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
}

static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
					      pool_type, p_cur, p_max);
}

static int felix_mrp_add(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add(ocelot, port, mrp);
}

static int felix_mrp_del(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del(ocelot, port, mrp);
}

static int
felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}

static int
felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
			const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}

const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
};

struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}