1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright 2019-2021 NXP 3 * 4 * This is an umbrella module for all network switches that are 5 * register-compatible with Ocelot and that perform I/O to their host CPU 6 * through an NPI (Node Processor Interface) Ethernet port. 7 */ 8 #include <uapi/linux/if_bridge.h> 9 #include <soc/mscc/ocelot_vcap.h> 10 #include <soc/mscc/ocelot_qsys.h> 11 #include <soc/mscc/ocelot_sys.h> 12 #include <soc/mscc/ocelot_dev.h> 13 #include <soc/mscc/ocelot_ana.h> 14 #include <soc/mscc/ocelot_ptp.h> 15 #include <soc/mscc/ocelot.h> 16 #include <linux/dsa/8021q.h> 17 #include <linux/dsa/ocelot.h> 18 #include <linux/platform_device.h> 19 #include <linux/ptp_classify.h> 20 #include <linux/module.h> 21 #include <linux/of_net.h> 22 #include <linux/pci.h> 23 #include <linux/of.h> 24 #include <net/pkt_sched.h> 25 #include <net/dsa.h> 26 #include "felix.h" 27 28 /* Translate the DSA database API into the ocelot switch library API, 29 * which uses VID 0 for all ports that aren't part of a bridge, 30 * and expects the bridge_dev to be NULL in that case. 31 */ 32 static struct net_device *felix_classify_db(struct dsa_db db) 33 { 34 switch (db.type) { 35 case DSA_DB_PORT: 36 case DSA_DB_LAG: 37 return NULL; 38 case DSA_DB_BRIDGE: 39 return db.bridge.dev; 40 default: 41 return ERR_PTR(-EOPNOTSUPP); 42 } 43 } 44 45 /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that 46 * the tagger can perform RX source port identification. 
47 */ 48 static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid) 49 { 50 struct ocelot_vcap_filter *outer_tagging_rule; 51 struct ocelot *ocelot = &felix->ocelot; 52 struct dsa_switch *ds = felix->ds; 53 int key_length, upstream, err; 54 55 key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; 56 upstream = dsa_upstream_port(ds, port); 57 58 outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), 59 GFP_KERNEL); 60 if (!outer_tagging_rule) 61 return -ENOMEM; 62 63 outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; 64 outer_tagging_rule->prio = 1; 65 outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port); 66 outer_tagging_rule->id.tc_offload = false; 67 outer_tagging_rule->block_id = VCAP_ES0; 68 outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; 69 outer_tagging_rule->lookup = 0; 70 outer_tagging_rule->ingress_port.value = port; 71 outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0); 72 outer_tagging_rule->egress_port.value = upstream; 73 outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0); 74 outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG; 75 outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD; 76 outer_tagging_rule->action.tag_a_vid_sel = 1; 77 outer_tagging_rule->action.vid_a_val = vid; 78 79 err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL); 80 if (err) 81 kfree(outer_tagging_rule); 82 83 return err; 84 } 85 86 static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid) 87 { 88 struct ocelot_vcap_filter *outer_tagging_rule; 89 struct ocelot_vcap_block *block_vcap_es0; 90 struct ocelot *ocelot = &felix->ocelot; 91 92 block_vcap_es0 = &ocelot->block[VCAP_ES0]; 93 94 outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, 95 port, false); 96 if (!outer_tagging_rule) 97 return -ENOENT; 98 99 return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); 100 } 101 102 /* Set up VCAP IS1 
rules for stripping the tag_8021q VLAN on TX and VCAP IS2 103 * rules for steering those tagged packets towards the correct destination port 104 */ 105 static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) 106 { 107 struct ocelot_vcap_filter *untagging_rule, *redirect_rule; 108 struct ocelot *ocelot = &felix->ocelot; 109 struct dsa_switch *ds = felix->ds; 110 int upstream, err; 111 112 untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); 113 if (!untagging_rule) 114 return -ENOMEM; 115 116 redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); 117 if (!redirect_rule) { 118 kfree(untagging_rule); 119 return -ENOMEM; 120 } 121 122 upstream = dsa_upstream_port(ds, port); 123 124 untagging_rule->key_type = OCELOT_VCAP_KEY_ANY; 125 untagging_rule->ingress_port_mask = BIT(upstream); 126 untagging_rule->vlan.vid.value = vid; 127 untagging_rule->vlan.vid.mask = VLAN_VID_MASK; 128 untagging_rule->prio = 1; 129 untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); 130 untagging_rule->id.tc_offload = false; 131 untagging_rule->block_id = VCAP_IS1; 132 untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; 133 untagging_rule->lookup = 0; 134 untagging_rule->action.vlan_pop_cnt_ena = true; 135 untagging_rule->action.vlan_pop_cnt = 1; 136 untagging_rule->action.pag_override_mask = 0xff; 137 untagging_rule->action.pag_val = port; 138 139 err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL); 140 if (err) { 141 kfree(untagging_rule); 142 kfree(redirect_rule); 143 return err; 144 } 145 146 redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; 147 redirect_rule->ingress_port_mask = BIT(upstream); 148 redirect_rule->pag = port; 149 redirect_rule->prio = 1; 150 redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); 151 redirect_rule->id.tc_offload = false; 152 redirect_rule->block_id = VCAP_IS2; 153 redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; 154 redirect_rule->lookup = 0; 155 
redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; 156 redirect_rule->action.port_mask = BIT(port); 157 158 err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL); 159 if (err) { 160 ocelot_vcap_filter_del(ocelot, untagging_rule); 161 kfree(redirect_rule); 162 return err; 163 } 164 165 return 0; 166 } 167 168 static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid) 169 { 170 struct ocelot_vcap_filter *untagging_rule, *redirect_rule; 171 struct ocelot_vcap_block *block_vcap_is1; 172 struct ocelot_vcap_block *block_vcap_is2; 173 struct ocelot *ocelot = &felix->ocelot; 174 int err; 175 176 block_vcap_is1 = &ocelot->block[VCAP_IS1]; 177 block_vcap_is2 = &ocelot->block[VCAP_IS2]; 178 179 untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, 180 port, false); 181 if (!untagging_rule) 182 return -ENOENT; 183 184 err = ocelot_vcap_filter_del(ocelot, untagging_rule); 185 if (err) 186 return err; 187 188 redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, 189 port, false); 190 if (!redirect_rule) 191 return -ENOENT; 192 193 return ocelot_vcap_filter_del(ocelot, redirect_rule); 194 } 195 196 static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, 197 u16 flags) 198 { 199 struct ocelot *ocelot = ds->priv; 200 int err; 201 202 /* tag_8021q.c assumes we are implementing this via port VLAN 203 * membership, which we aren't. So we don't need to add any VCAP filter 204 * for the CPU port. 
205 */ 206 if (!dsa_is_user_port(ds, port)) 207 return 0; 208 209 err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); 210 if (err) 211 return err; 212 213 err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid); 214 if (err) { 215 felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); 216 return err; 217 } 218 219 return 0; 220 } 221 222 static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) 223 { 224 struct ocelot *ocelot = ds->priv; 225 int err; 226 227 if (!dsa_is_user_port(ds, port)) 228 return 0; 229 230 err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); 231 if (err) 232 return err; 233 234 err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid); 235 if (err) { 236 felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); 237 return err; 238 } 239 240 return 0; 241 } 242 243 /* Alternatively to using the NPI functionality, that same hardware MAC 244 * connected internally to the enetc or fman DSA master can be configured to 245 * use the software-defined tag_8021q frame format. As far as the hardware is 246 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port 247 * module are now disconnected from it, but can still be accessed through 248 * register-based MMIO. 
249 */ 250 static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) 251 { 252 mutex_lock(&ocelot->fwd_domain_lock); 253 254 ocelot_port_set_dsa_8021q_cpu(ocelot, port); 255 256 /* Overwrite PGID_CPU with the non-tagging port */ 257 ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU); 258 259 ocelot_apply_bridge_fwd_mask(ocelot, true); 260 261 mutex_unlock(&ocelot->fwd_domain_lock); 262 } 263 264 static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) 265 { 266 mutex_lock(&ocelot->fwd_domain_lock); 267 268 ocelot_port_unset_dsa_8021q_cpu(ocelot, port); 269 270 /* Restore PGID_CPU */ 271 ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID, 272 PGID_CPU); 273 274 ocelot_apply_bridge_fwd_mask(ocelot, true); 275 276 mutex_unlock(&ocelot->fwd_domain_lock); 277 } 278 279 static int felix_trap_get_cpu_port(struct dsa_switch *ds, 280 const struct ocelot_vcap_filter *trap) 281 { 282 struct dsa_port *dp; 283 int first_port; 284 285 if (WARN_ON(!trap->ingress_port_mask)) 286 return -1; 287 288 first_port = __ffs(trap->ingress_port_mask); 289 dp = dsa_to_port(ds, first_port); 290 291 return dp->cpu_dp->index; 292 } 293 294 /* On switches with no extraction IRQ wired, trapped packets need to be 295 * replicated over Ethernet as well, otherwise we'd get no notification of 296 * their arrival when using the ocelot-8021q tagging protocol. 297 */ 298 static int felix_update_trapping_destinations(struct dsa_switch *ds, 299 bool using_tag_8021q) 300 { 301 struct ocelot *ocelot = ds->priv; 302 struct felix *felix = ocelot_to_felix(ocelot); 303 struct ocelot_vcap_block *block_vcap_is2; 304 struct ocelot_vcap_filter *trap; 305 enum ocelot_mask_mode mask_mode; 306 unsigned long port_mask; 307 bool cpu_copy_ena; 308 int err; 309 310 if (!felix->info->quirk_no_xtr_irq) 311 return 0; 312 313 /* We are sure that "cpu" was found, otherwise 314 * dsa_tree_setup_default_cpu() would have failed earlier. 
315 */ 316 block_vcap_is2 = &ocelot->block[VCAP_IS2]; 317 318 /* Make sure all traps are set up for that destination */ 319 list_for_each_entry(trap, &block_vcap_is2->rules, list) { 320 if (!trap->is_trap) 321 continue; 322 323 /* Figure out the current trapping destination */ 324 if (using_tag_8021q) { 325 /* Redirect to the tag_8021q CPU port. If timestamps 326 * are necessary, also copy trapped packets to the CPU 327 * port module. 328 */ 329 mask_mode = OCELOT_MASK_MODE_REDIRECT; 330 port_mask = BIT(felix_trap_get_cpu_port(ds, trap)); 331 cpu_copy_ena = !!trap->take_ts; 332 } else { 333 /* Trap packets only to the CPU port module, which is 334 * redirected to the NPI port (the DSA CPU port) 335 */ 336 mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; 337 port_mask = 0; 338 cpu_copy_ena = true; 339 } 340 341 if (trap->action.mask_mode == mask_mode && 342 trap->action.port_mask == port_mask && 343 trap->action.cpu_copy_ena == cpu_copy_ena) 344 continue; 345 346 trap->action.mask_mode = mask_mode; 347 trap->action.port_mask = port_mask; 348 trap->action.cpu_copy_ena = cpu_copy_ena; 349 350 err = ocelot_vcap_filter_replace(ocelot, trap); 351 if (err) 352 return err; 353 } 354 355 return 0; 356 } 357 358 /* The CPU port module is connected to the Node Processor Interface (NPI). This 359 * is the mode through which frames can be injected from and extracted to an 360 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU 361 * running Linux, and this forms a DSA setup together with the enetc or fman 362 * DSA master. 
363 */ 364 static void felix_npi_port_init(struct ocelot *ocelot, int port) 365 { 366 ocelot->npi = port; 367 368 ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | 369 QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), 370 QSYS_EXT_CPU_CFG); 371 372 /* NPI port Injection/Extraction configuration */ 373 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, 374 ocelot->npi_xtr_prefix); 375 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, 376 ocelot->npi_inj_prefix); 377 378 /* Disable transmission of pause frames */ 379 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); 380 } 381 382 static void felix_npi_port_deinit(struct ocelot *ocelot, int port) 383 { 384 /* Restore hardware defaults */ 385 int unused_port = ocelot->num_phys_ports + 2; 386 387 ocelot->npi = -1; 388 389 ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), 390 QSYS_EXT_CPU_CFG); 391 392 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, 393 OCELOT_TAG_PREFIX_DISABLED); 394 ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, 395 OCELOT_TAG_PREFIX_DISABLED); 396 397 /* Enable transmission of pause frames */ 398 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); 399 } 400 401 static int felix_tag_npi_setup(struct dsa_switch *ds) 402 { 403 struct dsa_port *dp, *first_cpu_dp = NULL; 404 struct ocelot *ocelot = ds->priv; 405 406 dsa_switch_for_each_user_port(dp, ds) { 407 if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { 408 dev_err(ds->dev, "Multiple NPI ports not supported\n"); 409 return -EINVAL; 410 } 411 412 first_cpu_dp = dp->cpu_dp; 413 } 414 415 if (!first_cpu_dp) 416 return -EINVAL; 417 418 felix_npi_port_init(ocelot, first_cpu_dp->index); 419 420 return 0; 421 } 422 423 static void felix_tag_npi_teardown(struct dsa_switch *ds) 424 { 425 struct ocelot *ocelot = ds->priv; 426 427 felix_npi_port_deinit(ocelot, ocelot->npi); 428 } 429 430 static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) 431 { 432 struct 
ocelot *ocelot = ds->priv; 433 434 return BIT(ocelot->num_phys_ports); 435 } 436 437 static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { 438 .setup = felix_tag_npi_setup, 439 .teardown = felix_tag_npi_teardown, 440 .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, 441 }; 442 443 static int felix_tag_8021q_setup(struct dsa_switch *ds) 444 { 445 struct ocelot *ocelot = ds->priv; 446 struct dsa_port *dp, *cpu_dp; 447 int err; 448 449 err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); 450 if (err) 451 return err; 452 453 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 454 felix_8021q_cpu_port_init(ocelot, cpu_dp->index); 455 456 /* TODO we could support multiple CPU ports in tag_8021q mode */ 457 break; 458 } 459 460 dsa_switch_for_each_available_port(dp, ds) { 461 /* This overwrites ocelot_init(): 462 * Do not forward BPDU frames to the CPU port module, 463 * for 2 reasons: 464 * - When these packets are injected from the tag_8021q 465 * CPU port, we want them to go out, not loop back 466 * into the system. 467 * - STP traffic ingressing on a user port should go to 468 * the tag_8021q CPU port, not to the hardware CPU 469 * port module. 470 */ 471 ocelot_write_gix(ocelot, 472 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), 473 ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); 474 } 475 476 /* The ownership of the CPU port module's queues might have just been 477 * transferred to the tag_8021q tagger from the NPI-based tagger. 478 * So there might still be all sorts of crap in the queues. On the 479 * other hand, the MMIO-based matching of PTP frames is very brittle, 480 * so we need to be careful that there are no extra frames to be 481 * dequeued over MMIO, since we would never know to discard them. 
482 */ 483 ocelot_drain_cpu_queue(ocelot, 0); 484 485 return 0; 486 } 487 488 static void felix_tag_8021q_teardown(struct dsa_switch *ds) 489 { 490 struct ocelot *ocelot = ds->priv; 491 struct dsa_port *dp, *cpu_dp; 492 493 dsa_switch_for_each_available_port(dp, ds) { 494 /* Restore the logic from ocelot_init: 495 * do not forward BPDU frames to the front ports. 496 */ 497 ocelot_write_gix(ocelot, 498 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), 499 ANA_PORT_CPU_FWD_BPDU_CFG, 500 dp->index); 501 } 502 503 dsa_switch_for_each_cpu_port(cpu_dp, ds) { 504 felix_8021q_cpu_port_deinit(ocelot, cpu_dp->index); 505 506 /* TODO we could support multiple CPU ports in tag_8021q mode */ 507 break; 508 } 509 510 dsa_tag_8021q_unregister(ds); 511 } 512 513 static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) 514 { 515 return dsa_cpu_ports(ds); 516 } 517 518 static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { 519 .setup = felix_tag_8021q_setup, 520 .teardown = felix_tag_8021q_teardown, 521 .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, 522 }; 523 524 static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, 525 bool uc, bool mc, bool bc) 526 { 527 struct ocelot *ocelot = ds->priv; 528 unsigned long val; 529 530 val = uc ? mask : 0; 531 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); 532 533 val = mc ? 
mask : 0; 534 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); 535 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); 536 ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); 537 } 538 539 static void 540 felix_migrate_host_flood(struct dsa_switch *ds, 541 const struct felix_tag_proto_ops *proto_ops, 542 const struct felix_tag_proto_ops *old_proto_ops) 543 { 544 struct ocelot *ocelot = ds->priv; 545 struct felix *felix = ocelot_to_felix(ocelot); 546 unsigned long mask; 547 548 if (old_proto_ops) { 549 mask = old_proto_ops->get_host_fwd_mask(ds); 550 felix_set_host_flood(ds, mask, false, false, false); 551 } 552 553 mask = proto_ops->get_host_fwd_mask(ds); 554 felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, 555 !!felix->host_flood_mc_mask, true); 556 } 557 558 static int felix_migrate_mdbs(struct dsa_switch *ds, 559 const struct felix_tag_proto_ops *proto_ops, 560 const struct felix_tag_proto_ops *old_proto_ops) 561 { 562 struct ocelot *ocelot = ds->priv; 563 unsigned long from, to; 564 565 if (!old_proto_ops) 566 return 0; 567 568 from = old_proto_ops->get_host_fwd_mask(ds); 569 to = proto_ops->get_host_fwd_mask(ds); 570 571 return ocelot_migrate_mdbs(ocelot, from, to); 572 } 573 574 /* Configure the shared hardware resources for a transition between 575 * @old_proto_ops and @proto_ops. 576 * Manual migration is needed because as far as DSA is concerned, no change of 577 * the CPU port is taking place here, just of the tagging protocol. 
578 */ 579 static int 580 felix_tag_proto_setup_shared(struct dsa_switch *ds, 581 const struct felix_tag_proto_ops *proto_ops, 582 const struct felix_tag_proto_ops *old_proto_ops) 583 { 584 bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); 585 int err; 586 587 err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); 588 if (err) 589 return err; 590 591 felix_update_trapping_destinations(ds, using_tag_8021q); 592 593 felix_migrate_host_flood(ds, proto_ops, old_proto_ops); 594 595 return 0; 596 } 597 598 /* This always leaves the switch in a consistent state, because although the 599 * tag_8021q setup can fail, the NPI setup can't. So either the change is made, 600 * or the restoration is guaranteed to work. 601 */ 602 static int felix_change_tag_protocol(struct dsa_switch *ds, 603 enum dsa_tag_protocol proto) 604 { 605 const struct felix_tag_proto_ops *old_proto_ops, *proto_ops; 606 struct ocelot *ocelot = ds->priv; 607 struct felix *felix = ocelot_to_felix(ocelot); 608 int err; 609 610 switch (proto) { 611 case DSA_TAG_PROTO_SEVILLE: 612 case DSA_TAG_PROTO_OCELOT: 613 proto_ops = &felix_tag_npi_proto_ops; 614 break; 615 case DSA_TAG_PROTO_OCELOT_8021Q: 616 proto_ops = &felix_tag_8021q_proto_ops; 617 break; 618 default: 619 return -EPROTONOSUPPORT; 620 } 621 622 old_proto_ops = felix->tag_proto_ops; 623 624 err = proto_ops->setup(ds); 625 if (err) 626 goto setup_failed; 627 628 err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); 629 if (err) 630 goto setup_shared_failed; 631 632 if (old_proto_ops) 633 old_proto_ops->teardown(ds); 634 635 felix->tag_proto_ops = proto_ops; 636 felix->tag_proto = proto; 637 638 return 0; 639 640 setup_shared_failed: 641 proto_ops->teardown(ds); 642 setup_failed: 643 return err; 644 } 645 646 static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, 647 int port, 648 enum dsa_tag_protocol mp) 649 { 650 struct ocelot *ocelot = ds->priv; 651 struct felix *felix = ocelot_to_felix(ocelot); 652 
653 return felix->tag_proto; 654 } 655 656 static void felix_port_set_host_flood(struct dsa_switch *ds, int port, 657 bool uc, bool mc) 658 { 659 struct ocelot *ocelot = ds->priv; 660 struct felix *felix = ocelot_to_felix(ocelot); 661 unsigned long mask; 662 663 if (uc) 664 felix->host_flood_uc_mask |= BIT(port); 665 else 666 felix->host_flood_uc_mask &= ~BIT(port); 667 668 if (mc) 669 felix->host_flood_mc_mask |= BIT(port); 670 else 671 felix->host_flood_mc_mask &= ~BIT(port); 672 673 mask = felix->tag_proto_ops->get_host_fwd_mask(ds); 674 felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, 675 !!felix->host_flood_mc_mask, true); 676 } 677 678 static int felix_set_ageing_time(struct dsa_switch *ds, 679 unsigned int ageing_time) 680 { 681 struct ocelot *ocelot = ds->priv; 682 683 ocelot_set_ageing_time(ocelot, ageing_time); 684 685 return 0; 686 } 687 688 static void felix_port_fast_age(struct dsa_switch *ds, int port) 689 { 690 struct ocelot *ocelot = ds->priv; 691 int err; 692 693 err = ocelot_mact_flush(ocelot, port); 694 if (err) 695 dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n", 696 port, ERR_PTR(err)); 697 } 698 699 static int felix_fdb_dump(struct dsa_switch *ds, int port, 700 dsa_fdb_dump_cb_t *cb, void *data) 701 { 702 struct ocelot *ocelot = ds->priv; 703 704 return ocelot_fdb_dump(ocelot, port, cb, data); 705 } 706 707 static int felix_fdb_add(struct dsa_switch *ds, int port, 708 const unsigned char *addr, u16 vid, 709 struct dsa_db db) 710 { 711 struct net_device *bridge_dev = felix_classify_db(db); 712 struct dsa_port *dp = dsa_to_port(ds, port); 713 struct ocelot *ocelot = ds->priv; 714 715 if (IS_ERR(bridge_dev)) 716 return PTR_ERR(bridge_dev); 717 718 if (dsa_port_is_cpu(dp) && !bridge_dev && 719 dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) 720 return 0; 721 722 if (dsa_port_is_cpu(dp)) 723 port = PGID_CPU; 724 725 return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); 726 } 727 728 static int 
felix_fdb_del(struct dsa_switch *ds, int port, 729 const unsigned char *addr, u16 vid, 730 struct dsa_db db) 731 { 732 struct net_device *bridge_dev = felix_classify_db(db); 733 struct dsa_port *dp = dsa_to_port(ds, port); 734 struct ocelot *ocelot = ds->priv; 735 736 if (IS_ERR(bridge_dev)) 737 return PTR_ERR(bridge_dev); 738 739 if (dsa_port_is_cpu(dp) && !bridge_dev && 740 dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) 741 return 0; 742 743 if (dsa_port_is_cpu(dp)) 744 port = PGID_CPU; 745 746 return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); 747 } 748 749 static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag, 750 const unsigned char *addr, u16 vid, 751 struct dsa_db db) 752 { 753 struct net_device *bridge_dev = felix_classify_db(db); 754 struct ocelot *ocelot = ds->priv; 755 756 if (IS_ERR(bridge_dev)) 757 return PTR_ERR(bridge_dev); 758 759 return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev); 760 } 761 762 static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag, 763 const unsigned char *addr, u16 vid, 764 struct dsa_db db) 765 { 766 struct net_device *bridge_dev = felix_classify_db(db); 767 struct ocelot *ocelot = ds->priv; 768 769 if (IS_ERR(bridge_dev)) 770 return PTR_ERR(bridge_dev); 771 772 return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev); 773 } 774 775 static int felix_mdb_add(struct dsa_switch *ds, int port, 776 const struct switchdev_obj_port_mdb *mdb, 777 struct dsa_db db) 778 { 779 struct net_device *bridge_dev = felix_classify_db(db); 780 struct ocelot *ocelot = ds->priv; 781 782 if (IS_ERR(bridge_dev)) 783 return PTR_ERR(bridge_dev); 784 785 if (dsa_is_cpu_port(ds, port) && !bridge_dev && 786 dsa_mdb_present_in_other_db(ds, port, mdb, db)) 787 return 0; 788 789 if (port == ocelot->npi) 790 port = ocelot->num_phys_ports; 791 792 return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); 793 } 794 795 static int felix_mdb_del(struct dsa_switch *ds, int port, 796 const 
struct switchdev_obj_port_mdb *mdb, 797 struct dsa_db db) 798 { 799 struct net_device *bridge_dev = felix_classify_db(db); 800 struct ocelot *ocelot = ds->priv; 801 802 if (IS_ERR(bridge_dev)) 803 return PTR_ERR(bridge_dev); 804 805 if (dsa_is_cpu_port(ds, port) && !bridge_dev && 806 dsa_mdb_present_in_other_db(ds, port, mdb, db)) 807 return 0; 808 809 if (port == ocelot->npi) 810 port = ocelot->num_phys_ports; 811 812 return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); 813 } 814 815 static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, 816 u8 state) 817 { 818 struct ocelot *ocelot = ds->priv; 819 820 return ocelot_bridge_stp_state_set(ocelot, port, state); 821 } 822 823 static int felix_pre_bridge_flags(struct dsa_switch *ds, int port, 824 struct switchdev_brport_flags val, 825 struct netlink_ext_ack *extack) 826 { 827 struct ocelot *ocelot = ds->priv; 828 829 return ocelot_port_pre_bridge_flags(ocelot, port, val); 830 } 831 832 static int felix_bridge_flags(struct dsa_switch *ds, int port, 833 struct switchdev_brport_flags val, 834 struct netlink_ext_ack *extack) 835 { 836 struct ocelot *ocelot = ds->priv; 837 838 if (port == ocelot->npi) 839 port = ocelot->num_phys_ports; 840 841 ocelot_port_bridge_flags(ocelot, port, val); 842 843 return 0; 844 } 845 846 static int felix_bridge_join(struct dsa_switch *ds, int port, 847 struct dsa_bridge bridge, bool *tx_fwd_offload, 848 struct netlink_ext_ack *extack) 849 { 850 struct ocelot *ocelot = ds->priv; 851 852 return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num, 853 extack); 854 } 855 856 static void felix_bridge_leave(struct dsa_switch *ds, int port, 857 struct dsa_bridge bridge) 858 { 859 struct ocelot *ocelot = ds->priv; 860 861 ocelot_port_bridge_leave(ocelot, port, bridge.dev); 862 } 863 864 static int felix_lag_join(struct dsa_switch *ds, int port, 865 struct dsa_lag lag, 866 struct netdev_lag_upper_info *info) 867 { 868 struct ocelot *ocelot = ds->priv; 869 870 return 
ocelot_port_lag_join(ocelot, port, lag.dev, info); 871 } 872 873 static int felix_lag_leave(struct dsa_switch *ds, int port, 874 struct dsa_lag lag) 875 { 876 struct ocelot *ocelot = ds->priv; 877 878 ocelot_port_lag_leave(ocelot, port, lag.dev); 879 880 return 0; 881 } 882 883 static int felix_lag_change(struct dsa_switch *ds, int port) 884 { 885 struct dsa_port *dp = dsa_to_port(ds, port); 886 struct ocelot *ocelot = ds->priv; 887 888 ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled); 889 890 return 0; 891 } 892 893 static int felix_vlan_prepare(struct dsa_switch *ds, int port, 894 const struct switchdev_obj_port_vlan *vlan, 895 struct netlink_ext_ack *extack) 896 { 897 struct ocelot *ocelot = ds->priv; 898 u16 flags = vlan->flags; 899 900 /* Ocelot switches copy frames as-is to the CPU, so the flags: 901 * egress-untagged or not, pvid or not, make no difference. This 902 * behavior is already better than what DSA just tries to approximate 903 * when it installs the VLAN with the same flags on the CPU port. 904 * Just accept any configuration, and don't let ocelot deny installing 905 * multiple native VLANs on the NPI port, because the switch doesn't 906 * look at the port tag settings towards the NPI interface anyway. 
907 */ 908 if (port == ocelot->npi) 909 return 0; 910 911 return ocelot_vlan_prepare(ocelot, port, vlan->vid, 912 flags & BRIDGE_VLAN_INFO_PVID, 913 flags & BRIDGE_VLAN_INFO_UNTAGGED, 914 extack); 915 } 916 917 static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, 918 struct netlink_ext_ack *extack) 919 { 920 struct ocelot *ocelot = ds->priv; 921 922 return ocelot_port_vlan_filtering(ocelot, port, enabled, extack); 923 } 924 925 static int felix_vlan_add(struct dsa_switch *ds, int port, 926 const struct switchdev_obj_port_vlan *vlan, 927 struct netlink_ext_ack *extack) 928 { 929 struct ocelot *ocelot = ds->priv; 930 u16 flags = vlan->flags; 931 int err; 932 933 err = felix_vlan_prepare(ds, port, vlan, extack); 934 if (err) 935 return err; 936 937 return ocelot_vlan_add(ocelot, port, vlan->vid, 938 flags & BRIDGE_VLAN_INFO_PVID, 939 flags & BRIDGE_VLAN_INFO_UNTAGGED); 940 } 941 942 static int felix_vlan_del(struct dsa_switch *ds, int port, 943 const struct switchdev_obj_port_vlan *vlan) 944 { 945 struct ocelot *ocelot = ds->priv; 946 947 return ocelot_vlan_del(ocelot, port, vlan->vid); 948 } 949 950 static void felix_phylink_get_caps(struct dsa_switch *ds, int port, 951 struct phylink_config *config) 952 { 953 struct ocelot *ocelot = ds->priv; 954 955 /* This driver does not make use of the speed, duplex, pause or the 956 * advertisement in its mac_config, so it is safe to mark this driver 957 * as non-legacy. 
958 */ 959 config->legacy_pre_march2020 = false; 960 961 __set_bit(ocelot->ports[port]->phy_mode, 962 config->supported_interfaces); 963 } 964 965 static void felix_phylink_validate(struct dsa_switch *ds, int port, 966 unsigned long *supported, 967 struct phylink_link_state *state) 968 { 969 struct ocelot *ocelot = ds->priv; 970 struct felix *felix = ocelot_to_felix(ocelot); 971 972 if (felix->info->phylink_validate) 973 felix->info->phylink_validate(ocelot, port, supported, state); 974 } 975 976 static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds, 977 int port, 978 phy_interface_t iface) 979 { 980 struct ocelot *ocelot = ds->priv; 981 struct felix *felix = ocelot_to_felix(ocelot); 982 struct phylink_pcs *pcs = NULL; 983 984 if (felix->pcs && felix->pcs[port]) 985 pcs = felix->pcs[port]; 986 987 return pcs; 988 } 989 990 static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, 991 unsigned int link_an_mode, 992 phy_interface_t interface) 993 { 994 struct ocelot *ocelot = ds->priv; 995 996 ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, 997 FELIX_MAC_QUIRKS); 998 } 999 1000 static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, 1001 unsigned int link_an_mode, 1002 phy_interface_t interface, 1003 struct phy_device *phydev, 1004 int speed, int duplex, 1005 bool tx_pause, bool rx_pause) 1006 { 1007 struct ocelot *ocelot = ds->priv; 1008 struct felix *felix = ocelot_to_felix(ocelot); 1009 1010 ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, 1011 interface, speed, duplex, tx_pause, rx_pause, 1012 FELIX_MAC_QUIRKS); 1013 1014 if (felix->info->port_sched_speed_set) 1015 felix->info->port_sched_speed_set(ocelot, port, speed); 1016 } 1017 1018 static void felix_port_qos_map_init(struct ocelot *ocelot, int port) 1019 { 1020 int i; 1021 1022 ocelot_rmw_gix(ocelot, 1023 ANA_PORT_QOS_CFG_QOS_PCP_ENA, 1024 ANA_PORT_QOS_CFG_QOS_PCP_ENA, 1025 ANA_PORT_QOS_CFG, 1026 port); 1027 1028 for (i = 
0; i < OCELOT_NUM_TC * 2; i++) { 1029 ocelot_rmw_ix(ocelot, 1030 (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) | 1031 ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i), 1032 ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL | 1033 ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M, 1034 ANA_PORT_PCP_DEI_MAP, 1035 port, i); 1036 } 1037 } 1038 1039 static void felix_get_strings(struct dsa_switch *ds, int port, 1040 u32 stringset, u8 *data) 1041 { 1042 struct ocelot *ocelot = ds->priv; 1043 1044 return ocelot_get_strings(ocelot, port, stringset, data); 1045 } 1046 1047 static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) 1048 { 1049 struct ocelot *ocelot = ds->priv; 1050 1051 ocelot_get_ethtool_stats(ocelot, port, data); 1052 } 1053 1054 static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset) 1055 { 1056 struct ocelot *ocelot = ds->priv; 1057 1058 return ocelot_get_sset_count(ocelot, port, sset); 1059 } 1060 1061 static int felix_get_ts_info(struct dsa_switch *ds, int port, 1062 struct ethtool_ts_info *info) 1063 { 1064 struct ocelot *ocelot = ds->priv; 1065 1066 return ocelot_get_ts_info(ocelot, port, info); 1067 } 1068 1069 static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { 1070 [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL, 1071 [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, 1072 [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, 1073 [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, 1074 [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, 1075 [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, 1076 }; 1077 1078 static int felix_validate_phy_mode(struct felix *felix, int port, 1079 phy_interface_t phy_mode) 1080 { 1081 u32 modes = felix->info->port_modes[port]; 1082 1083 if (felix_phy_match_table[phy_mode] & modes) 1084 return 0; 1085 return -EOPNOTSUPP; 1086 } 1087 1088 static int felix_parse_ports_node(struct felix *felix, 1089 struct device_node *ports_node, 1090 phy_interface_t 
*port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree "
				"(property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or "
				"phy-interface-type property for port %d\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		err = felix_validate_phy_mode(felix, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		/* NOTE(review): "reg" is taken from the DT without a range
		 * check before indexing port_phy_modes[] — presumably
		 * felix_validate_phy_mode()'s port_modes[] lookup bounds it
		 * in practice, but confirm against a malformed DT.
		 */
		port_phy_modes[port] = phy_mode;
	}

	return 0;
}

/* Locate the "ports" (or legacy "ethernet-ports") container node and parse
 * per-port PHY modes out of it. Releases the node reference when done.
 */
static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *switch_node;
	struct device_node *ports_node;
	int err;

	switch_node = dev->of_node;

	ports_node = of_get_child_by_name(switch_node, "ports");
	if (!ports_node)
		ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}

/* Allocate and populate the ocelot switch library structures from the
 * per-model felix_info: register maps, VCAP/policer layout, per-port
 * regmaps and the optional internal MDIO bus.
 */
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
	struct ocelot *ocelot = &felix->ocelot;
	phy_interface_t *port_phy_modes;
	struct resource res;
	int port, i, err;

	ocelot->num_phys_ports =
num_phys_ports;
	ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
				     sizeof(struct ocelot_port *), GFP_KERNEL);
	if (!ocelot->ports)
		return -ENOMEM;

	/* Copy the model-specific layout (register map, stats, VCAP and
	 * policer ranges, hardware ops) into the common ocelot structure.
	 */
	ocelot->map = felix->info->map;
	ocelot->stats_layout = felix->info->stats_layout;
	ocelot->num_mact_rows = felix->info->num_mact_rows;
	ocelot->vcap = felix->info->vcap;
	ocelot->vcap_pol.base = felix->info->vcap_pol_base;
	ocelot->vcap_pol.max = felix->info->vcap_pol_max;
	ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
	ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
	ocelot->ops = felix->info->ops;
	ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
	ocelot->devlink = felix->ds->devlink;

	/* Temporary array of per-port PHY modes; freed on every exit path
	 * below once ocelot_port->phy_mode has been populated.
	 */
	port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
				 GFP_KERNEL);
	if (!port_phy_modes)
		return -ENOMEM;

	err = felix_parse_dt(felix, port_phy_modes);
	if (err) {
		kfree(port_phy_modes);
		return err;
	}

	/* Map each named global register target, offset by the base address
	 * of the switch within its parent device (PCI BAR or MMIO window).
	 */
	for (i = 0; i < TARGET_MAX; i++) {
		struct regmap *target;

		if (!felix->info->target_io_res[i].name)
			continue;

		memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map device memory space\n");
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot->targets[i] = target;
	}

	err = ocelot_regfields_init(ocelot, felix->info->regfields);
	if (err) {
		dev_err(ocelot->dev, "failed to init reg fields map\n");
		kfree(port_phy_modes);
		return err;
	}

	/* Per-port setup: allocate the ocelot_port and map its register
	 * window, again relative to felix->switch_base.
	 */
	for (port = 0; port < num_phys_ports; port++) {
		struct ocelot_port *ocelot_port;
		struct regmap *target;

		ocelot_port =
devm_kzalloc(ocelot->dev,
					   sizeof(struct ocelot_port),
					   GFP_KERNEL);
		if (!ocelot_port) {
			dev_err(ocelot->dev,
				"failed to allocate port memory\n");
			kfree(port_phy_modes);
			return -ENOMEM;
		}

		memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
		res.flags = IORESOURCE_MEM;
		res.start += felix->switch_base;
		res.end += felix->switch_base;

		target = felix->info->init_regmap(ocelot, &res);
		if (IS_ERR(target)) {
			dev_err(ocelot->dev,
				"Failed to map memory space for port %d\n",
				port);
			kfree(port_phy_modes);
			return PTR_ERR(target);
		}

		ocelot_port->phy_mode = port_phy_modes[port];
		ocelot_port->ocelot = ocelot;
		ocelot_port->target = target;
		ocelot_port->index = port;
		ocelot->ports[port] = ocelot_port;
	}

	kfree(port_phy_modes);

	/* Optional model-specific internal MDIO bus (e.g. for PCS access). */
	if (felix->info->mdio_bus_alloc) {
		err = felix->info->mdio_bus_alloc(ocelot);
		if (err < 0)
			return err;
	}

	return 0;
}

/* Remove @skb's TX timestamp clone from the port's tx_skbs queue when the
 * original frame could not be injected, so the clone does not linger waiting
 * for a timestamp that will never arrive. Holds the queue lock while walking.
 */
static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
					   struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
	struct sk_buff *skb_match = NULL, *skb_tmp;
	unsigned long flags;

	if (!clone)
		return;

	spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);

	skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
		if (skb != clone)
			continue;
		__skb_unlink(skb, &ocelot_port->tx_skbs);
		skb_match = skb;
		break;
	}

	spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);

	WARN_ONCE(!skb_match,
		  "Could not find skb clone in TX timestamping list\n");
}

#define work_to_xmit_work(w) \
		container_of((w), struct felix_deferred_xmit_work, work)

/* kthread worker callback: inject a deferred skb through the CPU port
 * module's manual injection registers (used by the ocelot-8021q tagger).
 */
static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct sk_buff *skb = xmit_work->skb;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	struct ocelot *ocelot = ds->priv;
	int port = xmit_work->dp->index;
	int retries = 10;

	/* Busy-wait (bounded) for room in injection group 0. */
	do {
		if (ocelot_can_inject(ocelot, 0))
			break;

		cpu_relax();
	} while (--retries);

	if (!retries) {
		dev_err(ocelot->dev, "port %d failed to inject skb\n",
			port);
		/* Drop the pending TX timestamp clone along with the skb. */
		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
		kfree_skb(skb);
		return;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

	consume_skb(skb);
	kfree(xmit_work);
}

/* Hook the tagger-side callbacks for the protocols that need them. Only the
 * ocelot-8021q tagger uses a deferred xmit path; ocelot and seville taggers
 * need no connection. Unknown protocols are rejected.
 */
static int felix_connect_tag_protocol(struct dsa_switch *ds,
				      enum dsa_tag_protocol proto)
{
	struct ocelot_8021q_tagger_data *tagger_data;

	switch (proto) {
	case DSA_TAG_PROTO_OCELOT_8021Q:
		tagger_data = ocelot_8021q_tagger_data(ds);
		tagger_data->xmit_work_fn = felix_port_deferred_xmit;
		return 0;
	case DSA_TAG_PROTO_OCELOT:
	case DSA_TAG_PROTO_SEVILLE:
		return 0;
	default:
		return -EPROTONOSUPPORT;
	}
}

/* Hardware initialization done here so that we can allocate structures with
 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
 * us to allocate structures twice (leak memory) and map PCI memory twice
 * (which will not work).
 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;
	int err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	/* PTP is optional: on failure, disable timestamping but continue
	 * bringing the switch up.
	 */
	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	dsa_switch_for_each_available_port(dp, ds) {
		ocelot_init_port(ocelot, dp->index);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, dp->index);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	/* The initial tag protocol is NPI which won't fail during initial
	 * setup, there's no real point in checking for errors.
	 */
	felix_change_tag_protocol(ds, felix->tag_proto);

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;
	ds->fdb_isolation = true;
	ds->max_num_bridges = ds->num_ports;

	return 0;

out_deinit_ports:
	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}

/* Undo felix_setup() in reverse order: tagger teardown, per-port deinit,
 * devlink shared buffers, timestamping, library state, and the MDIO bus.
 */
static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_port *dp;

	if (felix->tag_proto_ops)
		felix->tag_proto_ops->teardown(ds);

	dsa_switch_for_each_available_port(dp, ds)
		ocelot_deinit_port(ocelot, dp->index);

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}

/* SIOCGHWTSTAMP: report the port's hardware timestamping configuration. */
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_hwstamp_get(ocelot, port, ifr);
}

/* SIOCSHWTSTAMP: apply the new timestamping configuration, then refresh the
 * PTP trap destinations, which depend on the tagging protocol in use.
 */
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
			      struct ifreq *ifr)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	bool using_tag_8021q;
	int err;

	err = ocelot_hwstamp_set(ocelot, port, ifr);
	if (err)
		return err;

	using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;

	return felix_update_trapping_destinations(ds, using_tag_8021q);
}

/* Poll the CPU port module for trapped PTP frames on switches without an
 * extraction IRQ, and inject event frames into the stack. Returns true if
 * this path is active (ocelot-8021q tagging + quirk), false otherwise.
 */
static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	int err = 0, grp = 0;

	if
 (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
		return false;

	if (!felix->info->quirk_no_xtr_irq)
		return false;

	/* Drain extraction group 0 while data is present. */
	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
		struct sk_buff *skb;
		unsigned int type;

		err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
		if (err)
			goto out;

		/* We trap to the CPU port module all PTP frames, but
		 * felix_rxtstamp() only gets called for event frames.
		 * So we need to avoid sending duplicate general
		 * message frames by running a second BPF classifier
		 * here and dropping those.
		 */
		__skb_push(skb, ETH_HLEN);

		type = ptp_classify_raw(skb);

		__skb_pull(skb, ETH_HLEN);

		if (type == PTP_CLASS_NONE) {
			kfree_skb(skb);
			continue;
		}

		netif_rx(skb);
	}

out:
	if (err < 0) {
		dev_err_ratelimited(ocelot->dev,
				    "Error during packet extraction: %pe\n",
				    ERR_PTR(err));
		ocelot_drain_cpu_queue(ocelot, 0);
	}

	return true;
}

/* DSA RX timestamp hook. Returns true when DSA should not deliver @skb
 * (we consumed or replaced it), false when delivery should proceed with
 * the hardware timestamp filled in.
 */
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	struct timespec64 ts;
	u32 tstamp_hi;
	u64 tstamp;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot)) {
		kfree_skb(skb);
		return true;
	}

	/* Reconstruct the full 64-bit timestamp: the hardware only delivers
	 * the low 32 bits, so combine with the current PTP time, borrowing
	 * from the high word if the counter wrapped since the frame arrived.
	 */
	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}

/* DSA TX timestamp hook: request a timestamp for @skb and remember the
 * clone that will carry it back, if one was created.
 */
static void felix_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb)
{
	struct ocelot *ocelot = ds->priv;
	struct sk_buff *clone = NULL;

	if (!ocelot->ptp)
		return;

	if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
		dev_err_ratelimited(ds->dev,
				    "port %d delivering skb without TX timestamp\n",
				    port);
		return;
	}

	if (clone)
		OCELOT_SKB_CB(skb)->clone = clone;
}

/* Apply a new MTU by programming the port's maximum frame length. */
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_set_maxlen(ocelot, port, new_mtu);

	return 0;
}

/* Largest MTU the hardware supports on this port. */
static int felix_get_max_mtu(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_get_max_mtu(ocelot, port);
}

/* tc flower filter offload: install the rule, then refresh PTP trap
 * destinations since new traps may have been created by the rule.
 */
static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	bool using_tag_8021q;
	int err;

	err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
	if (err)
		return err;

	using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;

	return felix_update_trapping_destinations(ds, using_tag_8021q);
}

/* tc flower filter removal: passthrough to the ocelot library. */
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
}

/* tc flower statistics readout: passthrough to the ocelot library. */
static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
}

/* tc matchall policer: convert bytes/sec to the kbps-based rate the
 * ocelot policer expects (div by 1000, times 8 bits).
 */
static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}

static void felix_port_policer_del(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_policer_del(ocelot, port);
}

/* tc matchall mirroring: mirror this port's traffic to a local port. */
static int felix_port_mirror_add(struct dsa_switch *ds, int port,
				 struct dsa_mall_mirror_tc_entry *mirror,
				 bool ingress, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
				      ingress, extack);
}

static void felix_port_mirror_del(struct dsa_switch *ds, int port,
				  struct dsa_mall_mirror_tc_entry *mirror)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_port_mirror_del(ocelot, port, mirror->ingress);
}

/* Qdisc offload (e.g. taprio/CBS): handled by the model-specific hook
 * when one exists, otherwise unsupported.
 */
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
			       enum tc_setup_type type,
			       void *type_data)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);

	if (felix->info->port_setup_tc)
		return felix->info->port_setup_tc(ds, port, type, type_data);
	else
		return -EOPNOTSUPP;
}

/* devlink shared-buffer hooks below are thin passthroughs to the ocelot
 * switch library.
 */
static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}

static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
				  threshold_type, extack);
}

static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

static int felix_sb_occ_snapshot(struct dsa_switch *ds, 1729 unsigned int sb_index) 1730 { 1731 struct ocelot *ocelot = ds->priv; 1732 1733 return ocelot_sb_occ_snapshot(ocelot, sb_index); 1734 } 1735 1736 static int felix_sb_occ_max_clear(struct dsa_switch *ds, 1737 unsigned int sb_index) 1738 { 1739 struct ocelot *ocelot = ds->priv; 1740 1741 return ocelot_sb_occ_max_clear(ocelot, sb_index); 1742 } 1743 1744 static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port, 1745 unsigned int sb_index, u16 pool_index, 1746 u32 *p_cur, u32 *p_max) 1747 { 1748 struct ocelot *ocelot = ds->priv; 1749 1750 return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index, 1751 p_cur, p_max); 1752 } 1753 1754 static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port, 1755 unsigned int sb_index, u16 tc_index, 1756 enum devlink_sb_pool_type pool_type, 1757 u32 *p_cur, u32 *p_max) 1758 { 1759 struct ocelot *ocelot = ds->priv; 1760 1761 return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index, 1762 pool_type, p_cur, p_max); 1763 } 1764 1765 static int felix_mrp_add(struct dsa_switch *ds, int port, 1766 const struct switchdev_obj_mrp *mrp) 1767 { 1768 struct ocelot *ocelot = ds->priv; 1769 1770 return ocelot_mrp_add(ocelot, port, mrp); 1771 } 1772 1773 static int felix_mrp_del(struct dsa_switch *ds, int port, 1774 const struct switchdev_obj_mrp *mrp) 1775 { 1776 struct ocelot *ocelot = ds->priv; 1777 1778 return ocelot_mrp_add(ocelot, port, mrp); 1779 } 1780 1781 static int 1782 felix_mrp_add_ring_role(struct dsa_switch *ds, int port, 1783 const struct switchdev_obj_ring_role_mrp *mrp) 1784 { 1785 struct ocelot *ocelot = ds->priv; 1786 1787 return ocelot_mrp_add_ring_role(ocelot, port, mrp); 1788 } 1789 1790 static int 1791 felix_mrp_del_ring_role(struct dsa_switch *ds, int port, 1792 const struct switchdev_obj_ring_role_mrp *mrp) 1793 { 1794 struct ocelot *ocelot = ds->priv; 1795 1796 return ocelot_mrp_del_ring_role(ocelot, port, mrp); 
}

/* dcbnl default-priority and DSCP-to-priority hooks: thin passthroughs to
 * the ocelot switch library.
 */
static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_default_prio(ocelot, port);
}

static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
				       u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_set_default_prio(ocelot, port, prio);
}

static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_get_dscp_prio(ocelot, port, dscp);
}

static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
				    u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
}

static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
				    u8 prio)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
}

/* DSA switch operations shared by all Felix-family drivers. */
const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.connect_tag_protocol		= felix_connect_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_get_caps		= felix_phylink_get_caps,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_select_pcs		= felix_phylink_mac_select_pcs,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_fast_age			= felix_port_fast_age,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			=
felix_fdb_del,
	.lag_fdb_add			= felix_lag_fdb_add,
	.lag_fdb_del			= felix_lag_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.port_mirror_add		= felix_port_mirror_add,
	.port_mirror_del		= felix_port_mirror_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
	.tag_8021q_vlan_add		= felix_tag_8021q_vlan_add,
	.tag_8021q_vlan_del		= felix_tag_8021q_vlan_del,
	.port_get_default_prio		= felix_port_get_default_prio,
	.port_set_default_prio		= felix_port_set_default_prio,
	.port_get_dscp_prio		= felix_port_get_dscp_prio,
	.port_add_dscp_prio		= felix_port_add_dscp_prio,
	.port_del_dscp_prio		= felix_port_del_dscp_prio,
	.port_set_host_flood		= felix_port_set_host_flood,
};

/* Return the DSA user netdev behind a switch port, or NULL for non-user
 * (CPU/DSA/unused) ports.
 */
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

/* Map a netdev back to its switch port index, or -EINVAL if the netdev is
 * not a DSA port.
 */
int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}