1 // SPDX-License-Identifier: (GPL-2.0 OR MIT) 2 /* 3 * Microsemi Ocelot Switch driver 4 * 5 * Copyright (c) 2017 Microsemi Corporation 6 */ 7 #include <linux/dsa/ocelot.h> 8 #include <linux/if_bridge.h> 9 #include <linux/ptp_classify.h> 10 #include <soc/mscc/ocelot_vcap.h> 11 #include "ocelot.h" 12 #include "ocelot_vcap.h" 13 14 #define TABLE_UPDATE_SLEEP_US 10 15 #define TABLE_UPDATE_TIMEOUT_US 100000 16 17 struct ocelot_mact_entry { 18 u8 mac[ETH_ALEN]; 19 u16 vid; 20 enum macaccess_entry_type type; 21 }; 22 23 /* Caller must hold &ocelot->mact_lock */ 24 static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot) 25 { 26 return ocelot_read(ocelot, ANA_TABLES_MACACCESS); 27 } 28 29 /* Caller must hold &ocelot->mact_lock */ 30 static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) 31 { 32 u32 val; 33 34 return readx_poll_timeout(ocelot_mact_read_macaccess, 35 ocelot, val, 36 (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) == 37 MACACCESS_CMD_IDLE, 38 TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); 39 } 40 41 /* Caller must hold &ocelot->mact_lock */ 42 static void ocelot_mact_select(struct ocelot *ocelot, 43 const unsigned char mac[ETH_ALEN], 44 unsigned int vid) 45 { 46 u32 macl = 0, mach = 0; 47 48 /* Set the MAC address to handle and the vlan associated in a format 49 * understood by the hardware. 50 */ 51 mach |= vid << 16; 52 mach |= mac[0] << 8; 53 mach |= mac[1] << 0; 54 macl |= mac[2] << 24; 55 macl |= mac[3] << 16; 56 macl |= mac[4] << 8; 57 macl |= mac[5] << 0; 58 59 ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA); 60 ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA); 61 62 } 63 64 int ocelot_mact_learn(struct ocelot *ocelot, int port, 65 const unsigned char mac[ETH_ALEN], 66 unsigned int vid, enum macaccess_entry_type type) 67 { 68 u32 cmd = ANA_TABLES_MACACCESS_VALID | 69 ANA_TABLES_MACACCESS_DEST_IDX(port) | 70 ANA_TABLES_MACACCESS_ENTRYTYPE(type) | 71 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN); 72 unsigned int mc_ports; 73 int err; 74 75 /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */ 76 if (type == ENTRYTYPE_MACv4) 77 mc_ports = (mac[1] << 8) | mac[2]; 78 else if (type == ENTRYTYPE_MACv6) 79 mc_ports = (mac[0] << 8) | mac[1]; 80 else 81 mc_ports = 0; 82 83 if (mc_ports & BIT(ocelot->num_phys_ports)) 84 cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY; 85 86 mutex_lock(&ocelot->mact_lock); 87 88 ocelot_mact_select(ocelot, mac, vid); 89 90 /* Issue a write command */ 91 ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS); 92 93 err = ocelot_mact_wait_for_completion(ocelot); 94 95 mutex_unlock(&ocelot->mact_lock); 96 97 return err; 98 } 99 EXPORT_SYMBOL(ocelot_mact_learn); 100 101 int ocelot_mact_forget(struct ocelot *ocelot, 102 const unsigned char mac[ETH_ALEN], unsigned int vid) 103 { 104 int err; 105 106 mutex_lock(&ocelot->mact_lock); 107 108 ocelot_mact_select(ocelot, mac, vid); 109 110 /* Issue a forget command */ 111 ocelot_write(ocelot, 112 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET), 113 ANA_TABLES_MACACCESS); 114 115 err = ocelot_mact_wait_for_completion(ocelot); 116 117 mutex_unlock(&ocelot->mact_lock); 118 119 return err; 120 } 121 EXPORT_SYMBOL(ocelot_mact_forget); 122 123 static void ocelot_mact_init(struct ocelot *ocelot) 124 { 125 /* Configure the learning mode entries attributes: 126 * - Do not copy the frame to the CPU extraction queues. 127 * - Use the vlan and mac_cpoy for dmac lookup. 128 */ 129 ocelot_rmw(ocelot, 0, 130 ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS 131 | ANA_AGENCTRL_LEARN_FWD_KILL 132 | ANA_AGENCTRL_LEARN_IGNORE_VLAN, 133 ANA_AGENCTRL); 134 135 /* Clear the MAC table. We are not concurrent with anyone, so 136 * holding &ocelot->mact_lock is pointless. 137 */ 138 ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); 139 } 140 141 static void ocelot_vcap_enable(struct ocelot *ocelot, int port) 142 { 143 ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA | 144 ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa), 145 ANA_PORT_VCAP_S2_CFG, port); 146 147 ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA, 148 ANA_PORT_VCAP_CFG, port); 149 150 ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN, 151 REW_PORT_CFG_ES0_EN, 152 REW_PORT_CFG, port); 153 } 154 155 static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot) 156 { 157 return ocelot_read(ocelot, ANA_TABLES_VLANACCESS); 158 } 159 160 static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) 161 { 162 u32 val; 163 164 return readx_poll_timeout(ocelot_vlant_read_vlanaccess, 165 ocelot, 166 val, 167 (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) == 168 ANA_TABLES_VLANACCESS_CMD_IDLE, 169 TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); 170 } 171 172 static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) 173 { 174 /* Select the VID to configure */ 175 ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid), 176 ANA_TABLES_VLANTIDX); 177 /* Set the vlan port members mask and issue a write command */ 178 ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) | 179 ANA_TABLES_VLANACCESS_CMD_WRITE, 180 ANA_TABLES_VLANACCESS); 181 182 return ocelot_vlant_wait_for_completion(ocelot); 183 } 184 185 static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port) 186 { 187 struct ocelot_bridge_vlan *vlan; 188 int num_untagged = 0; 189 190 list_for_each_entry(vlan, &ocelot->vlans, list) { 191 if (!(vlan->portmask & BIT(port))) 192 continue; 193 194 if (vlan->untagged & BIT(port)) 195 num_untagged++; 196 } 197 198 return num_untagged; 199 } 200 201 static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port) 202 { 203 struct ocelot_bridge_vlan *vlan; 204 int num_tagged = 0; 205 206 list_for_each_entry(vlan, &ocelot->vlans, list) { 207 if (!(vlan->portmask & BIT(port))) 208 continue; 209 210 if (!(vlan->untagged & BIT(port))) 211 num_tagged++; 212 } 213 214 return num_tagged; 215 } 216 217 /* We use native VLAN when we have to mix egress-tagged VLANs with exactly 218 * _one_ egress-untagged VLAN (_the_ native VLAN) 219 */ 220 static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port) 221 { 222 return ocelot_port_num_tagged_vlans(ocelot, port) && 223 ocelot_port_num_untagged_vlans(ocelot, port) == 1; 224 } 225 226 static struct ocelot_bridge_vlan * 227 ocelot_port_find_native_vlan(struct ocelot *ocelot, int port) 228 { 229 struct ocelot_bridge_vlan *vlan; 230 231 list_for_each_entry(vlan, &ocelot->vlans, list) 232 if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port)) 233 return vlan; 234 235 return NULL; 236 } 237 238 /* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable, 239 * REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness 240 * state of the port. 241 */ 242 static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port) 243 { 244 struct ocelot_port *ocelot_port = ocelot->ports[port]; 245 enum ocelot_port_tag_config tag_cfg; 246 bool uses_native_vlan = false; 247 248 if (ocelot_port->vlan_aware) { 249 uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port); 250 251 if (uses_native_vlan) 252 tag_cfg = OCELOT_PORT_TAG_NATIVE; 253 else if (ocelot_port_num_untagged_vlans(ocelot, port)) 254 tag_cfg = OCELOT_PORT_TAG_DISABLED; 255 else 256 tag_cfg = OCELOT_PORT_TAG_TRUNK; 257 } else { 258 tag_cfg = OCELOT_PORT_TAG_DISABLED; 259 } 260 261 ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg), 262 REW_TAG_CFG_TAG_CFG_M, 263 REW_TAG_CFG, port); 264 265 if (uses_native_vlan) { 266 struct ocelot_bridge_vlan *native_vlan; 267 268 /* Not having a native VLAN is impossible, because 269 * ocelot_port_num_untagged_vlans has returned 1. 270 * So there is no use in checking for NULL here. 271 */ 272 native_vlan = ocelot_port_find_native_vlan(ocelot, port); 273 274 ocelot_rmw_gix(ocelot, 275 REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid), 276 REW_PORT_VLAN_CFG_PORT_VID_M, 277 REW_PORT_VLAN_CFG, port); 278 } 279 } 280 281 /* Default vlan to clasify for untagged frames (may be zero) */ 282 static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, 283 const struct ocelot_bridge_vlan *pvid_vlan) 284 { 285 struct ocelot_port *ocelot_port = ocelot->ports[port]; 286 u16 pvid = OCELOT_VLAN_UNAWARE_PVID; 287 u32 val = 0; 288 289 ocelot_port->pvid_vlan = pvid_vlan; 290 291 if (ocelot_port->vlan_aware && pvid_vlan) 292 pvid = pvid_vlan->vid; 293 294 ocelot_rmw_gix(ocelot, 295 ANA_PORT_VLAN_CFG_VLAN_VID(pvid), 296 ANA_PORT_VLAN_CFG_VLAN_VID_M, 297 ANA_PORT_VLAN_CFG, port); 298 299 /* If there's no pvid, we should drop not only untagged traffic (which 300 * happens automatically), but also 802.1p traffic which gets 301 * classified to VLAN 0, but that is always in our RX filter, so it 302 * would get accepted were it not for this setting. 303 */ 304 if (!pvid_vlan && ocelot_port->vlan_aware) 305 val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | 306 ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; 307 308 ocelot_rmw_gix(ocelot, val, 309 ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | 310 ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, 311 ANA_PORT_DROP_CFG, port); 312 } 313 314 static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot, 315 u16 vid) 316 { 317 struct ocelot_bridge_vlan *vlan; 318 319 list_for_each_entry(vlan, &ocelot->vlans, list) 320 if (vlan->vid == vid) 321 return vlan; 322 323 return NULL; 324 } 325 326 static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid, 327 bool untagged) 328 { 329 struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); 330 unsigned long portmask; 331 int err; 332 333 if (vlan) { 334 portmask = vlan->portmask | BIT(port); 335 336 err = ocelot_vlant_set_mask(ocelot, vid, portmask); 337 if (err) 338 return err; 339 340 vlan->portmask = portmask; 341 /* Bridge VLANs can be overwritten with a different 342 * egress-tagging setting, so make sure to override an untagged 343 * with a tagged VID if that's going on. 344 */ 345 if (untagged) 346 vlan->untagged |= BIT(port); 347 else 348 vlan->untagged &= ~BIT(port); 349 350 return 0; 351 } 352 353 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 354 if (!vlan) 355 return -ENOMEM; 356 357 portmask = BIT(port); 358 359 err = ocelot_vlant_set_mask(ocelot, vid, portmask); 360 if (err) { 361 kfree(vlan); 362 return err; 363 } 364 365 vlan->vid = vid; 366 vlan->portmask = portmask; 367 if (untagged) 368 vlan->untagged = BIT(port); 369 INIT_LIST_HEAD(&vlan->list); 370 list_add_tail(&vlan->list, &ocelot->vlans); 371 372 return 0; 373 } 374 375 static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) 376 { 377 struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); 378 unsigned long portmask; 379 int err; 380 381 if (!vlan) 382 return 0; 383 384 portmask = vlan->portmask & ~BIT(port); 385 386 err = ocelot_vlant_set_mask(ocelot, vid, portmask); 387 if (err) 388 return err; 389 390 vlan->portmask = portmask; 391 if (vlan->portmask) 392 return 0; 393 394 list_del(&vlan->list); 395 kfree(vlan); 396 397 return 0; 398 } 399 400 int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, 401 bool vlan_aware, struct netlink_ext_ack *extack) 402 { 403 struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; 404 struct ocelot_port *ocelot_port = ocelot->ports[port]; 405 struct ocelot_vcap_filter *filter; 406 u32 val; 407 408 list_for_each_entry(filter, &block->rules, list) { 409 if (filter->ingress_port_mask & BIT(port) && 410 filter->action.vid_replace_ena) { 411 NL_SET_ERR_MSG_MOD(extack, 412 "Cannot change VLAN state with vlan modify rules active"); 413 return -EBUSY; 414 } 415 } 416 417 ocelot_port->vlan_aware = vlan_aware; 418 419 if (vlan_aware) 420 val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | 421 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); 422 else 423 val = 0; 424 ocelot_rmw_gix(ocelot, val, 425 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | 426 ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, 427 ANA_PORT_VLAN_CFG, port); 428 429 ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); 430 ocelot_port_manage_port_tag(ocelot, port); 431 432 return 0; 433 } 434 EXPORT_SYMBOL(ocelot_port_vlan_filtering); 435 436 int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, 437 bool untagged, struct netlink_ext_ack *extack) 438 { 439 if (untagged) { 440 /* We are adding an egress-tagged VLAN */ 441 if (ocelot_port_uses_native_vlan(ocelot, port)) { 442 NL_SET_ERR_MSG_MOD(extack, 443 "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN"); 444 return -EBUSY; 445 } 446 } else { 447 /* We are adding an egress-tagged VLAN */ 448 if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) { 449 NL_SET_ERR_MSG_MOD(extack, 450 "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs"); 451 return -EBUSY; 452 } 453 } 454 455 return 0; 456 } 457 EXPORT_SYMBOL(ocelot_vlan_prepare); 458 459 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, 460 bool untagged) 461 { 462 int err; 463 464 err = ocelot_vlan_member_add(ocelot, port, vid, untagged); 465 if (err) 466 return err; 467 468 /* Default ingress vlan classification */ 469 if (pvid) 470 ocelot_port_set_pvid(ocelot, port, 471 ocelot_bridge_vlan_find(ocelot, vid)); 472 473 /* Untagged egress vlan clasification */ 474 ocelot_port_manage_port_tag(ocelot, port); 475 476 return 0; 477 } 478 EXPORT_SYMBOL(ocelot_vlan_add); 479 480 int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) 481 { 482 struct ocelot_port *ocelot_port = ocelot->ports[port]; 483 int err; 484 485 err = ocelot_vlan_member_del(ocelot, port, vid); 486 if (err) 487 return err; 488 489 /* Ingress */ 490 if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) 491 ocelot_port_set_pvid(ocelot, port, NULL); 492 493 /* Egress */ 494 ocelot_port_manage_port_tag(ocelot, port); 495 496 return 0; 497 } 498 EXPORT_SYMBOL(ocelot_vlan_del); 499 500 static void ocelot_vlan_init(struct ocelot *ocelot) 501 { 502 unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); 503 u16 port, vid; 504 505 /* Clear VLAN table, by default all ports are members of all VLANs */ 506 ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, 507 ANA_TABLES_VLANACCESS); 508 ocelot_vlant_wait_for_completion(ocelot); 509 510 /* Configure the port VLAN memberships */ 511 for (vid = 1; vid < VLAN_N_VID; vid++) 512 ocelot_vlant_set_mask(ocelot, vid, 0); 513 514 /* Because VLAN filtering is enabled, we need VID 0 to get untagged 515 * traffic. It is added automatically if 8021q module is loaded, but 516 * we can't rely on it since module may be not loaded. 517 */ 518 ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports); 519 520 /* Set vlan ingress filter mask to all ports but the CPU port by 521 * default. 522 */ 523 ocelot_write(ocelot, all_ports, ANA_VLANMASK); 524 525 for (port = 0; port < ocelot->num_phys_ports; port++) { 526 ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); 527 ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); 528 } 529 } 530 531 static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) 532 { 533 return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); 534 } 535 536 static int ocelot_port_flush(struct ocelot *ocelot, int port) 537 { 538 unsigned int pause_ena; 539 int err, val; 540 541 /* Disable dequeuing from the egress queues */ 542 ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS, 543 QSYS_PORT_MODE_DEQUEUE_DIS, 544 QSYS_PORT_MODE, port); 545 546 /* Disable flow control */ 547 ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena); 548 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); 549 550 /* Disable priority flow control */ 551 ocelot_fields_write(ocelot, port, 552 QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0); 553 554 /* Wait at least the time it takes to receive a frame of maximum length 555 * at the port. 556 * Worst-case delays for 10 kilobyte jumbo frames are: 557 * 8 ms on a 10M port 558 * 800 μs on a 100M port 559 * 80 μs on a 1G port 560 * 32 μs on a 2.5G port 561 */ 562 usleep_range(8000, 10000); 563 564 /* Disable half duplex backpressure. */ 565 ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE, 566 SYS_FRONT_PORT_MODE, port); 567 568 /* Flush the queues associated with the port. */ 569 ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA, 570 REW_PORT_CFG, port); 571 572 /* Enable dequeuing from the egress queues. */ 573 ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE, 574 port); 575 576 /* Wait until flushing is complete. */ 577 err = read_poll_timeout(ocelot_read_eq_avail, val, !val, 578 100, 2000000, false, ocelot, port); 579 580 /* Clear flushing again. */ 581 ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port); 582 583 /* Re-enable flow control */ 584 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena); 585 586 return err; 587 } 588 589 void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, 590 unsigned int link_an_mode, 591 phy_interface_t interface, 592 unsigned long quirks) 593 { 594 struct ocelot_port *ocelot_port = ocelot->ports[port]; 595 int err; 596 597 ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, 598 DEV_MAC_ENA_CFG); 599 600 ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); 601 602 err = ocelot_port_flush(ocelot, port); 603 if (err) 604 dev_err(ocelot->dev, "failed to flush port %d: %d\n", 605 port, err); 606 607 /* Put the port in reset. */ 608 if (interface != PHY_INTERFACE_MODE_QSGMII || 609 !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP)) 610 ocelot_port_rmwl(ocelot_port, 611 DEV_CLOCK_CFG_MAC_TX_RST | 612 DEV_CLOCK_CFG_MAC_RX_RST, 613 DEV_CLOCK_CFG_MAC_TX_RST | 614 DEV_CLOCK_CFG_MAC_RX_RST, 615 DEV_CLOCK_CFG); 616 } 617 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down); 618 619 void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, 620 struct phy_device *phydev, 621 unsigned int link_an_mode, 622 phy_interface_t interface, 623 int speed, int duplex, 624 bool tx_pause, bool rx_pause, 625 unsigned long quirks) 626 { 627 struct ocelot_port *ocelot_port = ocelot->ports[port]; 628 int mac_speed, mode = 0; 629 u32 mac_fc_cfg; 630 631 /* The MAC might be integrated in systems where the MAC speed is fixed 632 * and it's the PCS who is performing the rate adaptation, so we have 633 * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG 634 * (which is also its default value). 635 */ 636 if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) || 637 speed == SPEED_1000) { 638 mac_speed = OCELOT_SPEED_1000; 639 mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; 640 } else if (speed == SPEED_2500) { 641 mac_speed = OCELOT_SPEED_2500; 642 mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; 643 } else if (speed == SPEED_100) { 644 mac_speed = OCELOT_SPEED_100; 645 } else { 646 mac_speed = OCELOT_SPEED_10; 647 } 648 649 if (duplex == DUPLEX_FULL) 650 mode |= DEV_MAC_MODE_CFG_FDX_ENA; 651 652 ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG); 653 654 /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and 655 * PORT_RST bits in DEV_CLOCK_CFG. 656 */ 657 ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed), 658 DEV_CLOCK_CFG); 659 660 switch (speed) { 661 case SPEED_10: 662 mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10); 663 break; 664 case SPEED_100: 665 mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100); 666 break; 667 case SPEED_1000: 668 case SPEED_2500: 669 mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000); 670 break; 671 default: 672 dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", 673 port, speed); 674 return; 675 } 676 677 /* Handle RX pause in all cases, with 2500base-X this is used for rate 678 * adaptation. 679 */ 680 mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; 681 682 if (tx_pause) 683 mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | 684 SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | 685 SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | 686 SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; 687 688 /* Flow control. Link speed is only used here to evaluate the time 689 * specification in incoming pause frames. 690 */ 691 ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); 692 693 ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); 694 695 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); 696 697 /* Undo the effects of ocelot_phylink_mac_link_down: 698 * enable MAC module 699 */ 700 ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | 701 DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); 702 703 /* Core: Enable port for frame transfer */ 704 ocelot_fields_write(ocelot, port, 705 QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); 706 } 707 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); 708 709 static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, 710 struct sk_buff *clone) 711 { 712 struct ocelot_port *ocelot_port = ocelot->ports[port]; 713 unsigned long flags; 714 715 spin_lock_irqsave(&ocelot->ts_id_lock, flags); 716 717 if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || 718 ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { 719 spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); 720 return -EBUSY; 721 } 722 723 skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; 724 /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ 725 OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; 726 727 ocelot_port->ts_id++; 728 if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) 729 ocelot_port->ts_id = 0; 730 731 ocelot_port->ptp_skbs_in_flight++; 732 ocelot->ptp_skbs_in_flight++; 733 734 skb_queue_tail(&ocelot_port->tx_skbs, clone); 735 736 spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); 737 738 return 0; 739 } 740 741 static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb, 742 unsigned int ptp_class) 743 { 744 struct ptp_header *hdr; 745 u8 msgtype, twostep; 746 747 hdr = ptp_parse_header(skb, ptp_class); 748 if (!hdr) 749 return false; 750 751 msgtype = ptp_get_msgtype(hdr, ptp_class); 752 twostep = hdr->flag_field[0] & 0x2; 753 754 if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) 755 return true; 756 757 return false; 758 } 759 760 int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, 761 struct sk_buff *skb, 762 struct sk_buff **clone) 763 { 764 struct ocelot_port *ocelot_port = ocelot->ports[port]; 765 u8 ptp_cmd = ocelot_port->ptp_cmd; 766 unsigned int ptp_class; 767 int err; 768 769 /* Don't do anything if PTP timestamping not enabled */ 770 if (!ptp_cmd) 771 return 0; 772 773 ptp_class = ptp_classify_raw(skb); 774 if (ptp_class == PTP_CLASS_NONE) 775 return -EINVAL; 776 777 /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ 778 if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { 779 if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) { 780 OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; 781 return 0; 782 } 783 784 /* Fall back to two-step timestamping */ 785 ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; 786 } 787 788 if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { 789 *clone = skb_clone_sk(skb); 790 if (!(*clone)) 791 return -ENOMEM; 792 793 err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); 794 if (err) 795 return err; 796 797 OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; 798 OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; 799 } 800 801 return 0; 802 } 803 EXPORT_SYMBOL(ocelot_port_txtstamp_request); 804 805 static void ocelot_get_hwtimestamp(struct ocelot *ocelot, 806 struct timespec64 *ts) 807 { 808 unsigned long flags; 809 u32 val; 810 811 spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); 812 813 /* Read current PTP time to get seconds */ 814 val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); 815 816 val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); 817 val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE); 818 ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN); 819 ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); 820 821 /* Read packet HW timestamp from FIFO */ 822 val = ocelot_read(ocelot, SYS_PTP_TXSTAMP); 823 ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val); 824 825 /* Sec has incremented since the ts was registered */ 826 if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC)) 827 ts->tv_sec--; 828 829 spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); 830 } 831 832 static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) 833 { 834 struct ptp_header *hdr; 835 836 hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); 837 if (WARN_ON(!hdr)) 838 return false; 839 840 return seqid == ntohs(hdr->sequence_id); 841 } 842 843 void ocelot_get_txtstamp(struct ocelot *ocelot) 844 { 845 int budget = OCELOT_PTP_QUEUE_SZ; 846 847 while (budget--) { 848 struct sk_buff *skb, *skb_tmp, *skb_match = NULL; 849 struct skb_shared_hwtstamps shhwtstamps; 850 u32 val, id, seqid, txport; 851 struct ocelot_port *port; 852 struct timespec64 ts; 853 unsigned long flags; 854 855 val = ocelot_read(ocelot, SYS_PTP_STATUS); 856 857 /* Check if a timestamp can be retrieved */ 858 if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) 859 break; 860 861 WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); 862 863 /* Retrieve the ts ID and Tx port */ 864 id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); 865 txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); 866 seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); 867 868 port = ocelot->ports[txport]; 869 870 spin_lock(&ocelot->ts_id_lock); 871 port->ptp_skbs_in_flight--; 872 ocelot->ptp_skbs_in_flight--; 873 spin_unlock(&ocelot->ts_id_lock); 874 875 /* Retrieve its associated skb */ 876 try_again: 877 spin_lock_irqsave(&port->tx_skbs.lock, flags); 878 879 skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { 880 if (OCELOT_SKB_CB(skb)->ts_id != id) 881 continue; 882 __skb_unlink(skb, &port->tx_skbs); 883 skb_match = skb; 884 break; 885 } 886 887 spin_unlock_irqrestore(&port->tx_skbs.lock, flags); 888 889 if (WARN_ON(!skb_match)) 890 continue; 891 892 if (!ocelot_validate_ptp_skb(skb_match, seqid)) { 893 dev_err_ratelimited(ocelot->dev, 894 "port %d received stale TX timestamp for seqid %d, discarding\n", 895 txport, seqid); 896 dev_kfree_skb_any(skb); 897 goto try_again; 898 } 899 900 /* Get the h/w timestamp */ 901 ocelot_get_hwtimestamp(ocelot, &ts); 902 903 /* Set the timestamp into the skb */ 904 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 905 shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); 906 skb_complete_tx_timestamp(skb_match, &shhwtstamps); 907 908 /* Next ts */ 909 ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); 910 } 911 } 912 EXPORT_SYMBOL(ocelot_get_txtstamp); 913 914 static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, 915 u32 *rval) 916 { 917 u32 bytes_valid, val; 918 919 val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 920 if (val == XTR_NOT_READY) { 921 if (ifh) 922 return -EIO; 923 924 do { 925 val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 926 } while (val == XTR_NOT_READY); 927 } 928 929 switch (val) { 930 case XTR_ABORT: 931 return -EIO; 932 case XTR_EOF_0: 933 case XTR_EOF_1: 934 case XTR_EOF_2: 935 case XTR_EOF_3: 936 case XTR_PRUNED: 937 bytes_valid = XTR_VALID_BYTES(val); 938 val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 939 if (val == XTR_ESCAPE) 940 *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 941 else 942 *rval = val; 943 944 return bytes_valid; 945 case XTR_ESCAPE: 946 *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); 947 948 return 4; 949 default: 950 *rval = val; 951 952 return 4; 953 } 954 } 955 956 static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) 957 { 958 int i, err = 0; 959 960 for (i = 0; i < OCELOT_TAG_LEN / 4; i++) { 961 err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]); 962 if (err != 4) 963 return (err < 0) ? err : -EIO; 964 } 965 966 return 0; 967 } 968 969 int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) 970 { 971 struct skb_shared_hwtstamps *shhwtstamps; 972 u64 tod_in_ns, full_ts_in_ns; 973 u64 timestamp, src_port, len; 974 u32 xfh[OCELOT_TAG_LEN / 4]; 975 struct net_device *dev; 976 struct timespec64 ts; 977 struct sk_buff *skb; 978 int sz, buf_len; 979 u32 val, *buf; 980 int err; 981 982 err = ocelot_xtr_poll_xfh(ocelot, grp, xfh); 983 if (err) 984 return err; 985 986 ocelot_xfh_get_src_port(xfh, &src_port); 987 ocelot_xfh_get_len(xfh, &len); 988 ocelot_xfh_get_rew_val(xfh, ×tamp); 989 990 if (WARN_ON(src_port >= ocelot->num_phys_ports)) 991 return -EINVAL; 992 993 dev = ocelot->ops->port_to_netdev(ocelot, src_port); 994 if (!dev) 995 return -EINVAL; 996 997 skb = netdev_alloc_skb(dev, len); 998 if (unlikely(!skb)) { 999 netdev_err(dev, "Unable to allocate sk_buff\n"); 1000 return -ENOMEM; 1001 } 1002 1003 buf_len = len - ETH_FCS_LEN; 1004 buf = (u32 *)skb_put(skb, buf_len); 1005 1006 len = 0; 1007 do { 1008 sz = ocelot_rx_frame_word(ocelot, grp, false, &val); 1009 if (sz < 0) { 1010 err = sz; 1011 goto out_free_skb; 1012 } 1013 *buf++ = val; 1014 len += sz; 1015 } while (len < buf_len); 1016 1017 /* Read the FCS */ 1018 sz = ocelot_rx_frame_word(ocelot, grp, false, &val); 1019 if (sz < 0) { 1020 err = sz; 1021 goto out_free_skb; 1022 } 1023 1024 /* Update the statistics if part of the FCS was read before */ 1025 len -= ETH_FCS_LEN - sz; 1026 1027 if (unlikely(dev->features & NETIF_F_RXFCS)) { 1028 buf = (u32 *)skb_put(skb, ETH_FCS_LEN); 1029 *buf = val; 1030 } 1031 1032 if (ocelot->ptp) { 1033 ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); 1034 1035 tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); 1036 if ((tod_in_ns & 0xffffffff) < timestamp) 1037 full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | 1038 timestamp; 1039 else 1040 full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | 1041 timestamp; 1042 1043 shhwtstamps = skb_hwtstamps(skb); 1044 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); 1045 shhwtstamps->hwtstamp = full_ts_in_ns; 1046 } 1047 1048 /* Everything we see on an interface that is in the HW bridge 1049 * has already been forwarded. 1050 */ 1051 if (ocelot->ports[src_port]->bridge) 1052 skb->offload_fwd_mark = 1; 1053 1054 skb->protocol = eth_type_trans(skb, dev); 1055 1056 *nskb = skb; 1057 1058 return 0; 1059 1060 out_free_skb: 1061 kfree_skb(skb); 1062 return err; 1063 } 1064 EXPORT_SYMBOL(ocelot_xtr_poll_frame); 1065 1066 bool ocelot_can_inject(struct ocelot *ocelot, int grp) 1067 { 1068 u32 val = ocelot_read(ocelot, QS_INJ_STATUS); 1069 1070 if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp)))) 1071 return false; 1072 if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))) 1073 return false; 1074 1075 return true; 1076 } 1077 EXPORT_SYMBOL(ocelot_can_inject); 1078 1079 void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, 1080 u32 rew_op, struct sk_buff *skb) 1081 { 1082 u32 ifh[OCELOT_TAG_LEN / 4] = {0}; 1083 unsigned int i, count, last; 1084 1085 ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | 1086 QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); 1087 1088 ocelot_ifh_set_bypass(ifh, 1); 1089 ocelot_ifh_set_dest(ifh, BIT_ULL(port)); 1090 ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); 1091 ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb)); 1092 ocelot_ifh_set_rew_op(ifh, rew_op); 1093 1094 for (i = 0; i < OCELOT_TAG_LEN / 4; i++) 1095 ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); 1096 1097 count = DIV_ROUND_UP(skb->len, 4); 1098 last = skb->len % 4; 1099 for (i = 0; i < count; i++) 1100 ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); 1101 1102 /* Add padding */ 1103 while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { 1104 ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); 1105 i++; 1106 } 1107 1108 /* Indicate EOF and valid bytes in last word */ 1109 ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | 1110 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | 1111 QS_INJ_CTRL_EOF, 1112 QS_INJ_CTRL, grp); 1113 1114 /* Add dummy CRC */ 1115 ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); 1116 skb_tx_timestamp(skb); 1117 1118 skb->dev->stats.tx_packets++; 1119 skb->dev->stats.tx_bytes += skb->len; 1120 } 1121 EXPORT_SYMBOL(ocelot_port_inject_frame); 1122 1123 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) 1124 { 1125 while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) 1126 ocelot_read_rix(ocelot, QS_XTR_RD, grp); 1127 } 1128 EXPORT_SYMBOL(ocelot_drain_cpu_queue); 1129 1130 int ocelot_fdb_add(struct ocelot *ocelot, int port, 1131 const unsigned char *addr, u16 vid) 1132 { 1133 int pgid = port; 1134 1135 if (port == ocelot->npi) 1136 pgid = PGID_CPU; 1137 1138 return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); 1139 } 1140 EXPORT_SYMBOL(ocelot_fdb_add); 1141 1142 int ocelot_fdb_del(struct ocelot *ocelot, int port, 1143 const unsigned char *addr, u16 vid) 1144 { 1145 return ocelot_mact_forget(ocelot, addr, vid); 1146 } 1147 EXPORT_SYMBOL(ocelot_fdb_del); 1148 1149 int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, 1150 bool is_static, void *data) 1151 { 1152 struct ocelot_dump_ctx *dump = data; 1153 u32 portid = NETLINK_CB(dump->cb->skb).portid; 1154 u32 seq = dump->cb->nlh->nlmsg_seq; 1155 struct nlmsghdr *nlh; 1156 struct ndmsg *ndm; 1157 1158 if (dump->idx < dump->cb->args[2]) 1159 goto skip; 1160 1161 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 1162 sizeof(*ndm), NLM_F_MULTI); 1163 if (!nlh) 1164 return -EMSGSIZE; 1165 1166 ndm = nlmsg_data(nlh); 1167 ndm->ndm_family = AF_BRIDGE; 1168 ndm->ndm_pad1 = 0; 1169 ndm->ndm_pad2 = 0; 1170 ndm->ndm_flags = NTF_SELF; 1171 ndm->ndm_type = 0; 1172 ndm->ndm_ifindex = dump->dev->ifindex; 1173 ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; 1174 1175 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) 1176 goto nla_put_failure; 1177 1178 if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) 1179 goto nla_put_failure; 1180 1181 nlmsg_end(dump->skb, nlh); 1182 1183 skip: 1184 dump->idx++; 1185 return 0; 1186 1187 nla_put_failure: 1188 nlmsg_cancel(dump->skb, nlh); 1189 return -EMSGSIZE; 1190 } 1191 EXPORT_SYMBOL(ocelot_port_fdb_do_dump); 1192 1193 /* Caller must hold &ocelot->mact_lock */ 1194 static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, 1195 struct ocelot_mact_entry *entry) 1196 { 1197 u32 val, dst, macl, mach; 1198 char mac[ETH_ALEN]; 1199 1200 /* Set row and column to read from */ 1201 ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row); 1202 ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col); 1203 1204 /* Issue a read command */ 1205 ocelot_write(ocelot, 1206 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ), 1207 ANA_TABLES_MACACCESS); 1208 1209 if (ocelot_mact_wait_for_completion(ocelot)) 1210 return -ETIMEDOUT; 1211 1212 /* Read the entry flags */ 1213 val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); 1214 if (!(val & ANA_TABLES_MACACCESS_VALID)) 1215 return -EINVAL; 1216 1217 /* If the entry read has another port configured as its destination, 1218 * do not report it. 1219 */ 1220 dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3; 1221 if (dst != port) 1222 return -EINVAL; 1223 1224 /* Get the entry's MAC address and VLAN id */ 1225 macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA); 1226 mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA); 1227 1228 mac[0] = (mach >> 8) & 0xff; 1229 mac[1] = (mach >> 0) & 0xff; 1230 mac[2] = (macl >> 24) & 0xff; 1231 mac[3] = (macl >> 16) & 0xff; 1232 mac[4] = (macl >> 8) & 0xff; 1233 mac[5] = (macl >> 0) & 0xff; 1234 1235 entry->vid = (mach >> 16) & 0xfff; 1236 ether_addr_copy(entry->mac, mac); 1237 1238 return 0; 1239 } 1240 1241 int ocelot_fdb_dump(struct ocelot *ocelot, int port, 1242 dsa_fdb_dump_cb_t *cb, void *data) 1243 { 1244 int err = 0; 1245 int i, j; 1246 1247 /* We could take the lock just around ocelot_mact_read, but doing so 1248 * thousands of times in a row seems rather pointless and inefficient. 1249 */ 1250 mutex_lock(&ocelot->mact_lock); 1251 1252 /* Loop through all the mac tables entries. */ 1253 for (i = 0; i < ocelot->num_mact_rows; i++) { 1254 for (j = 0; j < 4; j++) { 1255 struct ocelot_mact_entry entry; 1256 bool is_static; 1257 1258 err = ocelot_mact_read(ocelot, port, i, j, &entry); 1259 /* If the entry is invalid (wrong port, invalid...), 1260 * skip it. 1261 */ 1262 if (err == -EINVAL) 1263 continue; 1264 else if (err) 1265 break; 1266 1267 is_static = (entry.type == ENTRYTYPE_LOCKED); 1268 1269 err = cb(entry.mac, entry.vid, is_static, data); 1270 if (err) 1271 break; 1272 } 1273 } 1274 1275 mutex_unlock(&ocelot->mact_lock); 1276 1277 return err; 1278 } 1279 EXPORT_SYMBOL(ocelot_fdb_dump); 1280 1281 static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) 1282 { 1283 trap->key_type = OCELOT_VCAP_KEY_ETYPE; 1284 *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); 1285 *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); 1286 } 1287 1288 static void 1289 ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) 1290 { 1291 trap->key_type = OCELOT_VCAP_KEY_IPV4; 1292 trap->key.ipv4.dport.value = PTP_EV_PORT; 1293 trap->key.ipv4.dport.mask = 0xffff; 1294 } 1295 1296 static void 1297 ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) 1298 { 1299 trap->key_type = OCELOT_VCAP_KEY_IPV6; 1300 trap->key.ipv6.dport.value = PTP_EV_PORT; 1301 trap->key.ipv6.dport.mask = 0xffff; 1302 } 1303 1304 static void 1305 ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) 1306 { 1307 trap->key_type = OCELOT_VCAP_KEY_IPV4; 1308 trap->key.ipv4.dport.value = PTP_GEN_PORT; 1309 trap->key.ipv4.dport.mask = 0xffff; 1310 } 1311 1312 static void 1313 ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) 1314 { 1315 trap->key_type = OCELOT_VCAP_KEY_IPV6; 1316 trap->key.ipv6.dport.value = PTP_GEN_PORT; 1317 trap->key.ipv6.dport.mask = 0xffff; 1318 } 1319 1320 static int ocelot_trap_add(struct ocelot *ocelot, int port, 1321 unsigned long cookie, 1322 void (*populate)(struct ocelot_vcap_filter *f)) 1323 { 1324 struct ocelot_vcap_block *block_vcap_is2; 1325 struct ocelot_vcap_filter *trap; 1326 bool new = false; 1327 int err; 1328 1329 block_vcap_is2 = &ocelot->block[VCAP_IS2]; 1330 1331 trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, 1332 false); 1333 if (!trap) { 1334 trap = kzalloc(sizeof(*trap), GFP_KERNEL); 1335 if (!trap) 1336 return -ENOMEM; 1337 1338 populate(trap); 1339 trap->prio = 1; 1340 trap->id.cookie = cookie; 1341 trap->id.tc_offload = false; 1342 trap->block_id = VCAP_IS2; 1343 trap->type = OCELOT_VCAP_FILTER_OFFLOAD; 1344 trap->lookup = 0; 1345 trap->action.cpu_copy_ena = true; 1346 trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; 1347 trap->action.port_mask = 0; 1348 new = true; 1349 } 1350 1351 trap->ingress_port_mask |= BIT(port); 1352 1353 if (new) 1354 err = ocelot_vcap_filter_add(ocelot, trap, NULL); 1355 else 1356 err = ocelot_vcap_filter_replace(ocelot, trap); 1357 if (err) { 1358 trap->ingress_port_mask &= ~BIT(port); 1359 if (!trap->ingress_port_mask) 1360 kfree(trap); 1361 return err; 1362 } 1363 1364 return 0; 1365 } 1366 1367 static int ocelot_trap_del(struct ocelot *ocelot, int port, 1368 unsigned long cookie) 1369 { 1370 struct ocelot_vcap_block *block_vcap_is2; 1371 struct ocelot_vcap_filter *trap; 1372 1373 block_vcap_is2 = &ocelot->block[VCAP_IS2]; 1374 1375 trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, 1376 false); 1377 if (!trap) 1378 return 0; 1379 1380 trap->ingress_port_mask &= ~BIT(port); 1381 if (!trap->ingress_port_mask) 1382 return ocelot_vcap_filter_del(ocelot, trap); 1383 1384 return ocelot_vcap_filter_replace(ocelot, trap); 1385 } 1386 1387 static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) 1388 { 1389 unsigned long l2_cookie = ocelot->num_phys_ports + 1; 1390 1391 return ocelot_trap_add(ocelot, port, l2_cookie, 1392 ocelot_populate_l2_ptp_trap_key); 1393 } 1394 1395 static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) 1396 { 1397 unsigned long l2_cookie = ocelot->num_phys_ports + 1; 1398 1399 return ocelot_trap_del(ocelot, port, l2_cookie); 1400 } 1401 1402 static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) 1403 { 1404 unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; 1405 unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; 1406 int err; 1407 1408 err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, 1409 ocelot_populate_ipv4_ptp_event_trap_key); 1410 if (err) 1411 return err; 1412 1413 err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, 1414 ocelot_populate_ipv4_ptp_general_trap_key); 1415 if (err) 1416 ocelot_trap_del(ocelot, port, ipv4_ev_cookie); 1417 1418 return err; 1419 } 1420 1421 static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) 1422 { 1423 unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; 1424 unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; 1425 int err; 1426 1427 err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); 1428 err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); 1429 return err; 1430 } 1431 1432 static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) 1433 { 1434 unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; 1435 unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; 1436 int err; 1437 1438 err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, 1439 ocelot_populate_ipv6_ptp_event_trap_key); 1440 if (err) 1441 return err; 1442 1443 err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, 1444 ocelot_populate_ipv6_ptp_general_trap_key); 1445 if (err) 1446 ocelot_trap_del(ocelot, port, ipv6_ev_cookie); 1447 1448 return err; 1449 } 1450 1451 static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) 1452 { 1453 unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; 1454 unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; 1455 int err; 1456 1457 err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); 1458 err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); 1459 return err; 1460 } 1461 1462 static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, 1463 bool l2, bool l4) 1464 { 1465 int err; 1466 1467 if (l2) 1468 err = ocelot_l2_ptp_trap_add(ocelot, port); 1469 else 1470 err = ocelot_l2_ptp_trap_del(ocelot, port); 1471 if (err) 1472 return err; 1473 1474 if (l4) { 1475 err = ocelot_ipv4_ptp_trap_add(ocelot, port); 1476 if (err) 1477 goto err_ipv4; 1478 1479 err = ocelot_ipv6_ptp_trap_add(ocelot, port); 1480 if (err) 1481 goto err_ipv6; 1482 } else { 1483 err = ocelot_ipv4_ptp_trap_del(ocelot, port); 1484 1485 err |= ocelot_ipv6_ptp_trap_del(ocelot, port); 1486 } 1487 if (err) 1488 return err; 1489 1490 return 0; 1491 1492 err_ipv6: 1493 ocelot_ipv4_ptp_trap_del(ocelot, port); 1494 err_ipv4: 1495 if (l2) 1496 ocelot_l2_ptp_trap_del(ocelot, port); 1497 return err; 1498 } 1499 1500 int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) 1501 { 1502 return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, 1503 sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0; 1504 } 1505 EXPORT_SYMBOL(ocelot_hwstamp_get); 1506 1507 int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) 1508 { 1509 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1510 bool l2 = false, l4 = false; 1511 struct hwtstamp_config cfg; 1512 int err; 1513 1514 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 1515 return -EFAULT; 1516 1517 /* reserved for future extensions */ 1518 if (cfg.flags) 1519 return -EINVAL; 1520 1521 /* Tx type sanity check */ 1522 switch (cfg.tx_type) { 1523 case HWTSTAMP_TX_ON: 1524 ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; 1525 break; 1526 case HWTSTAMP_TX_ONESTEP_SYNC: 1527 /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we 1528 * need to update the origin time. 1529 */ 1530 ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP; 1531 break; 1532 case HWTSTAMP_TX_OFF: 1533 ocelot_port->ptp_cmd = 0; 1534 break; 1535 default: 1536 return -ERANGE; 1537 } 1538 1539 mutex_lock(&ocelot->ptp_lock); 1540 1541 switch (cfg.rx_filter) { 1542 case HWTSTAMP_FILTER_NONE: 1543 break; 1544 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1545 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1546 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1547 l4 = true; 1548 break; 1549 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1550 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1551 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1552 l2 = true; 1553 break; 1554 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1555 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1556 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1557 l2 = true; 1558 l4 = true; 1559 break; 1560 default: 1561 mutex_unlock(&ocelot->ptp_lock); 1562 return -ERANGE; 1563 } 1564 1565 err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); 1566 if (err) 1567 return err; 1568 1569 if (l2 && l4) 1570 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1571 else if (l2) 1572 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 1573 else if (l4) 1574 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 1575 else 1576 cfg.rx_filter = HWTSTAMP_FILTER_NONE; 1577 1578 /* Commit back the result & save it */ 1579 memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg)); 1580 mutex_unlock(&ocelot->ptp_lock); 1581 1582 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 1583 } 1584 EXPORT_SYMBOL(ocelot_hwstamp_set); 1585 1586 void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) 1587 { 1588 int i; 1589 1590 if (sset != ETH_SS_STATS) 1591 return; 1592 1593 for (i = 0; i < ocelot->num_stats; i++) 1594 memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name, 1595 ETH_GSTRING_LEN); 1596 } 1597 EXPORT_SYMBOL(ocelot_get_strings); 1598 1599 static void ocelot_update_stats(struct ocelot *ocelot) 1600 { 1601 int i, j; 1602 1603 mutex_lock(&ocelot->stats_lock); 1604 1605 for (i = 0; i < ocelot->num_phys_ports; i++) { 1606 /* Configure the port to read the stats from */ 1607 ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); 1608 1609 for (j = 0; j < ocelot->num_stats; j++) { 1610 u32 val; 1611 unsigned int idx = i * ocelot->num_stats + j; 1612 1613 val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS, 1614 ocelot->stats_layout[j].offset); 1615 1616 if (val < (ocelot->stats[idx] & U32_MAX)) 1617 ocelot->stats[idx] += (u64)1 << 32; 1618 1619 ocelot->stats[idx] = (ocelot->stats[idx] & 1620 ~(u64)U32_MAX) + val; 1621 } 1622 } 1623 1624 mutex_unlock(&ocelot->stats_lock); 1625 } 1626 1627 static void ocelot_check_stats_work(struct work_struct *work) 1628 { 1629 struct delayed_work *del_work = to_delayed_work(work); 1630 struct ocelot *ocelot = container_of(del_work, struct ocelot, 1631 stats_work); 1632 1633 ocelot_update_stats(ocelot); 1634 1635 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, 1636 OCELOT_STATS_CHECK_DELAY); 1637 } 1638 1639 void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) 1640 { 1641 int i; 1642 1643 /* check and update now */ 1644 ocelot_update_stats(ocelot); 1645 1646 /* Copy all counters */ 1647 for (i = 0; i < ocelot->num_stats; i++) 1648 *data++ = ocelot->stats[port * ocelot->num_stats + i]; 1649 } 1650 EXPORT_SYMBOL(ocelot_get_ethtool_stats); 1651 1652 int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) 1653 { 1654 if (sset != ETH_SS_STATS) 1655 return -EOPNOTSUPP; 1656 1657 return ocelot->num_stats; 1658 } 1659 EXPORT_SYMBOL(ocelot_get_sset_count); 1660 1661 int ocelot_get_ts_info(struct ocelot *ocelot, int port, 1662 struct ethtool_ts_info *info) 1663 { 1664 info->phc_index = ocelot->ptp_clock ? 1665 ptp_clock_index(ocelot->ptp_clock) : -1; 1666 if (info->phc_index == -1) { 1667 info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | 1668 SOF_TIMESTAMPING_RX_SOFTWARE | 1669 SOF_TIMESTAMPING_SOFTWARE; 1670 return 0; 1671 } 1672 info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | 1673 SOF_TIMESTAMPING_RX_SOFTWARE | 1674 SOF_TIMESTAMPING_SOFTWARE | 1675 SOF_TIMESTAMPING_TX_HARDWARE | 1676 SOF_TIMESTAMPING_RX_HARDWARE | 1677 SOF_TIMESTAMPING_RAW_HARDWARE; 1678 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | 1679 BIT(HWTSTAMP_TX_ONESTEP_SYNC); 1680 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 1681 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | 1682 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 1683 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 1684 1685 return 0; 1686 } 1687 EXPORT_SYMBOL(ocelot_get_ts_info); 1688 1689 static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, 1690 bool only_active_ports) 1691 { 1692 u32 mask = 0; 1693 int port; 1694 1695 for (port = 0; port < ocelot->num_phys_ports; port++) { 1696 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1697 1698 if (!ocelot_port) 1699 continue; 1700 1701 if (ocelot_port->bond == bond) { 1702 if (only_active_ports && !ocelot_port->lag_tx_active) 1703 continue; 1704 1705 mask |= BIT(port); 1706 } 1707 } 1708 1709 return mask; 1710 } 1711 1712 static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port, 1713 struct net_device *bridge) 1714 { 1715 struct ocelot_port *ocelot_port = ocelot->ports[src_port]; 1716 u32 mask = 0; 1717 int port; 1718 1719 if (!ocelot_port || ocelot_port->bridge != bridge || 1720 ocelot_port->stp_state != BR_STATE_FORWARDING) 1721 return 0; 1722 1723 for (port = 0; port < ocelot->num_phys_ports; port++) { 1724 ocelot_port = ocelot->ports[port]; 1725 1726 if (!ocelot_port) 1727 continue; 1728 1729 if (ocelot_port->stp_state == BR_STATE_FORWARDING && 1730 ocelot_port->bridge == bridge) 1731 mask |= BIT(port); 1732 } 1733 1734 return mask; 1735 } 1736 1737 static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) 1738 { 1739 u32 mask = 0; 1740 int port; 1741 1742 for (port = 0; port < ocelot->num_phys_ports; port++) { 1743 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1744 1745 if (!ocelot_port) 1746 continue; 1747 1748 if (ocelot_port->is_dsa_8021q_cpu) 1749 mask |= BIT(port); 1750 } 1751 1752 return mask; 1753 } 1754 1755 void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) 1756 { 1757 unsigned long cpu_fwd_mask; 1758 int port; 1759 1760 /* If a DSA tag_8021q CPU exists, it needs to be included in the 1761 * regular forwarding path of the front ports regardless of whether 1762 * those are bridged or standalone. 1763 * If DSA tag_8021q is not used, this returns 0, which is fine because 1764 * the hardware-based CPU port module can be a destination for packets 1765 * even if it isn't part of PGID_SRC. 1766 */ 1767 cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot); 1768 1769 /* Apply FWD mask. The loop is needed to add/remove the current port as 1770 * a source for the other ports. 1771 */ 1772 for (port = 0; port < ocelot->num_phys_ports; port++) { 1773 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1774 unsigned long mask; 1775 1776 if (!ocelot_port) { 1777 /* Unused ports can't send anywhere */ 1778 mask = 0; 1779 } else if (ocelot_port->is_dsa_8021q_cpu) { 1780 /* The DSA tag_8021q CPU ports need to be able to 1781 * forward packets to all other ports except for 1782 * themselves 1783 */ 1784 mask = GENMASK(ocelot->num_phys_ports - 1, 0); 1785 mask &= ~cpu_fwd_mask; 1786 } else if (ocelot_port->bridge) { 1787 struct net_device *bridge = ocelot_port->bridge; 1788 struct net_device *bond = ocelot_port->bond; 1789 1790 mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge); 1791 mask |= cpu_fwd_mask; 1792 mask &= ~BIT(port); 1793 if (bond) { 1794 mask &= ~ocelot_get_bond_mask(ocelot, bond, 1795 false); 1796 } 1797 } else { 1798 /* Standalone ports forward only to DSA tag_8021q CPU 1799 * ports (if those exist), or to the hardware CPU port 1800 * module otherwise. 1801 */ 1802 mask = cpu_fwd_mask; 1803 } 1804 1805 ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port); 1806 } 1807 } 1808 EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); 1809 1810 void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) 1811 { 1812 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1813 u32 learn_ena = 0; 1814 1815 ocelot_port->stp_state = state; 1816 1817 if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) && 1818 ocelot_port->learn_ena) 1819 learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA; 1820 1821 ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA, 1822 ANA_PORT_PORT_CFG, port); 1823 1824 ocelot_apply_bridge_fwd_mask(ocelot); 1825 } 1826 EXPORT_SYMBOL(ocelot_bridge_stp_state_set); 1827 1828 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) 1829 { 1830 unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000); 1831 1832 /* Setting AGE_PERIOD to zero effectively disables automatic aging, 1833 * which is clearly not what our intention is. So avoid that. 1834 */ 1835 if (!age_period) 1836 age_period = 1; 1837 1838 ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE); 1839 } 1840 EXPORT_SYMBOL(ocelot_set_ageing_time); 1841 1842 static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, 1843 const unsigned char *addr, 1844 u16 vid) 1845 { 1846 struct ocelot_multicast *mc; 1847 1848 list_for_each_entry(mc, &ocelot->multicast, list) { 1849 if (ether_addr_equal(mc->addr, addr) && mc->vid == vid) 1850 return mc; 1851 } 1852 1853 return NULL; 1854 } 1855 1856 static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr) 1857 { 1858 if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e) 1859 return ENTRYTYPE_MACv4; 1860 if (addr[0] == 0x33 && addr[1] == 0x33) 1861 return ENTRYTYPE_MACv6; 1862 return ENTRYTYPE_LOCKED; 1863 } 1864 1865 static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index, 1866 unsigned long ports) 1867 { 1868 struct ocelot_pgid *pgid; 1869 1870 pgid = kzalloc(sizeof(*pgid), GFP_KERNEL); 1871 if (!pgid) 1872 return ERR_PTR(-ENOMEM); 1873 1874 pgid->ports = ports; 1875 pgid->index = index; 1876 refcount_set(&pgid->refcount, 1); 1877 list_add_tail(&pgid->list, &ocelot->pgids); 1878 1879 return pgid; 1880 } 1881 1882 static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid) 1883 { 1884 if (!refcount_dec_and_test(&pgid->refcount)) 1885 return; 1886 1887 list_del(&pgid->list); 1888 kfree(pgid); 1889 } 1890 1891 static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot, 1892 const struct ocelot_multicast *mc) 1893 { 1894 struct ocelot_pgid *pgid; 1895 int index; 1896 1897 /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and 1898 * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the 1899 * destination mask table (PGID), the destination set is programmed as 1900 * part of the entry MAC address.", and the DEST_IDX is set to 0. 1901 */ 1902 if (mc->entry_type == ENTRYTYPE_MACv4 || 1903 mc->entry_type == ENTRYTYPE_MACv6) 1904 return ocelot_pgid_alloc(ocelot, 0, mc->ports); 1905 1906 list_for_each_entry(pgid, &ocelot->pgids, list) { 1907 /* When searching for a nonreserved multicast PGID, ignore the 1908 * dummy PGID of zero that we have for MACv4/MACv6 entries 1909 */ 1910 if (pgid->index && pgid->ports == mc->ports) { 1911 refcount_inc(&pgid->refcount); 1912 return pgid; 1913 } 1914 } 1915 1916 /* Search for a free index in the nonreserved multicast PGID area */ 1917 for_each_nonreserved_multicast_dest_pgid(ocelot, index) { 1918 bool used = false; 1919 1920 list_for_each_entry(pgid, &ocelot->pgids, list) { 1921 if (pgid->index == index) { 1922 used = true; 1923 break; 1924 } 1925 } 1926 1927 if (!used) 1928 return ocelot_pgid_alloc(ocelot, index, mc->ports); 1929 } 1930 1931 return ERR_PTR(-ENOSPC); 1932 } 1933 1934 static void ocelot_encode_ports_to_mdb(unsigned char *addr, 1935 struct ocelot_multicast *mc) 1936 { 1937 ether_addr_copy(addr, mc->addr); 1938 1939 if (mc->entry_type == ENTRYTYPE_MACv4) { 1940 addr[0] = 0; 1941 addr[1] = mc->ports >> 8; 1942 addr[2] = mc->ports & 0xff; 1943 } else if (mc->entry_type == ENTRYTYPE_MACv6) { 1944 addr[0] = mc->ports >> 8; 1945 addr[1] = mc->ports & 0xff; 1946 } 1947 } 1948 1949 int ocelot_port_mdb_add(struct ocelot *ocelot, int port, 1950 const struct switchdev_obj_port_mdb *mdb) 1951 { 1952 unsigned char addr[ETH_ALEN]; 1953 struct ocelot_multicast *mc; 1954 struct ocelot_pgid *pgid; 1955 u16 vid = mdb->vid; 1956 1957 if (port == ocelot->npi) 1958 port = ocelot->num_phys_ports; 1959 1960 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); 1961 if (!mc) { 1962 /* New entry */ 1963 mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); 1964 if (!mc) 1965 return -ENOMEM; 1966 1967 mc->entry_type = ocelot_classify_mdb(mdb->addr); 1968 ether_addr_copy(mc->addr, mdb->addr); 1969 mc->vid = vid; 1970 1971 list_add_tail(&mc->list, &ocelot->multicast); 1972 } else { 1973 /* Existing entry. Clean up the current port mask from 1974 * hardware now, because we'll be modifying it. 1975 */ 1976 ocelot_pgid_free(ocelot, mc->pgid); 1977 ocelot_encode_ports_to_mdb(addr, mc); 1978 ocelot_mact_forget(ocelot, addr, vid); 1979 } 1980 1981 mc->ports |= BIT(port); 1982 1983 pgid = ocelot_mdb_get_pgid(ocelot, mc); 1984 if (IS_ERR(pgid)) { 1985 dev_err(ocelot->dev, 1986 "Cannot allocate PGID for mdb %pM vid %d\n", 1987 mc->addr, mc->vid); 1988 devm_kfree(ocelot->dev, mc); 1989 return PTR_ERR(pgid); 1990 } 1991 mc->pgid = pgid; 1992 1993 ocelot_encode_ports_to_mdb(addr, mc); 1994 1995 if (mc->entry_type != ENTRYTYPE_MACv4 && 1996 mc->entry_type != ENTRYTYPE_MACv6) 1997 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, 1998 pgid->index); 1999 2000 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, 2001 mc->entry_type); 2002 } 2003 EXPORT_SYMBOL(ocelot_port_mdb_add); 2004 2005 int ocelot_port_mdb_del(struct ocelot *ocelot, int port, 2006 const struct switchdev_obj_port_mdb *mdb) 2007 { 2008 unsigned char addr[ETH_ALEN]; 2009 struct ocelot_multicast *mc; 2010 struct ocelot_pgid *pgid; 2011 u16 vid = mdb->vid; 2012 2013 if (port == ocelot->npi) 2014 port = ocelot->num_phys_ports; 2015 2016 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); 2017 if (!mc) 2018 return -ENOENT; 2019 2020 ocelot_encode_ports_to_mdb(addr, mc); 2021 ocelot_mact_forget(ocelot, addr, vid); 2022 2023 ocelot_pgid_free(ocelot, mc->pgid); 2024 mc->ports &= ~BIT(port); 2025 if (!mc->ports) { 2026 list_del(&mc->list); 2027 devm_kfree(ocelot->dev, mc); 2028 return 0; 2029 } 2030 2031 /* We have a PGID with fewer ports now */ 2032 pgid = ocelot_mdb_get_pgid(ocelot, mc); 2033 if (IS_ERR(pgid)) 2034 return PTR_ERR(pgid); 2035 mc->pgid = pgid; 2036 2037 ocelot_encode_ports_to_mdb(addr, mc); 2038 2039 if (mc->entry_type != ENTRYTYPE_MACv4 && 2040 mc->entry_type != ENTRYTYPE_MACv6) 2041 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, 2042 pgid->index); 2043 2044 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, 2045 mc->entry_type); 2046 } 2047 EXPORT_SYMBOL(ocelot_port_mdb_del); 2048 2049 void ocelot_port_bridge_join(struct ocelot *ocelot, int port, 2050 struct net_device *bridge) 2051 { 2052 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2053 2054 ocelot_port->bridge = bridge; 2055 2056 ocelot_apply_bridge_fwd_mask(ocelot); 2057 } 2058 EXPORT_SYMBOL(ocelot_port_bridge_join); 2059 2060 void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, 2061 struct net_device *bridge) 2062 { 2063 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2064 2065 ocelot_port->bridge = NULL; 2066 2067 ocelot_port_set_pvid(ocelot, port, NULL); 2068 ocelot_port_manage_port_tag(ocelot, port); 2069 ocelot_apply_bridge_fwd_mask(ocelot); 2070 } 2071 EXPORT_SYMBOL(ocelot_port_bridge_leave); 2072 2073 static void ocelot_set_aggr_pgids(struct ocelot *ocelot) 2074 { 2075 unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0); 2076 int i, port, lag; 2077 2078 /* Reset destination and aggregation PGIDS */ 2079 for_each_unicast_dest_pgid(ocelot, port) 2080 ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); 2081 2082 for_each_aggr_pgid(ocelot, i) 2083 ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), 2084 ANA_PGID_PGID, i); 2085 2086 /* The visited ports bitmask holds the list of ports offloading any 2087 * bonding interface. Initially we mark all these ports as unvisited, 2088 * then every time we visit a port in this bitmask, we know that it is 2089 * the lowest numbered port, i.e. the one whose logical ID == physical 2090 * port ID == LAG ID. So we mark as visited all further ports in the 2091 * bitmask that are offloading the same bonding interface. This way, 2092 * we set up the aggregation PGIDs only once per bonding interface. 2093 */ 2094 for (port = 0; port < ocelot->num_phys_ports; port++) { 2095 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2096 2097 if (!ocelot_port || !ocelot_port->bond) 2098 continue; 2099 2100 visited &= ~BIT(port); 2101 } 2102 2103 /* Now, set PGIDs for each active LAG */ 2104 for (lag = 0; lag < ocelot->num_phys_ports; lag++) { 2105 struct net_device *bond = ocelot->ports[lag]->bond; 2106 int num_active_ports = 0; 2107 unsigned long bond_mask; 2108 u8 aggr_idx[16]; 2109 2110 if (!bond || (visited & BIT(lag))) 2111 continue; 2112 2113 bond_mask = ocelot_get_bond_mask(ocelot, bond, true); 2114 2115 for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { 2116 // Destination mask 2117 ocelot_write_rix(ocelot, bond_mask, 2118 ANA_PGID_PGID, port); 2119 aggr_idx[num_active_ports++] = port; 2120 } 2121 2122 for_each_aggr_pgid(ocelot, i) { 2123 u32 ac; 2124 2125 ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); 2126 ac &= ~bond_mask; 2127 /* Don't do division by zero if there was no active 2128 * port. Just make all aggregation codes zero. 2129 */ 2130 if (num_active_ports) 2131 ac |= BIT(aggr_idx[i % num_active_ports]); 2132 ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); 2133 } 2134 2135 /* Mark all ports in the same LAG as visited to avoid applying 2136 * the same config again. 2137 */ 2138 for (port = lag; port < ocelot->num_phys_ports; port++) { 2139 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2140 2141 if (!ocelot_port) 2142 continue; 2143 2144 if (ocelot_port->bond == bond) 2145 visited |= BIT(port); 2146 } 2147 } 2148 } 2149 2150 /* When offloading a bonding interface, the switch ports configured under the 2151 * same bond must have the same logical port ID, equal to the physical port ID 2152 * of the lowest numbered physical port in that bond. Otherwise, in standalone/ 2153 * bridged mode, each port has a logical port ID equal to its physical port ID. 2154 */ 2155 static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) 2156 { 2157 int port; 2158 2159 for (port = 0; port < ocelot->num_phys_ports; port++) { 2160 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2161 struct net_device *bond; 2162 2163 if (!ocelot_port) 2164 continue; 2165 2166 bond = ocelot_port->bond; 2167 if (bond) { 2168 int lag = __ffs(ocelot_get_bond_mask(ocelot, bond, 2169 false)); 2170 2171 ocelot_rmw_gix(ocelot, 2172 ANA_PORT_PORT_CFG_PORTID_VAL(lag), 2173 ANA_PORT_PORT_CFG_PORTID_VAL_M, 2174 ANA_PORT_PORT_CFG, port); 2175 } else { 2176 ocelot_rmw_gix(ocelot, 2177 ANA_PORT_PORT_CFG_PORTID_VAL(port), 2178 ANA_PORT_PORT_CFG_PORTID_VAL_M, 2179 ANA_PORT_PORT_CFG, port); 2180 } 2181 } 2182 } 2183 2184 int ocelot_port_lag_join(struct ocelot *ocelot, int port, 2185 struct net_device *bond, 2186 struct netdev_lag_upper_info *info) 2187 { 2188 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) 2189 return -EOPNOTSUPP; 2190 2191 ocelot->ports[port]->bond = bond; 2192 2193 ocelot_setup_logical_port_ids(ocelot); 2194 ocelot_apply_bridge_fwd_mask(ocelot); 2195 ocelot_set_aggr_pgids(ocelot); 2196 2197 return 0; 2198 } 2199 EXPORT_SYMBOL(ocelot_port_lag_join); 2200 2201 void ocelot_port_lag_leave(struct ocelot *ocelot, int port, 2202 struct net_device *bond) 2203 { 2204 ocelot->ports[port]->bond = NULL; 2205 2206 ocelot_setup_logical_port_ids(ocelot); 2207 ocelot_apply_bridge_fwd_mask(ocelot); 2208 ocelot_set_aggr_pgids(ocelot); 2209 } 2210 EXPORT_SYMBOL(ocelot_port_lag_leave); 2211 2212 void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active) 2213 { 2214 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2215 2216 ocelot_port->lag_tx_active = lag_tx_active; 2217 2218 /* Rebalance the LAGs */ 2219 ocelot_set_aggr_pgids(ocelot); 2220 } 2221 EXPORT_SYMBOL(ocelot_port_lag_change); 2222 2223 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. 2224 * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. 2225 * In the special case that it's the NPI port that we're configuring, the 2226 * length of the tag and optional prefix needs to be accounted for privately, 2227 * in order to be able to sustain communication at the requested @sdu. 2228 */ 2229 void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) 2230 { 2231 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2232 int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; 2233 int pause_start, pause_stop; 2234 int atop, atop_tot; 2235 2236 if (port == ocelot->npi) { 2237 maxlen += OCELOT_TAG_LEN; 2238 2239 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) 2240 maxlen += OCELOT_SHORT_PREFIX_LEN; 2241 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) 2242 maxlen += OCELOT_LONG_PREFIX_LEN; 2243 } 2244 2245 ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG); 2246 2247 /* Set Pause watermark hysteresis */ 2248 pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ; 2249 pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ; 2250 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START, 2251 pause_start); 2252 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP, 2253 pause_stop); 2254 2255 /* Tail dropping watermarks */ 2256 atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) / 2257 OCELOT_BUFFER_CELL_SZ; 2258 atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ; 2259 ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port); 2260 ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG); 2261 } 2262 EXPORT_SYMBOL(ocelot_port_set_maxlen); 2263 2264 int ocelot_get_max_mtu(struct ocelot *ocelot, int port) 2265 { 2266 int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN; 2267 2268 if (port == ocelot->npi) { 2269 max_mtu -= OCELOT_TAG_LEN; 2270 2271 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) 2272 max_mtu -= OCELOT_SHORT_PREFIX_LEN; 2273 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) 2274 max_mtu -= OCELOT_LONG_PREFIX_LEN; 2275 } 2276 2277 return max_mtu; 2278 } 2279 EXPORT_SYMBOL(ocelot_get_max_mtu); 2280 2281 static void ocelot_port_set_learning(struct ocelot *ocelot, int port, 2282 bool enabled) 2283 { 2284 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2285 u32 val = 0; 2286 2287 if (enabled) 2288 val = ANA_PORT_PORT_CFG_LEARN_ENA; 2289 2290 ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA, 2291 ANA_PORT_PORT_CFG, port); 2292 2293 ocelot_port->learn_ena = enabled; 2294 } 2295 2296 static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port, 2297 bool enabled) 2298 { 2299 u32 val = 0; 2300 2301 if (enabled) 2302 val = BIT(port); 2303 2304 ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC); 2305 } 2306 2307 static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, 2308 bool enabled) 2309 { 2310 u32 val = 0; 2311 2312 if (enabled) 2313 val = BIT(port); 2314 2315 ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC); 2316 } 2317 2318 static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, 2319 bool enabled) 2320 { 2321 u32 val = 0; 2322 2323 if (enabled) 2324 val = BIT(port); 2325 2326 ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC); 2327 } 2328 2329 int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, 2330 struct switchdev_brport_flags flags) 2331 { 2332 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | 2333 BR_BCAST_FLOOD)) 2334 return -EINVAL; 2335 2336 return 0; 2337 } 2338 EXPORT_SYMBOL(ocelot_port_pre_bridge_flags); 2339 2340 void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, 2341 struct switchdev_brport_flags flags) 2342 { 2343 if (flags.mask & BR_LEARNING) 2344 ocelot_port_set_learning(ocelot, port, 2345 !!(flags.val & BR_LEARNING)); 2346 2347 if (flags.mask & BR_FLOOD) 2348 ocelot_port_set_ucast_flood(ocelot, port, 2349 !!(flags.val & BR_FLOOD)); 2350 2351 if (flags.mask & BR_MCAST_FLOOD) 2352 ocelot_port_set_mcast_flood(ocelot, port, 2353 !!(flags.val & BR_MCAST_FLOOD)); 2354 2355 if (flags.mask & BR_BCAST_FLOOD) 2356 ocelot_port_set_bcast_flood(ocelot, port, 2357 !!(flags.val & BR_BCAST_FLOOD)); 2358 } 2359 EXPORT_SYMBOL(ocelot_port_bridge_flags); 2360 2361 void ocelot_init_port(struct ocelot *ocelot, int port) 2362 { 2363 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2364 2365 skb_queue_head_init(&ocelot_port->tx_skbs); 2366 2367 /* Basic L2 initialization */ 2368 2369 /* Set MAC IFG Gaps 2370 * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0 2371 * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5 2372 */ 2373 ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5), 2374 DEV_MAC_IFG_CFG); 2375 2376 /* Load seed (0) and set MAC HDX late collision */ 2377 ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) | 2378 DEV_MAC_HDX_CFG_SEED_LOAD, 2379 DEV_MAC_HDX_CFG); 2380 mdelay(1); 2381 ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67), 2382 DEV_MAC_HDX_CFG); 2383 2384 /* Set Max Length and maximum tags allowed */ 2385 ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN); 2386 ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | 2387 DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | 2388 DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA | 2389 DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, 2390 DEV_MAC_TAGS_CFG); 2391 2392 /* Set SMAC of Pause frame (00:00:00:00:00:00) */ 2393 ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG); 2394 ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG); 2395 2396 /* Enable transmission of pause frames */ 2397 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); 2398 2399 /* Drop frames with multicast source address */ 2400 ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, 2401 ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, 2402 ANA_PORT_DROP_CFG, port); 2403 2404 /* Set default VLAN and tag type to 8021Q. */ 2405 ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q), 2406 REW_PORT_VLAN_CFG_PORT_TPID_M, 2407 REW_PORT_VLAN_CFG, port); 2408 2409 /* Disable source address learning for standalone mode */ 2410 ocelot_port_set_learning(ocelot, port, false); 2411 2412 /* Set the port's initial logical port ID value, enable receiving 2413 * frames on it, and configure the MAC address learning type to 2414 * automatic. 2415 */ 2416 ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | 2417 ANA_PORT_PORT_CFG_RECV_ENA | 2418 ANA_PORT_PORT_CFG_PORTID_VAL(port), 2419 ANA_PORT_PORT_CFG, port); 2420 2421 /* Enable vcap lookups */ 2422 ocelot_vcap_enable(ocelot, port); 2423 } 2424 EXPORT_SYMBOL(ocelot_init_port); 2425 2426 /* Configure and enable the CPU port module, which is a set of queues 2427 * accessible through register MMIO, frame DMA or Ethernet (in case 2428 * NPI mode is used). 2429 */ 2430 static void ocelot_cpu_port_init(struct ocelot *ocelot) 2431 { 2432 int cpu = ocelot->num_phys_ports; 2433 2434 /* The unicast destination PGID for the CPU port module is unused */ 2435 ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); 2436 /* Instead set up a multicast destination PGID for traffic copied to 2437 * the CPU. Whitelisted MAC addresses like the port netdevice MAC 2438 * addresses will be copied to the CPU via this PGID. 2439 */ 2440 ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); 2441 ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | 2442 ANA_PORT_PORT_CFG_PORTID_VAL(cpu), 2443 ANA_PORT_PORT_CFG, cpu); 2444 2445 /* Enable CPU port module */ 2446 ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); 2447 /* CPU port Injection/Extraction configuration */ 2448 ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR, 2449 OCELOT_TAG_PREFIX_NONE); 2450 ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR, 2451 OCELOT_TAG_PREFIX_NONE); 2452 2453 /* Configure the CPU port to be VLAN aware */ 2454 ocelot_write_gix(ocelot, 2455 ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) | 2456 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | 2457 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), 2458 ANA_PORT_VLAN_CFG, cpu); 2459 } 2460 2461 static void ocelot_detect_features(struct ocelot *ocelot) 2462 { 2463 int mmgt, eq_ctrl; 2464 2465 /* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds 2466 * the number of 240-byte free memory words (aka 4-cell chunks) and not 2467 * 192 bytes as the documentation incorrectly says. 2468 */ 2469 mmgt = ocelot_read(ocelot, SYS_MMGT); 2470 ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt); 2471 2472 eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL); 2473 ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl); 2474 } 2475 2476 int ocelot_init(struct ocelot *ocelot) 2477 { 2478 char queue_name[32]; 2479 int i, ret; 2480 u32 port; 2481 2482 if (ocelot->ops->reset) { 2483 ret = ocelot->ops->reset(ocelot); 2484 if (ret) { 2485 dev_err(ocelot->dev, "Switch reset failed\n"); 2486 return ret; 2487 } 2488 } 2489 2490 ocelot->stats = devm_kcalloc(ocelot->dev, 2491 ocelot->num_phys_ports * ocelot->num_stats, 2492 sizeof(u64), GFP_KERNEL); 2493 if (!ocelot->stats) 2494 return -ENOMEM; 2495 2496 mutex_init(&ocelot->stats_lock); 2497 mutex_init(&ocelot->ptp_lock); 2498 mutex_init(&ocelot->mact_lock); 2499 spin_lock_init(&ocelot->ptp_clock_lock); 2500 spin_lock_init(&ocelot->ts_id_lock); 2501 snprintf(queue_name, sizeof(queue_name), "%s-stats", 2502 dev_name(ocelot->dev)); 2503 ocelot->stats_queue = create_singlethread_workqueue(queue_name); 2504 if (!ocelot->stats_queue) 2505 return -ENOMEM; 2506 2507 ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); 2508 if (!ocelot->owq) { 2509 destroy_workqueue(ocelot->stats_queue); 2510 return -ENOMEM; 2511 } 2512 2513 INIT_LIST_HEAD(&ocelot->multicast); 2514 INIT_LIST_HEAD(&ocelot->pgids); 2515 INIT_LIST_HEAD(&ocelot->vlans); 2516 ocelot_detect_features(ocelot); 2517 ocelot_mact_init(ocelot); 2518 ocelot_vlan_init(ocelot); 2519 ocelot_vcap_init(ocelot); 2520 ocelot_cpu_port_init(ocelot); 2521 2522 for (port = 0; port < ocelot->num_phys_ports; port++) { 2523 /* Clear all counters (5 groups) */ 2524 ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) | 2525 SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f), 2526 SYS_STAT_CFG); 2527 } 2528 2529 /* Only use S-Tag */ 2530 ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG); 2531 2532 /* Aggregation mode */ 2533 ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA | 2534 ANA_AGGR_CFG_AC_DMAC_ENA | 2535 ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA | 2536 ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA | 2537 ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA | 2538 ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, 2539 ANA_AGGR_CFG); 2540 2541 /* Set MAC age time to default value. The entry is aged after 2542 * 2*AGE_PERIOD 2543 */ 2544 ocelot_write(ocelot, 2545 ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ), 2546 ANA_AUTOAGE); 2547 2548 /* Disable learning for frames discarded by VLAN ingress filtering */ 2549 regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1); 2550 2551 /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */ 2552 ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA | 2553 SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING); 2554 2555 /* Setup flooding PGIDs */ 2556 for (i = 0; i < ocelot->num_flooding_pgids; i++) 2557 ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | 2558 ANA_FLOODING_FLD_BROADCAST(PGID_BC) | 2559 ANA_FLOODING_FLD_UNICAST(PGID_UC), 2560 ANA_FLOODING, i); 2561 ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) | 2562 ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) | 2563 ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) | 2564 ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC), 2565 ANA_FLOODING_IPMC); 2566 2567 for (port = 0; port < ocelot->num_phys_ports; port++) { 2568 /* Transmit the frame to the local port. */ 2569 ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); 2570 /* Do not forward BPDU frames to the front ports. */ 2571 ocelot_write_gix(ocelot, 2572 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), 2573 ANA_PORT_CPU_FWD_BPDU_CFG, 2574 port); 2575 /* Ensure bridging is disabled */ 2576 ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port); 2577 } 2578 2579 for_each_nonreserved_multicast_dest_pgid(ocelot, i) { 2580 u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); 2581 2582 ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); 2583 } 2584 2585 ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE); 2586 2587 /* Allow broadcast and unknown L2 multicast to the CPU. */ 2588 ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), 2589 ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), 2590 ANA_PGID_PGID, PGID_MC); 2591 ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), 2592 ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), 2593 ANA_PGID_PGID, PGID_BC); 2594 ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4); 2595 ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6); 2596 2597 /* Allow manual injection via DEVCPU_QS registers, and byte swap these 2598 * registers endianness. 2599 */ 2600 ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP | 2601 QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0); 2602 ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP | 2603 QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0); 2604 ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) | 2605 ANA_CPUQ_CFG_CPUQ_LRN(2) | 2606 ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) | 2607 ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) | 2608 ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) | 2609 ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) | 2610 ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) | 2611 ANA_CPUQ_CFG_CPUQ_IGMP(6) | 2612 ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG); 2613 for (i = 0; i < 16; i++) 2614 ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) | 2615 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), 2616 ANA_CPUQ_8021_CFG, i); 2617 2618 INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); 2619 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, 2620 OCELOT_STATS_CHECK_DELAY); 2621 2622 return 0; 2623 } 2624 EXPORT_SYMBOL(ocelot_init); 2625 2626 void ocelot_deinit(struct ocelot *ocelot) 2627 { 2628 cancel_delayed_work(&ocelot->stats_work); 2629 destroy_workqueue(ocelot->stats_queue); 2630 destroy_workqueue(ocelot->owq); 2631 mutex_destroy(&ocelot->stats_lock); 2632 } 2633 EXPORT_SYMBOL(ocelot_deinit); 2634 2635 void ocelot_deinit_port(struct ocelot *ocelot, int port) 2636 { 2637 struct ocelot_port *ocelot_port = ocelot->ports[port]; 2638 2639 skb_queue_purge(&ocelot_port->tx_skbs); 2640 } 2641 EXPORT_SYMBOL(ocelot_deinit_port); 2642 2643 MODULE_LICENSE("Dual MIT/GPL"); 2644