1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2022 Schneider-Electric 4 * 5 * Clément Léger <clement.leger@bootlin.com> 6 */ 7 8 #include <linux/clk.h> 9 #include <linux/etherdevice.h> 10 #include <linux/if_bridge.h> 11 #include <linux/if_ether.h> 12 #include <linux/kernel.h> 13 #include <linux/module.h> 14 #include <linux/of.h> 15 #include <linux/of_mdio.h> 16 #include <net/dsa.h> 17 18 #include "rzn1_a5psw.h" 19 20 struct a5psw_stats { 21 u16 offset; 22 const char name[ETH_GSTRING_LEN]; 23 }; 24 25 #define STAT_DESC(_offset) { \ 26 .offset = A5PSW_##_offset, \ 27 .name = __stringify(_offset), \ 28 } 29 30 static const struct a5psw_stats a5psw_stats[] = { 31 STAT_DESC(aFramesTransmittedOK), 32 STAT_DESC(aFramesReceivedOK), 33 STAT_DESC(aFrameCheckSequenceErrors), 34 STAT_DESC(aAlignmentErrors), 35 STAT_DESC(aOctetsTransmittedOK), 36 STAT_DESC(aOctetsReceivedOK), 37 STAT_DESC(aTxPAUSEMACCtrlFrames), 38 STAT_DESC(aRxPAUSEMACCtrlFrames), 39 STAT_DESC(ifInErrors), 40 STAT_DESC(ifOutErrors), 41 STAT_DESC(ifInUcastPkts), 42 STAT_DESC(ifInMulticastPkts), 43 STAT_DESC(ifInBroadcastPkts), 44 STAT_DESC(ifOutDiscards), 45 STAT_DESC(ifOutUcastPkts), 46 STAT_DESC(ifOutMulticastPkts), 47 STAT_DESC(ifOutBroadcastPkts), 48 STAT_DESC(etherStatsDropEvents), 49 STAT_DESC(etherStatsOctets), 50 STAT_DESC(etherStatsPkts), 51 STAT_DESC(etherStatsUndersizePkts), 52 STAT_DESC(etherStatsOversizePkts), 53 STAT_DESC(etherStatsPkts64Octets), 54 STAT_DESC(etherStatsPkts65to127Octets), 55 STAT_DESC(etherStatsPkts128to255Octets), 56 STAT_DESC(etherStatsPkts256to511Octets), 57 STAT_DESC(etherStatsPkts1024to1518Octets), 58 STAT_DESC(etherStatsPkts1519toXOctets), 59 STAT_DESC(etherStatsJabbers), 60 STAT_DESC(etherStatsFragments), 61 STAT_DESC(VLANReceived), 62 STAT_DESC(VLANTransmitted), 63 STAT_DESC(aDeferred), 64 STAT_DESC(aMultipleCollisions), 65 STAT_DESC(aSingleCollisions), 66 STAT_DESC(aLateCollisions), 67 STAT_DESC(aExcessiveCollisions), 68 STAT_DESC(aCarrierSenseErrors), 
69 }; 70 71 static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value) 72 { 73 writel(value, a5psw->base + offset); 74 } 75 76 static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset) 77 { 78 return readl(a5psw->base + offset); 79 } 80 81 static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val) 82 { 83 u32 reg; 84 85 spin_lock(&a5psw->reg_lock); 86 87 reg = a5psw_reg_readl(a5psw, offset); 88 reg &= ~mask; 89 reg |= val; 90 a5psw_reg_writel(a5psw, offset, reg); 91 92 spin_unlock(&a5psw->reg_lock); 93 } 94 95 static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds, 96 int port, 97 enum dsa_tag_protocol mp) 98 { 99 return DSA_TAG_PROTO_RZN1_A5PSW; 100 } 101 102 static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern, 103 bool enable) 104 { 105 u32 rx_match = 0; 106 107 if (enable) 108 rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern); 109 110 a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port), 111 A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match); 112 } 113 114 static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable) 115 { 116 /* Enable "management forward" pattern matching, this will forward 117 * packets from this port only towards the management port and thus 118 * isolate the port. 119 */ 120 a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable); 121 } 122 123 static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable) 124 { 125 u32 mask = A5PSW_PORT_ENA_TX(port); 126 u32 reg = enable ? mask : 0; 127 128 /* Even though the port TX is disabled through TXENA bit in the 129 * PORT_ENA register, it can still send BPDUs. This depends on the tag 130 * configuration added when sending packets from the CPU port to the 131 * switch port. Indeed, when using forced forwarding without filtering, 132 * even disabled ports will be able to send packets that are tagged. 
133 * This allows to implement STP support when ports are in a state where 134 * forwarding traffic should be stopped but BPDUs should still be sent. 135 */ 136 a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg); 137 } 138 139 static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable) 140 { 141 u32 port_ena = 0; 142 143 if (enable) 144 port_ena |= A5PSW_PORT_ENA_TX_RX(port); 145 146 a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port), 147 port_ena); 148 } 149 150 static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl) 151 { 152 int ret; 153 154 a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl); 155 156 ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl, 157 !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY), 158 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT); 159 if (ret) 160 dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n"); 161 162 return ret; 163 } 164 165 static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port) 166 { 167 u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port); 168 169 mutex_lock(&a5psw->lk_lock); 170 a5psw_lk_execute_ctrl(a5psw, &ctrl); 171 mutex_unlock(&a5psw->lk_lock); 172 } 173 174 static void a5psw_port_authorize_set(struct a5psw *a5psw, int port, 175 bool authorize) 176 { 177 u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port)); 178 179 if (authorize) 180 reg |= A5PSW_AUTH_PORT_AUTHORIZED; 181 else 182 reg &= ~A5PSW_AUTH_PORT_AUTHORIZED; 183 184 a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg); 185 } 186 187 static void a5psw_port_disable(struct dsa_switch *ds, int port) 188 { 189 struct a5psw *a5psw = ds->priv; 190 191 a5psw_port_authorize_set(a5psw, port, false); 192 a5psw_port_enable_set(a5psw, port, false); 193 } 194 195 static int a5psw_port_enable(struct dsa_switch *ds, int port, 196 struct phy_device *phy) 197 { 198 struct a5psw *a5psw = ds->priv; 199 200 a5psw_port_authorize_set(a5psw, port, true); 201 a5psw_port_enable_set(a5psw, port, true); 202 203 return 0; 204 } 205 206 
/* DSA callback: program the per-port maximum frame length. The hardware
 * register counts the full frame, so the L2 header, the switch tag room
 * (A5PSW_EXTRA_MTU_LEN) and the FCS are added on top of the L3 MTU.
 */
static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct a5psw *a5psw = ds->priv;

	new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
	a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);

	return 0;
}

/* DSA callback: largest MTU supported on any port */
static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
{
	return A5PSW_MAX_MTU;
}

/* DSA callback: report supported PHY interface modes and MAC capabilities */
static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	unsigned long *intf = config->supported_interfaces;

	config->mac_capabilities = MAC_1000FD;

	if (dsa_is_cpu_port(ds, port)) {
		/* GMII is used internally and GMAC2 is connected to the switch
		 * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
		 */
		__set_bit(PHY_INTERFACE_MODE_GMII, intf);
	} else {
		config->mac_capabilities |= MAC_100 | MAC_10;
		phy_interface_set_rgmii(intf);
		__set_bit(PHY_INTERFACE_MODE_RMII, intf);
		__set_bit(PHY_INTERFACE_MODE_MII, intf);
	}
}

/* DSA callback: return the MII converter PCS for user ports, if one was
 * created at probe time (see a5psw_pcs_get()); the CPU port has none.
 */
static struct phylink_pcs *
a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
			     phy_interface_t interface)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct a5psw *a5psw = ds->priv;

	if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
		return a5psw->pcs[port];

	return NULL;
}

/* DSA callback: disable the port MAC RX/TX on link down */
static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface)
{
	struct a5psw *a5psw = ds->priv;
	u32 cmd_cfg;

	cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
	cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
	a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}

/* DSA callback: configure the port MAC for the resolved link parameters
 * (speed, duplex, pause) and enable RX/TX.
 */
static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev, int speed,
				      int duplex, bool tx_pause, bool rx_pause)
{
	u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
		      A5PSW_CMD_CFG_TX_CRC_APPEND;
	struct a5psw *a5psw = ds->priv;

	if (speed == SPEED_1000)
		cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;

	if (duplex == DUPLEX_HALF)
		cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;

	cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;

	if (!rx_pause)
		cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;

	a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}

/* DSA callback: convert the requested ageing time (ms) into LK_AGETIME
 * clock-derived units. The hardware ages the whole table, hence the
 * A5PSW_TABLE_ENTRIES * 1024 scaling; reject values above what the
 * register field can represent.
 */
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct a5psw *a5psw = ds->priv;
	unsigned long rate;
	u64 max, tmp;
	u32 agetime;

	rate = clk_get_rate(a5psw->clk);
	max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
		       rate) * 1000;
	if (msecs > max)
		return -EINVAL;

	tmp = div_u64(rate, MSEC_PER_SEC);
	agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);

	a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);

	return 0;
}

/* Enable/disable address learning on @port (the register bit is a
 * learning-DISABLE bit, hence the inversion).
 */
static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
{
	u32 mask = A5PSW_INPUT_LEARN_DIS(port);
	u32 reg = !learn ? mask : 0;

	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}

/* Block/unblock RX traffic on @port (used for STP states) */
static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
{
	u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
	u32 reg = block ? mask : 0;

	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}

/* Include/exclude @port from the unicast, broadcast and multicast
 * unknown-destination flooding masks.
 */
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
					  bool set)
{
	u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
			A5PSW_MCAST_DEF_MASK};
	int i;

	for (i = 0; i < ARRAY_SIZE(offsets); i++)
		a5psw_reg_rmw(a5psw, offsets[i], BIT(port),
			      set ? BIT(port) : 0);
}

/* Standalone mode: no learning, no flooding, all traffic forced to the
 * management (CPU) port. Bridged mode is the opposite.
 */
static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
				      bool standalone)
{
	a5psw_port_learning_set(a5psw, port, !standalone);
	a5psw_flooding_set_resolution(a5psw, port, !standalone);
	a5psw_port_mgmtfwd_set(a5psw, port, standalone);
}

/* DSA callback: attach @port to a bridge. Only one bridge device is
 * supported by the hardware offload; joining switches the port out of
 * standalone mode.
 */
static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge,
				  bool *tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct a5psw *a5psw = ds->priv;

	/* We only support 1 bridge device */
	if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Forwarding offload supported for a single bridge");
		return -EOPNOTSUPP;
	}

	a5psw->br_dev = bridge.dev;
	a5psw_port_set_standalone(a5psw, port, false);

	a5psw->bridged_ports |= BIT(port);

	return 0;
}

/* DSA callback: detach @port from the bridge and return it to standalone
 * mode; forget the bridge device once only the CPU port remains bridged.
 */
static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge)
{
	struct a5psw *a5psw = ds->priv;

	a5psw->bridged_ports &= ~BIT(port);

	a5psw_port_set_standalone(a5psw, port, true);

	/* No more ports bridged */
	if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
		a5psw->br_dev = NULL;
}

/* DSA callback: validate that only the bridge port flags we can offload
 * (learning and the three flooding kinds) are being changed.
 */
static int a5psw_port_pre_bridge_flags(struct dsa_switch *ds, int port,
				       struct switchdev_brport_flags flags,
				       struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

/* DSA callback: apply bridge port flags to the hardware masks */
static int
a5psw_port_bridge_flags(struct dsa_switch *ds, int port,
			struct switchdev_brport_flags flags,
			struct netlink_ext_ack *extack)
{
	struct a5psw *a5psw = ds->priv;
	u32 val;

	/* If a port is set as standalone, we do not want to be able to
	 * configure flooding nor learning which would result in joining the
	 * unique bridge. This can happen when a port leaves the bridge, in
	 * which case the DSA core will try to "clear" all flags for the
	 * standalone port (ie enable flooding, disable learning). In that case
	 * do not fail but do not apply the flags.
	 */
	if (!(a5psw->bridged_ports & BIT(port)))
		return 0;

	if (flags.mask & BR_LEARNING) {
		val = flags.val & BR_LEARNING ? 0 : A5PSW_INPUT_LEARN_DIS(port);
		a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN,
			      A5PSW_INPUT_LEARN_DIS(port), val);
	}

	if (flags.mask & BR_FLOOD) {
		val = flags.val & BR_FLOOD ? BIT(port) : 0;
		a5psw_reg_rmw(a5psw, A5PSW_UCAST_DEF_MASK, BIT(port), val);
	}

	if (flags.mask & BR_MCAST_FLOOD) {
		val = flags.val & BR_MCAST_FLOOD ? BIT(port) : 0;
		a5psw_reg_rmw(a5psw, A5PSW_MCAST_DEF_MASK, BIT(port), val);
	}

	if (flags.mask & BR_BCAST_FLOOD) {
		val = flags.val & BR_BCAST_FLOOD ? BIT(port) : 0;
		a5psw_reg_rmw(a5psw, A5PSW_BCAST_DEF_MASK, BIT(port), val);
	}

	return 0;
}

/* DSA callback: map an STP state onto the three per-port knobs we control:
 * RX blocking, TX enable (BPDUs still pass, see a5psw_port_tx_enable()) and
 * learning (only if the bridge port has learning on).
 */
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	bool learning_enabled, rx_enabled, tx_enabled;
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct a5psw *a5psw = ds->priv;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		rx_enabled = false;
		tx_enabled = false;
		learning_enabled = false;
		break;
	case BR_STATE_LEARNING:
		rx_enabled = false;
		tx_enabled = false;
		learning_enabled = dp->learning;
		break;
	case BR_STATE_FORWARDING:
		rx_enabled = true;
		tx_enabled = true;
		learning_enabled = dp->learning;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	a5psw_port_learning_set(a5psw, port, learning_enabled);
	a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
	a5psw_port_tx_enable(a5psw, port, tx_enabled);
}

/* DSA callback: flush dynamically learned addresses for @port */
static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_fdb_flush(a5psw, port);
}

/* Look up @lk_data in the hardware FDB; on success *entry holds the table
 * index the MAC hashes to (valid whether or not the entry is populated —
 * callers must check lk_data.entry.valid after re-reading LK_DATA_HI).
 * Callers must hold lk_lock.
 */
static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
				   u16 *entry)
{
	u32 ctrl;
	int ret;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);

	ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
	ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
	if (ret)
		return ret;

	*entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;

	return 0;
}

/* DSA callback: add a static FDB entry for @addr on @port. Note the
 * hardware FDB is not VID-aware; @vid is ignored.
 */
static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data = {0};
	bool inc_learncount = false;
	int ret = 0;
	u16 entry;
	u32 reg;

	ether_addr_copy(lk_data.entry.mac, addr);
	lk_data.entry.port_mask = BIT(port);

	mutex_lock(&a5psw->lk_lock);

	/* Set the value to be written in the lookup table */
	ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
	if (ret)
		goto lk_unlock;

	lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
	if (!lk_data.entry.valid) {
		inc_learncount = true;
		/* port_mask set to 0x1f when entry is not valid, clear it */
		lk_data.entry.port_mask = 0;
		lk_data.entry.prio = 0;
	}

	lk_data.entry.port_mask |= BIT(port);
	lk_data.entry.is_static = 1;
	lk_data.entry.valid = 1;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

	reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
	ret = a5psw_lk_execute_ctrl(a5psw, &reg);
	if (ret)
		goto lk_unlock;

	/* Only account a new entry, not an update of an existing one */
	if (inc_learncount) {
		reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
		a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
	}

lk_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}

/* DSA callback: remove @port from the FDB entry for @addr, clearing the
 * entry entirely once no port references it.
 */
static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data = {0};
	bool clear = false;
	u16 entry;
	u32 reg;
	int ret;

	ether_addr_copy(lk_data.entry.mac, addr);

	mutex_lock(&a5psw->lk_lock);

	ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
	if (ret)
		goto lk_unlock;

	lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);

	/* Our hardware does not associate any VID to the FDB entries so this
	 * means that if two entries were added for the same mac but for
	 * different VID, then, on the deletion of the first one, we would also
	 * delete the second one. Since there is unfortunately nothing we can do
	 * about that, do not return an error...
	 */
	if (!lk_data.entry.valid)
		goto lk_unlock;

	lk_data.entry.port_mask &= ~BIT(port);
	/* If there is no more port in the mask, clear the entry */
	if (lk_data.entry.port_mask == 0)
		clear = true;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

	reg = entry;
	if (clear)
		reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
	else
		reg |= A5PSW_LK_ADDR_CTRL_WRITE;

	ret = a5psw_lk_execute_ctrl(a5psw, &reg);
	if (ret)
		goto lk_unlock;

	/* Decrement LEARNCOUNT */
	if (clear) {
		reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
		a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
	}

lk_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}

/* DSA callback: walk the whole lookup table and report entries that
 * reference @port through @cb. VID is reported as 0 (hardware FDB is not
 * VID-aware).
 */
static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data;
	int i = 0, ret = 0;
	u32 reg;

	mutex_lock(&a5psw->lk_lock);

	for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
		reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;

		ret = a5psw_lk_execute_ctrl(a5psw, &reg);
		if (ret)
			goto out_unlock;

		lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
		/* If entry is not valid or does not contain the port, skip */
		if (!lk_data.entry.valid ||
		    !(lk_data.entry.port_mask & BIT(port)))
			continue;

		lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);

		ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
		if (ret)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}

/* DSA callback: toggle VLAN tagging and input filtering (VERIFY/DISCARD
 * bits) for @port.
 */
static int a5psw_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering,
				     struct netlink_ext_ack *extack)
{
	u32 mask = BIT(port + A5PSW_VLAN_VERI_SHIFT) |
		   BIT(port + A5PSW_VLAN_DISC_SHIFT);
	u32 val = vlan_filtering ? mask : 0;
	struct a5psw *a5psw = ds->priv;

	/* Disable/enable vlan tagging */
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE_ENA, BIT(port),
		      vlan_filtering ? BIT(port) : 0);

	/* Disable/enable vlan input filtering */
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_VERIFY, mask, val);

	return 0;
}

/* Return the VLAN resource table index holding @vid, or -1 if absent */
static int a5psw_find_vlan_entry(struct a5psw *a5psw, u16 vid)
{
	u32 vlan_res;
	int i;

	/* Find vlan for this port */
	for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
		vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
		if (FIELD_GET(A5PSW_VLAN_RES_VLANID, vlan_res) == vid)
			return i;
	}

	return -1;
}

/* Claim a free VLAN resource entry (empty port mask) for @newvid.
 * Returns the index, or -1 when the table is full.
 */
static int a5psw_new_vlan_res_entry(struct a5psw *a5psw, u16 newvid)
{
	u32 vlan_res;
	int i;

	/* Find a free VLAN entry */
	for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
		vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
		if (!(FIELD_GET(A5PSW_VLAN_RES_PORTMASK, vlan_res))) {
			vlan_res = FIELD_PREP(A5PSW_VLAN_RES_VLANID, newvid);
			a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(i), vlan_res);
			return i;
		}
	}

	return -1;
}

/* Add/remove @port in the egress-tagged mask of VLAN resource entry
 * @vlan_res_id. The tag mask is accessed via a read-mode toggle: writing
 * RD_TAGMASK switches the register to expose the tag mask, and it is
 * written back afterwards to restore normal mode.
 */
static void a5psw_port_vlan_tagged_cfg(struct a5psw *a5psw,
				       unsigned int vlan_res_id, int port,
				       bool set)
{
	u32 mask = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_RD_TAGMASK |
		   BIT(port);
	u32 vlan_res_off = A5PSW_VLAN_RES(vlan_res_id);
	u32 val = A5PSW_VLAN_RES_WR_TAGMASK, reg;

	if (set)
		val |= BIT(port);

	/* Toggle tag mask read */
	a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
	reg = a5psw_reg_readl(a5psw, vlan_res_off);
	a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);

	reg &= ~mask;
	reg |= val;
	a5psw_reg_writel(a5psw, vlan_res_off, reg);
}

/* Add/remove @port in the member port mask of VLAN resource entry
 * @vlan_res_id.
 */
static void a5psw_port_vlan_cfg(struct a5psw *a5psw, unsigned int vlan_res_id,
				int port, bool set)
{
	u32 mask = A5PSW_VLAN_RES_WR_TAGMASK | BIT(port);
	u32 reg = A5PSW_VLAN_RES_WR_PORTMASK;

	if (set)
		reg |= BIT(port);

	a5psw_reg_rmw(a5psw, A5PSW_VLAN_RES(vlan_res_id), mask, reg);
}

/* DSA callback: add @port to a VLAN, allocating a resource entry for the
 * VID if needed, and optionally mark it egress-tagged and/or PVID.
 */
static int a5psw_port_vlan_add(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan,
			       struct netlink_ext_ack *extack)
{
	bool tagged = !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct a5psw *a5psw = ds->priv;
	u16 vid = vlan->vid;
	int vlan_res_id;

	dev_dbg(a5psw->dev, "Add VLAN %d on port %d, %s, %s\n",
		vid, port, tagged ? "tagged" : "untagged",
		pvid ? "PVID" : "no PVID");

	vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
	if (vlan_res_id < 0) {
		vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid);
		if (vlan_res_id < 0)
			return -ENOSPC;
	}

	a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, true);
	if (tagged)
		a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, true);

	/* Configure port to tag with corresponding VID, but do not enable it
	 * yet: wait for vlan filtering to be enabled to enable vlan port
	 * tagging
	 */
	if (pvid)
		a5psw_reg_writel(a5psw, A5PSW_SYSTEM_TAGINFO(port), vid);

	return 0;
}

/* DSA callback: remove @port from the VLAN's member and tagged masks */
static int a5psw_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct a5psw *a5psw = ds->priv;
	u16 vid = vlan->vid;
	int vlan_res_id;

	dev_dbg(a5psw->dev, "Removing VLAN %d on port %d\n", vid, port);

	vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
	if (vlan_res_id < 0)
		return -EINVAL;

	a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, false);
	a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, false);

	return 0;
}

/* Read a 64-bit hardware counter: low word from the per-port stat register,
 * high word from STATS_HIWORD which the hardware latches on the low read.
 */
static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
{
	u32 reg_lo, reg_hi;

	reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
	/* A5PSW_STATS_HIWORD is latched on stat read */
	reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);

	return ((u64)reg_hi << 32) | reg_lo;
}

/* ethtool: copy the stat names out of a5psw_stats[] */
static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	unsigned int u;

	if (stringset != ETH_SS_STATS)
		return;

	for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
		memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
		       ETH_GSTRING_LEN);
	}
}

/* ethtool: read every counter listed in a5psw_stats[] for @port */
static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct a5psw *a5psw = ds->priv;
	unsigned int u;

	for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
		data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
}

/* ethtool: number of stats exposed per port */
static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(a5psw_stats);
}

/* ethtool: map hardware counters onto the standard MAC statistics */
static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
				    struct ethtool_eth_mac_stats *mac_stats)
{
	struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
	mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
	mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
	mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
	mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
	mac_stats->AlignmentErrors = RD(aAlignmentErrors);
	mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
	mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
	mac_stats->LateCollisions = RD(aLateCollisions);
	mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
	mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
	mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
	mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
	mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
	mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
	mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
	mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
	mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
	mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
#undef RD
}

/* Packet-size buckets matching the hardware etherStatsPkts* counters */
static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, A5PSW_MAX_MTU },
	{}
};

/* ethtool: report RMON counters and the size histogram for @port */
static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
				 struct ethtool_rmon_stats *rmon_stats,
				 const struct ethtool_rmon_hist_range **ranges)
{
	struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
	rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
	rmon_stats->fragments = RD(etherStatsFragments);
	rmon_stats->jabbers = RD(etherStatsJabbers);
	rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
	rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
	rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
	rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
	rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
	rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
	rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
#undef RD

	*ranges = a5psw_rmon_ranges;
}

/* ethtool: report PAUSE frame counters for @port */
static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
				     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct a5psw *a5psw = ds->priv;
	u64 stat;

	stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
	ctrl_stats->MACControlFramesTransmitted = stat;
	stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
	ctrl_stats->MACControlFramesReceived = stat;
}

/* Per-port VLAN defaults applied at setup time */
static void a5psw_vlan_setup(struct a5psw *a5psw, int port)
{
	u32 reg;

	/* Enable TAG always mode for the port, this is actually controlled
	 * by VLAN_IN_MODE_ENA field which will be used for PVID insertion
	 */
	reg = A5PSW_VLAN_IN_MODE_TAG_ALWAYS;
	reg <<= A5PSW_VLAN_IN_MODE_PORT_SHIFT(port);
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE, A5PSW_VLAN_IN_MODE_PORT(port),
		      reg);

	/* Set transparent mode for output frame manipulation, this will depend
	 * on the VLAN_RES configuration mode
	 */
	reg = A5PSW_VLAN_OUT_MODE_TRANSPARENT;
	reg <<= A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port);
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_OUT_MODE,
		      A5PSW_VLAN_OUT_MODE_PORT(port), reg);
}

/* DSA callback: one-time switch initialization — management port and
 * tagging setup, lookup-table clear, VLAN resource table clear, then
 * per-port reset and default (standalone) configuration.
 */
static int a5psw_setup(struct dsa_switch *ds)
{
	struct a5psw *a5psw = ds->priv;
	int port, vlan, ret;
	struct dsa_port *dp;
	u32 reg;

	/* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
	dsa_switch_for_each_cpu_port(dp, ds) {
		if (dp->index != A5PSW_CPU_PORT) {
			dev_err(a5psw->dev, "Invalid CPU port\n");
			return -EINVAL;
		}
	}

	/* Configure management port */
	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);

	/* Set pattern 0 to forward all frame to mgmt port */
	a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
			 A5PSW_PATTERN_CTRL_MGMTFWD);

	/* Enable port tagging */
	reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
	reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);

	/* Enable normal switch operation */
	reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
	      A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
	      A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
	a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);

	ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
				 !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
				 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
	if (ret) {
		dev_err(a5psw->dev, "Failed to clear lookup table\n");
		return ret;
	}

	/* Reset learn count to 0 */
	reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
	a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);

	/* Clear VLAN resource table */
	reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
	for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
		a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);

	/* Reset all ports */
	dsa_switch_for_each_port(dp, ds) {
		port = dp->index;

		/* Reset the port */
		a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
				 A5PSW_CMD_CFG_SW_RESET);

		/* Enable only CPU port */
		a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));

		if (dsa_port_is_unused(dp))
			continue;

		/* Enable egress flooding and learning for CPU port */
		if (dsa_port_is_cpu(dp)) {
			a5psw_flooding_set_resolution(a5psw, port, true);
			a5psw_port_learning_set(a5psw, port, true);
		}

		/* Enable standalone mode for user ports */
		if (dsa_port_is_user(dp))
			a5psw_port_set_standalone(a5psw, port, true);

		a5psw_vlan_setup(a5psw, port);
	}

	return 0;
}

static const struct dsa_switch_ops a5psw_switch_ops = {
	.get_tag_protocol = a5psw_get_tag_protocol,
	.setup = a5psw_setup,
	.port_disable = a5psw_port_disable,
	.port_enable = a5psw_port_enable,
	.phylink_get_caps = a5psw_phylink_get_caps,
	.phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
	.phylink_mac_link_down = a5psw_phylink_mac_link_down,
	.phylink_mac_link_up = a5psw_phylink_mac_link_up,
	.port_change_mtu = a5psw_port_change_mtu,
	.port_max_mtu = a5psw_port_max_mtu,
	.get_sset_count = a5psw_get_sset_count,
	.get_strings = a5psw_get_strings,
	.get_ethtool_stats = a5psw_get_ethtool_stats,
	.get_eth_mac_stats = a5psw_get_eth_mac_stats,
	.get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
	.get_rmon_stats = a5psw_get_rmon_stats,
	.set_ageing_time = a5psw_set_ageing_time,
	.port_bridge_join = a5psw_port_bridge_join,
	.port_bridge_leave = a5psw_port_bridge_leave,
	.port_pre_bridge_flags = a5psw_port_pre_bridge_flags,
	.port_bridge_flags = a5psw_port_bridge_flags,
	.port_stp_state_set = a5psw_port_stp_state_set,
	.port_fast_age = a5psw_port_fast_age,
	.port_vlan_filtering = a5psw_port_vlan_filtering,
	.port_vlan_add = a5psw_port_vlan_add,
	.port_vlan_del = a5psw_port_vlan_del,
	.port_fdb_add = a5psw_port_fdb_add,
	.port_fdb_del = a5psw_port_fdb_del,
	.port_fdb_dump = a5psw_port_fdb_dump,
};

/* Poll the MDIO status register until the BUSY bit clears (1s timeout) */
static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
{
	u32 status;
	int err;

	err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
				 !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
				 1000 * USEC_PER_MSEC);
	if (err)
		dev_err(a5psw->dev, "MDIO command timeout\n");

	return err;
}

/* MDIO bus read op: issue a read command, wait, then fetch the data word
 * and check the read-error status bit.
 */
static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct a5psw *a5psw = bus->priv;
	u32 cmd, status;
	int ret;

	cmd = A5PSW_MDIO_COMMAND_READ;
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);

	ret = a5psw_mdio_wait_busy(a5psw);
	if (ret)
		return ret;

	ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;

	status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
	if (status & A5PSW_MDIO_CFG_STATUS_READERR)
		return -EIO;

	return ret;
}

/* MDIO bus write op: program the command, write the data word, then wait
 * for completion.
 */
static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
			    u16 phy_data)
{
	struct a5psw *a5psw = bus->priv;
	u32 cmd;

	cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
	a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);

	return a5psw_mdio_wait_busy(a5psw);
}

/* Derive and program the MDIO clock divider from the host clock rate for
 * the requested bus frequency; rejects out-of-range dividers.
 */
static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
{
	unsigned long rate;
	unsigned long div;
	u32 cfgstatus;

	rate = clk_get_rate(a5psw->hclk);
	div = ((rate / mdio_freq) / 2);
	if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
	    div < A5PSW_MDIO_CLK_DIV_MIN) {
		dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
		return -ERANGE;
	}

	cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);

	return 0;
}

/* Register the internal MDIO bus described by @node; bus frequency comes
 * from the "clock-frequency" DT property (default A5PSW_MDIO_DEF_FREQ).
 * Uses devm_* so unwinding is automatic on driver detach.
 */
static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
{
	struct device *dev = a5psw->dev;
	struct mii_bus *bus;
	u32 mdio_freq;
	int ret;

	if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
		mdio_freq = A5PSW_MDIO_DEF_FREQ;

	ret = a5psw_mdio_config(a5psw, mdio_freq);
	if (ret)
		return ret;

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "a5psw_mdio";
	bus->read = a5psw_mdio_read;
	bus->write = a5psw_mdio_write;
	bus->priv = a5psw;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

	a5psw->mii_bus = bus;

	return devm_of_mdiobus_register(dev, bus, node);
}

/* Destroy every MII converter PCS created by a5psw_pcs_get() */
static void a5psw_pcs_free(struct a5psw *a5psw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
		if (a5psw->pcs[i])
			miic_destroy(a5psw->pcs[i]);
	}
}

static int a5psw_pcs_get(struct a5psw *a5psw)
{
	struct device_node *ports, *port, *pcs_node;
	struct phylink_pcs *pcs;
	int ret;
	u32 reg;

	ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		pcs_node = of_parse_phandle(port, "pcs-handle", 0);
		if (!pcs_node)
			continue;

		if (of_property_read_u32(port, "reg", &reg)) {
			ret = -EINVAL;
			goto free_pcs;
		}

		if (reg >= ARRAY_SIZE(a5psw->pcs)) {
			ret = -ENODEV;
			goto free_pcs;
		}

		pcs = miic_create(a5psw->dev, pcs_node);
		if (IS_ERR(pcs)) {
			dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
				reg);
1178 ret = PTR_ERR(pcs); 1179 goto free_pcs; 1180 } 1181 1182 a5psw->pcs[reg] = pcs; 1183 of_node_put(pcs_node); 1184 } 1185 of_node_put(ports); 1186 1187 return 0; 1188 1189 free_pcs: 1190 of_node_put(pcs_node); 1191 of_node_put(port); 1192 of_node_put(ports); 1193 a5psw_pcs_free(a5psw); 1194 1195 return ret; 1196 } 1197 1198 static int a5psw_probe(struct platform_device *pdev) 1199 { 1200 struct device *dev = &pdev->dev; 1201 struct device_node *mdio; 1202 struct dsa_switch *ds; 1203 struct a5psw *a5psw; 1204 int ret; 1205 1206 a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL); 1207 if (!a5psw) 1208 return -ENOMEM; 1209 1210 a5psw->dev = dev; 1211 mutex_init(&a5psw->lk_lock); 1212 spin_lock_init(&a5psw->reg_lock); 1213 a5psw->base = devm_platform_ioremap_resource(pdev, 0); 1214 if (IS_ERR(a5psw->base)) 1215 return PTR_ERR(a5psw->base); 1216 1217 a5psw->bridged_ports = BIT(A5PSW_CPU_PORT); 1218 1219 ret = a5psw_pcs_get(a5psw); 1220 if (ret) 1221 return ret; 1222 1223 a5psw->hclk = devm_clk_get(dev, "hclk"); 1224 if (IS_ERR(a5psw->hclk)) { 1225 dev_err(dev, "failed get hclk clock\n"); 1226 ret = PTR_ERR(a5psw->hclk); 1227 goto free_pcs; 1228 } 1229 1230 a5psw->clk = devm_clk_get(dev, "clk"); 1231 if (IS_ERR(a5psw->clk)) { 1232 dev_err(dev, "failed get clk_switch clock\n"); 1233 ret = PTR_ERR(a5psw->clk); 1234 goto free_pcs; 1235 } 1236 1237 ret = clk_prepare_enable(a5psw->clk); 1238 if (ret) 1239 goto free_pcs; 1240 1241 ret = clk_prepare_enable(a5psw->hclk); 1242 if (ret) 1243 goto clk_disable; 1244 1245 mdio = of_get_child_by_name(dev->of_node, "mdio"); 1246 if (of_device_is_available(mdio)) { 1247 ret = a5psw_probe_mdio(a5psw, mdio); 1248 if (ret) { 1249 of_node_put(mdio); 1250 dev_err(dev, "Failed to register MDIO: %d\n", ret); 1251 goto hclk_disable; 1252 } 1253 } 1254 1255 of_node_put(mdio); 1256 1257 ds = &a5psw->ds; 1258 ds->dev = dev; 1259 ds->num_ports = A5PSW_PORTS_NUM; 1260 ds->ops = &a5psw_switch_ops; 1261 ds->priv = a5psw; 1262 1263 ret = 
dsa_register_switch(ds); 1264 if (ret) { 1265 dev_err(dev, "Failed to register DSA switch: %d\n", ret); 1266 goto hclk_disable; 1267 } 1268 1269 return 0; 1270 1271 hclk_disable: 1272 clk_disable_unprepare(a5psw->hclk); 1273 clk_disable: 1274 clk_disable_unprepare(a5psw->clk); 1275 free_pcs: 1276 a5psw_pcs_free(a5psw); 1277 1278 return ret; 1279 } 1280 1281 static int a5psw_remove(struct platform_device *pdev) 1282 { 1283 struct a5psw *a5psw = platform_get_drvdata(pdev); 1284 1285 if (!a5psw) 1286 return 0; 1287 1288 dsa_unregister_switch(&a5psw->ds); 1289 a5psw_pcs_free(a5psw); 1290 clk_disable_unprepare(a5psw->hclk); 1291 clk_disable_unprepare(a5psw->clk); 1292 1293 return 0; 1294 } 1295 1296 static void a5psw_shutdown(struct platform_device *pdev) 1297 { 1298 struct a5psw *a5psw = platform_get_drvdata(pdev); 1299 1300 if (!a5psw) 1301 return; 1302 1303 dsa_switch_shutdown(&a5psw->ds); 1304 1305 platform_set_drvdata(pdev, NULL); 1306 } 1307 1308 static const struct of_device_id a5psw_of_mtable[] = { 1309 { .compatible = "renesas,rzn1-a5psw", }, 1310 { /* sentinel */ }, 1311 }; 1312 MODULE_DEVICE_TABLE(of, a5psw_of_mtable); 1313 1314 static struct platform_driver a5psw_driver = { 1315 .driver = { 1316 .name = "rzn1_a5psw", 1317 .of_match_table = a5psw_of_mtable, 1318 }, 1319 .probe = a5psw_probe, 1320 .remove = a5psw_remove, 1321 .shutdown = a5psw_shutdown, 1322 }; 1323 module_platform_driver(a5psw_driver); 1324 1325 MODULE_LICENSE("GPL"); 1326 MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver"); 1327 MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>"); 1328