// SPDX-License-Identifier: GPL-2.0
/* ICSSG Ethernet driver
 *
 * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
 */

#include <linux/iopoll.h>
#include <linux/regmap.h>
#include <uapi/linux/if_ether.h>
#include "icssg_config.h"
#include "icssg_prueth.h"
#include "icssg_switch_map.h"
#include "icssg_mii_rt.h"

/* TX IPG values to be set for 100M link speed. These values are in
 * ocp_clk cycles, so they need to change if ocp_clk is changed for a
 * specific h/w design.
 */

/* IPG is in core_clk cycles */
#define MII_RT_TX_IPG_100M	0x17
#define MII_RT_TX_IPG_1G	0xb
#define MII_RT_TX_IPG_100M_SR1	0x166
#define MII_RT_TX_IPG_1G_SR1	0x1a

#define	ICSSG_QUEUES_MAX		64
#define	ICSSG_QUEUE_OFFSET		0xd00
#define	ICSSG_QUEUE_PEEK_OFFSET		0xe00
#define	ICSSG_QUEUE_CNT_OFFSET		0xe40
#define	ICSSG_QUEUE_RESET_OFFSET	0xf40

#define	ICSSG_NUM_TX_QUEUES	8

#define	RECYCLE_Q_SLICE0	16
#define	RECYCLE_Q_SLICE1	17

#define	ICSSG_NUM_OTHER_QUEUES	5	/* port, host and special queues */

#define	PORT_HI_Q_SLICE0	32
#define	PORT_LO_Q_SLICE0	33
#define	HOST_HI_Q_SLICE0	34
#define	HOST_LO_Q_SLICE0	35
#define	HOST_SPL_Q_SLICE0	40	/* Special Queue */

#define	PORT_HI_Q_SLICE1	36
#define	PORT_LO_Q_SLICE1	37
#define	HOST_HI_Q_SLICE1	38
#define	HOST_LO_Q_SLICE1	39
#define	HOST_SPL_Q_SLICE1	41	/* Special Queue */

#define MII_RXCFG_DEFAULT	(PRUSS_MII_RT_RXCFG_RX_ENABLE | \
				 PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \
				 PRUSS_MII_RT_RXCFG_RX_L2_EN | \
				 PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS)

#define MII_TXCFG_DEFAULT	(PRUSS_MII_RT_TXCFG_TX_ENABLE | \
				 PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \
				 PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \
				 PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN)

#define ICSSG_CFG_DEFAULT	(ICSSG_CFG_TX_L1_EN | \
				 ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \
				 ICSSG_CFG_TX_PRU_EN | \
				 ICSSG_CFG_SGMII_MODE)

#define FDB_GEN_CFG1		0x60
#define SMEM_VLAN_OFFSET	8
#define SMEM_VLAN_OFFSET_MASK	GENMASK(25, 8)

#define FDB_GEN_CFG2		0x64
#define FDB_VLAN_EN		BIT(6)
#define FDB_HOST_EN		BIT(2)
#define FDB_PRU1_EN		BIT(1)
#define FDB_PRU0_EN		BIT(0)
#define FDB_EN_ALL		(FDB_PRU0_EN | FDB_PRU1_EN | \
				 FDB_HOST_EN | FDB_VLAN_EN)

/**
 * struct map - ICSSG Queue Map
 * @queue: Queue number
 * @pd_addr_start: Packet descriptor queue reserved memory
 * @flags: Flags
 * @special: Indicates whether this queue is a special queue or not
 */
struct map {
	int queue;
	u32 pd_addr_start;
	u32 flags;
	bool special;
};

/* Hardware queue map for ICSSG */
static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
	{
		{ PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 },
		{ PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 },
		{ HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 },
		{ HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 },
		{ HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 },
	},
	{
		{ PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 },
		{ PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 },
		{ HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 },
		{ HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 },
		{ HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 },
	},
};

static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int mii = prueth_emac_slice(emac);
	u32 txcfg_reg, pcnt_reg, txcfg;
	struct regmap *mii_rt;

	mii_rt = prueth->mii_rt;

	txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
					 PRUSS_MII_RT_TXCFG1;
	pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
					PRUSS_MII_RT_RX_PCNT1;

	txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
		PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
		PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;

	if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;

	regmap_write(mii_rt, txcfg_reg, txcfg);
	regmap_write(mii_rt, pcnt_reg, 0x1);
}
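/* TX_MUX_SEL selection, summarized from the conditionals of the helper
 * above and the one below (no claim beyond what the code does):
 *
 *   - firmware offload: set for MII1 in MII mode, for MII0 otherwise
 *   - plain EMAC:       set for MII0 in MII mode, for MII1 otherwise
 *
 * i.e. the two variants drive the TX mux from opposite slices in any given
 * PHY mode; see also the TX line swap note in icssg_config_mii_init().
 */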
static void icssg_config_mii_init(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 txcfg, txcfg_reg, pcnt_reg;
	struct regmap *mii_rt;

	mii_rt = prueth->mii_rt;

	txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
					   PRUSS_MII_RT_TXCFG1;
	pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
					  PRUSS_MII_RT_RX_PCNT1;

	txcfg = MII_TXCFG_DEFAULT;

	/* In MII mode the TX lines are swapped inside the ICSSG, so the
	 * TX_MUX_SEL configuration also needs to be swapped compared to
	 * RGMII mode.
	 */
	if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;

	regmap_write(mii_rt, txcfg_reg, txcfg);
	regmap_write(mii_rt, pcnt_reg, 0x1);
}

static void icssg_miig_queues_init(struct prueth *prueth, int slice)
{
	struct regmap *miig_rt = prueth->miig_rt;
	void __iomem *smem = prueth->shram.va;
	u8 pd[ICSSG_SPECIAL_PD_SIZE];
	int queue = 0, i, j;
	u32 *pdword;

	/* reset hwqueues */
	if (slice)
		queue = ICSSG_NUM_TX_QUEUES;

	for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) {
		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
		queue++;
	}

	queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0;
	regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);

	for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) {
		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET,
			     hwq_map[slice][i].queue);
	}

	/* initialize packet descriptors in SMEM */
	/* push packet descriptors to hwqueues */

	pdword = (u32 *)pd;
	for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) {
		const struct map *mp;
		int pd_size, num_pds;
		u32 pdaddr;

		mp = &hwq_map[slice][j];
		if (mp->special) {
			pd_size = ICSSG_SPECIAL_PD_SIZE;
			num_pds = ICSSG_NUM_SPECIAL_PDS;
		} else {
			pd_size = ICSSG_NORMAL_PD_SIZE;
			num_pds = ICSSG_NUM_NORMAL_PDS;
		}

		for (i = 0; i < num_pds; i++) {
			memset(pd, 0, pd_size);

			pdword[0] &= ICSSG_FLAG_MASK;
			pdword[0] |= mp->flags;
			pdaddr = mp->pd_addr_start + i * pd_size;

			memcpy_toio(smem + pdaddr, pd, pd_size);
			queue = mp->queue;
			regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue,
				     pdaddr);
		}
	}
}

void icssg_config_ipg(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 ipg;

	switch (emac->speed) {
	case SPEED_1000:
		ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
		break;
	case SPEED_100:
		ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
		break;
	case SPEED_10:
		/* Firmware hardcodes IPG for SR1.0 */
		if (emac->is_sr1)
			return;
		/* IPG for 10M is the same as for 100M */
		ipg = MII_RT_TX_IPG_100M;
		break;
	default:
		/* Other link speeds are not supported */
		netdev_err(emac->ndev, "Unsupported link speed\n");
		return;
	}

	icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
EXPORT_SYMBOL_GPL(icssg_config_ipg);
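/* R30 command area: a 4-word mailbox in the port's DRAM at
 * MGR_R30_CMD_OFFSET, shared with the firmware. The host writes one entry
 * of emac_r32_bitmask[] (apparently one word per ICSSG core, the fourth
 * unused) via icssg_set_port_state() and then polls emac_r30_is_done(),
 * which reports completion once all four words read back as EMAC_NONE,
 * presumably restored by the firmware after it has applied the command.
 * emac_r30_cmd_init() simply parks the mailbox in that idle state.
 */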
static void emac_r30_cmd_init(struct prueth_emac *emac)
{
	struct icssg_r30_cmd __iomem *p;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	for (i = 0; i < 4; i++)
		writel(EMAC_NONE, &p->cmd[i]);
}

static int emac_r30_is_done(struct prueth_emac *emac)
{
	const struct icssg_r30_cmd __iomem *p;
	u32 cmd;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	for (i = 0; i < 4; i++) {
		cmd = readl(&p->cmd[i]);
		if (cmd != EMAC_NONE)
			return 0;
	}

	return 1;
}

static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
{
	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
	struct icssg_rxq_ctx __iomem *rxq_ctx;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 addr;
	int i;

	addr = lower_32_bits(prueth->msmcram.pa);
	if (slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;

	if (addr % SZ_64K) {
		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
		return -EINVAL;
	}

	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
	/* workaround for f/w bug. bpool 0 needs to be initialized */
	for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
		writel(addr, &bpool_cfg[i].addr);
		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
		addr += PRUETH_EMAC_BUF_POOL_SIZE;
	}

	if (!slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
	else
		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;

	for (i = PRUETH_NUM_BUF_POOLS;
	     i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
	     i++) {
		/* The driver only uses the first 4 queues per PRU, so only
		 * initialize those.
		 */
		if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
			writel(addr, &bpool_cfg[i].addr);
			writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
			addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
		} else {
			writel(0, &bpool_cfg[i].addr);
			writel(0, &bpool_cfg[i].len);
		}
	}

	if (!slice)
		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
	else
		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;

	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
	writel(addr - SZ_2K, &rxq_ctx->end);

	return 0;
}
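/* Resulting MSMC (shared memory) layout for the dual-EMAC case, as
 * programmed by prueth_emac_buffer_setup() below. This is summarized from
 * its address arithmetic only; the pool count/size constants live in
 * icssg_config.h:
 *
 *   msmcram.pa + 0                                  slice 0 buffer pools
 *              + NUM_BUF_POOLS * EMAC_BUF_POOL_SIZE slice 1 buffer pools
 *              + 2 * NUM_BUF_POOLS * ...            slice 0 pre-emptible RX ctx
 *                                                   slice 0 express RX ctx
 *                                                   slice 1 pre-emptible RX ctx
 *                                                   slice 1 express RX ctx
 */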
static int prueth_emac_buffer_setup(struct prueth_emac *emac)
{
	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
	struct icssg_rxq_ctx __iomem *rxq_ctx;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 addr;
	int i;

	/* Layout to have 64KB aligned buffer pool
	 * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
	 */

	addr = lower_32_bits(prueth->msmcram.pa);
	if (slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;

	if (addr % SZ_64K) {
		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
		return -EINVAL;
	}

	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
	/* workaround for f/w bug. bpool 0 needs to be initialized */
	writel(addr, &bpool_cfg[0].addr);
	writel(0, &bpool_cfg[0].len);

	for (i = PRUETH_EMAC_BUF_POOL_START;
	     i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
	     i++) {
		writel(addr, &bpool_cfg[i].addr);
		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
		addr += PRUETH_EMAC_BUF_POOL_SIZE;
	}

	if (!slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
	else
		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;

	/* Pre-emptible RX buffer queue */
	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
	writel(addr, &rxq_ctx->end);

	/* Express RX buffer queue */
	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
	writel(addr, &rxq_ctx->end);

	return 0;
}

static void icssg_init_emac_mode(struct prueth *prueth)
{
	/* When the device is configured as a bridge and it is being brought
	 * back to the emac mode, the host mac address has to be set as 0.
	 */
	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
	int i;
	u8 mac[ETH_ALEN] = { 0 };

	if (prueth->emacs_initialized)
		return;

	/* Set VLAN TABLE address base */
	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
			   addr << SMEM_VLAN_OFFSET);
	/* Enable FDBs for both PRUs and the host port (VLAN-aware mode is
	 * left disabled in EMAC mode)
	 */
	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
	for (i = 0; i < SZ_4K - 1; i++) {
		prueth->vlan_tbl[i].fid = i;
		prueth->vlan_tbl[i].fid_c1 = 0;
	}
	/* Clear host MAC address */
	icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}

static void icssg_init_fw_offload_mode(struct prueth *prueth)
{
	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
	int i;

	if (prueth->emacs_initialized)
		return;

	/* Set VLAN TABLE address base */
	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
			   addr << SMEM_VLAN_OFFSET);
	/* Enable VLAN-aware mode and FDBs for both PRUs and the host port */
	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
	for (i = 0; i < SZ_4K - 1; i++) {
		prueth->vlan_tbl[i].fid = i;
		prueth->vlan_tbl[i].fid_c1 = 0;
	}

	if (prueth->hw_bridge_dev)
		icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
	icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
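/* Per-slice one-time configuration. The rough sequence below, summarized
 * from the function body, is: pick EMAC vs firmware-offload/switch init,
 * clear the config region and reset the hardware queues, program the ICSSG
 * and MII_RT/RGMII settings and the TX IPG, switch the PRUSS GPI mode and
 * XFR shift, program the C28 constant-table entry for all three cores, set
 * up the PSI-L RX flow IDs, lay out the buffer pools and finally park the
 * R30 command mailbox. Callers are expected to invoke this before starting
 * the PRU/RTU/TX_PRU cores for the slice (an assumption about caller
 * context, not enforced here).
 */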
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
	void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
	struct icssg_flow_cfg __iomem *flow_cfg;
	int ret;

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_init_fw_offload_mode(prueth);
	else
		icssg_init_emac_mode(prueth);

	memset_io(config, 0, TAS_GATE_MASK_LIST0);
	icssg_miig_queues_init(prueth, slice);

	emac->speed = SPEED_1000;
	emac->duplex = DUPLEX_FULL;
	if (!phy_interface_mode_is_rgmii(emac->phy_if)) {
		emac->speed = SPEED_100;
		emac->duplex = DUPLEX_FULL;
	}
	regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
			   ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
	icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_config_mii_init_fw_offload(emac);
	else
		icssg_config_mii_init(emac);
	icssg_config_ipg(emac);
	icssg_update_rgmii_cfg(prueth->miig_rt, emac);

	/* set GPI mode */
	pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice],
			  PRUSS_GPI_MODE_MII);

	/* enable XFR shift for PRU and RTU */
	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_PRU, true);
	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_RTU, true);

	/* set C28 to 0x100 */
	pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8);
	pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8);
	pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8);

	flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	writew(0, &flow_cfg->mgm_base_flow);
	writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
	writeb(0, config + QUEUE_NUM_UNTAGGED);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		ret = prueth_fw_offload_buffer_setup(emac);
	else
		ret = prueth_emac_buffer_setup(emac);
	if (ret)
		return ret;

	emac_r30_cmd_init(emac);

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_config);

/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
	{{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}},	/* EMAC_PORT_DISABLE */
	{{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}},	/* EMAC_PORT_BLOCK */
	{{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}},	/* EMAC_PORT_FORWARD */
	{{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}},	/* EMAC_PORT_FORWARD_WO_LEARNING */
	{{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* ACCEPT ALL */
	{{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* ACCEPT TAGGED */
	{{0xfffc0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* ACCEPT UNTAGGED and PRIO */
	{{EMAC_NONE, 0xffff0020, EMAC_NONE, EMAC_NONE}},	/* TAS Trigger List change */
	{{EMAC_NONE, 0xdfff1000, EMAC_NONE, EMAC_NONE}},	/* TAS set state ENABLE */
	{{EMAC_NONE, 0xefff2000, EMAC_NONE, EMAC_NONE}},	/* TAS set state RESET */
	{{EMAC_NONE, 0xcfff0000, EMAC_NONE, EMAC_NONE}},	/* TAS set state DISABLE */
	{{EMAC_NONE, EMAC_NONE, 0xffff0400, EMAC_NONE}},	/* UC flooding ENABLE */
	{{EMAC_NONE, EMAC_NONE, 0xfbff0000, EMAC_NONE}},	/* UC flooding DISABLE */
	{{EMAC_NONE, EMAC_NONE, 0xffff0800, EMAC_NONE}},	/* MC flooding ENABLE */
	{{EMAC_NONE, EMAC_NONE, 0xf7ff0000, EMAC_NONE}},	/* MC flooding DISABLE */
	{{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx ENABLE */
	{{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx DISABLE */
	{{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}},	/* VLAN AWARE */
	{{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}},	/* VLAN UNAWARE */
	{{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* HSR_RX_OFFLOAD_ENABLE */
	{{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}		/* HSR_RX_OFFLOAD_DISABLE */
};
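/* Illustrative use of the command interface below (hypothetical caller;
 * the enum value name is assumed from the table comment above, not defined
 * in this file), e.g. when a bridge port is moved to forwarding:
 *
 *	ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
 *	if (ret)
 *		netdev_err(emac->ndev, "failed to set port state\n");
 */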
int icssg_set_port_state(struct prueth_emac *emac,
			 enum icssg_port_state_cmd cmd)
{
	struct icssg_r30_cmd __iomem *p;
	int ret = -ETIMEDOUT;
	int done = 0;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) {
		netdev_err(emac->ndev, "invalid port command\n");
		return -EINVAL;
	}

	/* only one command at a time is allowed to the firmware */
	mutex_lock(&emac->cmd_lock);

	for (i = 0; i < 4; i++)
		writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]);

	/* wait for done */
	ret = read_poll_timeout(emac_r30_is_done, done, done == 1,
				1000, 10000, false, emac);

	if (ret == -ETIMEDOUT)
		netdev_err(emac->ndev, "timeout waiting for command done\n");

	mutex_unlock(&emac->cmd_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icssg_set_port_state);

void icssg_config_half_duplex(struct prueth_emac *emac)
{
	u32 val;

	if (!emac->half_duplex)
		return;

	val = get_random_u32();
	writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_config_half_duplex);

void icssg_config_set_speed(struct prueth_emac *emac)
{
	u8 fw_speed;

	switch (emac->speed) {
	case SPEED_1000:
		fw_speed = FW_LINK_SPEED_1G;
		break;
	case SPEED_100:
		fw_speed = FW_LINK_SPEED_100M;
		break;
	case SPEED_10:
		fw_speed = FW_LINK_SPEED_10M;
		break;
	default:
		/* Other link speeds are not supported */
		netdev_err(emac->ndev, "Unsupported link speed\n");
		return;
	}

	if (emac->duplex == DUPLEX_HALF)
		fw_speed |= FW_LINK_SPEED_HD;

	writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_config_set_speed);

int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
		       struct mgmt_cmd_rsp *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr, ret;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
	if (addr < 0)
		return addr;

	/* The first 4 bytes hold FW-owned buffer linking info which must
	 * not be touched
	 */
	memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
	ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
				2000, 20000000, false, prueth, slice == 0 ?
				ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
	if (ret) {
		netdev_err(emac->ndev, "Timed out sending HWQ message\n");
		return ret;
	}

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* Return the buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);
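/* FDB management commands are exchanged with the firmware over the ICSSG
 * hardware queues (see icssg_send_fdb_msg() above): a free buffer is popped
 * from the slice's CMD_POP queue, the struct mgmt_cmd is copied in past the
 * firmware-owned 4-byte link word, the buffer is pushed to CMD_PUSH, and
 * the response is polled for on RSP_POP before the buffer is returned via
 * RSP_PUSH. icssg_fdb_setup() below prepares such a command; the target
 * slot is a CRC32 hash over the MAC address plus the FID.
 */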
static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
			    const unsigned char *addr, u8 fid, int cmd)
{
	int slice = prueth_emac_slice(emac);
	u8 mac_fid[ETH_ALEN + 2];
	u16 fdb_slot;

	ether_addr_copy(mac_fid, addr);

	/* 1-1 VID-FID mapping is already set up */
	mac_fid[ETH_ALEN] = fid;
	mac_fid[ETH_ALEN + 1] = 0;

	fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;

	fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
	fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
	fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
	fdb_cmd->param = cmd;
	fdb_cmd->param |= (slice << 4);

	memcpy(&fdb_cmd->cmd_args[0], addr, 4);
	memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
	fdb_cmd->cmd_args[2] = fdb_slot;

	netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
}

int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
		      u8 vid, u8 fid_c2, bool add)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	struct mgmt_cmd fdb_cmd = { 0 };
	u8 fid = vid;
	int ret;

	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);

	fid_c2 |= ICSSG_FDB_ENTRY_VALID;
	fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
	if (fdb_cmd_rsp.status == 1)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(icssg_fdb_add_del);

int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
		     u8 vid)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	struct mgmt_cmd fdb_cmd = { 0 };
	struct prueth_fdb_slot *slot;
	u8 fid = vid;
	int ret, i;

	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);

	fdb_cmd.cmd_args[1] |= fid << 16;

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);

	slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
	for (i = 0; i < 4; i++) {
		if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
			return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
		slot++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_fdb_lookup);

void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
		       u8 untag_mask, bool add)
{
	struct prueth *prueth = emac->prueth;
	struct prueth_vlan_tbl *tbl;
	u8 fid_c1;

	tbl = prueth->vlan_tbl;
	spin_lock(&prueth->vtbl_lock);
	fid_c1 = tbl[vid].fid_c1;

	/* FID_C1: bit0..2 port membership mask,
	 * bit3..5 tagging mask for each port
	 * bit6 Stream VID (not handled currently)
	 * bit7 MC flood (not handled currently)
	 */
	if (add) {
		fid_c1 |= (port_mask | port_mask << 3);
		fid_c1 &= ~(untag_mask << 3);
	} else {
		fid_c1 &= ~(port_mask | port_mask << 3);
	}

	tbl[vid].fid_c1 = fid_c1;
	spin_unlock(&prueth->vtbl_lock);
}
EXPORT_SYMBOL_GPL(icssg_vtbl_modify);

u16 icssg_get_pvid(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	u32 pvid;

	if (emac->port_id == PRUETH_PORT_MII0)
		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
	else
		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);

	pvid = pvid >> 24;

	return pvid;
}
EXPORT_SYMBOL_GPL(icssg_get_pvid);
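/* The per-port default VLAN words in shared RAM hold the TPID and VID in
 * network byte order: on the little-endian hosts this driver targets,
 * cpu_to_be32((ETH_P_8021Q << 16) | vid) written with writel() lands in
 * memory as 81 00 00 <vid>, which is why icssg_get_pvid() above recovers
 * the VID from bits 31:24 of the value it reads back.
 */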
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
{
	u32 pvid;

	/* only 256 VLANs are supported */
	pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));

	if (port == PRUETH_PORT_MII0)
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
	else if (port == PRUETH_PORT_MII1)
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
	else
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);