/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}
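
/* Tag the ring as either a free/buffer pool or a regular work queue based
 * on its ring id; buffer pool rings additionally select buffer pool mode
 * in the cached ring state.
 */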
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}
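
/* Undo xgene_enet_setup_ring(): disable the empty/non-empty interrupt for
 * CPU-owned rings, then clear the ring id and ring state CSRs.
 */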
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	struct rtnl_link_stats64 *stats = &pdata->stats;

	switch (status) {
	case INGRESS_CRC:
		stats->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		stats->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		stats->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		stats->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		stats->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		stats->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}
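
/* Read-side counterparts of the write helpers above; MCX MAC reads go
 * through the same indirect command/command-done handshake.
 */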
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
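
/* Bring the ENET RAMs out of shutdown and poll until every memory block
 * reports ready (0xffffffff) before the MAC is touched.
 */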
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}
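
/* Full port bring-up for the PHY speed currently in use: program
 * MAC_CONFIG_2, interface control, ICM and RGMII registers, set the MAC
 * address and MDC clock, then resume Rx/Tx traffic.
 */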
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}
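
/* Port reset: where a clock is provided, cycle the ENET clock
 * (enable, disable, enable) to reset the block, then re-run ECC init and
 * set up the ring interface associations.
 */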
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (!IS_ERR(pdata->clk)) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);
	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}

		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
					 0, pdata->phy_mode);
		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
		phy_dev = pdata->phy_dev;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}
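
	/* Non-DT (e.g. ACPI) path: register an empty bus, then probe the
	 * PHY by hand from the "phy-channel"/"phy-addr" device properties.
	 */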
	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, false);
	if (!phy || IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};