// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022-2025 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"
#include "rswitch_l2.h"

#define RSWITCH_GPTP_OFFSET_S4 0x00018000

/* Busy-poll a register until (value & mask) == expected, or time out after
 * RSWITCH_TIMEOUT_US.  Safe in atomic context (uses the _atomic poll variant).
 */
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

/* Read-modify-write helper: clear the @clear bits, then OR in the @set bits. */
void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */

/* Pulse the software reset bit: assert then de-assert. */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

/* Enable the common clock and the default set of agent clocks. */
static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

/* An agent clock counts as enabled only if the common clock enable (RCE)
 * and the per-port bit are both set.
 */
static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
					   unsigned int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

/* Enable/disable one agent's clock.  Enable and disable go through different
 * registers (RCEC to set, RCDC to clear), each written with the port bit.
 */
static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
				     int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

/* Initialize the buffer pool, unless the hardware reports it is already
 * initialized (CABPIRM_BPR set).
 */
static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */

/* Map every descriptor queue to the GWCA's IRQ line group.
 * NOTE(review): the (i / 16) grouping presumably matches the hardware's
 * 16-queues-per-field layout of TPEMIMC7 — confirm against the datasheet.
 */
static void rswitch_top_init(struct rswitch_private *priv)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */

/* Program the forwarding engine: clear all per-agent forwarding permissions,
 * configure MAC table ageing, then allow only port->GWCA forwarding for the
 * enabled ETHA ports and direct descriptor forwarding for the GWCA itself.
 */
static int rswitch_fwd_init(struct rswitch_private *priv)
{
	u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
	unsigned int i;
	u32 reg_val;

	/* Start with empty configuration */
	for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
		/* Disable all port features */
		iowrite32(0, priv->addr + FWPC0(i));
		/* Disallow L3 forwarding and direct descriptor forwarding */
		iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
			  priv->addr + FWPC1(i));
		/* Disallow L2 forwarding */
		iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
			  priv->addr + FWPC2(i));
		/* Disallow port based forwarding */
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	/* Configure MAC table aging */
	rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP,
		       FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US));
	reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME);
	reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
	iowrite32(reg_val, priv->addr + FWMACAGC);

	/* For enabled ETHA ports, setup port based forwarding */
	rswitch_for_each_enabled_port(priv, i) {
		/* Port based forwarding from port i to GWCA port */
		rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
			       FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
		/* Within GWCA port, forward to Rx queue for port i */
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
	}

	/* For GWCA port, allow direct descriptor forwarding */
	rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);

	/* Initialize hardware L2 forwarding table */

	/* Allow entire table to be used for "unsecure" entries */
	rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK);

	/* Initialize MAC hash table */
	iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM);

	return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0);
}

/* Gateway CPU agent block (GWCA) */

/* Switch the GWCA operating mode and wait for the status register to confirm.
 * The agent clock must be running to accept the mode change; it is turned
 * back off when entering DISABLE.
 */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

/* Kick the multicast table reset and wait for completion. */
static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

/* Kick the AXI RAM reset and wait for completion. */
static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

/* Check whether any of the latched data-IRQ status bits belongs to this
 * GWCA's TX (tx=true) or RX (tx=false) queues.
 */
static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

/* Snapshot the data-IRQ status, masked by the currently enabled IRQs. */
static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

/* Enable (GWDIE) or disable (GWDID) the data IRQ of one queue.  The registers
 * are write-one-to-act, so no read-modify-write is needed.
 */
static void rswitch_enadis_data_irq(struct rswitch_private *priv,
				    unsigned int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

/* Acknowledge (clear) the data IRQ status bit of one queue. */
static void rswitch_ack_data_irq(struct rswitch_private *priv,
				 unsigned int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

/* Advance the cur (cur=true) or dirty (cur=false) ring index by @num,
 * wrapping modulo the ring size.  Does not modify the queue.
 */
static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
					     bool cur, unsigned int num)
{
	unsigned int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

/* Number of ring entries between dirty and cur, accounting for wrap-around. */
static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

/* True if the descriptor at dirty is no longer FEMPTY, i.e. the hardware has
 * written at least one received frame since the last refill.
 */
static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

/* Allocate RX page fragments for @num ring slots starting at @start_index,
 * skipping slots that still hold a buffer.  On failure, every fragment
 * allocated (or already present) in the walked range is freed and the
 * function returns -ENOMEM.
 */
static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
					   unsigned int start_index,
					   unsigned int num)
{
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->rx_bufs[index])
			continue;
		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
		if (!gq->rx_bufs[index])
			goto err;
	}

	return 0;

err:
	for (; i-- > 0; ) {
		index = (i + start_index) % gq->ring_size;
		skb_free_frag(gq->rx_bufs[index]);
		gq->rx_bufs[index] = NULL;
	}

	return -ENOMEM;
}

/* Release a queue's descriptor ring and its per-slot buffer bookkeeping.
 * The ring is sized ring_size + 1: the extra entry is the DT_LINKFIX
 * descriptor that links the ring back to its start.
 */
static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	unsigned int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			skb_free_frag(gq->rx_bufs[i]);
		kfree(gq->rx_bufs);
		gq->rx_bufs = NULL;
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
		kfree(gq->skbs);
		gq->skbs = NULL;
		kfree(gq->unmap_addrs);
		gq->unmap_addrs = NULL;
	}
}

static void
rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	/* +1 for the trailing DT_LINKFIX descriptor */
	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

/* Allocate one TX or RX data queue: per-slot bookkeeping arrays, RX buffers
 * (RX only), the coherent descriptor ring (+1 entry for DT_LINKFIX), and
 * register the queue's bit in the GWCA's tx/rx IRQ bit masks.
 * Returns 0 or -ENOMEM; on failure everything allocated here is freed.
 */
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, unsigned int ring_size)
{
	unsigned int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	if (!dir_tx) {
		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
		if (!gq->rx_bufs)
			return -ENOMEM;
		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
			goto out;

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
		if (!gq->skbs)
			return -ENOMEM;
		gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
		if (!gq->unmap_addrs)
			goto out;
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	/* Only one of the two rings is allocated per direction; if the one we
	 * tried for is still NULL, the dma_alloc_coherent() above failed.
	 */
	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

/* Store a DMA address into a descriptor.  The low 32 bits are little-endian;
 * dptrh holds only bits [39:32] (the hardware uses 40-bit addresses).
 */
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

/* Format a queue's descriptor ring (rswitch_ext_desc layout): map RX buffers
 * into FEMPTY descriptors (or mark TX slots EEMPTY), terminate the ring with
 * a DT_LINKFIX back to its base, point the linkfix table entry at the ring,
 * and program the queue's GWDCC control word.
 * NOTE(review): the !dir_tx branch accesses the ring via tx_ring even though
 * it maps rx_bufs — both ring pointers alias the same allocation layout;
 * in this file the function is only invoked for TX queues.
 */
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	unsigned int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
						  RSWITCH_MAP_BUF_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	/* Last (ring_size + 1'th) entry: link back to the start of the ring */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	/* Unmap the i buffers mapped before the failure */
	if (!gq->dir_tx) {
		for (desc = gq->tx_ring; i-- > 0; desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

/* Return @num timestamp-queue descriptors starting at @start_index to the
 * hardware by marking them FEMPTY_ND again.
 */
static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       unsigned int start_index,
				       unsigned int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

/* Refill @num rx_ring descriptors (rswitch_ext_ts_desc layout) starting at
 * @start_index: map the RX buffer, then publish the descriptor to hardware.
 * The dma_wmb() orders the address/size writes before the die_dt write that
 * hands the descriptor over.  On mapping failure, previously mapped buffers
 * in the range are unmapped and -ENOMEM is returned.
 */
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  unsigned int start_index,
					  unsigned int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	unsigned int i, index;
	dma_addr_t dma_addr;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
						  RSWITCH_MAP_BUF_SIZE,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (; i-- > 0; ) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

/* Format an RX queue ring (ext_ts descriptors with timestamps): fill all
 * slots, terminate with DT_LINKFIX, hook up the linkfix table entry, and
 * program GWDCC with timestamp capture (GWDCC_ETS) enabled.
 */
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size]; /* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

/* Allocate the linkfix table: one descriptor per queue, all initialized to
 * DT_EOS (end of set) until a queue claims its slot.
 */
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	unsigned int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int
rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	/* Terminate the ring with a DT_LINKFIX back to its base */
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);

	return 0;
}

/* Claim the first unused GWCA queue, zero its state, and return it.
 * Returns NULL if all queues are in use.
 */
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	unsigned int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

/* Return a queue to the free pool. */
static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

/* Claim a GWCA queue for this port's TX path and allocate its ring. */
static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

/* Claim a GWCA queue for this port's RX path and allocate its ring. */
static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

/* Bring the GWCA up: DISABLE -> CONFIG, reset its tables/RAM, program the
 * descriptor base/TS/IRQ registers, format every port's rings, then
 * DISABLE -> OPERATION.
 */
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
		  priv->addr + GWMDNC);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

/* Take the GWCA down through DISABLE -> RESET -> DISABLE. */
static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

/* Fatal-error path: flag the GWCA as halted (checked by the NAPI poll loop)
 * and shut the hardware down.
 */
static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

/* Consume one RX descriptor.  Frames may span several descriptors; fragments
 * after FSTART are appended to gq->skb_fstart.  Returns a completed skb, or
 * NULL when the frame is still being assembled or the descriptor sequence
 * was invalid (the latter bumps rx_dropped).
 */
static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
					      struct rswitch_gwca_queue *gq,
					      struct rswitch_ext_ts_desc *desc)
{
	dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
	u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
	u8 die_dt = desc->desc.die_dt & DT_MASK;
	struct sk_buff *skb = NULL;

	dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
			 DMA_FROM_DEVICE);

	/* The RX descriptor order will be one of the following:
	 * - FSINGLE
	 * - FSTART -> FEND
	 * - FSTART -> FMID -> FEND
	 */

	/* Check whether the descriptor is unexpected order */
	switch (die_dt) {
	case DT_FSTART:
	case DT_FSINGLE:
		/* A new frame while one is in flight: drop the partial frame */
		if (gq->skb_fstart) {
			dev_kfree_skb_any(gq->skb_fstart);
			gq->skb_fstart = NULL;
			ndev->stats.rx_dropped++;
		}
		break;
	case DT_FMID:
	case DT_FEND:
		/* Continuation without a started frame: drop */
		if (!gq->skb_fstart) {
			ndev->stats.rx_dropped++;
			return NULL;
		}
		break;
	default:
		break;
	}

	/* Handle the descriptor */
	switch (die_dt) {
	case DT_FSTART:
	case DT_FSINGLE:
		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
		if (skb) {
			skb_reserve(skb, RSWITCH_HEADROOM);
			skb_put(skb, pkt_len);
			gq->pkt_len = pkt_len;
			if (die_dt == DT_FSTART) {
				/* More fragments follow; keep assembling */
				gq->skb_fstart = skb;
				skb = NULL;
			}
		}
		break;
	case DT_FMID:
	case DT_FEND:
		skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
				virt_to_page(gq->rx_bufs[gq->cur]),
				offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
				pkt_len, RSWITCH_BUF_SIZE);
		if (die_dt == DT_FEND) {
			skb = gq->skb_fstart;
			gq->skb_fstart = NULL;
		}
		gq->pkt_len += pkt_len;
		break;
	default:
		netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
		break;
	}

	return skb;
}

/* NAPI RX: drain up to *quota completed descriptors, attach RX timestamps
 * when enabled, then refill the consumed ring slots.  Returns true when the
 * quota was exhausted (more work pending); halts the GWCA on refill failure.
 */
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, ret;
	struct sk_buff *skb;
	unsigned int num;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		/* Order the die_dt check before reading the descriptor body */
		dma_rmb();
		skb = rswitch_rx_handle_desc(ndev, gq, desc);
		if (!skb)
			goto out;

		get_ts = rdev->priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			/* Low 30 bits hold the nanoseconds */
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += gq->pkt_len;

out:
		/* Buffer ownership moved to the skb (or was dropped) */
		gq->rx_bufs[gq->cur] = NULL;
		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

/* Reclaim completed TX descriptors: account stats, unmap and free the skb,
 * and hand the descriptor slot back as EEMPTY.
 */
static void rswitch_tx_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	struct sk_buff *skb;

	desc = &gq->tx_ring[gq->dirty];
	while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
		dma_rmb();

		skb = gq->skbs[gq->dirty];
		if (skb) {
			rdev->ndev->stats.tx_packets++;
			rdev->ndev->stats.tx_bytes += skb->len;
			dma_unmap_single(ndev->dev.parent,
					 gq->unmap_addrs[gq->dirty],
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
		}

		desc->desc.die_dt = DT_EEMPTY;
		gq->dirty = rswitch_next_queue_index(gq, false, 1);
		desc = &gq->tx_ring[gq->dirty];
	}
}

/* NAPI poll: free TX, run RX against the budget, and retry while new frames
 * arrived during processing.  Re-enables the queue IRQs only after
 * napi_complete_done() and only if the port is still open; bails out
 * without re-enabling if the GWCA was halted.
 */
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		if (test_bit(rdev->port, priv->opened_ports)) {
			rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
			rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

/* Hard-IRQ half of the data path: mask this port's queue IRQs and schedule
 * NAPI (which re-enables them when done).
 */
static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

/* Dispatch latched data IRQ bits to each affected queue's net device. */
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	unsigned int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

/* Top-level GWCA interrupt handler. */
static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

/* Request every GWCA data IRQ by its named platform resource.
 * IRQ registrations are devm-managed; the temporary resource-name string
 * is freed immediately after lookup.
 */
static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Drain the TX timestamp descriptor queue, delivering each captured
 * timestamp to the skb that was parked under the matching (port, tag),
 * then refill the consumed descriptors.
 */
static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct rswitch_device *rdev;
	struct sk_buff *ts_skb;
	struct timespec64 ts;
	unsigned int num;
	u32 tag, port;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		/* Validate hardware-supplied port/tag before indexing. */
		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		if (unlikely(port >= RSWITCH_NUM_PORTS))
			goto next;
		rdev = priv->rdev[port];

		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
		if (unlikely(tag >= TS_TAGS_PER_PORT))
			goto next;
		/* Take ownership of the skb before releasing the tag bit. */
		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
		smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
		clear_bit(tag, rdev->ts_skb_used);

		if (unlikely(!ts_skb))
			goto next;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts.tv_sec = __le32_to_cpu(desc->ts_sec);
		/* Mask in LE domain: lower 30 bits carry the nanoseconds. */
		ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		skb_tstamp_tx(ts_skb, &shhwtstamps);
		dev_consume_skb_irq(ts_skb);

next:
		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	/* Re-arm the descriptors we consumed and advance the dirty index. */
	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

/* Timestamp IRQ handler: ack the status bit, then process the TS queue. */
static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/* Request the single GWCA timestamp IRQ (devm-managed). */
static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */

/* Switch the ETHA agent into @mode and wait until EAMS reflects it.
 * The agent clock is enabled first if needed, and gated again when the
 * target mode is DISABLE.
 */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

/* Read back the MAC address currently programmed in MRMAC0/MRMAC1
 * (e.g. left there by the bootloader) into etha->mac_addr.
 */
static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void
rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	/* MRMAC0 carries the two high octets, MRMAC1 the remaining four. */
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

/* Trigger link verification and wait for the PLV bit to self-clear. */
static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

/* Program the RMAC: MAC address plus the MPIC PHY-interface-select (PIS)
 * and link-speed (LSC) fields derived from phy_interface/speed.
 */
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 pis, lsc;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		pis = MPIC_PIS_GMII;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
	case PHY_INTERFACE_MODE_5GBASER:
		pis = MPIC_PIS_XGMII;
		break;
	default:
		/* Unhandled mode: keep the currently-programmed PIS field. */
		pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
		break;
	}

	switch (etha->speed) {
	case 100:
		lsc = MPIC_LSC_100M;
		break;
	case 1000:
		lsc = MPIC_LSC_1G;
		break;
	case 2500:
		lsc = MPIC_LSC_2_5G;
		break;
	default:
		/* Unhandled speed: keep the currently-programmed LSC field. */
		lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
		break;
	}

	rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
		       FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
}

/* Set the MDIO management interface clock divider (PSMCS) and hold
 * time (PSMHT) in MPIC.
 */
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
		       FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
		       FIELD_PREP(MPIC_PSMHT, 0x06));
}

/* Full ETHA bring-up sequence: DISABLE -> CONFIG, program RMAC/MII,
 * run link verification, then DISABLE -> OPERATION.
 */
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

/* Issue one MDIO management transaction via MPSM and wait for PSME to
 * self-clear.  For reads, returns the PRD data field; for writes,
 * returns 0 on success; negative error on timeout.
 */
static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
				unsigned int mmf, unsigned int pda,
				unsigned int pra, unsigned int pop,
				unsigned int prd)
{
	u32 val;
	int ret;

	val = MPSM_PSME |
	      FIELD_PREP(MPSM_MFF, mmf) |
	      FIELD_PREP(MPSM_PDA, pda) |
	      FIELD_PREP(MPSM_PRA, pra) |
	      FIELD_PREP(MPSM_POP, pop) |
	      FIELD_PREP(MPSM_PRD, prd);
	iowrite32(val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
	if (ret)
		return ret;

	if (read) {
		val = ioread32(etha->addr + MPSM);
		ret = FIELD_GET(MPSM_PRD, val);
	}

	return ret;
}

/* Clause-45 MDIO read: address cycle, then read cycle. */
static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;
	int ret;

	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
				   MPSM_POP_ADDRESS, regad);
	if (ret)
		return ret;

	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
				    MPSM_POP_READ_C45, 0);
}

/* Clause-45 MDIO write: address cycle, then write cycle. */
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;
	int ret;

	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
				   MPSM_POP_ADDRESS, regad);
	if (ret)
		return ret;

	return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
				    MPSM_POP_WRITE, val);
}

/* Clause-22 MDIO read. */
static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return
	       rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
				    MPSM_POP_READ_C22, 0);
}

/* Clause-22 MDIO write. */
static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
				    MPSM_POP_WRITE, val);
}

/* Call of_node_put(port) after done */
/* Find the "ethernet-ports" child node whose "reg" equals this port's
 * ETHA index; NULL if no match (or on missing "reg" property).
 */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index)
			break;
	}

out:
	of_node_put(ports);

	return port;
}

/* Read PHY mode and speed for the port from the device tree; when
 * "max-speed" is absent, derive a default speed from the PHY mode.
 */
static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Allocate and register the MDIO bus backed by this port's ETHA,
 * optionally bound to the port's "mdio" DT subnode.
 */
static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->read = rswitch_etha_mii_read_c22;
	mii_bus->write = rswitch_etha_mii_write_c22;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

/* Unregister and free the port's MDIO bus, if one was registered. */
static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

/* phylib adjust_link callback: power the SerDes on/off with link state
 * and, when runtime speed change is allowed, re-init the ETHA on a
 * speed transition.
 */
static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else if (rdev->serdes->power_count)
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}

/* On SoCs that cannot change ETHA speed at runtime, restrict the PHY's
 * advertised link modes to the single configured speed.
 */
static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

/* Look up the port's PHY via "phy-handle", connect it with
 * rswitch_adjust_link() and trim unsupported link modes.
 * Returns 0 on success, -ENODEV/-ENOENT on lookup failure.
 */
static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
	phydev->mac_managed_pm = true;

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	/* Cap at 2.5G and drop the 10 Mbps and half-duplex modes. */
	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

/* Disconnect the port's PHY, if one is attached. */
static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

/* Configure the SerDes PHY's Ethernet mode and line speed. */
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

/* Bring up one Ethernet port: ETHA hardware, MDIO bus, PHY and SerDes.
 * Unwinds in reverse order on any failure.
 */
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		/* Without runtime speed change, init the ETHA only once. */
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err =
	      rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

/* Reverse of rswitch_ether_port_init_one() (SerDes is devm-managed). */
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

/* Init every enabled port, then init each port's SerDes; on failure,
 * unwind whatever was already brought up, in reverse port order.
 */
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	/* All ports were port-inited: deinit every one of them below. */
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}

/* ndo_open: enable NAPI, mark the port open, unmask its data IRQs and
 * start the PHY and TX queue.
 */
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	/* First port opened: enable the shared timestamp IRQ. */
	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	napi_enable(&rdev->napi);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	if (rdev->brdev)
		rswitch_update_l2_offload(rdev->priv);

	return 0;
}

/* ndo_stop: quiesce the port (queues, PHY, IRQs, NAPI) and free any
 * skbs still waiting for a TX timestamp.
 */
static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct sk_buff *ts_skb;
	unsigned long flags;
	unsigned int tag;

	netif_tx_stop_all_queues(ndev);

	phy_stop(ndev->phydev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	napi_disable(&rdev->napi);

	if (rdev->brdev)
		rswitch_update_l2_offload(rdev->priv);

	/* Last port closed: disable the shared timestamp IRQ. */
	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	for_each_set_bit(tag, rdev->ts_skb_used, TS_TAGS_PER_PORT) {
		ts_skb = xchg(&rdev->ts_skb[tag], NULL);
		clear_bit(tag, rdev->ts_skb_used);
		if (ts_skb)
			dev_kfree_skb(ts_skb);
	}

	return 0;
}

/* Fill the TX descriptor's info1 word; when HW timestamping is requested
 * allocate a TS tag and park a reference to the skb under it.
 * Returns false if no TS tag is free (caller aborts this transmit).
 */
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
				       struct sk_buff *skb,
				       struct rswitch_ext_desc *desc)
{
	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		unsigned int tag;

		tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
		if (tag == TS_TAGS_PER_PORT)
			return false;
		smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
		rdev->ts_skb[tag] = skb_get(skb);
		set_bit(tag, rdev->ts_skb_used);

		skb_shinfo(skb)->tx_flags |=
					    SKBTX_IN_PROGRESS;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);

		skb_tx_timestamp(skb);
	}

	return true;
}

/* Fill one TX descriptor; the die_dt type field is written last, after a
 * DMA write barrier, so the hardware never sees a half-filled descriptor.
 */
static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
				 struct sk_buff *skb,
				 struct rswitch_ext_desc *desc,
				 dma_addr_t dma_addr, u16 len, u8 die_dt)
{
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(len);
	if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
		return false;

	dma_wmb();

	desc->desc.die_dt = die_dt;

	return true;
}

/* Descriptor type for position @index in a chain of @nr_desc descriptors
 * (single / start / middle / end, with DIE set on the last one).
 */
static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
{
	if (nr_desc == 1)
		return DT_FSINGLE | DIE;
	if (index == 0)
		return DT_FSTART;
	if (nr_desc - 1 == index)
		return DT_FEND | DIE;
	return DT_FMID;
}

/* Buffer length carried by a descriptor of type @die_dt: full buffers
 * for start/middle, the remainder (or a full buffer) for single/end.
 */
static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
{
	switch (die_dt & DT_MASK) {
	case DT_FSINGLE:
	case DT_FEND:
		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
	case DT_FSTART:
	case DT_FMID:
		return RSWITCH_DESC_BUF_SIZE;
	default:
		return 0;
	}
}

/* ndo_start_xmit: DMA-map the (padded) skb and build its descriptor
 * chain, then kick the hardware via GWTRC.
 */
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	dma_addr_t dma_addr, dma_addr_orig;
	netdev_tx_t ret = NETDEV_TX_OK;
	struct rswitch_ext_desc *desc;
	unsigned int i, nr_desc;
	u8 die_dt;
	u16 len;

	nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
		/* Not enough free descriptors: back-pressure the stack. */
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
		goto err_kfree;

	/* Stored the skb at the last descriptor to avoid skb free before hardware completes send */
	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
	gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;

	dma_wmb();

	/* DT_FSTART should be set at last. So, this is reverse order. */
	for (i = nr_desc; i-- > 0; ) {
		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
		len = rswitch_ext_desc_get_len(die_dt, skb->len);
		if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
			goto err_unmap;
	}

	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;

err_unmap:
	gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
	dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);

err_kfree:
	dev_kfree_skb_any(skb);

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

/* ndo_hwtstamp_get: report the current (device-global) timestamp config. */
static int rswitch_hwstamp_get(struct net_device *ndev,
			       struct kernel_hwtstamp_config *config)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;

	config->flags = 0;
	config->tx_type = priv->tstamp_tx_ctrl;
	config->rx_filter = priv->tstamp_rx_ctrl;

	return 0;
}

/* ndo_hwtstamp_set: validate and store the requested timestamp config
 * (kept in the device-global private data).
 */
static int rswitch_hwstamp_set(struct net_device *ndev,
			       struct kernel_hwtstamp_config *config,
			       struct netlink_ext_ack *extack)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	enum hwtstamp_rx_filters tstamp_rx_ctrl;
	enum hwtstamp_tx_types tstamp_tx_ctrl;

	if
	    (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = config->tx_type;
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl = config->rx_filter;
		break;
	default:
		/* Any other filter request: fall back to timestamping all. */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL;
		break;
	}

	rdev->priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return 0;
}

/* Use the platform device name as the switch ID so all ports report the
 * same parent (required for switchdev-style port grouping).
 */
static int rswitch_get_port_parent_id(struct net_device *ndev,
				      struct netdev_phys_item_id *ppid)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	const char *name;

	name = dev_name(&rdev->priv->pdev->dev);
	ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id));
	memcpy(ppid->id, name, ppid->id_len);

	return 0;
}

/* Report the physical port name as "tsn<port>". */
static int rswitch_get_phys_port_name(struct net_device *ndev,
				      char *name, size_t len)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	snprintf(name, len, "tsn%d", rdev->port);

	return 0;
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_get_port_parent_id = rswitch_get_port_parent_id,
	.ndo_get_phys_port_name = rswitch_get_phys_port_name,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_hwtstamp_get = rswitch_hwstamp_get,
	.ndo_hwtstamp_set = rswitch_hwstamp_set,
};

/* True if @ndev is one of this driver's ports (identified by its ops). */
bool is_rdev(const struct net_device *ndev)
{
	return (ndev->netdev_ops == &rswitch_netdev_ops);
}

/* ethtool get_ts_info: advertise the PTP clock and timestamping modes. */
static int
rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

/* Initialize per-port ETHA state: index, register bases and the MDIO
 * clock divider derived from the peripheral clock rate.
 */
static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1.
	 * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply
	 * both the numerator and the denominator by 10.
1929 */ 1930 etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; 1931 } 1932 1933 static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index) 1934 { 1935 struct platform_device *pdev = priv->pdev; 1936 struct rswitch_device *rdev; 1937 struct net_device *ndev; 1938 int err; 1939 1940 if (index >= RSWITCH_NUM_PORTS) 1941 return -EINVAL; 1942 1943 ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1); 1944 if (!ndev) 1945 return -ENOMEM; 1946 1947 SET_NETDEV_DEV(ndev, &pdev->dev); 1948 ether_setup(ndev); 1949 1950 rdev = netdev_priv(ndev); 1951 rdev->ndev = ndev; 1952 rdev->priv = priv; 1953 priv->rdev[index] = rdev; 1954 rdev->port = index; 1955 rdev->etha = &priv->etha[index]; 1956 rdev->addr = priv->addr; 1957 1958 ndev->base_addr = (unsigned long)rdev->addr; 1959 snprintf(ndev->name, IFNAMSIZ, "tsn%d", index); 1960 ndev->netdev_ops = &rswitch_netdev_ops; 1961 ndev->ethtool_ops = &rswitch_ethtool_ops; 1962 ndev->max_mtu = RSWITCH_MAX_MTU; 1963 ndev->min_mtu = ETH_MIN_MTU; 1964 1965 netif_napi_add(ndev, &rdev->napi, rswitch_poll); 1966 1967 rdev->np_port = rswitch_get_port_node(rdev); 1968 rdev->disabled = !rdev->np_port; 1969 err = of_get_ethdev_address(rdev->np_port, ndev); 1970 if (err) { 1971 if (is_valid_ether_addr(rdev->etha->mac_addr)) 1972 eth_hw_addr_set(ndev, rdev->etha->mac_addr); 1973 else 1974 eth_hw_addr_random(ndev); 1975 } 1976 1977 err = rswitch_etha_get_params(rdev); 1978 if (err < 0) 1979 goto out_get_params; 1980 1981 err = rswitch_rxdmac_alloc(ndev); 1982 if (err < 0) 1983 goto out_rxdmac; 1984 1985 err = rswitch_txdmac_alloc(ndev); 1986 if (err < 0) 1987 goto out_txdmac; 1988 1989 list_add_tail(&rdev->list, &priv->port_list); 1990 1991 return 0; 1992 1993 out_txdmac: 1994 rswitch_rxdmac_free(ndev); 1995 1996 out_rxdmac: 1997 out_get_params: 1998 of_node_put(rdev->np_port); 1999 netif_napi_del(&rdev->napi); 2000 free_netdev(ndev); 2001 2002 return err; 2003 } 2004 2005 static void 
rswitch_device_free(struct rswitch_private *priv, unsigned int index) 2006 { 2007 struct rswitch_device *rdev = priv->rdev[index]; 2008 struct net_device *ndev = rdev->ndev; 2009 2010 list_del(&rdev->list); 2011 rswitch_txdmac_free(ndev); 2012 rswitch_rxdmac_free(ndev); 2013 of_node_put(rdev->np_port); 2014 netif_napi_del(&rdev->napi); 2015 free_netdev(ndev); 2016 } 2017 2018 static int rswitch_init(struct rswitch_private *priv) 2019 { 2020 unsigned int i; 2021 int err; 2022 2023 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2024 rswitch_etha_init(priv, i); 2025 2026 rswitch_clock_enable(priv); 2027 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2028 rswitch_etha_read_mac_address(&priv->etha[i]); 2029 2030 rswitch_reset(priv); 2031 2032 rswitch_clock_enable(priv); 2033 rswitch_top_init(priv); 2034 err = rswitch_bpool_config(priv); 2035 if (err < 0) 2036 return err; 2037 2038 rswitch_coma_init(priv); 2039 2040 err = rswitch_gwca_linkfix_alloc(priv); 2041 if (err < 0) 2042 return -ENOMEM; 2043 2044 err = rswitch_gwca_ts_queue_alloc(priv); 2045 if (err < 0) 2046 goto err_ts_queue_alloc; 2047 2048 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 2049 err = rswitch_device_alloc(priv, i); 2050 if (err < 0) { 2051 for (; i-- > 0; ) 2052 rswitch_device_free(priv, i); 2053 goto err_device_alloc; 2054 } 2055 } 2056 2057 err = rswitch_fwd_init(priv); 2058 if (err < 0) 2059 goto err_fwd_init; 2060 2061 err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk)); 2062 if (err < 0) 2063 goto err_ptp_register; 2064 2065 err = rswitch_gwca_request_irqs(priv); 2066 if (err < 0) 2067 goto err_gwca_request_irq; 2068 2069 err = rswitch_gwca_ts_request_irqs(priv); 2070 if (err < 0) 2071 goto err_gwca_ts_request_irq; 2072 2073 err = rswitch_gwca_hw_init(priv); 2074 if (err < 0) 2075 goto err_gwca_hw_init; 2076 2077 err = rswitch_ether_port_init_all(priv); 2078 if (err) 2079 goto err_ether_port_init_all; 2080 2081 rswitch_for_each_enabled_port(priv, i) { 2082 err = 
		      register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_fwd_init:
err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

/* SoCs on which the ETHA speed must not be changed at runtime. */
static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};

/* Platform probe: map the register space, allocate driver state, set the
 * DMA mask and run the full switch initialization.
 */
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4;

	/* Try a 40-bit DMA mask first; fall back to 32-bit if unavailable. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->port_list);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	if (list_empty(&priv->port_list))
		dev_warn(&pdev->dev, "could not initialize any ports\n");

	ret = rswitch_register_notifiers();
	if (ret) {
		dev_err(&pdev->dev, "could not register notifiers\n");
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

/* Tear down everything rswitch_init() set up, in reverse order. */
static void rswitch_deinit(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	rswitch_for_each_enabled_port(priv, i) {
		struct rswitch_device *rdev = priv->rdev[i];

		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
} 2229 2230 static void renesas_eth_sw_remove(struct platform_device *pdev) 2231 { 2232 struct rswitch_private *priv = platform_get_drvdata(pdev); 2233 2234 rswitch_unregister_notifiers(); 2235 rswitch_deinit(priv); 2236 2237 pm_runtime_put(&pdev->dev); 2238 pm_runtime_disable(&pdev->dev); 2239 2240 platform_set_drvdata(pdev, NULL); 2241 } 2242 2243 static int renesas_eth_sw_suspend(struct device *dev) 2244 { 2245 struct rswitch_private *priv = dev_get_drvdata(dev); 2246 struct net_device *ndev; 2247 unsigned int i; 2248 2249 rswitch_for_each_enabled_port(priv, i) { 2250 ndev = priv->rdev[i]->ndev; 2251 if (netif_running(ndev)) { 2252 netif_device_detach(ndev); 2253 rswitch_stop(ndev); 2254 } 2255 if (priv->rdev[i]->serdes->init_count) 2256 phy_exit(priv->rdev[i]->serdes); 2257 } 2258 2259 return 0; 2260 } 2261 2262 static int renesas_eth_sw_resume(struct device *dev) 2263 { 2264 struct rswitch_private *priv = dev_get_drvdata(dev); 2265 struct net_device *ndev; 2266 unsigned int i; 2267 2268 rswitch_for_each_enabled_port(priv, i) { 2269 phy_init(priv->rdev[i]->serdes); 2270 ndev = priv->rdev[i]->ndev; 2271 if (netif_running(ndev)) { 2272 rswitch_open(ndev); 2273 netif_device_attach(ndev); 2274 } 2275 } 2276 2277 return 0; 2278 } 2279 2280 static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, 2281 renesas_eth_sw_resume); 2282 2283 static struct platform_driver renesas_eth_sw_driver_platform = { 2284 .probe = renesas_eth_sw_probe, 2285 .remove = renesas_eth_sw_remove, 2286 .driver = { 2287 .name = "renesas_eth_sw", 2288 .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), 2289 .of_match_table = renesas_eth_sw_of_table, 2290 } 2291 }; 2292 module_platform_driver(renesas_eth_sw_driver_platform); 2293 MODULE_AUTHOR("Yoshihiro Shimoda"); 2294 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver"); 2295 MODULE_LICENSE("GPL"); 2296