1 // SPDX-License-Identifier: GPL-2.0 2 /* Renesas Ethernet Switch device driver 3 * 4 * Copyright (C) 2022-2025 Renesas Electronics Corporation 5 */ 6 7 #include <linux/clk.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/err.h> 10 #include <linux/etherdevice.h> 11 #include <linux/ethtool.h> 12 #include <linux/ip.h> 13 #include <linux/iopoll.h> 14 #include <linux/kernel.h> 15 #include <linux/list.h> 16 #include <linux/module.h> 17 #include <linux/net_tstamp.h> 18 #include <linux/of.h> 19 #include <linux/of_mdio.h> 20 #include <linux/of_net.h> 21 #include <linux/phy/phy.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/rtnetlink.h> 26 #include <linux/slab.h> 27 #include <linux/spinlock.h> 28 #include <linux/sys_soc.h> 29 30 #include "rswitch.h" 31 #include "rswitch_l2.h" 32 33 static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected) 34 { 35 u32 val; 36 37 return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected, 38 1, RSWITCH_TIMEOUT_US); 39 } 40 41 void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set) 42 { 43 iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg); 44 } 45 46 /* Common Agent block (COMA) */ 47 static void rswitch_reset(struct rswitch_private *priv) 48 { 49 iowrite32(RRC_RR, priv->addr + RRC); 50 iowrite32(RRC_RR_CLR, priv->addr + RRC); 51 } 52 53 static void rswitch_clock_enable(struct rswitch_private *priv) 54 { 55 iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC); 56 } 57 58 static void rswitch_clock_disable(struct rswitch_private *priv) 59 { 60 iowrite32(RCDC_RCD, priv->addr + RCDC); 61 } 62 63 static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, 64 unsigned int port) 65 { 66 u32 val = ioread32(coma_addr + RCEC); 67 68 if (val & RCEC_RCE) 69 return (val & BIT(port)) ? true : false; 70 else 71 return false; 72 } 73 74 static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port, 75 int enable) 76 { 77 u32 val; 78 79 if (enable) { 80 val = ioread32(coma_addr + RCEC); 81 iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC); 82 } else { 83 val = ioread32(coma_addr + RCDC); 84 iowrite32(val | BIT(port), coma_addr + RCDC); 85 } 86 } 87 88 static int rswitch_bpool_config(struct rswitch_private *priv) 89 { 90 u32 val; 91 92 val = ioread32(priv->addr + CABPIRM); 93 if (val & CABPIRM_BPR) 94 return 0; 95 96 iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM); 97 98 return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR); 99 } 100 101 static void rswitch_coma_init(struct rswitch_private *priv) 102 { 103 iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0); 104 } 105 106 /* R-Switch-2 block (TOP) */ 107 static void rswitch_top_init(struct rswitch_private *priv) 108 { 109 unsigned int i; 110 111 for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++) 112 iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i)); 113 } 114 115 /* Forwarding engine block (MFWD) */ 116 static int rswitch_fwd_init(struct rswitch_private *priv) 117 { 118 u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0); 119 unsigned int i; 120 u32 reg_val; 121 122 /* Start with empty configuration */ 123 for (i = 0; i < RSWITCH_NUM_AGENTS; i++) { 124 /* Disable all port features */ 125 iowrite32(0, priv->addr + FWPC0(i)); 126 /* Disallow L3 forwarding and direct descriptor forwarding */ 127 iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask), 128 priv->addr + FWPC1(i)); 129 /* Disallow L2 forwarding */ 130 iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask), 131 priv->addr + FWPC2(i)); 132 /* Disallow port based forwarding */ 133 iowrite32(0, priv->addr + FWPBFC(i)); 134 } 135 136 /* Configure MAC table aging */ 137 rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP, 138 FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US)); 139 140 reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME); 141 reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL; 142 iowrite32(reg_val, priv->addr + FWMACAGC); 143 144 /* For enabled ETHA ports, setup port based forwarding */ 145 rswitch_for_each_enabled_port(priv, i) { 146 /* Port based forwarding from port i to GWCA port */ 147 rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV, 148 FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index))); 149 /* Within GWCA port, forward to Rx queue for port i */ 150 iowrite32(priv->rdev[i]->rx_queue->index, 151 priv->addr + FWPBFCSDC(GWCA_INDEX, i)); 152 } 153 154 /* For GWCA port, allow direct descriptor forwarding */ 155 rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE); 156 157 /* Initialize hardware L2 forwarding table */ 158 159 /* Allow entire table to be used for "unsecure" entries */ 160 rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK); 161 162 /* Initialize MAC hash table */ 163 iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM); 164 165 return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0); 166 } 167 168 /* Gateway CPU agent block (GWCA) */ 169 static int rswitch_gwca_change_mode(struct rswitch_private *priv, 170 enum rswitch_gwca_mode mode) 171 { 172 int ret; 173 174 if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index)) 175 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1); 176 177 iowrite32(mode, priv->addr + GWMC); 178 179 ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode); 180 181 if (mode == GWMC_OPC_DISABLE) 182 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0); 183 184 return ret; 185 } 186 187 static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv) 188 { 189 iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM); 190 191 return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR); 192 } 193 194 static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv) 195 { 196 iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM); 197 198 return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR); 199 } 200 201 static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx) 202 { 203 u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits; 204 unsigned int i; 205 206 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { 207 if (dis[i] & mask[i]) 208 return true; 209 } 210 211 return false; 212 } 213 214 static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis) 215 { 216 unsigned int i; 217 218 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { 219 dis[i] = ioread32(priv->addr + GWDIS(i)); 220 dis[i] &= ioread32(priv->addr + GWDIE(i)); 221 } 222 } 223 224 static void rswitch_enadis_data_irq(struct rswitch_private *priv, 225 unsigned int index, bool enable) 226 { 227 u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32); 228 229 iowrite32(BIT(index % 32), priv->addr + offs); 230 } 231 232 static void rswitch_ack_data_irq(struct rswitch_private *priv, 233 unsigned int index) 234 { 235 u32 offs = GWDIS(index / 32); 236 237 iowrite32(BIT(index % 32), priv->addr + offs); 238 } 239 240 static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, 241 bool cur, unsigned int num) 242 { 243 unsigned int index = cur ? gq->cur : gq->dirty; 244 245 if (index + num >= gq->ring_size) 246 index = (index + num) % gq->ring_size; 247 else 248 index += num; 249 250 return index; 251 } 252 253 static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq) 254 { 255 if (gq->cur >= gq->dirty) 256 return gq->cur - gq->dirty; 257 else 258 return gq->ring_size - gq->dirty + gq->cur; 259 } 260 261 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq) 262 { 263 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; 264 265 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) 266 return true; 267 268 return false; 269 } 270 271 static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq, 272 unsigned int start_index, 273 unsigned int num) 274 { 275 unsigned int i, index; 276 277 for (i = 0; i < num; i++) { 278 index = (i + start_index) % gq->ring_size; 279 if (gq->rx_bufs[index]) 280 continue; 281 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE); 282 if (!gq->rx_bufs[index]) 283 goto err; 284 } 285 286 return 0; 287 288 err: 289 for (; i-- > 0; ) { 290 index = (i + start_index) % gq->ring_size; 291 skb_free_frag(gq->rx_bufs[index]); 292 gq->rx_bufs[index] = NULL; 293 } 294 295 return -ENOMEM; 296 } 297 298 static void rswitch_gwca_queue_free(struct net_device *ndev, 299 struct rswitch_gwca_queue *gq) 300 { 301 unsigned int i; 302 303 if (!gq->dir_tx) { 304 dma_free_coherent(ndev->dev.parent, 305 sizeof(struct rswitch_ext_ts_desc) * 306 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma); 307 gq->rx_ring = NULL; 308 309 for (i = 0; i < gq->ring_size; i++) 310 skb_free_frag(gq->rx_bufs[i]); 311 kfree(gq->rx_bufs); 312 gq->rx_bufs = NULL; 313 } else { 314 dma_free_coherent(ndev->dev.parent, 315 sizeof(struct rswitch_ext_desc) * 316 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma); 317 gq->tx_ring = NULL; 318 kfree(gq->skbs); 319 gq->skbs = NULL; 320 kfree(gq->unmap_addrs); 321 gq->unmap_addrs = NULL; 322 } 323 } 324 325 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv) 326 { 327 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 328 329 dma_free_coherent(&priv->pdev->dev, 330 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1), 331 gq->ts_ring, gq->ring_dma); 332 gq->ts_ring = NULL; 333 } 334 335 static int rswitch_gwca_queue_alloc(struct net_device *ndev, 336 struct rswitch_private *priv, 337 struct rswitch_gwca_queue *gq, 338 bool dir_tx, unsigned int ring_size) 339 { 340 unsigned int i, bit; 341 342 gq->dir_tx = dir_tx; 343 gq->ring_size = ring_size; 344 gq->ndev = ndev; 345 346 if (!dir_tx) { 347 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL); 348 if (!gq->rx_bufs) 349 return -ENOMEM; 350 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) 351 goto out; 352 353 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, 354 sizeof(struct rswitch_ext_ts_desc) * 355 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 356 } else { 357 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); 358 if (!gq->skbs) 359 return -ENOMEM; 360 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL); 361 if (!gq->unmap_addrs) 362 goto out; 363 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, 364 sizeof(struct rswitch_ext_desc) * 365 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 366 } 367 368 if (!gq->rx_ring && !gq->tx_ring) 369 goto out; 370 371 i = gq->index / 32; 372 bit = BIT(gq->index % 32); 373 if (dir_tx) 374 priv->gwca.tx_irq_bits[i] |= bit; 375 else 376 priv->gwca.rx_irq_bits[i] |= bit; 377 378 return 0; 379 380 out: 381 rswitch_gwca_queue_free(ndev, gq); 382 383 return -ENOMEM; 384 } 385 386 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) 387 { 388 desc->dptrl = cpu_to_le32(lower_32_bits(addr)); 389 desc->dptrh = upper_32_bits(addr) & 0xff; 390 } 391 392 static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc) 393 { 394 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; 395 } 396 397 static int rswitch_gwca_queue_format(struct net_device *ndev, 398 struct rswitch_private *priv, 399 struct rswitch_gwca_queue *gq) 400 { 401 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; 402 struct rswitch_ext_desc *desc; 403 struct rswitch_desc *linkfix; 404 dma_addr_t dma_addr; 405 unsigned int i; 406 407 memset(gq->tx_ring, 0, ring_size); 408 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { 409 if (!gq->dir_tx) { 410 dma_addr = dma_map_single(ndev->dev.parent, 411 gq->rx_bufs[i] + RSWITCH_HEADROOM, 412 RSWITCH_MAP_BUF_SIZE, 413 DMA_FROM_DEVICE); 414 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 415 goto err; 416 417 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); 418 rswitch_desc_set_dptr(&desc->desc, dma_addr); 419 desc->desc.die_dt = DT_FEMPTY | DIE; 420 } else { 421 desc->desc.die_dt = DT_EEMPTY | DIE; 422 } 423 } 424 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 425 desc->desc.die_dt = DT_LINKFIX; 426 427 linkfix = &priv->gwca.linkfix_table[gq->index]; 428 linkfix->die_dt = DT_LINKFIX; 429 rswitch_desc_set_dptr(linkfix, gq->ring_dma); 430 431 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, 432 priv->addr + GWDCC_OFFS(gq->index)); 433 434 return 0; 435 436 err: 437 if (!gq->dir_tx) { 438 for (desc = gq->tx_ring; i-- > 0; desc++) { 439 dma_addr = rswitch_desc_get_dptr(&desc->desc); 440 dma_unmap_single(ndev->dev.parent, dma_addr, 441 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); 442 } 443 } 444 445 return -ENOMEM; 446 } 447 448 static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv, 449 unsigned int start_index, 450 unsigned int num) 451 { 452 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 453 struct rswitch_ts_desc *desc; 454 unsigned int i, index; 455 456 for (i = 0; i < num; i++) { 457 index = (i + start_index) % gq->ring_size; 458 desc = &gq->ts_ring[index]; 459 desc->desc.die_dt = DT_FEMPTY_ND | DIE; 460 } 461 } 462 463 static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev, 464 struct rswitch_gwca_queue *gq, 465 unsigned int start_index, 466 unsigned int num) 467 { 468 struct rswitch_device *rdev = netdev_priv(ndev); 469 struct rswitch_ext_ts_desc *desc; 470 unsigned int i, index; 471 dma_addr_t dma_addr; 472 473 for (i = 0; i < num; i++) { 474 index = (i + start_index) % gq->ring_size; 475 desc = &gq->rx_ring[index]; 476 if (!gq->dir_tx) { 477 dma_addr = dma_map_single(ndev->dev.parent, 478 gq->rx_bufs[index] + RSWITCH_HEADROOM, 479 RSWITCH_MAP_BUF_SIZE, 480 DMA_FROM_DEVICE); 481 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 482 goto err; 483 484 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); 485 rswitch_desc_set_dptr(&desc->desc, dma_addr); 486 dma_wmb(); 487 desc->desc.die_dt = DT_FEMPTY | DIE; 488 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); 489 } else { 490 desc->desc.die_dt = DT_EEMPTY | DIE; 491 } 492 } 493 494 return 0; 495 496 err: 497 if (!gq->dir_tx) { 498 for (; i-- > 0; ) { 499 index = (i + start_index) % gq->ring_size; 500 desc = &gq->rx_ring[index]; 501 dma_addr = rswitch_desc_get_dptr(&desc->desc); 502 dma_unmap_single(ndev->dev.parent, dma_addr, 503 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); 504 } 505 } 506 507 return -ENOMEM; 508 } 509 510 static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev, 511 struct rswitch_private *priv, 512 struct rswitch_gwca_queue *gq) 513 { 514 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; 515 struct rswitch_ext_ts_desc *desc; 516 struct rswitch_desc *linkfix; 517 int err; 518 519 memset(gq->rx_ring, 0, ring_size); 520 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); 521 if (err < 0) 522 return err; 523 524 desc = &gq->rx_ring[gq->ring_size]; /* Last */ 525 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 526 desc->desc.die_dt = DT_LINKFIX; 527 528 linkfix = &priv->gwca.linkfix_table[gq->index]; 529 linkfix->die_dt = DT_LINKFIX; 530 rswitch_desc_set_dptr(linkfix, gq->ring_dma); 531 532 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | 533 GWDCC_ETS | GWDCC_EDE, 534 priv->addr + GWDCC_OFFS(gq->index)); 535 536 return 0; 537 } 538 539 static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv) 540 { 541 unsigned int i, num_queues = priv->gwca.num_queues; 542 struct rswitch_gwca *gwca = &priv->gwca; 543 struct device *dev = &priv->pdev->dev; 544 545 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; 546 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size, 547 &gwca->linkfix_table_dma, GFP_KERNEL); 548 if (!gwca->linkfix_table) 549 return -ENOMEM; 550 for (i = 0; i < num_queues; i++) 551 gwca->linkfix_table[i].die_dt = DT_EOS; 552 553 return 0; 554 } 555 556 static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) 557 { 558 struct rswitch_gwca *gwca = &priv->gwca; 559 560 if (gwca->linkfix_table) 561 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size, 562 gwca->linkfix_table, gwca->linkfix_table_dma); 563 gwca->linkfix_table = NULL; 564 } 565 566 static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) 567 { 568 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 569 struct rswitch_ts_desc *desc; 570 571 gq->ring_size = TS_RING_SIZE; 572 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, 573 sizeof(struct rswitch_ts_desc) * 574 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 575 576 if (!gq->ts_ring) 577 return -ENOMEM; 578 579 rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); 580 desc = &gq->ts_ring[gq->ring_size]; 581 desc->desc.die_dt = DT_LINKFIX; 582 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 583 584 return 0; 585 } 586 587 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) 588 { 589 struct rswitch_gwca_queue *gq; 590 unsigned int index; 591 592 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues); 593 if (index >= priv->gwca.num_queues) 594 return NULL; 595 set_bit(index, priv->gwca.used); 596 gq = &priv->gwca.queues[index]; 597 memset(gq, 0, sizeof(*gq)); 598 gq->index = index; 599 600 return gq; 601 } 602 603 static void rswitch_gwca_put(struct rswitch_private *priv, 604 struct rswitch_gwca_queue *gq) 605 { 606 clear_bit(gq->index, priv->gwca.used); 607 } 608 609 static int rswitch_txdmac_alloc(struct net_device *ndev) 610 { 611 struct rswitch_device *rdev = netdev_priv(ndev); 612 struct rswitch_private *priv = rdev->priv; 613 int err; 614 615 rdev->tx_queue = rswitch_gwca_get(priv); 616 if (!rdev->tx_queue) 617 return -EBUSY; 618 619 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE); 620 if (err < 0) { 621 rswitch_gwca_put(priv, rdev->tx_queue); 622 return err; 623 } 624 625 return 0; 626 } 627 628 static void rswitch_txdmac_free(struct net_device *ndev) 629 { 630 struct rswitch_device *rdev = netdev_priv(ndev); 631 632 rswitch_gwca_queue_free(ndev, rdev->tx_queue); 633 rswitch_gwca_put(rdev->priv, rdev->tx_queue); 634 } 635 636 static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index) 637 { 638 struct rswitch_device *rdev = priv->rdev[index]; 639 640 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue); 641 } 642 643 static int rswitch_rxdmac_alloc(struct net_device *ndev) 644 { 645 struct rswitch_device *rdev = netdev_priv(ndev); 646 struct rswitch_private *priv = rdev->priv; 647 int err; 648 649 rdev->rx_queue = rswitch_gwca_get(priv); 650 if (!rdev->rx_queue) 651 return -EBUSY; 652 653 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE); 654 if (err < 0) { 655 rswitch_gwca_put(priv, rdev->rx_queue); 656 return err; 657 } 658 659 return 0; 660 } 661 662 static void rswitch_rxdmac_free(struct net_device *ndev) 663 { 664 struct rswitch_device *rdev = netdev_priv(ndev); 665 666 rswitch_gwca_queue_free(ndev, rdev->rx_queue); 667 rswitch_gwca_put(rdev->priv, rdev->rx_queue); 668 } 669 670 static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index) 671 { 672 struct rswitch_device *rdev = priv->rdev[index]; 673 struct net_device *ndev = rdev->ndev; 674 675 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue); 676 } 677 678 static int rswitch_gwca_hw_init(struct rswitch_private *priv) 679 { 680 unsigned int i; 681 int err; 682 683 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 684 if (err < 0) 685 return err; 686 err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG); 687 if (err < 0) 688 return err; 689 690 err = rswitch_gwca_mcast_table_reset(priv); 691 if (err < 0) 692 return err; 693 err = rswitch_gwca_axi_ram_reset(priv); 694 if (err < 0) 695 return err; 696 697 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); 698 iowrite32(0, priv->addr + GWTTFC); 699 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); 700 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); 701 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); 702 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); 703 iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f), 704 priv->addr + GWMDNC); 705 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); 706 707 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); 708 709 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 710 err = rswitch_rxdmac_init(priv, i); 711 if (err < 0) 712 return err; 713 err = rswitch_txdmac_init(priv, i); 714 if (err < 0) 715 return err; 716 } 717 718 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 719 if (err < 0) 720 return err; 721 return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION); 722 } 723 724 static int rswitch_gwca_hw_deinit(struct rswitch_private *priv) 725 { 726 int err; 727 728 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 729 if (err < 0) 730 return err; 731 err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET); 732 if (err < 0) 733 return err; 734 735 return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 736 } 737 738 static int rswitch_gwca_halt(struct rswitch_private *priv) 739 { 740 int err; 741 742 priv->gwca_halt = true; 743 err = rswitch_gwca_hw_deinit(priv); 744 dev_err(&priv->pdev->dev, "halted (%d)\n", err); 745 746 return err; 747 } 748 749 static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev, 750 struct rswitch_gwca_queue *gq, 751 struct rswitch_ext_ts_desc *desc) 752 { 753 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc); 754 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; 755 u8 die_dt = desc->desc.die_dt & DT_MASK; 756 struct sk_buff *skb = NULL; 757 758 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE, 759 DMA_FROM_DEVICE); 760 761 /* The RX descriptor order will be one of the following: 762 * - FSINGLE 763 * - FSTART -> FEND 764 * - FSTART -> FMID -> FEND 765 */ 766 767 /* Check whether the descriptor is unexpected order */ 768 switch (die_dt) { 769 case DT_FSTART: 770 case DT_FSINGLE: 771 if (gq->skb_fstart) { 772 dev_kfree_skb_any(gq->skb_fstart); 773 gq->skb_fstart = NULL; 774 ndev->stats.rx_dropped++; 775 } 776 break; 777 case DT_FMID: 778 case DT_FEND: 779 if (!gq->skb_fstart) { 780 ndev->stats.rx_dropped++; 781 return NULL; 782 } 783 break; 784 default: 785 break; 786 } 787 788 /* Handle the descriptor */ 789 switch (die_dt) { 790 case DT_FSTART: 791 case DT_FSINGLE: 792 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE); 793 if (skb) { 794 skb_reserve(skb, RSWITCH_HEADROOM); 795 skb_put(skb, pkt_len); 796 gq->pkt_len = pkt_len; 797 if (die_dt == DT_FSTART) { 798 gq->skb_fstart = skb; 799 skb = NULL; 800 } 801 } 802 break; 803 case DT_FMID: 804 case DT_FEND: 805 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags, 806 virt_to_page(gq->rx_bufs[gq->cur]), 807 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM, 808 pkt_len, RSWITCH_BUF_SIZE); 809 if (die_dt == DT_FEND) { 810 skb = gq->skb_fstart; 811 gq->skb_fstart = NULL; 812 } 813 gq->pkt_len += pkt_len; 814 break; 815 default: 816 netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt); 817 break; 818 } 819 820 return skb; 821 } 822 823 static bool rswitch_rx(struct net_device *ndev, int *quota) 824 { 825 struct rswitch_device *rdev = netdev_priv(ndev); 826 struct rswitch_gwca_queue *gq = rdev->rx_queue; 827 struct rswitch_ext_ts_desc *desc; 828 int limit, boguscnt, ret; 829 struct sk_buff *skb; 830 unsigned int num; 831 u32 get_ts; 832 833 if (*quota <= 0) 834 return true; 835 836 boguscnt = min_t(int, gq->ring_size, *quota); 837 limit = boguscnt; 838 839 desc = &gq->rx_ring[gq->cur]; 840 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { 841 dma_rmb(); 842 skb = rswitch_rx_handle_desc(ndev, gq, desc); 843 if (!skb) 844 goto out; 845 846 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; 847 if (get_ts) { 848 struct skb_shared_hwtstamps *shhwtstamps; 849 struct timespec64 ts; 850 851 shhwtstamps = skb_hwtstamps(skb); 852 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 853 ts.tv_sec = __le32_to_cpu(desc->ts_sec); 854 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); 855 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); 856 } 857 skb->protocol = eth_type_trans(skb, ndev); 858 napi_gro_receive(&rdev->napi, skb); 859 rdev->ndev->stats.rx_packets++; 860 rdev->ndev->stats.rx_bytes += gq->pkt_len; 861 862 out: 863 gq->rx_bufs[gq->cur] = NULL; 864 gq->cur = rswitch_next_queue_index(gq, true, 1); 865 desc = &gq->rx_ring[gq->cur]; 866 867 if (--boguscnt <= 0) 868 break; 869 } 870 871 num = rswitch_get_num_cur_queues(gq); 872 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num); 873 if (ret < 0) 874 goto err; 875 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); 876 if (ret < 0) 877 goto err; 878 gq->dirty = rswitch_next_queue_index(gq, false, num); 879 880 *quota -= limit - boguscnt; 881 882 return boguscnt <= 0; 883 884 err: 885 rswitch_gwca_halt(rdev->priv); 886 887 return 0; 888 } 889 890 static void rswitch_tx_free(struct net_device *ndev) 891 { 892 struct rswitch_device *rdev = netdev_priv(ndev); 893 struct rswitch_gwca_queue *gq = rdev->tx_queue; 894 struct rswitch_ext_desc *desc; 895 struct sk_buff *skb; 896 897 desc = &gq->tx_ring[gq->dirty]; 898 while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) { 899 dma_rmb(); 900 901 skb = gq->skbs[gq->dirty]; 902 if (skb) { 903 rdev->ndev->stats.tx_packets++; 904 rdev->ndev->stats.tx_bytes += skb->len; 905 dma_unmap_single(ndev->dev.parent, 906 gq->unmap_addrs[gq->dirty], 907 skb->len, DMA_TO_DEVICE); 908 dev_kfree_skb_any(gq->skbs[gq->dirty]); 909 gq->skbs[gq->dirty] = NULL; 910 } 911 912 desc->desc.die_dt = DT_EEMPTY; 913 gq->dirty = rswitch_next_queue_index(gq, false, 1); 914 desc = &gq->tx_ring[gq->dirty]; 915 } 916 } 917 918 static int rswitch_poll(struct napi_struct *napi, int budget) 919 { 920 struct net_device *ndev = napi->dev; 921 struct rswitch_private *priv; 922 struct rswitch_device *rdev; 923 unsigned long flags; 924 int quota = budget; 925 926 rdev = netdev_priv(ndev); 927 priv = rdev->priv; 928 929 retry: 930 rswitch_tx_free(ndev); 931 932 if (rswitch_rx(ndev, "a)) 933 goto out; 934 else if (rdev->priv->gwca_halt) 935 goto err; 936 else if (rswitch_is_queue_rxed(rdev->rx_queue)) 937 goto retry; 938 939 netif_wake_subqueue(ndev, 0); 940 941 if (napi_complete_done(napi, budget - quota)) { 942 spin_lock_irqsave(&priv->lock, flags); 943 if (test_bit(rdev->port, priv->opened_ports)) { 944 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); 945 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); 946 } 947 spin_unlock_irqrestore(&priv->lock, flags); 948 } 949 950 out: 951 return budget - quota; 952 953 err: 954 napi_complete(napi); 955 956 return 0; 957 } 958 959 static void rswitch_queue_interrupt(struct net_device *ndev) 960 { 961 struct rswitch_device *rdev = netdev_priv(ndev); 962 963 if (napi_schedule_prep(&rdev->napi)) { 964 spin_lock(&rdev->priv->lock); 965 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); 966 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); 967 spin_unlock(&rdev->priv->lock); 968 __napi_schedule(&rdev->napi); 969 } 970 } 971 972 static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis) 973 { 974 struct rswitch_gwca_queue *gq; 975 unsigned int i, index, bit; 976 977 for (i = 0; i < priv->gwca.num_queues; i++) { 978 gq = &priv->gwca.queues[i]; 979 index = gq->index / 32; 980 bit = BIT(gq->index % 32); 981 if (!(dis[index] & bit)) 982 continue; 983 984 rswitch_ack_data_irq(priv, gq->index); 985 rswitch_queue_interrupt(gq->ndev); 986 } 987 988 return IRQ_HANDLED; 989 } 990 991 static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id) 992 { 993 struct rswitch_private *priv = dev_id; 994 u32 dis[RSWITCH_NUM_IRQ_REGS]; 995 irqreturn_t ret = IRQ_NONE; 996 997 rswitch_get_data_irq_status(priv, dis); 998 999 if (rswitch_is_any_data_irq(priv, dis, true) || 1000 rswitch_is_any_data_irq(priv, dis, false)) 1001 ret = rswitch_data_irq(priv, dis); 1002 1003 return ret; 1004 } 1005 1006 static int rswitch_gwca_request_irqs(struct rswitch_private *priv) 1007 { 1008 char *resource_name, *irq_name; 1009 int i, ret, irq; 1010 1011 for (i = 0; i < GWCA_NUM_IRQS; i++) { 1012 resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i); 1013 if (!resource_name) 1014 return -ENOMEM; 1015 1016 irq = platform_get_irq_byname(priv->pdev, resource_name); 1017 kfree(resource_name); 1018 if (irq < 0) 1019 return irq; 1020 1021 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, 1022 GWCA_IRQ_NAME, i); 1023 if (!irq_name) 1024 return -ENOMEM; 1025 1026 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq, 1027 0, irq_name, priv); 1028 if (ret < 0) 1029 return ret; 1030 } 1031 1032 return 0; 1033 } 1034 1035 static void rswitch_ts(struct rswitch_private *priv) 1036 { 1037 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 1038 struct skb_shared_hwtstamps shhwtstamps; 1039 struct rswitch_ts_desc *desc; 1040 struct rswitch_device *rdev; 1041 struct sk_buff *ts_skb; 1042 struct timespec64 ts; 1043 unsigned int num; 1044 u32 tag, port; 1045 1046 desc = &gq->ts_ring[gq->cur]; 1047 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { 1048 dma_rmb(); 1049 1050 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); 1051 if (unlikely(port >= RSWITCH_NUM_PORTS)) 1052 goto next; 1053 rdev = priv->rdev[port]; 1054 1055 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); 1056 if (unlikely(tag >= TS_TAGS_PER_PORT)) 1057 goto next; 1058 ts_skb = xchg(&rdev->ts_skb[tag], NULL); 1059 smp_mb(); /* order rdev->ts_skb[] read before bitmap update */ 1060 clear_bit(tag, rdev->ts_skb_used); 1061 1062 if (unlikely(!ts_skb)) 1063 goto next; 1064 1065 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 1066 ts.tv_sec = __le32_to_cpu(desc->ts_sec); 1067 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); 1068 shhwtstamps.hwtstamp = timespec64_to_ktime(ts); 1069 skb_tstamp_tx(ts_skb, &shhwtstamps); 1070 dev_consume_skb_irq(ts_skb); 1071 1072 next: 1073 gq->cur = rswitch_next_queue_index(gq, true, 1); 1074 desc = &gq->ts_ring[gq->cur]; 1075 } 1076 1077 num = rswitch_get_num_cur_queues(gq); 1078 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); 1079 gq->dirty = rswitch_next_queue_index(gq, false, num); 1080 } 1081 1082 static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id) 1083 { 1084 struct rswitch_private *priv = dev_id; 1085 1086 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { 1087 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); 1088 rswitch_ts(priv); 1089 1090 return IRQ_HANDLED; 1091 } 1092 1093 return IRQ_NONE; 1094 } 1095 1096 static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv) 1097 { 1098 int irq; 1099 1100 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); 1101 if (irq < 0) 1102 return irq; 1103 1104 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq, 1105 0, GWCA_TS_IRQ_NAME, priv); 1106 } 1107 1108 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */ 1109 static int rswitch_etha_change_mode(struct rswitch_etha *etha, 1110 enum rswitch_etha_mode mode) 1111 { 1112 int ret; 1113 1114 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index)) 1115 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1); 1116 1117 iowrite32(mode, etha->addr + EAMC); 1118 1119 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode); 1120 1121 if (mode == EAMC_OPC_DISABLE) 1122 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0); 1123 1124 return ret; 1125 } 1126 1127 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha) 1128 { 1129 u32 mrmac0 = ioread32(etha->addr + MRMAC0); 1130 u32 mrmac1 = ioread32(etha->addr + MRMAC1); 1131 u8 *mac = ða->mac_addr[0]; 1132 1133 mac[0] = (mrmac0 >> 8) & 0xFF; 1134 mac[1] = (mrmac0 >> 0) & 0xFF; 1135 mac[2] = (mrmac1 >> 24) & 0xFF; 1136 mac[3] = (mrmac1 >> 16) & 0xFF; 1137 mac[4] = (mrmac1 >> 8) & 0xFF; 1138 mac[5] = (mrmac1 >> 0) & 0xFF; 1139 } 1140 1141 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac) 1142 { 1143 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); 1144 iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], 1145 etha->addr + MRMAC1); 1146 } 1147 1148 static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha) 1149 { 1150 iowrite32(MLVC_PLV, etha->addr + MLVC); 1151 1152 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0); 1153 } 1154 1155 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) 1156 { 1157 u32 pis, lsc; 1158 1159 rswitch_etha_write_mac_address(etha, mac); 1160 1161 switch (etha->phy_interface) { 1162 case PHY_INTERFACE_MODE_SGMII: 1163 pis = MPIC_PIS_GMII; 1164 break; 1165 case PHY_INTERFACE_MODE_USXGMII: 1166 case PHY_INTERFACE_MODE_5GBASER: 1167 pis = MPIC_PIS_XGMII; 1168 break; 1169 default: 1170 pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC)); 1171 break; 1172 } 1173 1174 switch (etha->speed) { 1175 case 100: 1176 lsc = MPIC_LSC_100M; 1177 break; 1178 case 1000: 1179 lsc = MPIC_LSC_1G; 1180 break; 1181 case 2500: 1182 lsc = MPIC_LSC_2_5G; 1183 break; 1184 default: 1185 lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC)); 1186 break; 1187 } 1188 1189 rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC, 1190 FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc)); 1191 } 1192 1193 static void rswitch_etha_enable_mii(struct rswitch_etha *etha) 1194 { 1195 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT, 1196 FIELD_PREP(MPIC_PSMCS, etha->psmcs) | 1197 FIELD_PREP(MPIC_PSMHT, 0x06)); 1198 } 1199 1200 static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) 1201 { 1202 int err; 1203 1204 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); 1205 if (err < 0) 1206 return err; 1207 err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG); 1208 if (err < 0) 1209 return err; 1210 1211 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); 1212 rswitch_rmac_setting(etha, mac); 1213 rswitch_etha_enable_mii(etha); 1214 1215 err = rswitch_etha_wait_link_verification(etha); 1216 if (err < 0) 1217 return err; 1218 1219 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); 1220 if (err < 0) 1221 return err; 1222 1223 return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION); 1224 } 1225 1226 static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read, 1227 unsigned int mmf, unsigned int pda, 1228 unsigned int pra, unsigned int pop, 1229 unsigned int prd) 1230 { 1231 u32 val; 1232 int ret; 1233 1234 val = MPSM_PSME | 1235 FIELD_PREP(MPSM_MFF, mmf) | 1236 FIELD_PREP(MPSM_PDA, pda) | 1237 FIELD_PREP(MPSM_PRA, pra) | 1238 FIELD_PREP(MPSM_POP, pop) | 1239 FIELD_PREP(MPSM_PRD, prd); 1240 iowrite32(val, etha->addr + MPSM); 1241 1242 ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0); 1243 if (ret) 1244 return ret; 1245 1246 if (read) { 1247 val = ioread32(etha->addr + MPSM); 1248 ret = FIELD_GET(MPSM_PRD, val); 1249 } 1250 1251 return ret; 1252 } 1253 1254 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, 1255 int regad) 1256 { 1257 struct rswitch_etha *etha = bus->priv; 1258 int ret; 1259 1260 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1261 MPSM_POP_ADDRESS, regad); 1262 if (ret) 1263 return ret; 1264 1265 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad, 1266 MPSM_POP_READ_C45, 0); 1267 } 1268 1269 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, 1270 int regad, u16 val) 1271 { 1272 struct rswitch_etha *etha = bus->priv; 1273 int ret; 1274 1275 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1276 MPSM_POP_ADDRESS, regad); 1277 if (ret) 1278 return ret; 1279 1280 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1281 MPSM_POP_WRITE, val); 1282 } 1283 1284 static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad) 1285 { 1286 struct rswitch_etha *etha = bus->priv; 1287 1288 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad, 1289 MPSM_POP_READ_C22, 0); 1290 } 1291 1292 static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad, 1293 int regad, u16 val) 1294 { 1295 struct rswitch_etha *etha = bus->priv; 1296 1297 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad, 1298 MPSM_POP_WRITE, val); 1299 } 1300 1301 /* Call of_node_put(port) after done */ 1302 static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) 1303 { 1304 struct device_node *ports, *port; 1305 int err = 0; 1306 u32 index; 1307 1308 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node, 1309 "ethernet-ports"); 1310 if (!ports) 1311 return NULL; 1312 1313 for_each_available_child_of_node(ports, port) { 1314 err = of_property_read_u32(port, "reg", &index); 1315 if (err < 0) { 1316 port = NULL; 1317 goto out; 1318 } 1319 if (index == rdev->etha->index) 1320 break; 1321 } 1322 1323 out: 1324 of_node_put(ports); 1325 1326 return port; 1327 } 1328 1329 static int rswitch_etha_get_params(struct rswitch_device *rdev) 1330 { 1331 u32 max_speed; 1332 int err; 1333 1334 if (!rdev->np_port) 1335 return 0; /* ignored */ 1336 1337 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface); 1338 if (err) 1339 return err; 1340 1341 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed); 1342 if (!err) { 1343 rdev->etha->speed = max_speed; 1344 return 0; 1345 } 1346 1347 /* if no "max-speed" property, let's use default speed */ 1348 switch (rdev->etha->phy_interface) { 1349 case PHY_INTERFACE_MODE_MII: 1350 rdev->etha->speed = SPEED_100; 1351 break; 1352 case PHY_INTERFACE_MODE_SGMII: 1353 rdev->etha->speed = SPEED_1000; 1354 break; 1355 case PHY_INTERFACE_MODE_USXGMII: 1356 rdev->etha->speed = SPEED_2500; 1357 break; 1358 default: 1359 return -EINVAL; 1360 } 1361 1362 return 0; 1363 } 1364 1365 static int rswitch_mii_register(struct rswitch_device *rdev) 1366 { 1367 struct device_node *mdio_np; 1368 struct mii_bus *mii_bus; 1369 int err; 1370 1371 mii_bus = mdiobus_alloc(); 1372 if (!mii_bus) 1373 return -ENOMEM; 1374 1375 mii_bus->name = "rswitch_mii"; 1376 sprintf(mii_bus->id, "etha%d", rdev->etha->index); 1377 mii_bus->priv = rdev->etha; 1378 mii_bus->read_c45 = rswitch_etha_mii_read_c45; 1379 mii_bus->write_c45 = rswitch_etha_mii_write_c45; 1380 mii_bus->read = rswitch_etha_mii_read_c22; 1381 mii_bus->write = rswitch_etha_mii_write_c22; 1382 mii_bus->parent = &rdev->priv->pdev->dev; 1383 1384 mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); 1385 err = of_mdiobus_register(mii_bus, mdio_np); 1386 if (err < 0) { 1387 mdiobus_free(mii_bus); 1388 goto out; 1389 } 1390 1391 rdev->etha->mii = mii_bus; 1392 1393 out: 1394 of_node_put(mdio_np); 1395 1396 return err; 1397 } 1398 1399 static void rswitch_mii_unregister(struct rswitch_device *rdev) 1400 { 1401 if (rdev->etha->mii) { 1402 mdiobus_unregister(rdev->etha->mii); 1403 mdiobus_free(rdev->etha->mii); 1404 rdev->etha->mii = NULL; 1405 } 1406 } 1407 1408 static void rswitch_adjust_link(struct net_device *ndev) 1409 { 1410 struct rswitch_device *rdev = netdev_priv(ndev); 1411 struct phy_device *phydev = ndev->phydev; 1412 1413 if (phydev->link != rdev->etha->link) { 1414 phy_print_status(phydev); 1415 if (phydev->link) 1416 phy_power_on(rdev->serdes); 1417 else if (rdev->serdes->power_count) 1418 phy_power_off(rdev->serdes); 1419 1420 rdev->etha->link = phydev->link; 1421 1422 if (!rdev->priv->etha_no_runtime_change && 1423 phydev->speed != rdev->etha->speed) { 1424 rdev->etha->speed = phydev->speed; 1425 1426 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); 1427 phy_set_speed(rdev->serdes, rdev->etha->speed); 1428 } 1429 } 1430 } 1431 1432 static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev, 1433 struct phy_device *phydev) 1434 { 1435 if (!rdev->priv->etha_no_runtime_change) 1436 return; 1437 1438 switch (rdev->etha->speed) { 1439 case SPEED_2500: 1440 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1441 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); 1442 break; 1443 case SPEED_1000: 1444 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); 1445 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); 1446 break; 1447 case SPEED_100: 1448 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); 1449 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1450 break; 1451 default: 1452 break; 1453 } 1454 1455 phy_set_max_speed(phydev, rdev->etha->speed); 1456 } 1457 1458 static int rswitch_phy_device_init(struct rswitch_device *rdev) 1459 { 1460 struct phy_device *phydev; 1461 struct device_node *phy; 1462 int err = -ENOENT; 1463 1464 if (!rdev->np_port) 1465 return -ENODEV; 1466 1467 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0); 1468 if (!phy) 1469 return -ENODEV; 1470 1471 /* Set phydev->host_interfaces before calling of_phy_connect() to 1472 * configure the PHY with the information of host_interfaces. 1473 */ 1474 phydev = of_phy_find_device(phy); 1475 if (!phydev) 1476 goto out; 1477 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); 1478 phydev->mac_managed_pm = true; 1479 1480 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, 1481 rdev->etha->phy_interface); 1482 if (!phydev) 1483 goto out; 1484 1485 phy_set_max_speed(phydev, SPEED_2500); 1486 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); 1487 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); 1488 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); 1489 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 1490 rswitch_phy_remove_link_mode(rdev, phydev); 1491 1492 phy_attached_info(phydev); 1493 1494 err = 0; 1495 out: 1496 of_node_put(phy); 1497 1498 return err; 1499 } 1500 1501 static void rswitch_phy_device_deinit(struct rswitch_device *rdev) 1502 { 1503 if (rdev->ndev->phydev) 1504 phy_disconnect(rdev->ndev->phydev); 1505 } 1506 1507 static int rswitch_serdes_set_params(struct rswitch_device *rdev) 1508 { 1509 int err; 1510 1511 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET, 1512 rdev->etha->phy_interface); 1513 if (err < 0) 1514 return err; 1515 1516 return phy_set_speed(rdev->serdes, rdev->etha->speed); 1517 } 1518 1519 static int rswitch_ether_port_init_one(struct rswitch_device *rdev) 1520 { 1521 int err; 1522 1523 if (!rdev->etha->operated) { 1524 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); 1525 if (err < 0) 1526 return err; 1527 if (rdev->priv->etha_no_runtime_change) 1528 rdev->etha->operated = true; 1529 } 1530 1531 err = rswitch_mii_register(rdev); 1532 if (err < 0) 1533 return err; 1534 1535 err = rswitch_phy_device_init(rdev); 1536 if (err < 0) 1537 goto err_phy_device_init; 1538 1539 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL); 1540 if (IS_ERR(rdev->serdes)) { 1541 err = PTR_ERR(rdev->serdes); 1542 goto err_serdes_phy_get; 1543 } 1544 1545 err = rswitch_serdes_set_params(rdev); 1546 if (err < 0) 1547 goto err_serdes_set_params; 1548 1549 return 0; 1550 1551 err_serdes_set_params: 1552 err_serdes_phy_get: 1553 rswitch_phy_device_deinit(rdev); 1554 1555 err_phy_device_init: 1556 rswitch_mii_unregister(rdev); 1557 1558 return err; 1559 } 1560 1561 static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev) 1562 { 1563 rswitch_phy_device_deinit(rdev); 1564 rswitch_mii_unregister(rdev); 1565 } 1566 1567 static int rswitch_ether_port_init_all(struct rswitch_private *priv) 1568 { 1569 unsigned int i; 1570 int err; 1571 1572 rswitch_for_each_enabled_port(priv, i) { 1573 err = rswitch_ether_port_init_one(priv->rdev[i]); 1574 if (err) 1575 goto err_init_one; 1576 } 1577 1578 rswitch_for_each_enabled_port(priv, i) { 1579 err = phy_init(priv->rdev[i]->serdes); 1580 if (err) 1581 goto err_serdes; 1582 } 1583 1584 return 0; 1585 1586 err_serdes: 1587 rswitch_for_each_enabled_port_continue_reverse(priv, i) 1588 phy_exit(priv->rdev[i]->serdes); 1589 i = RSWITCH_NUM_PORTS; 1590 1591 err_init_one: 1592 rswitch_for_each_enabled_port_continue_reverse(priv, i) 1593 rswitch_ether_port_deinit_one(priv->rdev[i]); 1594 1595 return err; 1596 } 1597 1598 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) 1599 { 1600 unsigned int i; 1601 1602 rswitch_for_each_enabled_port(priv, i) { 1603 phy_exit(priv->rdev[i]->serdes); 1604 rswitch_ether_port_deinit_one(priv->rdev[i]); 1605 } 1606 } 1607 1608 static int rswitch_open(struct net_device *ndev) 1609 { 1610 struct rswitch_device *rdev = netdev_priv(ndev); 1611 unsigned long flags; 1612 1613 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) 1614 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); 1615 1616 napi_enable(&rdev->napi); 1617 1618 spin_lock_irqsave(&rdev->priv->lock, flags); 1619 bitmap_set(rdev->priv->opened_ports, rdev->port, 1); 1620 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); 1621 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); 1622 spin_unlock_irqrestore(&rdev->priv->lock, flags); 1623 1624 phy_start(ndev->phydev); 1625 1626 netif_start_queue(ndev); 1627 1628 if (rdev->brdev) 1629 rswitch_update_l2_offload(rdev->priv); 1630 1631 return 0; 1632 }; 1633 1634 static int rswitch_stop(struct net_device *ndev) 1635 { 1636 struct rswitch_device *rdev = netdev_priv(ndev); 1637 struct sk_buff *ts_skb; 1638 unsigned long flags; 1639 unsigned int tag; 1640 1641 netif_tx_stop_all_queues(ndev); 1642 1643 phy_stop(ndev->phydev); 1644 1645 spin_lock_irqsave(&rdev->priv->lock, flags); 1646 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); 1647 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); 1648 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); 1649 spin_unlock_irqrestore(&rdev->priv->lock, flags); 1650 1651 napi_disable(&rdev->napi); 1652 1653 if (rdev->brdev) 1654 rswitch_update_l2_offload(rdev->priv); 1655 1656 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) 1657 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); 1658 1659 for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); 1660 tag < TS_TAGS_PER_PORT; 1661 tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) { 1662 ts_skb = xchg(&rdev->ts_skb[tag], NULL); 1663 clear_bit(tag, rdev->ts_skb_used); 1664 if (ts_skb) 1665 dev_kfree_skb(ts_skb); 1666 } 1667 1668 return 0; 1669 }; 1670 1671 static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev, 1672 struct sk_buff *skb, 1673 struct rswitch_ext_desc *desc) 1674 { 1675 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | 1676 INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT); 1677 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 1678 unsigned int tag; 1679 1680 tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); 1681 if (tag == TS_TAGS_PER_PORT) 1682 return false; 1683 smp_mb(); /* order bitmap read before rdev->ts_skb[] write */ 1684 rdev->ts_skb[tag] = skb_get(skb); 1685 set_bit(tag, rdev->ts_skb_used); 1686 1687 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1688 desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC); 1689 1690 skb_tx_timestamp(skb); 1691 } 1692 1693 return true; 1694 } 1695 1696 static bool rswitch_ext_desc_set(struct rswitch_device *rdev, 1697 struct sk_buff *skb, 1698 struct rswitch_ext_desc *desc, 1699 dma_addr_t dma_addr, u16 len, u8 die_dt) 1700 { 1701 rswitch_desc_set_dptr(&desc->desc, dma_addr); 1702 desc->desc.info_ds = cpu_to_le16(len); 1703 if (!rswitch_ext_desc_set_info1(rdev, skb, desc)) 1704 return false; 1705 1706 dma_wmb(); 1707 1708 desc->desc.die_dt = die_dt; 1709 1710 return true; 1711 } 1712 1713 static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index) 1714 { 1715 if (nr_desc == 1) 1716 return DT_FSINGLE | DIE; 1717 if (index == 0) 1718 return DT_FSTART; 1719 if (nr_desc - 1 == index) 1720 return DT_FEND | DIE; 1721 return DT_FMID; 1722 } 1723 1724 static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len) 1725 { 1726 switch (die_dt & DT_MASK) { 1727 case DT_FSINGLE: 1728 case DT_FEND: 1729 return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE; 1730 case DT_FSTART: 1731 case DT_FMID: 1732 return RSWITCH_DESC_BUF_SIZE; 1733 default: 1734 return 0; 1735 } 1736 } 1737 1738 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1739 { 1740 struct rswitch_device *rdev = netdev_priv(ndev); 1741 struct rswitch_gwca_queue *gq = rdev->tx_queue; 1742 dma_addr_t dma_addr, dma_addr_orig; 1743 netdev_tx_t ret = NETDEV_TX_OK; 1744 struct rswitch_ext_desc *desc; 1745 unsigned int i, nr_desc; 1746 u8 die_dt; 1747 u16 len; 1748 1749 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1; 1750 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { 1751 netif_stop_subqueue(ndev, 0); 1752 return NETDEV_TX_BUSY; 1753 } 1754 1755 if (skb_put_padto(skb, ETH_ZLEN)) 1756 return ret; 1757 1758 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); 1759 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) 1760 goto err_kfree; 1761 1762 /* Stored the skb at the last descriptor to avoid skb free before hardware completes send */ 1763 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; 1764 gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig; 1765 1766 dma_wmb(); 1767 1768 /* DT_FSTART should be set at last. So, this is reverse order. */ 1769 for (i = nr_desc; i-- > 0; ) { 1770 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)]; 1771 die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i); 1772 dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE; 1773 len = rswitch_ext_desc_get_len(die_dt, skb->len); 1774 if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt)) 1775 goto err_unmap; 1776 } 1777 1778 gq->cur = rswitch_next_queue_index(gq, true, nr_desc); 1779 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); 1780 1781 return ret; 1782 1783 err_unmap: 1784 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; 1785 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); 1786 1787 err_kfree: 1788 dev_kfree_skb_any(skb); 1789 1790 return ret; 1791 } 1792 1793 static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) 1794 { 1795 return &ndev->stats; 1796 } 1797 1798 static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) 1799 { 1800 struct rswitch_device *rdev = netdev_priv(ndev); 1801 struct rcar_gen4_ptp_private *ptp_priv; 1802 struct hwtstamp_config config; 1803 1804 ptp_priv = rdev->priv->ptp_priv; 1805 1806 config.flags = 0; 1807 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : 1808 HWTSTAMP_TX_OFF; 1809 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { 1810 case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: 1811 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 1812 break; 1813 case RCAR_GEN4_RXTSTAMP_TYPE_ALL: 1814 config.rx_filter = HWTSTAMP_FILTER_ALL; 1815 break; 1816 default: 1817 config.rx_filter = HWTSTAMP_FILTER_NONE; 1818 break; 1819 } 1820 1821 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; 1822 } 1823 1824 static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) 1825 { 1826 struct rswitch_device *rdev = netdev_priv(ndev); 1827 u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; 1828 struct hwtstamp_config config; 1829 u32 tstamp_tx_ctrl; 1830 1831 if (copy_from_user(&config, req->ifr_data, sizeof(config))) 1832 return -EFAULT; 1833 1834 if (config.flags) 1835 return -EINVAL; 1836 1837 switch (config.tx_type) { 1838 case HWTSTAMP_TX_OFF: 1839 tstamp_tx_ctrl = 0; 1840 break; 1841 case HWTSTAMP_TX_ON: 1842 tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; 1843 break; 1844 default: 1845 return -ERANGE; 1846 } 1847 1848 switch (config.rx_filter) { 1849 case HWTSTAMP_FILTER_NONE: 1850 tstamp_rx_ctrl = 0; 1851 break; 1852 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1853 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; 1854 break; 1855 default: 1856 config.rx_filter = HWTSTAMP_FILTER_ALL; 1857 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; 1858 break; 1859 } 1860 1861 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; 1862 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; 1863 1864 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; 1865 } 1866 1867 static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) 1868 { 1869 if (!netif_running(ndev)) 1870 return -EINVAL; 1871 1872 switch (cmd) { 1873 case SIOCGHWTSTAMP: 1874 return rswitch_hwstamp_get(ndev, req); 1875 case SIOCSHWTSTAMP: 1876 return rswitch_hwstamp_set(ndev, req); 1877 default: 1878 return phy_mii_ioctl(ndev->phydev, req, cmd); 1879 } 1880 } 1881 1882 static int rswitch_get_port_parent_id(struct net_device *ndev, 1883 struct netdev_phys_item_id *ppid) 1884 { 1885 struct rswitch_device *rdev = netdev_priv(ndev); 1886 const char *name; 1887 1888 name = dev_name(&rdev->priv->pdev->dev); 1889 ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id)); 1890 memcpy(ppid->id, name, ppid->id_len); 1891 1892 return 0; 1893 } 1894 1895 static int rswitch_get_phys_port_name(struct net_device *ndev, 1896 char *name, size_t len) 1897 { 1898 struct rswitch_device *rdev = netdev_priv(ndev); 1899 1900 snprintf(name, len, "tsn%d", rdev->port); 1901 1902 return 0; 1903 } 1904 1905 static const struct net_device_ops rswitch_netdev_ops = { 1906 .ndo_open = rswitch_open, 1907 .ndo_stop = rswitch_stop, 1908 .ndo_start_xmit = rswitch_start_xmit, 1909 .ndo_get_stats = rswitch_get_stats, 1910 .ndo_eth_ioctl = rswitch_eth_ioctl, 1911 .ndo_get_port_parent_id = rswitch_get_port_parent_id, 1912 .ndo_get_phys_port_name = rswitch_get_phys_port_name, 1913 .ndo_validate_addr = eth_validate_addr, 1914 .ndo_set_mac_address = eth_mac_addr, 1915 }; 1916 1917 bool is_rdev(const struct net_device *ndev) 1918 { 1919 return (ndev->netdev_ops == &rswitch_netdev_ops); 1920 } 1921 1922 static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) 1923 { 1924 struct rswitch_device *rdev = netdev_priv(ndev); 1925 1926 info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock); 1927 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 1928 SOF_TIMESTAMPING_TX_HARDWARE | 1929 SOF_TIMESTAMPING_RX_HARDWARE | 1930 SOF_TIMESTAMPING_RAW_HARDWARE; 1931 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 1932 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); 1933 1934 return 0; 1935 } 1936 1937 static const struct ethtool_ops rswitch_ethtool_ops = { 1938 .get_ts_info = rswitch_get_ts_info, 1939 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1940 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1941 }; 1942 1943 static const struct of_device_id renesas_eth_sw_of_table[] = { 1944 { .compatible = "renesas,r8a779f0-ether-switch", }, 1945 { } 1946 }; 1947 MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table); 1948 1949 static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index) 1950 { 1951 struct rswitch_etha *etha = &priv->etha[index]; 1952 1953 memset(etha, 0, sizeof(*etha)); 1954 etha->index = index; 1955 etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; 1956 etha->coma_addr = priv->addr; 1957 1958 /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. 1959 * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply 1960 * both the numerator and the denominator by 10. 1961 */ 1962 etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; 1963 } 1964 1965 static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index) 1966 { 1967 struct platform_device *pdev = priv->pdev; 1968 struct rswitch_device *rdev; 1969 struct net_device *ndev; 1970 int err; 1971 1972 if (index >= RSWITCH_NUM_PORTS) 1973 return -EINVAL; 1974 1975 ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1); 1976 if (!ndev) 1977 return -ENOMEM; 1978 1979 SET_NETDEV_DEV(ndev, &pdev->dev); 1980 ether_setup(ndev); 1981 1982 rdev = netdev_priv(ndev); 1983 rdev->ndev = ndev; 1984 rdev->priv = priv; 1985 priv->rdev[index] = rdev; 1986 rdev->port = index; 1987 rdev->etha = &priv->etha[index]; 1988 rdev->addr = priv->addr; 1989 1990 ndev->base_addr = (unsigned long)rdev->addr; 1991 snprintf(ndev->name, IFNAMSIZ, "tsn%d", index); 1992 ndev->netdev_ops = &rswitch_netdev_ops; 1993 ndev->ethtool_ops = &rswitch_ethtool_ops; 1994 ndev->max_mtu = RSWITCH_MAX_MTU; 1995 ndev->min_mtu = ETH_MIN_MTU; 1996 1997 netif_napi_add(ndev, &rdev->napi, rswitch_poll); 1998 1999 rdev->np_port = rswitch_get_port_node(rdev); 2000 rdev->disabled = !rdev->np_port; 2001 err = of_get_ethdev_address(rdev->np_port, ndev); 2002 if (err) { 2003 if (is_valid_ether_addr(rdev->etha->mac_addr)) 2004 eth_hw_addr_set(ndev, rdev->etha->mac_addr); 2005 else 2006 eth_hw_addr_random(ndev); 2007 } 2008 2009 err = rswitch_etha_get_params(rdev); 2010 if (err < 0) 2011 goto out_get_params; 2012 2013 err = rswitch_rxdmac_alloc(ndev); 2014 if (err < 0) 2015 goto out_rxdmac; 2016 2017 err = rswitch_txdmac_alloc(ndev); 2018 if (err < 0) 2019 goto out_txdmac; 2020 2021 list_add_tail(&rdev->list, &priv->port_list); 2022 2023 return 0; 2024 2025 out_txdmac: 2026 rswitch_rxdmac_free(ndev); 2027 2028 out_rxdmac: 2029 out_get_params: 2030 of_node_put(rdev->np_port); 2031 netif_napi_del(&rdev->napi); 2032 free_netdev(ndev); 2033 2034 return err; 2035 } 2036 2037 static void rswitch_device_free(struct rswitch_private *priv, unsigned int index) 2038 { 2039 struct rswitch_device *rdev = priv->rdev[index]; 2040 struct net_device *ndev = rdev->ndev; 2041 2042 list_del(&rdev->list); 2043 rswitch_txdmac_free(ndev); 2044 rswitch_rxdmac_free(ndev); 2045 of_node_put(rdev->np_port); 2046 netif_napi_del(&rdev->napi); 2047 free_netdev(ndev); 2048 } 2049 2050 static int rswitch_init(struct rswitch_private *priv) 2051 { 2052 unsigned int i; 2053 int err; 2054 2055 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2056 rswitch_etha_init(priv, i); 2057 2058 rswitch_clock_enable(priv); 2059 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2060 rswitch_etha_read_mac_address(&priv->etha[i]); 2061 2062 rswitch_reset(priv); 2063 2064 rswitch_clock_enable(priv); 2065 rswitch_top_init(priv); 2066 err = rswitch_bpool_config(priv); 2067 if (err < 0) 2068 return err; 2069 2070 rswitch_coma_init(priv); 2071 2072 err = rswitch_gwca_linkfix_alloc(priv); 2073 if (err < 0) 2074 return -ENOMEM; 2075 2076 err = rswitch_gwca_ts_queue_alloc(priv); 2077 if (err < 0) 2078 goto err_ts_queue_alloc; 2079 2080 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 2081 err = rswitch_device_alloc(priv, i); 2082 if (err < 0) { 2083 for (; i-- > 0; ) 2084 rswitch_device_free(priv, i); 2085 goto err_device_alloc; 2086 } 2087 } 2088 2089 err = rswitch_fwd_init(priv); 2090 if (err < 0) 2091 goto err_fwd_init; 2092 2093 err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT, 2094 clk_get_rate(priv->clk)); 2095 if (err < 0) 2096 goto err_ptp_register; 2097 2098 err = rswitch_gwca_request_irqs(priv); 2099 if (err < 0) 2100 goto err_gwca_request_irq; 2101 2102 err = rswitch_gwca_ts_request_irqs(priv); 2103 if (err < 0) 2104 goto err_gwca_ts_request_irq; 2105 2106 err = rswitch_gwca_hw_init(priv); 2107 if (err < 0) 2108 goto err_gwca_hw_init; 2109 2110 err = rswitch_ether_port_init_all(priv); 2111 if (err) 2112 goto err_ether_port_init_all; 2113 2114 rswitch_for_each_enabled_port(priv, i) { 2115 err = register_netdev(priv->rdev[i]->ndev); 2116 if (err) { 2117 rswitch_for_each_enabled_port_continue_reverse(priv, i) 2118 unregister_netdev(priv->rdev[i]->ndev); 2119 goto err_register_netdev; 2120 } 2121 } 2122 2123 rswitch_for_each_enabled_port(priv, i) 2124 netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", 2125 priv->rdev[i]->ndev->dev_addr); 2126 2127 return 0; 2128 2129 err_register_netdev: 2130 rswitch_ether_port_deinit_all(priv); 2131 2132 err_ether_port_init_all: 2133 rswitch_gwca_hw_deinit(priv); 2134 2135 err_gwca_hw_init: 2136 err_gwca_ts_request_irq: 2137 err_gwca_request_irq: 2138 rcar_gen4_ptp_unregister(priv->ptp_priv); 2139 2140 err_fwd_init: 2141 err_ptp_register: 2142 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2143 rswitch_device_free(priv, i); 2144 2145 err_device_alloc: 2146 rswitch_gwca_ts_queue_free(priv); 2147 2148 err_ts_queue_alloc: 2149 rswitch_gwca_linkfix_free(priv); 2150 2151 return err; 2152 } 2153 2154 static const struct soc_device_attribute rswitch_soc_no_speed_change[] = { 2155 { .soc_id = "r8a779f0", .revision = "ES1.0" }, 2156 { /* Sentinel */ } 2157 }; 2158 2159 static int renesas_eth_sw_probe(struct platform_device *pdev) 2160 { 2161 const struct soc_device_attribute *attr; 2162 struct rswitch_private *priv; 2163 struct resource *res; 2164 int ret; 2165 2166 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base"); 2167 if (!res) { 2168 dev_err(&pdev->dev, "invalid resource\n"); 2169 return -EINVAL; 2170 } 2171 2172 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 2173 if (!priv) 2174 return -ENOMEM; 2175 2176 spin_lock_init(&priv->lock); 2177 2178 priv->clk = devm_clk_get(&pdev->dev, NULL); 2179 if (IS_ERR(priv->clk)) 2180 return PTR_ERR(priv->clk); 2181 2182 attr = soc_device_match(rswitch_soc_no_speed_change); 2183 if (attr) 2184 priv->etha_no_runtime_change = true; 2185 2186 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); 2187 if (!priv->ptp_priv) 2188 return -ENOMEM; 2189 2190 platform_set_drvdata(pdev, priv); 2191 priv->pdev = pdev; 2192 priv->addr = devm_ioremap_resource(&pdev->dev, res); 2193 if (IS_ERR(priv->addr)) 2194 return PTR_ERR(priv->addr); 2195 2196 priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; 2197 2198 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 2199 if (ret < 0) { 2200 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2201 if (ret < 0) 2202 return ret; 2203 } 2204 2205 priv->gwca.index = AGENT_INDEX_GWCA; 2206 priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, 2207 RSWITCH_MAX_NUM_QUEUES); 2208 priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, 2209 sizeof(*priv->gwca.queues), GFP_KERNEL); 2210 if (!priv->gwca.queues) 2211 return -ENOMEM; 2212 2213 INIT_LIST_HEAD(&priv->port_list); 2214 2215 pm_runtime_enable(&pdev->dev); 2216 pm_runtime_get_sync(&pdev->dev); 2217 2218 ret = rswitch_init(priv); 2219 if (ret < 0) { 2220 pm_runtime_put(&pdev->dev); 2221 pm_runtime_disable(&pdev->dev); 2222 return ret; 2223 } 2224 2225 if (list_empty(&priv->port_list)) 2226 dev_warn(&pdev->dev, "could not initialize any ports\n"); 2227 2228 ret = rswitch_register_notifiers(); 2229 if (ret) { 2230 dev_err(&pdev->dev, "could not register notifiers\n"); 2231 return ret; 2232 } 2233 2234 device_set_wakeup_capable(&pdev->dev, 1); 2235 2236 return ret; 2237 } 2238 2239 static void rswitch_deinit(struct rswitch_private *priv) 2240 { 2241 unsigned int i; 2242 2243 rswitch_gwca_hw_deinit(priv); 2244 rcar_gen4_ptp_unregister(priv->ptp_priv); 2245 2246 rswitch_for_each_enabled_port(priv, i) { 2247 struct rswitch_device *rdev = priv->rdev[i]; 2248 2249 unregister_netdev(rdev->ndev); 2250 rswitch_ether_port_deinit_one(rdev); 2251 phy_exit(priv->rdev[i]->serdes); 2252 } 2253 2254 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2255 rswitch_device_free(priv, i); 2256 2257 rswitch_gwca_ts_queue_free(priv); 2258 rswitch_gwca_linkfix_free(priv); 2259 2260 rswitch_clock_disable(priv); 2261 } 2262 2263 static void renesas_eth_sw_remove(struct platform_device *pdev) 2264 { 2265 struct rswitch_private *priv = platform_get_drvdata(pdev); 2266 2267 rswitch_unregister_notifiers(); 2268 rswitch_deinit(priv); 2269 2270 pm_runtime_put(&pdev->dev); 2271 pm_runtime_disable(&pdev->dev); 2272 2273 platform_set_drvdata(pdev, NULL); 2274 } 2275 2276 static int renesas_eth_sw_suspend(struct device *dev) 2277 { 2278 struct rswitch_private *priv = dev_get_drvdata(dev); 2279 struct net_device *ndev; 2280 unsigned int i; 2281 2282 rswitch_for_each_enabled_port(priv, i) { 2283 ndev = priv->rdev[i]->ndev; 2284 if (netif_running(ndev)) { 2285 netif_device_detach(ndev); 2286 rswitch_stop(ndev); 2287 } 2288 if (priv->rdev[i]->serdes->init_count) 2289 phy_exit(priv->rdev[i]->serdes); 2290 } 2291 2292 return 0; 2293 } 2294 2295 static int renesas_eth_sw_resume(struct device *dev) 2296 { 2297 struct rswitch_private *priv = dev_get_drvdata(dev); 2298 struct net_device *ndev; 2299 unsigned int i; 2300 2301 rswitch_for_each_enabled_port(priv, i) { 2302 phy_init(priv->rdev[i]->serdes); 2303 ndev = priv->rdev[i]->ndev; 2304 if (netif_running(ndev)) { 2305 rswitch_open(ndev); 2306 netif_device_attach(ndev); 2307 } 2308 } 2309 2310 return 0; 2311 } 2312 2313 static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, 2314 renesas_eth_sw_resume); 2315 2316 static struct platform_driver renesas_eth_sw_driver_platform = { 2317 .probe = renesas_eth_sw_probe, 2318 .remove = renesas_eth_sw_remove, 2319 .driver = { 2320 .name = "renesas_eth_sw", 2321 .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), 2322 .of_match_table = renesas_eth_sw_of_table, 2323 } 2324 }; 2325 module_platform_driver(renesas_eth_sw_driver_platform); 2326 MODULE_AUTHOR("Yoshihiro Shimoda"); 2327 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver"); 2328 MODULE_LICENSE("GPL"); 2329