1 // SPDX-License-Identifier: GPL-2.0 2 /* Renesas Ethernet Switch device driver 3 * 4 * Copyright (C) 2022-2025 Renesas Electronics Corporation 5 */ 6 7 #include <linux/clk.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/err.h> 10 #include <linux/etherdevice.h> 11 #include <linux/ethtool.h> 12 #include <linux/ip.h> 13 #include <linux/iopoll.h> 14 #include <linux/kernel.h> 15 #include <linux/list.h> 16 #include <linux/module.h> 17 #include <linux/net_tstamp.h> 18 #include <linux/of.h> 19 #include <linux/of_mdio.h> 20 #include <linux/of_net.h> 21 #include <linux/phy/phy.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/rtnetlink.h> 26 #include <linux/slab.h> 27 #include <linux/spinlock.h> 28 #include <linux/sys_soc.h> 29 30 #include "rswitch.h" 31 #include "rswitch_l2.h" 32 33 static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected) 34 { 35 u32 val; 36 37 return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected, 38 1, RSWITCH_TIMEOUT_US); 39 } 40 41 void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set) 42 { 43 iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg); 44 } 45 46 /* Common Agent block (COMA) */ 47 static void rswitch_reset(struct rswitch_private *priv) 48 { 49 iowrite32(RRC_RR, priv->addr + RRC); 50 iowrite32(RRC_RR_CLR, priv->addr + RRC); 51 } 52 53 static void rswitch_clock_enable(struct rswitch_private *priv) 54 { 55 iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC); 56 } 57 58 static void rswitch_clock_disable(struct rswitch_private *priv) 59 { 60 iowrite32(RCDC_RCD, priv->addr + RCDC); 61 } 62 63 static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, 64 unsigned int port) 65 { 66 u32 val = ioread32(coma_addr + RCEC); 67 68 if (val & RCEC_RCE) 69 return (val & BIT(port)) ? true : false; 70 else 71 return false; 72 } 73 74 static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port, 75 int enable) 76 { 77 u32 val; 78 79 if (enable) { 80 val = ioread32(coma_addr + RCEC); 81 iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC); 82 } else { 83 val = ioread32(coma_addr + RCDC); 84 iowrite32(val | BIT(port), coma_addr + RCDC); 85 } 86 } 87 88 static int rswitch_bpool_config(struct rswitch_private *priv) 89 { 90 u32 val; 91 92 val = ioread32(priv->addr + CABPIRM); 93 if (val & CABPIRM_BPR) 94 return 0; 95 96 iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM); 97 98 return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR); 99 } 100 101 static void rswitch_coma_init(struct rswitch_private *priv) 102 { 103 iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0); 104 } 105 106 /* R-Switch-2 block (TOP) */ 107 static void rswitch_top_init(struct rswitch_private *priv) 108 { 109 unsigned int i; 110 111 for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++) 112 iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i)); 113 } 114 115 /* Forwarding engine block (MFWD) */ 116 static int rswitch_fwd_init(struct rswitch_private *priv) 117 { 118 u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0); 119 unsigned int i; 120 u32 reg_val; 121 122 /* Start with empty configuration */ 123 for (i = 0; i < RSWITCH_NUM_AGENTS; i++) { 124 /* Disable all port features */ 125 iowrite32(0, priv->addr + FWPC0(i)); 126 /* Disallow L3 forwarding and direct descriptor forwarding */ 127 iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask), 128 priv->addr + FWPC1(i)); 129 /* Disallow L2 forwarding */ 130 iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask), 131 priv->addr + FWPC2(i)); 132 /* Disallow port based forwarding */ 133 iowrite32(0, priv->addr + FWPBFC(i)); 134 } 135 136 /* Configure MAC table aging */ 137 rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP, 138 FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US)); 139 140 reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME); 141 reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL; 142 iowrite32(reg_val, priv->addr + FWMACAGC); 143 144 /* For enabled ETHA ports, setup port based forwarding */ 145 rswitch_for_each_enabled_port(priv, i) { 146 /* Port based forwarding from port i to GWCA port */ 147 rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV, 148 FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index))); 149 /* Within GWCA port, forward to Rx queue for port i */ 150 iowrite32(priv->rdev[i]->rx_queue->index, 151 priv->addr + FWPBFCSDC(GWCA_INDEX, i)); 152 } 153 154 /* For GWCA port, allow direct descriptor forwarding */ 155 rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE); 156 157 /* Initialize hardware L2 forwarding table */ 158 159 /* Allow entire table to be used for "unsecure" entries */ 160 rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK); 161 162 /* Initialize MAC hash table */ 163 iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM); 164 165 return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0); 166 } 167 168 /* Gateway CPU agent block (GWCA) */ 169 static int rswitch_gwca_change_mode(struct rswitch_private *priv, 170 enum rswitch_gwca_mode mode) 171 { 172 int ret; 173 174 if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index)) 175 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1); 176 177 iowrite32(mode, priv->addr + GWMC); 178 179 ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode); 180 181 if (mode == GWMC_OPC_DISABLE) 182 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0); 183 184 return ret; 185 } 186 187 static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv) 188 { 189 iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM); 190 191 return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR); 192 } 193 194 static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv) 195 { 196 iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM); 197 198 return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR); 199 } 200 201 static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx) 202 { 203 u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits; 204 unsigned int i; 205 206 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { 207 if (dis[i] & mask[i]) 208 return true; 209 } 210 211 return false; 212 } 213 214 static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis) 215 { 216 unsigned int i; 217 218 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) { 219 dis[i] = ioread32(priv->addr + GWDIS(i)); 220 dis[i] &= ioread32(priv->addr + GWDIE(i)); 221 } 222 } 223 224 static void rswitch_enadis_data_irq(struct rswitch_private *priv, 225 unsigned int index, bool enable) 226 { 227 u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32); 228 229 iowrite32(BIT(index % 32), priv->addr + offs); 230 } 231 232 static void rswitch_ack_data_irq(struct rswitch_private *priv, 233 unsigned int index) 234 { 235 u32 offs = GWDIS(index / 32); 236 237 iowrite32(BIT(index % 32), priv->addr + offs); 238 } 239 240 static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, 241 bool cur, unsigned int num) 242 { 243 unsigned int index = cur ? gq->cur : gq->dirty; 244 245 if (index + num >= gq->ring_size) 246 index = (index + num) % gq->ring_size; 247 else 248 index += num; 249 250 return index; 251 } 252 253 static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq) 254 { 255 if (gq->cur >= gq->dirty) 256 return gq->cur - gq->dirty; 257 else 258 return gq->ring_size - gq->dirty + gq->cur; 259 } 260 261 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq) 262 { 263 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; 264 265 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) 266 return true; 267 268 return false; 269 } 270 271 static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq, 272 unsigned int start_index, 273 unsigned int num) 274 { 275 unsigned int i, index; 276 277 for (i = 0; i < num; i++) { 278 index = (i + start_index) % gq->ring_size; 279 if (gq->rx_bufs[index]) 280 continue; 281 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE); 282 if (!gq->rx_bufs[index]) 283 goto err; 284 } 285 286 return 0; 287 288 err: 289 for (; i-- > 0; ) { 290 index = (i + start_index) % gq->ring_size; 291 skb_free_frag(gq->rx_bufs[index]); 292 gq->rx_bufs[index] = NULL; 293 } 294 295 return -ENOMEM; 296 } 297 298 static void rswitch_gwca_queue_free(struct net_device *ndev, 299 struct rswitch_gwca_queue *gq) 300 { 301 unsigned int i; 302 303 if (!gq->dir_tx) { 304 dma_free_coherent(ndev->dev.parent, 305 sizeof(struct rswitch_ext_ts_desc) * 306 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma); 307 gq->rx_ring = NULL; 308 309 for (i = 0; i < gq->ring_size; i++) 310 skb_free_frag(gq->rx_bufs[i]); 311 kfree(gq->rx_bufs); 312 gq->rx_bufs = NULL; 313 } else { 314 dma_free_coherent(ndev->dev.parent, 315 sizeof(struct rswitch_ext_desc) * 316 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma); 317 gq->tx_ring = NULL; 318 kfree(gq->skbs); 319 gq->skbs = NULL; 320 kfree(gq->unmap_addrs); 321 gq->unmap_addrs = NULL; 322 } 323 } 324 325 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv) 326 { 327 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 328 329 dma_free_coherent(&priv->pdev->dev, 330 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1), 331 gq->ts_ring, gq->ring_dma); 332 gq->ts_ring = NULL; 333 } 334 335 static int rswitch_gwca_queue_alloc(struct net_device *ndev, 336 struct rswitch_private *priv, 337 struct rswitch_gwca_queue *gq, 338 bool dir_tx, unsigned int ring_size) 339 { 340 unsigned int i, bit; 341 342 gq->dir_tx = dir_tx; 343 gq->ring_size = ring_size; 344 gq->ndev = ndev; 345 346 if (!dir_tx) { 347 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL); 348 if (!gq->rx_bufs) 349 return -ENOMEM; 350 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) 351 goto out; 352 353 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, 354 sizeof(struct rswitch_ext_ts_desc) * 355 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 356 } else { 357 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); 358 if (!gq->skbs) 359 return -ENOMEM; 360 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL); 361 if (!gq->unmap_addrs) 362 goto out; 363 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, 364 sizeof(struct rswitch_ext_desc) * 365 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 366 } 367 368 if (!gq->rx_ring && !gq->tx_ring) 369 goto out; 370 371 i = gq->index / 32; 372 bit = BIT(gq->index % 32); 373 if (dir_tx) 374 priv->gwca.tx_irq_bits[i] |= bit; 375 else 376 priv->gwca.rx_irq_bits[i] |= bit; 377 378 return 0; 379 380 out: 381 rswitch_gwca_queue_free(ndev, gq); 382 383 return -ENOMEM; 384 } 385 386 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) 387 { 388 desc->dptrl = cpu_to_le32(lower_32_bits(addr)); 389 desc->dptrh = upper_32_bits(addr) & 0xff; 390 } 391 392 static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc) 393 { 394 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; 395 } 396 397 static int rswitch_gwca_queue_format(struct net_device *ndev, 398 struct rswitch_private *priv, 399 struct rswitch_gwca_queue *gq) 400 { 401 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; 402 struct rswitch_ext_desc *desc; 403 struct rswitch_desc *linkfix; 404 dma_addr_t dma_addr; 405 unsigned int i; 406 407 memset(gq->tx_ring, 0, ring_size); 408 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { 409 if (!gq->dir_tx) { 410 dma_addr = dma_map_single(ndev->dev.parent, 411 gq->rx_bufs[i] + RSWITCH_HEADROOM, 412 RSWITCH_MAP_BUF_SIZE, 413 DMA_FROM_DEVICE); 414 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 415 goto err; 416 417 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); 418 rswitch_desc_set_dptr(&desc->desc, dma_addr); 419 desc->desc.die_dt = DT_FEMPTY | DIE; 420 } else { 421 desc->desc.die_dt = DT_EEMPTY | DIE; 422 } 423 } 424 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 425 desc->desc.die_dt = DT_LINKFIX; 426 427 linkfix = &priv->gwca.linkfix_table[gq->index]; 428 linkfix->die_dt = DT_LINKFIX; 429 rswitch_desc_set_dptr(linkfix, gq->ring_dma); 430 431 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, 432 priv->addr + GWDCC_OFFS(gq->index)); 433 434 return 0; 435 436 err: 437 if (!gq->dir_tx) { 438 for (desc = gq->tx_ring; i-- > 0; desc++) { 439 dma_addr = rswitch_desc_get_dptr(&desc->desc); 440 dma_unmap_single(ndev->dev.parent, dma_addr, 441 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); 442 } 443 } 444 445 return -ENOMEM; 446 } 447 448 static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv, 449 unsigned int start_index, 450 unsigned int num) 451 { 452 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 453 struct rswitch_ts_desc *desc; 454 unsigned int i, index; 455 456 for (i = 0; i < num; i++) { 457 index = (i + start_index) % gq->ring_size; 458 desc = &gq->ts_ring[index]; 459 desc->desc.die_dt = DT_FEMPTY_ND | DIE; 460 } 461 } 462 463 static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev, 464 struct rswitch_gwca_queue *gq, 465 unsigned int start_index, 466 unsigned int num) 467 { 468 struct rswitch_device *rdev = netdev_priv(ndev); 469 struct rswitch_ext_ts_desc *desc; 470 unsigned int i, index; 471 dma_addr_t dma_addr; 472 473 for (i = 0; i < num; i++) { 474 index = (i + start_index) % gq->ring_size; 475 desc = &gq->rx_ring[index]; 476 if (!gq->dir_tx) { 477 dma_addr = dma_map_single(ndev->dev.parent, 478 gq->rx_bufs[index] + RSWITCH_HEADROOM, 479 RSWITCH_MAP_BUF_SIZE, 480 DMA_FROM_DEVICE); 481 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 482 goto err; 483 484 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); 485 rswitch_desc_set_dptr(&desc->desc, dma_addr); 486 dma_wmb(); 487 desc->desc.die_dt = DT_FEMPTY | DIE; 488 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); 489 } else { 490 desc->desc.die_dt = DT_EEMPTY | DIE; 491 } 492 } 493 494 return 0; 495 496 err: 497 if (!gq->dir_tx) { 498 for (; i-- > 0; ) { 499 index = (i + start_index) % gq->ring_size; 500 desc = &gq->rx_ring[index]; 501 dma_addr = rswitch_desc_get_dptr(&desc->desc); 502 dma_unmap_single(ndev->dev.parent, dma_addr, 503 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); 504 } 505 } 506 507 return -ENOMEM; 508 } 509 510 static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev, 511 struct rswitch_private *priv, 512 struct rswitch_gwca_queue *gq) 513 { 514 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; 515 struct rswitch_ext_ts_desc *desc; 516 struct rswitch_desc *linkfix; 517 int err; 518 519 memset(gq->rx_ring, 0, ring_size); 520 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); 521 if (err < 0) 522 return err; 523 524 desc = &gq->rx_ring[gq->ring_size]; /* Last */ 525 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 526 desc->desc.die_dt = DT_LINKFIX; 527 528 linkfix = &priv->gwca.linkfix_table[gq->index]; 529 linkfix->die_dt = DT_LINKFIX; 530 rswitch_desc_set_dptr(linkfix, gq->ring_dma); 531 532 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | 533 GWDCC_ETS | GWDCC_EDE, 534 priv->addr + GWDCC_OFFS(gq->index)); 535 536 return 0; 537 } 538 539 static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv) 540 { 541 unsigned int i, num_queues = priv->gwca.num_queues; 542 struct rswitch_gwca *gwca = &priv->gwca; 543 struct device *dev = &priv->pdev->dev; 544 545 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; 546 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size, 547 &gwca->linkfix_table_dma, GFP_KERNEL); 548 if (!gwca->linkfix_table) 549 return -ENOMEM; 550 for (i = 0; i < num_queues; i++) 551 gwca->linkfix_table[i].die_dt = DT_EOS; 552 553 return 0; 554 } 555 556 static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) 557 { 558 struct rswitch_gwca *gwca = &priv->gwca; 559 560 if (gwca->linkfix_table) 561 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size, 562 gwca->linkfix_table, gwca->linkfix_table_dma); 563 gwca->linkfix_table = NULL; 564 } 565 566 static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) 567 { 568 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 569 struct rswitch_ts_desc *desc; 570 571 gq->ring_size = TS_RING_SIZE; 572 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, 573 sizeof(struct rswitch_ts_desc) * 574 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 575 576 if (!gq->ts_ring) 577 return -ENOMEM; 578 579 rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); 580 desc = &gq->ts_ring[gq->ring_size]; 581 desc->desc.die_dt = DT_LINKFIX; 582 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 583 584 return 0; 585 } 586 587 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) 588 { 589 struct rswitch_gwca_queue *gq; 590 unsigned int index; 591 592 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues); 593 if (index >= priv->gwca.num_queues) 594 return NULL; 595 set_bit(index, priv->gwca.used); 596 gq = &priv->gwca.queues[index]; 597 memset(gq, 0, sizeof(*gq)); 598 gq->index = index; 599 600 return gq; 601 } 602 603 static void rswitch_gwca_put(struct rswitch_private *priv, 604 struct rswitch_gwca_queue *gq) 605 { 606 clear_bit(gq->index, priv->gwca.used); 607 } 608 609 static int rswitch_txdmac_alloc(struct net_device *ndev) 610 { 611 struct rswitch_device *rdev = netdev_priv(ndev); 612 struct rswitch_private *priv = rdev->priv; 613 int err; 614 615 rdev->tx_queue = rswitch_gwca_get(priv); 616 if (!rdev->tx_queue) 617 return -EBUSY; 618 619 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE); 620 if (err < 0) { 621 rswitch_gwca_put(priv, rdev->tx_queue); 622 return err; 623 } 624 625 return 0; 626 } 627 628 static void rswitch_txdmac_free(struct net_device *ndev) 629 { 630 struct rswitch_device *rdev = netdev_priv(ndev); 631 632 rswitch_gwca_queue_free(ndev, rdev->tx_queue); 633 rswitch_gwca_put(rdev->priv, rdev->tx_queue); 634 } 635 636 static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index) 637 { 638 struct rswitch_device *rdev = priv->rdev[index]; 639 640 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue); 641 } 642 643 static int rswitch_rxdmac_alloc(struct net_device *ndev) 644 { 645 struct rswitch_device *rdev = netdev_priv(ndev); 646 struct rswitch_private *priv = rdev->priv; 647 int err; 648 649 rdev->rx_queue = rswitch_gwca_get(priv); 650 if (!rdev->rx_queue) 651 return -EBUSY; 652 653 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE); 654 if (err < 0) { 655 rswitch_gwca_put(priv, rdev->rx_queue); 656 return err; 657 } 658 659 return 0; 660 } 661 662 static void rswitch_rxdmac_free(struct net_device *ndev) 663 { 664 struct rswitch_device *rdev = netdev_priv(ndev); 665 666 rswitch_gwca_queue_free(ndev, rdev->rx_queue); 667 rswitch_gwca_put(rdev->priv, rdev->rx_queue); 668 } 669 670 static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index) 671 { 672 struct rswitch_device *rdev = priv->rdev[index]; 673 struct net_device *ndev = rdev->ndev; 674 675 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue); 676 } 677 678 static int rswitch_gwca_hw_init(struct rswitch_private *priv) 679 { 680 unsigned int i; 681 int err; 682 683 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 684 if (err < 0) 685 return err; 686 err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG); 687 if (err < 0) 688 return err; 689 690 err = rswitch_gwca_mcast_table_reset(priv); 691 if (err < 0) 692 return err; 693 err = rswitch_gwca_axi_ram_reset(priv); 694 if (err < 0) 695 return err; 696 697 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); 698 iowrite32(0, priv->addr + GWTTFC); 699 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); 700 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); 701 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); 702 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); 703 iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f), 704 priv->addr + GWMDNC); 705 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); 706 707 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); 708 709 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 710 err = rswitch_rxdmac_init(priv, i); 711 if (err < 0) 712 return err; 713 err = rswitch_txdmac_init(priv, i); 714 if (err < 0) 715 return err; 716 } 717 718 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 719 if (err < 0) 720 return err; 721 return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION); 722 } 723 724 static int rswitch_gwca_hw_deinit(struct rswitch_private *priv) 725 { 726 int err; 727 728 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 729 if (err < 0) 730 return err; 731 err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET); 732 if (err < 0) 733 return err; 734 735 return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 736 } 737 738 static int rswitch_gwca_halt(struct rswitch_private *priv) 739 { 740 int err; 741 742 priv->gwca_halt = true; 743 err = rswitch_gwca_hw_deinit(priv); 744 dev_err(&priv->pdev->dev, "halted (%d)\n", err); 745 746 return err; 747 } 748 749 static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev, 750 struct rswitch_gwca_queue *gq, 751 struct rswitch_ext_ts_desc *desc) 752 { 753 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc); 754 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; 755 u8 die_dt = desc->desc.die_dt & DT_MASK; 756 struct sk_buff *skb = NULL; 757 758 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE, 759 DMA_FROM_DEVICE); 760 761 /* The RX descriptor order will be one of the following: 762 * - FSINGLE 763 * - FSTART -> FEND 764 * - FSTART -> FMID -> FEND 765 */ 766 767 /* Check whether the descriptor is unexpected order */ 768 switch (die_dt) { 769 case DT_FSTART: 770 case DT_FSINGLE: 771 if (gq->skb_fstart) { 772 dev_kfree_skb_any(gq->skb_fstart); 773 gq->skb_fstart = NULL; 774 ndev->stats.rx_dropped++; 775 } 776 break; 777 case DT_FMID: 778 case DT_FEND: 779 if (!gq->skb_fstart) { 780 ndev->stats.rx_dropped++; 781 return NULL; 782 } 783 break; 784 default: 785 break; 786 } 787 788 /* Handle the descriptor */ 789 switch (die_dt) { 790 case DT_FSTART: 791 case DT_FSINGLE: 792 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE); 793 if (skb) { 794 skb_reserve(skb, RSWITCH_HEADROOM); 795 skb_put(skb, pkt_len); 796 gq->pkt_len = pkt_len; 797 if (die_dt == DT_FSTART) { 798 gq->skb_fstart = skb; 799 skb = NULL; 800 } 801 } 802 break; 803 case DT_FMID: 804 case DT_FEND: 805 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags, 806 virt_to_page(gq->rx_bufs[gq->cur]), 807 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM, 808 pkt_len, RSWITCH_BUF_SIZE); 809 if (die_dt == DT_FEND) { 810 skb = gq->skb_fstart; 811 gq->skb_fstart = NULL; 812 } 813 gq->pkt_len += pkt_len; 814 break; 815 default: 816 netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt); 817 break; 818 } 819 820 return skb; 821 } 822 823 static bool rswitch_rx(struct net_device *ndev, int *quota) 824 { 825 struct rswitch_device *rdev = netdev_priv(ndev); 826 struct rswitch_gwca_queue *gq = rdev->rx_queue; 827 struct rswitch_ext_ts_desc *desc; 828 int limit, boguscnt, ret; 829 struct sk_buff *skb; 830 unsigned int num; 831 u32 get_ts; 832 833 if (*quota <= 0) 834 return true; 835 836 boguscnt = min_t(int, gq->ring_size, *quota); 837 limit = boguscnt; 838 839 desc = &gq->rx_ring[gq->cur]; 840 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { 841 dma_rmb(); 842 skb = rswitch_rx_handle_desc(ndev, gq, desc); 843 if (!skb) 844 goto out; 845 846 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; 847 if (get_ts) { 848 struct skb_shared_hwtstamps *shhwtstamps; 849 struct timespec64 ts; 850 851 shhwtstamps = skb_hwtstamps(skb); 852 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 853 ts.tv_sec = __le32_to_cpu(desc->ts_sec); 854 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); 855 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); 856 } 857 skb->protocol = eth_type_trans(skb, ndev); 858 napi_gro_receive(&rdev->napi, skb); 859 rdev->ndev->stats.rx_packets++; 860 rdev->ndev->stats.rx_bytes += gq->pkt_len; 861 862 out: 863 gq->rx_bufs[gq->cur] = NULL; 864 gq->cur = rswitch_next_queue_index(gq, true, 1); 865 desc = &gq->rx_ring[gq->cur]; 866 867 if (--boguscnt <= 0) 868 break; 869 } 870 871 num = rswitch_get_num_cur_queues(gq); 872 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num); 873 if (ret < 0) 874 goto err; 875 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); 876 if (ret < 0) 877 goto err; 878 gq->dirty = rswitch_next_queue_index(gq, false, num); 879 880 *quota -= limit - boguscnt; 881 882 return boguscnt <= 0; 883 884 err: 885 rswitch_gwca_halt(rdev->priv); 886 887 return 0; 888 } 889 890 static void rswitch_tx_free(struct net_device *ndev) 891 { 892 struct rswitch_device *rdev = netdev_priv(ndev); 893 struct rswitch_gwca_queue *gq = rdev->tx_queue; 894 struct rswitch_ext_desc *desc; 895 struct sk_buff *skb; 896 897 desc = &gq->tx_ring[gq->dirty]; 898 while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) { 899 dma_rmb(); 900 901 skb = gq->skbs[gq->dirty]; 902 if (skb) { 903 rdev->ndev->stats.tx_packets++; 904 rdev->ndev->stats.tx_bytes += skb->len; 905 dma_unmap_single(ndev->dev.parent, 906 gq->unmap_addrs[gq->dirty], 907 skb->len, DMA_TO_DEVICE); 908 dev_kfree_skb_any(gq->skbs[gq->dirty]); 909 gq->skbs[gq->dirty] = NULL; 910 } 911 912 desc->desc.die_dt = DT_EEMPTY; 913 gq->dirty = rswitch_next_queue_index(gq, false, 1); 914 desc = &gq->tx_ring[gq->dirty]; 915 } 916 } 917 918 static int rswitch_poll(struct napi_struct *napi, int budget) 919 { 920 struct net_device *ndev = napi->dev; 921 struct rswitch_private *priv; 922 struct rswitch_device *rdev; 923 unsigned long flags; 924 int quota = budget; 925 926 rdev = netdev_priv(ndev); 927 priv = rdev->priv; 928 929 retry: 930 rswitch_tx_free(ndev); 931 932 if (rswitch_rx(ndev, "a)) 933 goto out; 934 else if (rdev->priv->gwca_halt) 935 goto err; 936 else if (rswitch_is_queue_rxed(rdev->rx_queue)) 937 goto retry; 938 939 netif_wake_subqueue(ndev, 0); 940 941 if (napi_complete_done(napi, budget - quota)) { 942 spin_lock_irqsave(&priv->lock, flags); 943 if (test_bit(rdev->port, priv->opened_ports)) { 944 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); 945 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); 946 } 947 spin_unlock_irqrestore(&priv->lock, flags); 948 } 949 950 out: 951 return budget - quota; 952 953 err: 954 napi_complete(napi); 955 956 return 0; 957 } 958 959 static void rswitch_queue_interrupt(struct net_device *ndev) 960 { 961 struct rswitch_device *rdev = netdev_priv(ndev); 962 963 if (napi_schedule_prep(&rdev->napi)) { 964 spin_lock(&rdev->priv->lock); 965 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); 966 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); 967 spin_unlock(&rdev->priv->lock); 968 __napi_schedule(&rdev->napi); 969 } 970 } 971 972 static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis) 973 { 974 struct rswitch_gwca_queue *gq; 975 unsigned int i, index, bit; 976 977 for (i = 0; i < priv->gwca.num_queues; i++) { 978 gq = &priv->gwca.queues[i]; 979 index = gq->index / 32; 980 bit = BIT(gq->index % 32); 981 if (!(dis[index] & bit)) 982 continue; 983 984 rswitch_ack_data_irq(priv, gq->index); 985 rswitch_queue_interrupt(gq->ndev); 986 } 987 988 return IRQ_HANDLED; 989 } 990 991 static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id) 992 { 993 struct rswitch_private *priv = dev_id; 994 u32 dis[RSWITCH_NUM_IRQ_REGS]; 995 irqreturn_t ret = IRQ_NONE; 996 997 rswitch_get_data_irq_status(priv, dis); 998 999 if (rswitch_is_any_data_irq(priv, dis, true) || 1000 rswitch_is_any_data_irq(priv, dis, false)) 1001 ret = rswitch_data_irq(priv, dis); 1002 1003 return ret; 1004 } 1005 1006 static int rswitch_gwca_request_irqs(struct rswitch_private *priv) 1007 { 1008 char *resource_name, *irq_name; 1009 int i, ret, irq; 1010 1011 for (i = 0; i < GWCA_NUM_IRQS; i++) { 1012 resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i); 1013 if (!resource_name) 1014 return -ENOMEM; 1015 1016 irq = platform_get_irq_byname(priv->pdev, resource_name); 1017 kfree(resource_name); 1018 if (irq < 0) 1019 return irq; 1020 1021 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, 1022 GWCA_IRQ_NAME, i); 1023 if (!irq_name) 1024 return -ENOMEM; 1025 1026 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq, 1027 0, irq_name, priv); 1028 if (ret < 0) 1029 return ret; 1030 } 1031 1032 return 0; 1033 } 1034 1035 static void rswitch_ts(struct rswitch_private *priv) 1036 { 1037 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 1038 struct skb_shared_hwtstamps shhwtstamps; 1039 struct rswitch_ts_desc *desc; 1040 struct rswitch_device *rdev; 1041 struct sk_buff *ts_skb; 1042 struct timespec64 ts; 1043 unsigned int num; 1044 u32 tag, port; 1045 1046 desc = &gq->ts_ring[gq->cur]; 1047 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { 1048 dma_rmb(); 1049 1050 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); 1051 if (unlikely(port >= RSWITCH_NUM_PORTS)) 1052 goto next; 1053 rdev = priv->rdev[port]; 1054 1055 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); 1056 if (unlikely(tag >= TS_TAGS_PER_PORT)) 1057 goto next; 1058 ts_skb = xchg(&rdev->ts_skb[tag], NULL); 1059 smp_mb(); /* order rdev->ts_skb[] read before bitmap update */ 1060 clear_bit(tag, rdev->ts_skb_used); 1061 1062 if (unlikely(!ts_skb)) 1063 goto next; 1064 1065 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 1066 ts.tv_sec = __le32_to_cpu(desc->ts_sec); 1067 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); 1068 shhwtstamps.hwtstamp = timespec64_to_ktime(ts); 1069 skb_tstamp_tx(ts_skb, &shhwtstamps); 1070 dev_consume_skb_irq(ts_skb); 1071 1072 next: 1073 gq->cur = rswitch_next_queue_index(gq, true, 1); 1074 desc = &gq->ts_ring[gq->cur]; 1075 } 1076 1077 num = rswitch_get_num_cur_queues(gq); 1078 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); 1079 gq->dirty = rswitch_next_queue_index(gq, false, num); 1080 } 1081 1082 static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id) 1083 { 1084 struct rswitch_private *priv = dev_id; 1085 1086 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { 1087 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); 1088 rswitch_ts(priv); 1089 1090 return IRQ_HANDLED; 1091 } 1092 1093 return IRQ_NONE; 1094 } 1095 1096 static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv) 1097 { 1098 int irq; 1099 1100 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); 1101 if (irq < 0) 1102 return irq; 1103 1104 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq, 1105 0, GWCA_TS_IRQ_NAME, priv); 1106 } 1107 1108 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */ 1109 static int rswitch_etha_change_mode(struct rswitch_etha *etha, 1110 enum rswitch_etha_mode mode) 1111 { 1112 int ret; 1113 1114 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index)) 1115 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1); 1116 1117 iowrite32(mode, etha->addr + EAMC); 1118 1119 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode); 1120 1121 if (mode == EAMC_OPC_DISABLE) 1122 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0); 1123 1124 return ret; 1125 } 1126 1127 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha) 1128 { 1129 u32 mrmac0 = ioread32(etha->addr + MRMAC0); 1130 u32 mrmac1 = ioread32(etha->addr + MRMAC1); 1131 u8 *mac = ða->mac_addr[0]; 1132 1133 mac[0] = (mrmac0 >> 8) & 0xFF; 1134 mac[1] = (mrmac0 >> 0) & 0xFF; 1135 mac[2] = (mrmac1 >> 24) & 0xFF; 1136 mac[3] = (mrmac1 >> 16) & 0xFF; 1137 mac[4] = (mrmac1 >> 8) & 0xFF; 1138 mac[5] = (mrmac1 >> 0) & 0xFF; 1139 } 1140 1141 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac) 1142 { 1143 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); 1144 iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], 1145 etha->addr + MRMAC1); 1146 } 1147 1148 static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha) 1149 { 1150 iowrite32(MLVC_PLV, etha->addr + MLVC); 1151 1152 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0); 1153 } 1154 1155 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) 1156 { 1157 u32 pis, lsc; 1158 1159 rswitch_etha_write_mac_address(etha, mac); 1160 1161 switch (etha->phy_interface) { 1162 case PHY_INTERFACE_MODE_SGMII: 1163 pis = MPIC_PIS_GMII; 1164 break; 1165 case PHY_INTERFACE_MODE_USXGMII: 1166 case PHY_INTERFACE_MODE_5GBASER: 1167 pis = MPIC_PIS_XGMII; 1168 break; 1169 default: 1170 pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC)); 1171 break; 1172 } 1173 1174 switch (etha->speed) { 1175 case 100: 1176 lsc = MPIC_LSC_100M; 1177 break; 1178 case 1000: 1179 lsc = MPIC_LSC_1G; 1180 break; 1181 case 2500: 1182 lsc = MPIC_LSC_2_5G; 1183 break; 1184 default: 1185 lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC)); 1186 break; 1187 } 1188 1189 rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC, 1190 FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc)); 1191 } 1192 1193 static void rswitch_etha_enable_mii(struct rswitch_etha *etha) 1194 { 1195 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT, 1196 FIELD_PREP(MPIC_PSMCS, etha->psmcs) | 1197 FIELD_PREP(MPIC_PSMHT, 0x06)); 1198 } 1199 1200 static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) 1201 { 1202 int err; 1203 1204 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); 1205 if (err < 0) 1206 return err; 1207 err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG); 1208 if (err < 0) 1209 return err; 1210 1211 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); 1212 rswitch_rmac_setting(etha, mac); 1213 rswitch_etha_enable_mii(etha); 1214 1215 err = rswitch_etha_wait_link_verification(etha); 1216 if (err < 0) 1217 return err; 1218 1219 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); 1220 if (err < 0) 1221 return err; 1222 1223 return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION); 1224 } 1225 1226 static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read, 1227 unsigned int mmf, unsigned int pda, 1228 unsigned int pra, unsigned int pop, 1229 unsigned int prd) 1230 { 1231 u32 val; 1232 int ret; 1233 1234 val = MPSM_PSME | 1235 FIELD_PREP(MPSM_MFF, mmf) | 1236 FIELD_PREP(MPSM_PDA, pda) | 1237 FIELD_PREP(MPSM_PRA, pra) | 1238 FIELD_PREP(MPSM_POP, pop) | 1239 FIELD_PREP(MPSM_PRD, prd); 1240 iowrite32(val, etha->addr + MPSM); 1241 1242 ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0); 1243 if (ret) 1244 return ret; 1245 1246 if (read) { 1247 val = ioread32(etha->addr + MPSM); 1248 ret = FIELD_GET(MPSM_PRD, val); 1249 } 1250 1251 return ret; 1252 } 1253 1254 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, 1255 int regad) 1256 { 1257 struct rswitch_etha *etha = bus->priv; 1258 int ret; 1259 1260 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1261 MPSM_POP_ADDRESS, regad); 1262 if (ret) 1263 return ret; 1264 1265 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad, 1266 MPSM_POP_READ_C45, 0); 1267 } 1268 1269 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, 1270 int regad, u16 val) 1271 { 1272 struct rswitch_etha *etha = bus->priv; 1273 int ret; 1274 1275 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1276 MPSM_POP_ADDRESS, regad); 1277 if (ret) 1278 return ret; 1279 1280 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1281 MPSM_POP_WRITE, val); 1282 } 1283 1284 static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad) 1285 { 1286 struct rswitch_etha *etha = bus->priv; 1287 1288 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad, 1289 MPSM_POP_READ_C22, 0); 1290 } 1291 1292 static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad, 1293 int regad, u16 val) 1294 { 1295 struct rswitch_etha *etha = bus->priv; 1296 1297 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad, 1298 MPSM_POP_WRITE, val); 1299 } 1300 1301 /* Call of_node_put(port) after done */ 1302 static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) 1303 { 1304 struct device_node *ports, *port; 1305 int err = 0; 1306 u32 index; 1307 1308 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node, 1309 "ethernet-ports"); 1310 if (!ports) 1311 return NULL; 1312 1313 for_each_available_child_of_node(ports, port) { 1314 err = of_property_read_u32(port, "reg", &index); 1315 if (err < 0) { 1316 port = NULL; 1317 goto out; 1318 } 1319 if (index == rdev->etha->index) 1320 break; 1321 } 1322 1323 out: 1324 of_node_put(ports); 1325 1326 return port; 1327 } 1328 1329 static int rswitch_etha_get_params(struct rswitch_device *rdev) 1330 { 1331 u32 max_speed; 1332 int err; 1333 1334 if (!rdev->np_port) 1335 return 0; /* ignored */ 1336 1337 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface); 1338 if (err) 1339 return err; 1340 1341 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed); 1342 if (!err) { 1343 rdev->etha->speed = max_speed; 1344 return 0; 1345 } 1346 1347 /* if no "max-speed" property, let's use default speed */ 1348 switch (rdev->etha->phy_interface) { 1349 case PHY_INTERFACE_MODE_MII: 1350 rdev->etha->speed = SPEED_100; 1351 break; 1352 case PHY_INTERFACE_MODE_SGMII: 1353 rdev->etha->speed = SPEED_1000; 1354 break; 1355 case PHY_INTERFACE_MODE_USXGMII: 1356 rdev->etha->speed = SPEED_2500; 1357 break; 1358 default: 1359 return -EINVAL; 1360 } 1361 1362 return 0; 1363 } 1364 1365 static int rswitch_mii_register(struct rswitch_device *rdev) 1366 { 1367 struct device_node *mdio_np; 1368 struct mii_bus *mii_bus; 1369 int err; 1370 1371 mii_bus = mdiobus_alloc(); 1372 if (!mii_bus) 1373 return -ENOMEM; 1374 1375 mii_bus->name = "rswitch_mii"; 1376 sprintf(mii_bus->id, "etha%d", rdev->etha->index); 1377 mii_bus->priv = rdev->etha; 1378 mii_bus->read_c45 = rswitch_etha_mii_read_c45; 1379 mii_bus->write_c45 = rswitch_etha_mii_write_c45; 1380 mii_bus->read = rswitch_etha_mii_read_c22; 1381 mii_bus->write = rswitch_etha_mii_write_c22; 1382 mii_bus->parent = &rdev->priv->pdev->dev; 1383 1384 mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); 1385 err = of_mdiobus_register(mii_bus, mdio_np); 1386 if (err < 0) { 1387 mdiobus_free(mii_bus); 1388 goto out; 1389 } 1390 1391 rdev->etha->mii = mii_bus; 1392 1393 out: 1394 of_node_put(mdio_np); 1395 1396 return err; 1397 } 1398 1399 static void rswitch_mii_unregister(struct rswitch_device *rdev) 1400 { 1401 if (rdev->etha->mii) { 1402 mdiobus_unregister(rdev->etha->mii); 1403 mdiobus_free(rdev->etha->mii); 1404 rdev->etha->mii = NULL; 1405 } 1406 } 1407 1408 static void rswitch_adjust_link(struct net_device *ndev) 1409 { 1410 struct rswitch_device *rdev = netdev_priv(ndev); 1411 struct phy_device *phydev = ndev->phydev; 1412 1413 if (phydev->link != rdev->etha->link) { 1414 phy_print_status(phydev); 1415 if (phydev->link) 1416 phy_power_on(rdev->serdes); 1417 else if (rdev->serdes->power_count) 1418 phy_power_off(rdev->serdes); 1419 1420 rdev->etha->link = phydev->link; 1421 1422 if (!rdev->priv->etha_no_runtime_change && 1423 phydev->speed != rdev->etha->speed) { 1424 rdev->etha->speed = phydev->speed; 1425 1426 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); 1427 phy_set_speed(rdev->serdes, rdev->etha->speed); 1428 } 1429 } 1430 } 1431 1432 static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev, 1433 struct phy_device *phydev) 1434 { 1435 if (!rdev->priv->etha_no_runtime_change) 1436 return; 1437 1438 switch (rdev->etha->speed) { 1439 case SPEED_2500: 1440 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1441 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); 1442 break; 1443 case SPEED_1000: 1444 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); 1445 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); 1446 break; 1447 case SPEED_100: 1448 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); 1449 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1450 break; 1451 default: 1452 break; 1453 } 1454 1455 phy_set_max_speed(phydev, rdev->etha->speed); 1456 } 1457 1458 static int rswitch_phy_device_init(struct rswitch_device *rdev) 1459 { 1460 struct phy_device *phydev; 1461 struct device_node *phy; 1462 int err = -ENOENT; 1463 1464 if (!rdev->np_port) 1465 return -ENODEV; 1466 1467 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0); 1468 if (!phy) 1469 return -ENODEV; 1470 1471 /* Set phydev->host_interfaces before calling of_phy_connect() to 1472 * configure the PHY with the information of host_interfaces. 1473 */ 1474 phydev = of_phy_find_device(phy); 1475 if (!phydev) 1476 goto out; 1477 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); 1478 phydev->mac_managed_pm = true; 1479 1480 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, 1481 rdev->etha->phy_interface); 1482 if (!phydev) 1483 goto out; 1484 1485 phy_set_max_speed(phydev, SPEED_2500); 1486 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); 1487 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); 1488 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); 1489 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 1490 rswitch_phy_remove_link_mode(rdev, phydev); 1491 1492 phy_attached_info(phydev); 1493 1494 err = 0; 1495 out: 1496 of_node_put(phy); 1497 1498 return err; 1499 } 1500 1501 static void rswitch_phy_device_deinit(struct rswitch_device *rdev) 1502 { 1503 if (rdev->ndev->phydev) 1504 phy_disconnect(rdev->ndev->phydev); 1505 } 1506 1507 static int rswitch_serdes_set_params(struct rswitch_device *rdev) 1508 { 1509 int err; 1510 1511 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET, 1512 rdev->etha->phy_interface); 1513 if (err < 0) 1514 return err; 1515 1516 return phy_set_speed(rdev->serdes, rdev->etha->speed); 1517 } 1518 1519 static int rswitch_ether_port_init_one(struct rswitch_device *rdev) 1520 { 1521 int err; 1522 1523 if (!rdev->etha->operated) { 1524 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); 1525 if (err < 0) 1526 return err; 1527 if (rdev->priv->etha_no_runtime_change) 1528 rdev->etha->operated = true; 1529 } 1530 1531 err = rswitch_mii_register(rdev); 1532 if (err < 0) 1533 return err; 1534 1535 err = rswitch_phy_device_init(rdev); 1536 if (err < 0) 1537 goto err_phy_device_init; 1538 1539 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL); 1540 if (IS_ERR(rdev->serdes)) { 1541 err = PTR_ERR(rdev->serdes); 1542 goto err_serdes_phy_get; 1543 } 1544 1545 err = rswitch_serdes_set_params(rdev); 1546 if (err < 0) 1547 goto err_serdes_set_params; 1548 1549 return 0; 1550 1551 err_serdes_set_params: 1552 err_serdes_phy_get: 1553 rswitch_phy_device_deinit(rdev); 1554 1555 err_phy_device_init: 1556 rswitch_mii_unregister(rdev); 1557 1558 return err; 1559 } 1560 1561 static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev) 1562 { 1563 rswitch_phy_device_deinit(rdev); 1564 rswitch_mii_unregister(rdev); 1565 } 1566 1567 static int rswitch_ether_port_init_all(struct rswitch_private *priv) 1568 { 1569 unsigned int i; 1570 int err; 1571 1572 rswitch_for_each_enabled_port(priv, i) { 1573 err = rswitch_ether_port_init_one(priv->rdev[i]); 1574 if (err) 1575 goto err_init_one; 1576 } 1577 1578 rswitch_for_each_enabled_port(priv, i) { 1579 err = phy_init(priv->rdev[i]->serdes); 1580 if (err) 1581 goto err_serdes; 1582 } 1583 1584 return 0; 1585 1586 err_serdes: 1587 rswitch_for_each_enabled_port_continue_reverse(priv, i) 1588 phy_exit(priv->rdev[i]->serdes); 1589 i = RSWITCH_NUM_PORTS; 1590 1591 err_init_one: 1592 rswitch_for_each_enabled_port_continue_reverse(priv, i) 1593 rswitch_ether_port_deinit_one(priv->rdev[i]); 1594 1595 return err; 1596 } 1597 1598 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) 1599 { 1600 unsigned int i; 1601 1602 rswitch_for_each_enabled_port(priv, i) { 1603 phy_exit(priv->rdev[i]->serdes); 1604 rswitch_ether_port_deinit_one(priv->rdev[i]); 1605 } 1606 } 1607 1608 static int rswitch_open(struct net_device *ndev) 1609 { 1610 struct rswitch_device *rdev = netdev_priv(ndev); 1611 unsigned long flags; 1612 1613 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) 1614 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); 1615 1616 napi_enable(&rdev->napi); 1617 1618 spin_lock_irqsave(&rdev->priv->lock, flags); 1619 bitmap_set(rdev->priv->opened_ports, rdev->port, 1); 1620 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); 1621 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); 1622 spin_unlock_irqrestore(&rdev->priv->lock, flags); 1623 1624 phy_start(ndev->phydev); 1625 1626 netif_start_queue(ndev); 1627 1628 if (rdev->brdev) 1629 rswitch_update_l2_offload(rdev->priv); 1630 1631 return 0; 1632 }; 1633 1634 static int rswitch_stop(struct net_device *ndev) 1635 { 1636 struct rswitch_device *rdev = netdev_priv(ndev); 1637 struct sk_buff *ts_skb; 1638 unsigned long flags; 1639 unsigned int tag; 1640 1641 netif_tx_stop_all_queues(ndev); 1642 1643 phy_stop(ndev->phydev); 1644 1645 spin_lock_irqsave(&rdev->priv->lock, flags); 1646 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); 1647 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); 1648 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); 1649 spin_unlock_irqrestore(&rdev->priv->lock, flags); 1650 1651 napi_disable(&rdev->napi); 1652 1653 if (rdev->brdev) 1654 rswitch_update_l2_offload(rdev->priv); 1655 1656 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) 1657 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); 1658 1659 for_each_set_bit(tag, rdev->ts_skb_used, TS_TAGS_PER_PORT) { 1660 ts_skb = xchg(&rdev->ts_skb[tag], NULL); 1661 clear_bit(tag, rdev->ts_skb_used); 1662 if (ts_skb) 1663 dev_kfree_skb(ts_skb); 1664 } 1665 1666 return 0; 1667 }; 1668 1669 static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev, 1670 struct sk_buff *skb, 1671 struct rswitch_ext_desc *desc) 1672 { 1673 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | 1674 INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT); 1675 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 1676 unsigned int tag; 1677 1678 tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); 1679 if (tag == TS_TAGS_PER_PORT) 1680 return false; 1681 smp_mb(); /* order bitmap read before rdev->ts_skb[] write */ 1682 rdev->ts_skb[tag] = skb_get(skb); 1683 set_bit(tag, rdev->ts_skb_used); 1684 1685 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1686 desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC); 1687 1688 skb_tx_timestamp(skb); 1689 } 1690 1691 return true; 1692 } 1693 1694 static bool rswitch_ext_desc_set(struct rswitch_device *rdev, 1695 struct sk_buff *skb, 1696 struct rswitch_ext_desc *desc, 1697 dma_addr_t dma_addr, u16 len, u8 die_dt) 1698 { 1699 rswitch_desc_set_dptr(&desc->desc, dma_addr); 1700 desc->desc.info_ds = cpu_to_le16(len); 1701 if (!rswitch_ext_desc_set_info1(rdev, skb, desc)) 1702 return false; 1703 1704 dma_wmb(); 1705 1706 desc->desc.die_dt = die_dt; 1707 1708 return true; 1709 } 1710 1711 static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index) 1712 { 1713 if (nr_desc == 1) 1714 return DT_FSINGLE | DIE; 1715 if (index == 0) 1716 return DT_FSTART; 1717 if (nr_desc - 1 == index) 1718 return DT_FEND | DIE; 1719 return DT_FMID; 1720 } 1721 1722 static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len) 1723 { 1724 switch (die_dt & DT_MASK) { 1725 case DT_FSINGLE: 1726 case DT_FEND: 1727 return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE; 1728 case DT_FSTART: 1729 case DT_FMID: 1730 return RSWITCH_DESC_BUF_SIZE; 1731 default: 1732 return 0; 1733 } 1734 } 1735 1736 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1737 { 1738 struct rswitch_device *rdev = netdev_priv(ndev); 1739 struct rswitch_gwca_queue *gq = rdev->tx_queue; 1740 dma_addr_t dma_addr, dma_addr_orig; 1741 netdev_tx_t ret = NETDEV_TX_OK; 1742 struct rswitch_ext_desc *desc; 1743 unsigned int i, nr_desc; 1744 u8 die_dt; 1745 u16 len; 1746 1747 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1; 1748 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { 1749 netif_stop_subqueue(ndev, 0); 1750 return NETDEV_TX_BUSY; 1751 } 1752 1753 if (skb_put_padto(skb, ETH_ZLEN)) 1754 return ret; 1755 1756 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); 1757 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) 1758 goto err_kfree; 1759 1760 /* Stored the skb at the last descriptor to avoid skb free before hardware completes send */ 1761 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; 1762 gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig; 1763 1764 dma_wmb(); 1765 1766 /* DT_FSTART should be set at last. So, this is reverse order. */ 1767 for (i = nr_desc; i-- > 0; ) { 1768 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)]; 1769 die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i); 1770 dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE; 1771 len = rswitch_ext_desc_get_len(die_dt, skb->len); 1772 if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt)) 1773 goto err_unmap; 1774 } 1775 1776 gq->cur = rswitch_next_queue_index(gq, true, nr_desc); 1777 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); 1778 1779 return ret; 1780 1781 err_unmap: 1782 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; 1783 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); 1784 1785 err_kfree: 1786 dev_kfree_skb_any(skb); 1787 1788 return ret; 1789 } 1790 1791 static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) 1792 { 1793 return &ndev->stats; 1794 } 1795 1796 static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) 1797 { 1798 struct rswitch_device *rdev = netdev_priv(ndev); 1799 struct rcar_gen4_ptp_private *ptp_priv; 1800 struct hwtstamp_config config; 1801 1802 ptp_priv = rdev->priv->ptp_priv; 1803 1804 config.flags = 0; 1805 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : 1806 HWTSTAMP_TX_OFF; 1807 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { 1808 case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: 1809 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 1810 break; 1811 case RCAR_GEN4_RXTSTAMP_TYPE_ALL: 1812 config.rx_filter = HWTSTAMP_FILTER_ALL; 1813 break; 1814 default: 1815 config.rx_filter = HWTSTAMP_FILTER_NONE; 1816 break; 1817 } 1818 1819 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; 1820 } 1821 1822 static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) 1823 { 1824 struct rswitch_device *rdev = netdev_priv(ndev); 1825 u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; 1826 struct hwtstamp_config config; 1827 u32 tstamp_tx_ctrl; 1828 1829 if (copy_from_user(&config, req->ifr_data, sizeof(config))) 1830 return -EFAULT; 1831 1832 if (config.flags) 1833 return -EINVAL; 1834 1835 switch (config.tx_type) { 1836 case HWTSTAMP_TX_OFF: 1837 tstamp_tx_ctrl = 0; 1838 break; 1839 case HWTSTAMP_TX_ON: 1840 tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; 1841 break; 1842 default: 1843 return -ERANGE; 1844 } 1845 1846 switch (config.rx_filter) { 1847 case HWTSTAMP_FILTER_NONE: 1848 tstamp_rx_ctrl = 0; 1849 break; 1850 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1851 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; 1852 break; 1853 default: 1854 config.rx_filter = HWTSTAMP_FILTER_ALL; 1855 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; 1856 break; 1857 } 1858 1859 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; 1860 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; 1861 1862 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; 1863 } 1864 1865 static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) 1866 { 1867 if (!netif_running(ndev)) 1868 return -EINVAL; 1869 1870 switch (cmd) { 1871 case SIOCGHWTSTAMP: 1872 return rswitch_hwstamp_get(ndev, req); 1873 case SIOCSHWTSTAMP: 1874 return rswitch_hwstamp_set(ndev, req); 1875 default: 1876 return phy_mii_ioctl(ndev->phydev, req, cmd); 1877 } 1878 } 1879 1880 static int rswitch_get_port_parent_id(struct net_device *ndev, 1881 struct netdev_phys_item_id *ppid) 1882 { 1883 struct rswitch_device *rdev = netdev_priv(ndev); 1884 const char *name; 1885 1886 name = dev_name(&rdev->priv->pdev->dev); 1887 ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id)); 1888 memcpy(ppid->id, name, ppid->id_len); 1889 1890 return 0; 1891 } 1892 1893 static int rswitch_get_phys_port_name(struct net_device *ndev, 1894 char *name, size_t len) 1895 { 1896 struct rswitch_device *rdev = netdev_priv(ndev); 1897 1898 snprintf(name, len, "tsn%d", rdev->port); 1899 1900 return 0; 1901 } 1902 1903 static const struct net_device_ops rswitch_netdev_ops = { 1904 .ndo_open = rswitch_open, 1905 .ndo_stop = rswitch_stop, 1906 .ndo_start_xmit = rswitch_start_xmit, 1907 .ndo_get_stats = rswitch_get_stats, 1908 .ndo_eth_ioctl = rswitch_eth_ioctl, 1909 .ndo_get_port_parent_id = rswitch_get_port_parent_id, 1910 .ndo_get_phys_port_name = rswitch_get_phys_port_name, 1911 .ndo_validate_addr = eth_validate_addr, 1912 .ndo_set_mac_address = eth_mac_addr, 1913 }; 1914 1915 bool is_rdev(const struct net_device *ndev) 1916 { 1917 return (ndev->netdev_ops == &rswitch_netdev_ops); 1918 } 1919 1920 static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) 1921 { 1922 struct rswitch_device *rdev = netdev_priv(ndev); 1923 1924 info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock); 1925 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 1926 SOF_TIMESTAMPING_TX_HARDWARE | 1927 SOF_TIMESTAMPING_RX_HARDWARE | 1928 SOF_TIMESTAMPING_RAW_HARDWARE; 1929 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 1930 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); 1931 1932 return 0; 1933 } 1934 1935 static const struct ethtool_ops rswitch_ethtool_ops = { 1936 .get_ts_info = rswitch_get_ts_info, 1937 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1938 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1939 }; 1940 1941 static const struct of_device_id renesas_eth_sw_of_table[] = { 1942 { .compatible = "renesas,r8a779f0-ether-switch", }, 1943 { } 1944 }; 1945 MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table); 1946 1947 static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index) 1948 { 1949 struct rswitch_etha *etha = &priv->etha[index]; 1950 1951 memset(etha, 0, sizeof(*etha)); 1952 etha->index = index; 1953 etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; 1954 etha->coma_addr = priv->addr; 1955 1956 /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. 1957 * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply 1958 * both the numerator and the denominator by 10. 1959 */ 1960 etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; 1961 } 1962 1963 static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index) 1964 { 1965 struct platform_device *pdev = priv->pdev; 1966 struct rswitch_device *rdev; 1967 struct net_device *ndev; 1968 int err; 1969 1970 if (index >= RSWITCH_NUM_PORTS) 1971 return -EINVAL; 1972 1973 ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1); 1974 if (!ndev) 1975 return -ENOMEM; 1976 1977 SET_NETDEV_DEV(ndev, &pdev->dev); 1978 ether_setup(ndev); 1979 1980 rdev = netdev_priv(ndev); 1981 rdev->ndev = ndev; 1982 rdev->priv = priv; 1983 priv->rdev[index] = rdev; 1984 rdev->port = index; 1985 rdev->etha = &priv->etha[index]; 1986 rdev->addr = priv->addr; 1987 1988 ndev->base_addr = (unsigned long)rdev->addr; 1989 snprintf(ndev->name, IFNAMSIZ, "tsn%d", index); 1990 ndev->netdev_ops = &rswitch_netdev_ops; 1991 ndev->ethtool_ops = &rswitch_ethtool_ops; 1992 ndev->max_mtu = RSWITCH_MAX_MTU; 1993 ndev->min_mtu = ETH_MIN_MTU; 1994 1995 netif_napi_add(ndev, &rdev->napi, rswitch_poll); 1996 1997 rdev->np_port = rswitch_get_port_node(rdev); 1998 rdev->disabled = !rdev->np_port; 1999 err = of_get_ethdev_address(rdev->np_port, ndev); 2000 if (err) { 2001 if (is_valid_ether_addr(rdev->etha->mac_addr)) 2002 eth_hw_addr_set(ndev, rdev->etha->mac_addr); 2003 else 2004 eth_hw_addr_random(ndev); 2005 } 2006 2007 err = rswitch_etha_get_params(rdev); 2008 if (err < 0) 2009 goto out_get_params; 2010 2011 err = rswitch_rxdmac_alloc(ndev); 2012 if (err < 0) 2013 goto out_rxdmac; 2014 2015 err = rswitch_txdmac_alloc(ndev); 2016 if (err < 0) 2017 goto out_txdmac; 2018 2019 list_add_tail(&rdev->list, &priv->port_list); 2020 2021 return 0; 2022 2023 out_txdmac: 2024 rswitch_rxdmac_free(ndev); 2025 2026 out_rxdmac: 2027 out_get_params: 2028 of_node_put(rdev->np_port); 2029 netif_napi_del(&rdev->napi); 2030 free_netdev(ndev); 2031 2032 return err; 2033 } 2034 2035 static void rswitch_device_free(struct rswitch_private *priv, unsigned int index) 2036 { 2037 struct rswitch_device *rdev = priv->rdev[index]; 2038 struct net_device *ndev = rdev->ndev; 2039 2040 list_del(&rdev->list); 2041 rswitch_txdmac_free(ndev); 2042 rswitch_rxdmac_free(ndev); 2043 of_node_put(rdev->np_port); 2044 netif_napi_del(&rdev->napi); 2045 free_netdev(ndev); 2046 } 2047 2048 static int rswitch_init(struct rswitch_private *priv) 2049 { 2050 unsigned int i; 2051 int err; 2052 2053 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2054 rswitch_etha_init(priv, i); 2055 2056 rswitch_clock_enable(priv); 2057 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2058 rswitch_etha_read_mac_address(&priv->etha[i]); 2059 2060 rswitch_reset(priv); 2061 2062 rswitch_clock_enable(priv); 2063 rswitch_top_init(priv); 2064 err = rswitch_bpool_config(priv); 2065 if (err < 0) 2066 return err; 2067 2068 rswitch_coma_init(priv); 2069 2070 err = rswitch_gwca_linkfix_alloc(priv); 2071 if (err < 0) 2072 return -ENOMEM; 2073 2074 err = rswitch_gwca_ts_queue_alloc(priv); 2075 if (err < 0) 2076 goto err_ts_queue_alloc; 2077 2078 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 2079 err = rswitch_device_alloc(priv, i); 2080 if (err < 0) { 2081 for (; i-- > 0; ) 2082 rswitch_device_free(priv, i); 2083 goto err_device_alloc; 2084 } 2085 } 2086 2087 err = rswitch_fwd_init(priv); 2088 if (err < 0) 2089 goto err_fwd_init; 2090 2091 err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk)); 2092 if (err < 0) 2093 goto err_ptp_register; 2094 2095 err = rswitch_gwca_request_irqs(priv); 2096 if (err < 0) 2097 goto err_gwca_request_irq; 2098 2099 err = rswitch_gwca_ts_request_irqs(priv); 2100 if (err < 0) 2101 goto err_gwca_ts_request_irq; 2102 2103 err = rswitch_gwca_hw_init(priv); 2104 if (err < 0) 2105 goto err_gwca_hw_init; 2106 2107 err = rswitch_ether_port_init_all(priv); 2108 if (err) 2109 goto err_ether_port_init_all; 2110 2111 rswitch_for_each_enabled_port(priv, i) { 2112 err = register_netdev(priv->rdev[i]->ndev); 2113 if (err) { 2114 rswitch_for_each_enabled_port_continue_reverse(priv, i) 2115 unregister_netdev(priv->rdev[i]->ndev); 2116 goto err_register_netdev; 2117 } 2118 } 2119 2120 rswitch_for_each_enabled_port(priv, i) 2121 netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", 2122 priv->rdev[i]->ndev->dev_addr); 2123 2124 return 0; 2125 2126 err_register_netdev: 2127 rswitch_ether_port_deinit_all(priv); 2128 2129 err_ether_port_init_all: 2130 rswitch_gwca_hw_deinit(priv); 2131 2132 err_gwca_hw_init: 2133 err_gwca_ts_request_irq: 2134 err_gwca_request_irq: 2135 rcar_gen4_ptp_unregister(priv->ptp_priv); 2136 2137 err_fwd_init: 2138 err_ptp_register: 2139 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2140 rswitch_device_free(priv, i); 2141 2142 err_device_alloc: 2143 rswitch_gwca_ts_queue_free(priv); 2144 2145 err_ts_queue_alloc: 2146 rswitch_gwca_linkfix_free(priv); 2147 2148 return err; 2149 } 2150 2151 static const struct soc_device_attribute rswitch_soc_no_speed_change[] = { 2152 { .soc_id = "r8a779f0", .revision = "ES1.0" }, 2153 { /* Sentinel */ } 2154 }; 2155 2156 static int renesas_eth_sw_probe(struct platform_device *pdev) 2157 { 2158 const struct soc_device_attribute *attr; 2159 struct rswitch_private *priv; 2160 struct resource *res; 2161 int ret; 2162 2163 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base"); 2164 if (!res) { 2165 dev_err(&pdev->dev, "invalid resource\n"); 2166 return -EINVAL; 2167 } 2168 2169 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 2170 if (!priv) 2171 return -ENOMEM; 2172 2173 spin_lock_init(&priv->lock); 2174 2175 priv->clk = devm_clk_get(&pdev->dev, NULL); 2176 if (IS_ERR(priv->clk)) 2177 return PTR_ERR(priv->clk); 2178 2179 attr = soc_device_match(rswitch_soc_no_speed_change); 2180 if (attr) 2181 priv->etha_no_runtime_change = true; 2182 2183 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); 2184 if (!priv->ptp_priv) 2185 return -ENOMEM; 2186 2187 platform_set_drvdata(pdev, priv); 2188 priv->pdev = pdev; 2189 priv->addr = devm_ioremap_resource(&pdev->dev, res); 2190 if (IS_ERR(priv->addr)) 2191 return PTR_ERR(priv->addr); 2192 2193 priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; 2194 2195 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 2196 if (ret < 0) { 2197 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2198 if (ret < 0) 2199 return ret; 2200 } 2201 2202 priv->gwca.index = AGENT_INDEX_GWCA; 2203 priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, 2204 RSWITCH_MAX_NUM_QUEUES); 2205 priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, 2206 sizeof(*priv->gwca.queues), GFP_KERNEL); 2207 if (!priv->gwca.queues) 2208 return -ENOMEM; 2209 2210 INIT_LIST_HEAD(&priv->port_list); 2211 2212 pm_runtime_enable(&pdev->dev); 2213 pm_runtime_get_sync(&pdev->dev); 2214 2215 ret = rswitch_init(priv); 2216 if (ret < 0) { 2217 pm_runtime_put(&pdev->dev); 2218 pm_runtime_disable(&pdev->dev); 2219 return ret; 2220 } 2221 2222 if (list_empty(&priv->port_list)) 2223 dev_warn(&pdev->dev, "could not initialize any ports\n"); 2224 2225 ret = rswitch_register_notifiers(); 2226 if (ret) { 2227 dev_err(&pdev->dev, "could not register notifiers\n"); 2228 return ret; 2229 } 2230 2231 device_set_wakeup_capable(&pdev->dev, 1); 2232 2233 return ret; 2234 } 2235 2236 static void rswitch_deinit(struct rswitch_private *priv) 2237 { 2238 unsigned int i; 2239 2240 rswitch_gwca_hw_deinit(priv); 2241 rcar_gen4_ptp_unregister(priv->ptp_priv); 2242 2243 rswitch_for_each_enabled_port(priv, i) { 2244 struct rswitch_device *rdev = priv->rdev[i]; 2245 2246 unregister_netdev(rdev->ndev); 2247 rswitch_ether_port_deinit_one(rdev); 2248 phy_exit(priv->rdev[i]->serdes); 2249 } 2250 2251 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2252 rswitch_device_free(priv, i); 2253 2254 rswitch_gwca_ts_queue_free(priv); 2255 rswitch_gwca_linkfix_free(priv); 2256 2257 rswitch_clock_disable(priv); 2258 } 2259 2260 static void renesas_eth_sw_remove(struct platform_device *pdev) 2261 { 2262 struct rswitch_private *priv = platform_get_drvdata(pdev); 2263 2264 rswitch_unregister_notifiers(); 2265 rswitch_deinit(priv); 2266 2267 pm_runtime_put(&pdev->dev); 2268 pm_runtime_disable(&pdev->dev); 2269 2270 platform_set_drvdata(pdev, NULL); 2271 } 2272 2273 static int renesas_eth_sw_suspend(struct device *dev) 2274 { 2275 struct rswitch_private *priv = dev_get_drvdata(dev); 2276 struct net_device *ndev; 2277 unsigned int i; 2278 2279 rswitch_for_each_enabled_port(priv, i) { 2280 ndev = priv->rdev[i]->ndev; 2281 if (netif_running(ndev)) { 2282 netif_device_detach(ndev); 2283 rswitch_stop(ndev); 2284 } 2285 if (priv->rdev[i]->serdes->init_count) 2286 phy_exit(priv->rdev[i]->serdes); 2287 } 2288 2289 return 0; 2290 } 2291 2292 static int renesas_eth_sw_resume(struct device *dev) 2293 { 2294 struct rswitch_private *priv = dev_get_drvdata(dev); 2295 struct net_device *ndev; 2296 unsigned int i; 2297 2298 rswitch_for_each_enabled_port(priv, i) { 2299 phy_init(priv->rdev[i]->serdes); 2300 ndev = priv->rdev[i]->ndev; 2301 if (netif_running(ndev)) { 2302 rswitch_open(ndev); 2303 netif_device_attach(ndev); 2304 } 2305 } 2306 2307 return 0; 2308 } 2309 2310 static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, 2311 renesas_eth_sw_resume); 2312 2313 static struct platform_driver renesas_eth_sw_driver_platform = { 2314 .probe = renesas_eth_sw_probe, 2315 .remove = renesas_eth_sw_remove, 2316 .driver = { 2317 .name = "renesas_eth_sw", 2318 .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), 2319 .of_match_table = renesas_eth_sw_of_table, 2320 } 2321 }; 2322 module_platform_driver(renesas_eth_sw_driver_platform); 2323 MODULE_AUTHOR("Yoshihiro Shimoda"); 2324 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver"); 2325 MODULE_LICENSE("GPL"); 2326