// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}
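
/* Note (added commentary, not from the original sources): the COMA block gates
 * each agent's clock individually.  RCEC carries the global RCE enable plus
 * one bit per agent, while writing an agent's bit to RCDC gates that agent
 * again.  The helpers below wrap this split, so that for a hypothetical agent
 * index 3 the two operations amount to:
 *
 *	rswitch_agent_clock_ctrl(coma_addr, 3, 1);	// RCEC |= RCEC_RCE | BIT(3)
 *	rswitch_agent_clock_ctrl(coma_addr, 3, 0);	// RCDC |= BIT(3)
 */
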
static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
					   unsigned int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
				     int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
	unsigned int i;

	/* Start with empty configuration */
	for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
		/* Disable all port features */
		iowrite32(0, priv->addr + FWPC0(i));
		/* Disallow L3 forwarding and direct descriptor forwarding */
		iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
			  priv->addr + FWPC1(i));
		/* Disallow L2 forwarding */
		iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
			  priv->addr + FWPC2(i));
		/* Disallow port based forwarding */
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	/* For enabled ETHA ports, setup port based forwarding */
	rswitch_for_each_enabled_port(priv, i) {
		/* Port based forwarding from port i to GWCA port */
		rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
			       FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
		/* Within GWCA port, forward to Rx queue for port i */
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
	}

	/* For GWCA port, allow direct descriptor forwarding */
	rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}
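
/* Note (added commentary, not from the original sources): rswitch_fwd_init()
 * above first blocks every forwarding path for all agents and then re-enables
 * only port-based forwarding from each enabled ETHA port towards the GWCA.
 * As a made-up example, with the GWCA at agent index 3 and port 0 using Rx
 * queue index 1, the per-port setup is roughly:
 *
 *	FWPBFC(0)                <- FIELD_PREP(FWPBFC_PBDV, BIT(3));  // destination agent: GWCA
 *	FWPBFCSDC(GWCA_INDEX, 0) <- 1;                                // destination Rx queue
 *
 * FWPC1_DDE is then set only for the GWCA agent, so that it alone may use
 * direct descriptor forwarding.
 */
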
/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	unsigned int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv,
				    unsigned int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv,
				 unsigned int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
					     bool cur, unsigned int num)
{
	unsigned int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
					   unsigned int start_index,
					   unsigned int num)
{
	unsigned int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->rx_bufs[index])
			continue;
		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
		if (!gq->rx_bufs[index])
			goto err;
	}

	return 0;

err:
	for (; i-- > 0; ) {
		index = (i + start_index) % gq->ring_size;
		skb_free_frag(gq->rx_bufs[index]);
		gq->rx_bufs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	unsigned int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			skb_free_frag(gq->rx_bufs[i]);
		kfree(gq->rx_bufs);
		gq->rx_bufs = NULL;
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
		kfree(gq->skbs);
		gq->skbs = NULL;
		kfree(gq->unmap_addrs);
		gq->unmap_addrs = NULL;
	}
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, unsigned int ring_size)
{
	unsigned int i, bit;

	gq->dir_tx =
dir_tx; 320 gq->ring_size = ring_size; 321 gq->ndev = ndev; 322 323 if (!dir_tx) { 324 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL); 325 if (!gq->rx_bufs) 326 return -ENOMEM; 327 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) 328 goto out; 329 330 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, 331 sizeof(struct rswitch_ext_ts_desc) * 332 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 333 } else { 334 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); 335 if (!gq->skbs) 336 return -ENOMEM; 337 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL); 338 if (!gq->unmap_addrs) 339 goto out; 340 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, 341 sizeof(struct rswitch_ext_desc) * 342 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 343 } 344 345 if (!gq->rx_ring && !gq->tx_ring) 346 goto out; 347 348 i = gq->index / 32; 349 bit = BIT(gq->index % 32); 350 if (dir_tx) 351 priv->gwca.tx_irq_bits[i] |= bit; 352 else 353 priv->gwca.rx_irq_bits[i] |= bit; 354 355 return 0; 356 357 out: 358 rswitch_gwca_queue_free(ndev, gq); 359 360 return -ENOMEM; 361 } 362 363 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) 364 { 365 desc->dptrl = cpu_to_le32(lower_32_bits(addr)); 366 desc->dptrh = upper_32_bits(addr) & 0xff; 367 } 368 369 static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc) 370 { 371 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; 372 } 373 374 static int rswitch_gwca_queue_format(struct net_device *ndev, 375 struct rswitch_private *priv, 376 struct rswitch_gwca_queue *gq) 377 { 378 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; 379 struct rswitch_ext_desc *desc; 380 struct rswitch_desc *linkfix; 381 dma_addr_t dma_addr; 382 unsigned int i; 383 384 memset(gq->tx_ring, 0, ring_size); 385 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { 386 if (!gq->dir_tx) { 387 dma_addr = dma_map_single(ndev->dev.parent, 388 gq->rx_bufs[i] + RSWITCH_HEADROOM, 389 RSWITCH_MAP_BUF_SIZE, 390 DMA_FROM_DEVICE); 391 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 392 goto err; 393 394 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); 395 rswitch_desc_set_dptr(&desc->desc, dma_addr); 396 desc->desc.die_dt = DT_FEMPTY | DIE; 397 } else { 398 desc->desc.die_dt = DT_EEMPTY | DIE; 399 } 400 } 401 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 402 desc->desc.die_dt = DT_LINKFIX; 403 404 linkfix = &priv->gwca.linkfix_table[gq->index]; 405 linkfix->die_dt = DT_LINKFIX; 406 rswitch_desc_set_dptr(linkfix, gq->ring_dma); 407 408 iowrite32(GWDCC_BALR | (gq->dir_tx ? 
GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, 409 priv->addr + GWDCC_OFFS(gq->index)); 410 411 return 0; 412 413 err: 414 if (!gq->dir_tx) { 415 for (desc = gq->tx_ring; i-- > 0; desc++) { 416 dma_addr = rswitch_desc_get_dptr(&desc->desc); 417 dma_unmap_single(ndev->dev.parent, dma_addr, 418 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); 419 } 420 } 421 422 return -ENOMEM; 423 } 424 425 static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv, 426 unsigned int start_index, 427 unsigned int num) 428 { 429 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 430 struct rswitch_ts_desc *desc; 431 unsigned int i, index; 432 433 for (i = 0; i < num; i++) { 434 index = (i + start_index) % gq->ring_size; 435 desc = &gq->ts_ring[index]; 436 desc->desc.die_dt = DT_FEMPTY_ND | DIE; 437 } 438 } 439 440 static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev, 441 struct rswitch_gwca_queue *gq, 442 unsigned int start_index, 443 unsigned int num) 444 { 445 struct rswitch_device *rdev = netdev_priv(ndev); 446 struct rswitch_ext_ts_desc *desc; 447 unsigned int i, index; 448 dma_addr_t dma_addr; 449 450 for (i = 0; i < num; i++) { 451 index = (i + start_index) % gq->ring_size; 452 desc = &gq->rx_ring[index]; 453 if (!gq->dir_tx) { 454 dma_addr = dma_map_single(ndev->dev.parent, 455 gq->rx_bufs[index] + RSWITCH_HEADROOM, 456 RSWITCH_MAP_BUF_SIZE, 457 DMA_FROM_DEVICE); 458 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 459 goto err; 460 461 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); 462 rswitch_desc_set_dptr(&desc->desc, dma_addr); 463 dma_wmb(); 464 desc->desc.die_dt = DT_FEMPTY | DIE; 465 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); 466 } else { 467 desc->desc.die_dt = DT_EEMPTY | DIE; 468 } 469 } 470 471 return 0; 472 473 err: 474 if (!gq->dir_tx) { 475 for (; i-- > 0; ) { 476 index = (i + start_index) % gq->ring_size; 477 desc = &gq->rx_ring[index]; 478 dma_addr = rswitch_desc_get_dptr(&desc->desc); 479 dma_unmap_single(ndev->dev.parent, dma_addr, 480 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE); 481 } 482 } 483 484 return -ENOMEM; 485 } 486 487 static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev, 488 struct rswitch_private *priv, 489 struct rswitch_gwca_queue *gq) 490 { 491 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; 492 struct rswitch_ext_ts_desc *desc; 493 struct rswitch_desc *linkfix; 494 int err; 495 496 memset(gq->rx_ring, 0, ring_size); 497 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); 498 if (err < 0) 499 return err; 500 501 desc = &gq->rx_ring[gq->ring_size]; /* Last */ 502 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 503 desc->desc.die_dt = DT_LINKFIX; 504 505 linkfix = &priv->gwca.linkfix_table[gq->index]; 506 linkfix->die_dt = DT_LINKFIX; 507 rswitch_desc_set_dptr(linkfix, gq->ring_dma); 508 509 iowrite32(GWDCC_BALR | (gq->dir_tx ? 
GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | 510 GWDCC_ETS | GWDCC_EDE, 511 priv->addr + GWDCC_OFFS(gq->index)); 512 513 return 0; 514 } 515 516 static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv) 517 { 518 unsigned int i, num_queues = priv->gwca.num_queues; 519 struct rswitch_gwca *gwca = &priv->gwca; 520 struct device *dev = &priv->pdev->dev; 521 522 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; 523 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size, 524 &gwca->linkfix_table_dma, GFP_KERNEL); 525 if (!gwca->linkfix_table) 526 return -ENOMEM; 527 for (i = 0; i < num_queues; i++) 528 gwca->linkfix_table[i].die_dt = DT_EOS; 529 530 return 0; 531 } 532 533 static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) 534 { 535 struct rswitch_gwca *gwca = &priv->gwca; 536 537 if (gwca->linkfix_table) 538 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size, 539 gwca->linkfix_table, gwca->linkfix_table_dma); 540 gwca->linkfix_table = NULL; 541 } 542 543 static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) 544 { 545 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 546 struct rswitch_ts_desc *desc; 547 548 gq->ring_size = TS_RING_SIZE; 549 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, 550 sizeof(struct rswitch_ts_desc) * 551 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); 552 553 if (!gq->ts_ring) 554 return -ENOMEM; 555 556 rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); 557 desc = &gq->ts_ring[gq->ring_size]; 558 desc->desc.die_dt = DT_LINKFIX; 559 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); 560 INIT_LIST_HEAD(&priv->gwca.ts_info_list); 561 562 return 0; 563 } 564 565 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) 566 { 567 struct rswitch_gwca_queue *gq; 568 unsigned int index; 569 570 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues); 571 if (index >= priv->gwca.num_queues) 572 return NULL; 573 set_bit(index, priv->gwca.used); 574 gq = &priv->gwca.queues[index]; 575 memset(gq, 0, sizeof(*gq)); 576 gq->index = index; 577 578 return gq; 579 } 580 581 static void rswitch_gwca_put(struct rswitch_private *priv, 582 struct rswitch_gwca_queue *gq) 583 { 584 clear_bit(gq->index, priv->gwca.used); 585 } 586 587 static int rswitch_txdmac_alloc(struct net_device *ndev) 588 { 589 struct rswitch_device *rdev = netdev_priv(ndev); 590 struct rswitch_private *priv = rdev->priv; 591 int err; 592 593 rdev->tx_queue = rswitch_gwca_get(priv); 594 if (!rdev->tx_queue) 595 return -EBUSY; 596 597 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE); 598 if (err < 0) { 599 rswitch_gwca_put(priv, rdev->tx_queue); 600 return err; 601 } 602 603 return 0; 604 } 605 606 static void rswitch_txdmac_free(struct net_device *ndev) 607 { 608 struct rswitch_device *rdev = netdev_priv(ndev); 609 610 rswitch_gwca_queue_free(ndev, rdev->tx_queue); 611 rswitch_gwca_put(rdev->priv, rdev->tx_queue); 612 } 613 614 static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index) 615 { 616 struct rswitch_device *rdev = priv->rdev[index]; 617 618 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue); 619 } 620 621 static int rswitch_rxdmac_alloc(struct net_device *ndev) 622 { 623 struct rswitch_device *rdev = netdev_priv(ndev); 624 struct rswitch_private *priv = rdev->priv; 625 int err; 626 627 rdev->rx_queue = rswitch_gwca_get(priv); 628 if (!rdev->rx_queue) 629 return -EBUSY; 630 631 err = rswitch_gwca_queue_alloc(ndev, priv, 
rdev->rx_queue, false, RX_RING_SIZE); 632 if (err < 0) { 633 rswitch_gwca_put(priv, rdev->rx_queue); 634 return err; 635 } 636 637 return 0; 638 } 639 640 static void rswitch_rxdmac_free(struct net_device *ndev) 641 { 642 struct rswitch_device *rdev = netdev_priv(ndev); 643 644 rswitch_gwca_queue_free(ndev, rdev->rx_queue); 645 rswitch_gwca_put(rdev->priv, rdev->rx_queue); 646 } 647 648 static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index) 649 { 650 struct rswitch_device *rdev = priv->rdev[index]; 651 struct net_device *ndev = rdev->ndev; 652 653 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue); 654 } 655 656 static int rswitch_gwca_hw_init(struct rswitch_private *priv) 657 { 658 unsigned int i; 659 int err; 660 661 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 662 if (err < 0) 663 return err; 664 err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG); 665 if (err < 0) 666 return err; 667 668 err = rswitch_gwca_mcast_table_reset(priv); 669 if (err < 0) 670 return err; 671 err = rswitch_gwca_axi_ram_reset(priv); 672 if (err < 0) 673 return err; 674 675 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); 676 iowrite32(0, priv->addr + GWTTFC); 677 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); 678 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); 679 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); 680 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); 681 iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f), 682 priv->addr + GWMDNC); 683 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); 684 685 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); 686 687 for (i = 0; i < RSWITCH_NUM_PORTS; i++) { 688 err = rswitch_rxdmac_init(priv, i); 689 if (err < 0) 690 return err; 691 err = rswitch_txdmac_init(priv, i); 692 if (err < 0) 693 return err; 694 } 695 696 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 697 if (err < 0) 698 return err; 699 return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION); 700 } 701 702 static int rswitch_gwca_hw_deinit(struct rswitch_private *priv) 703 { 704 int err; 705 706 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 707 if (err < 0) 708 return err; 709 err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET); 710 if (err < 0) 711 return err; 712 713 return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE); 714 } 715 716 static int rswitch_gwca_halt(struct rswitch_private *priv) 717 { 718 int err; 719 720 priv->gwca_halt = true; 721 err = rswitch_gwca_hw_deinit(priv); 722 dev_err(&priv->pdev->dev, "halted (%d)\n", err); 723 724 return err; 725 } 726 727 static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev, 728 struct rswitch_gwca_queue *gq, 729 struct rswitch_ext_ts_desc *desc) 730 { 731 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc); 732 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; 733 u8 die_dt = desc->desc.die_dt & DT_MASK; 734 struct sk_buff *skb = NULL; 735 736 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE, 737 DMA_FROM_DEVICE); 738 739 /* The RX descriptor order will be one of the following: 740 * - FSINGLE 741 * - FSTART -> FEND 742 * - FSTART -> FMID -> FEND 743 */ 744 745 /* Check whether the descriptor is unexpected order */ 746 switch (die_dt) { 747 case DT_FSTART: 748 case DT_FSINGLE: 749 if (gq->skb_fstart) { 750 dev_kfree_skb_any(gq->skb_fstart); 751 gq->skb_fstart = NULL; 752 
ndev->stats.rx_dropped++; 753 } 754 break; 755 case DT_FMID: 756 case DT_FEND: 757 if (!gq->skb_fstart) { 758 ndev->stats.rx_dropped++; 759 return NULL; 760 } 761 break; 762 default: 763 break; 764 } 765 766 /* Handle the descriptor */ 767 switch (die_dt) { 768 case DT_FSTART: 769 case DT_FSINGLE: 770 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE); 771 if (skb) { 772 skb_reserve(skb, RSWITCH_HEADROOM); 773 skb_put(skb, pkt_len); 774 gq->pkt_len = pkt_len; 775 if (die_dt == DT_FSTART) { 776 gq->skb_fstart = skb; 777 skb = NULL; 778 } 779 } 780 break; 781 case DT_FMID: 782 case DT_FEND: 783 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags, 784 virt_to_page(gq->rx_bufs[gq->cur]), 785 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM, 786 pkt_len, RSWITCH_BUF_SIZE); 787 if (die_dt == DT_FEND) { 788 skb = gq->skb_fstart; 789 gq->skb_fstart = NULL; 790 } 791 gq->pkt_len += pkt_len; 792 break; 793 default: 794 netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt); 795 break; 796 } 797 798 return skb; 799 } 800 801 static bool rswitch_rx(struct net_device *ndev, int *quota) 802 { 803 struct rswitch_device *rdev = netdev_priv(ndev); 804 struct rswitch_gwca_queue *gq = rdev->rx_queue; 805 struct rswitch_ext_ts_desc *desc; 806 int limit, boguscnt, ret; 807 struct sk_buff *skb; 808 unsigned int num; 809 u32 get_ts; 810 811 if (*quota <= 0) 812 return true; 813 814 boguscnt = min_t(int, gq->ring_size, *quota); 815 limit = boguscnt; 816 817 desc = &gq->rx_ring[gq->cur]; 818 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { 819 dma_rmb(); 820 skb = rswitch_rx_handle_desc(ndev, gq, desc); 821 if (!skb) 822 goto out; 823 824 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; 825 if (get_ts) { 826 struct skb_shared_hwtstamps *shhwtstamps; 827 struct timespec64 ts; 828 829 shhwtstamps = skb_hwtstamps(skb); 830 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 831 ts.tv_sec = __le32_to_cpu(desc->ts_sec); 832 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); 833 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); 834 } 835 skb->protocol = eth_type_trans(skb, ndev); 836 napi_gro_receive(&rdev->napi, skb); 837 rdev->ndev->stats.rx_packets++; 838 rdev->ndev->stats.rx_bytes += gq->pkt_len; 839 840 out: 841 gq->rx_bufs[gq->cur] = NULL; 842 gq->cur = rswitch_next_queue_index(gq, true, 1); 843 desc = &gq->rx_ring[gq->cur]; 844 845 if (--boguscnt <= 0) 846 break; 847 } 848 849 num = rswitch_get_num_cur_queues(gq); 850 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num); 851 if (ret < 0) 852 goto err; 853 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); 854 if (ret < 0) 855 goto err; 856 gq->dirty = rswitch_next_queue_index(gq, false, num); 857 858 *quota -= limit - boguscnt; 859 860 return boguscnt <= 0; 861 862 err: 863 rswitch_gwca_halt(rdev->priv); 864 865 return 0; 866 } 867 868 static void rswitch_tx_free(struct net_device *ndev) 869 { 870 struct rswitch_device *rdev = netdev_priv(ndev); 871 struct rswitch_gwca_queue *gq = rdev->tx_queue; 872 struct rswitch_ext_desc *desc; 873 struct sk_buff *skb; 874 875 desc = &gq->tx_ring[gq->dirty]; 876 while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) { 877 dma_rmb(); 878 879 skb = gq->skbs[gq->dirty]; 880 if (skb) { 881 rdev->ndev->stats.tx_packets++; 882 rdev->ndev->stats.tx_bytes += skb->len; 883 dma_unmap_single(ndev->dev.parent, 884 gq->unmap_addrs[gq->dirty], 885 skb->len, DMA_TO_DEVICE); 886 dev_kfree_skb_any(gq->skbs[gq->dirty]); 887 
gq->skbs[gq->dirty] = NULL; 888 } 889 890 desc->desc.die_dt = DT_EEMPTY; 891 gq->dirty = rswitch_next_queue_index(gq, false, 1); 892 desc = &gq->tx_ring[gq->dirty]; 893 } 894 } 895 896 static int rswitch_poll(struct napi_struct *napi, int budget) 897 { 898 struct net_device *ndev = napi->dev; 899 struct rswitch_private *priv; 900 struct rswitch_device *rdev; 901 unsigned long flags; 902 int quota = budget; 903 904 rdev = netdev_priv(ndev); 905 priv = rdev->priv; 906 907 retry: 908 rswitch_tx_free(ndev); 909 910 if (rswitch_rx(ndev, "a)) 911 goto out; 912 else if (rdev->priv->gwca_halt) 913 goto err; 914 else if (rswitch_is_queue_rxed(rdev->rx_queue)) 915 goto retry; 916 917 netif_wake_subqueue(ndev, 0); 918 919 if (napi_complete_done(napi, budget - quota)) { 920 spin_lock_irqsave(&priv->lock, flags); 921 if (test_bit(rdev->port, priv->opened_ports)) { 922 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); 923 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); 924 } 925 spin_unlock_irqrestore(&priv->lock, flags); 926 } 927 928 out: 929 return budget - quota; 930 931 err: 932 napi_complete(napi); 933 934 return 0; 935 } 936 937 static void rswitch_queue_interrupt(struct net_device *ndev) 938 { 939 struct rswitch_device *rdev = netdev_priv(ndev); 940 941 if (napi_schedule_prep(&rdev->napi)) { 942 spin_lock(&rdev->priv->lock); 943 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); 944 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); 945 spin_unlock(&rdev->priv->lock); 946 __napi_schedule(&rdev->napi); 947 } 948 } 949 950 static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis) 951 { 952 struct rswitch_gwca_queue *gq; 953 unsigned int i, index, bit; 954 955 for (i = 0; i < priv->gwca.num_queues; i++) { 956 gq = &priv->gwca.queues[i]; 957 index = gq->index / 32; 958 bit = BIT(gq->index % 32); 959 if (!(dis[index] & bit)) 960 continue; 961 962 rswitch_ack_data_irq(priv, gq->index); 963 rswitch_queue_interrupt(gq->ndev); 964 } 965 966 return IRQ_HANDLED; 967 } 968 969 static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id) 970 { 971 struct rswitch_private *priv = dev_id; 972 u32 dis[RSWITCH_NUM_IRQ_REGS]; 973 irqreturn_t ret = IRQ_NONE; 974 975 rswitch_get_data_irq_status(priv, dis); 976 977 if (rswitch_is_any_data_irq(priv, dis, true) || 978 rswitch_is_any_data_irq(priv, dis, false)) 979 ret = rswitch_data_irq(priv, dis); 980 981 return ret; 982 } 983 984 static int rswitch_gwca_request_irqs(struct rswitch_private *priv) 985 { 986 char *resource_name, *irq_name; 987 int i, ret, irq; 988 989 for (i = 0; i < GWCA_NUM_IRQS; i++) { 990 resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i); 991 if (!resource_name) 992 return -ENOMEM; 993 994 irq = platform_get_irq_byname(priv->pdev, resource_name); 995 kfree(resource_name); 996 if (irq < 0) 997 return irq; 998 999 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, 1000 GWCA_IRQ_NAME, i); 1001 if (!irq_name) 1002 return -ENOMEM; 1003 1004 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq, 1005 0, irq_name, priv); 1006 if (ret < 0) 1007 return ret; 1008 } 1009 1010 return 0; 1011 } 1012 1013 static void rswitch_ts(struct rswitch_private *priv) 1014 { 1015 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; 1016 struct rswitch_gwca_ts_info *ts_info, *ts_info2; 1017 struct skb_shared_hwtstamps shhwtstamps; 1018 struct rswitch_ts_desc *desc; 1019 struct timespec64 ts; 1020 unsigned int num; 1021 u32 tag, port; 1022 1023 desc = &gq->ts_ring[gq->cur]; 
1024 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { 1025 dma_rmb(); 1026 1027 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); 1028 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); 1029 1030 list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) { 1031 if (!(ts_info->port == port && ts_info->tag == tag)) 1032 continue; 1033 1034 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 1035 ts.tv_sec = __le32_to_cpu(desc->ts_sec); 1036 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); 1037 shhwtstamps.hwtstamp = timespec64_to_ktime(ts); 1038 skb_tstamp_tx(ts_info->skb, &shhwtstamps); 1039 dev_consume_skb_irq(ts_info->skb); 1040 list_del(&ts_info->list); 1041 kfree(ts_info); 1042 break; 1043 } 1044 1045 gq->cur = rswitch_next_queue_index(gq, true, 1); 1046 desc = &gq->ts_ring[gq->cur]; 1047 } 1048 1049 num = rswitch_get_num_cur_queues(gq); 1050 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); 1051 gq->dirty = rswitch_next_queue_index(gq, false, num); 1052 } 1053 1054 static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id) 1055 { 1056 struct rswitch_private *priv = dev_id; 1057 1058 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { 1059 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); 1060 rswitch_ts(priv); 1061 1062 return IRQ_HANDLED; 1063 } 1064 1065 return IRQ_NONE; 1066 } 1067 1068 static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv) 1069 { 1070 int irq; 1071 1072 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); 1073 if (irq < 0) 1074 return irq; 1075 1076 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq, 1077 0, GWCA_TS_IRQ_NAME, priv); 1078 } 1079 1080 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */ 1081 static int rswitch_etha_change_mode(struct rswitch_etha *etha, 1082 enum rswitch_etha_mode mode) 1083 { 1084 int ret; 1085 1086 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index)) 1087 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1); 1088 1089 iowrite32(mode, etha->addr + EAMC); 1090 1091 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode); 1092 1093 if (mode == EAMC_OPC_DISABLE) 1094 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0); 1095 1096 return ret; 1097 } 1098 1099 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha) 1100 { 1101 u32 mrmac0 = ioread32(etha->addr + MRMAC0); 1102 u32 mrmac1 = ioread32(etha->addr + MRMAC1); 1103 u8 *mac = ða->mac_addr[0]; 1104 1105 mac[0] = (mrmac0 >> 8) & 0xFF; 1106 mac[1] = (mrmac0 >> 0) & 0xFF; 1107 mac[2] = (mrmac1 >> 24) & 0xFF; 1108 mac[3] = (mrmac1 >> 16) & 0xFF; 1109 mac[4] = (mrmac1 >> 8) & 0xFF; 1110 mac[5] = (mrmac1 >> 0) & 0xFF; 1111 } 1112 1113 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac) 1114 { 1115 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); 1116 iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], 1117 etha->addr + MRMAC1); 1118 } 1119 1120 static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha) 1121 { 1122 iowrite32(MLVC_PLV, etha->addr + MLVC); 1123 1124 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0); 1125 } 1126 1127 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) 1128 { 1129 u32 pis, lsc; 1130 1131 rswitch_etha_write_mac_address(etha, mac); 1132 1133 switch (etha->phy_interface) { 1134 case PHY_INTERFACE_MODE_SGMII: 1135 pis = MPIC_PIS_GMII; 1136 break; 1137 case PHY_INTERFACE_MODE_USXGMII: 1138 case 
PHY_INTERFACE_MODE_5GBASER: 1139 pis = MPIC_PIS_XGMII; 1140 break; 1141 default: 1142 pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC)); 1143 break; 1144 } 1145 1146 switch (etha->speed) { 1147 case 100: 1148 lsc = MPIC_LSC_100M; 1149 break; 1150 case 1000: 1151 lsc = MPIC_LSC_1G; 1152 break; 1153 case 2500: 1154 lsc = MPIC_LSC_2_5G; 1155 break; 1156 default: 1157 lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC)); 1158 break; 1159 } 1160 1161 rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC, 1162 FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc)); 1163 } 1164 1165 static void rswitch_etha_enable_mii(struct rswitch_etha *etha) 1166 { 1167 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT, 1168 FIELD_PREP(MPIC_PSMCS, etha->psmcs) | 1169 FIELD_PREP(MPIC_PSMHT, 0x06)); 1170 } 1171 1172 static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac) 1173 { 1174 int err; 1175 1176 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); 1177 if (err < 0) 1178 return err; 1179 err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG); 1180 if (err < 0) 1181 return err; 1182 1183 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); 1184 rswitch_rmac_setting(etha, mac); 1185 rswitch_etha_enable_mii(etha); 1186 1187 err = rswitch_etha_wait_link_verification(etha); 1188 if (err < 0) 1189 return err; 1190 1191 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE); 1192 if (err < 0) 1193 return err; 1194 1195 return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION); 1196 } 1197 1198 static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read, 1199 unsigned int mmf, unsigned int pda, 1200 unsigned int pra, unsigned int pop, 1201 unsigned int prd) 1202 { 1203 u32 val; 1204 int ret; 1205 1206 val = MPSM_PSME | 1207 FIELD_PREP(MPSM_MFF, mmf) | 1208 FIELD_PREP(MPSM_PDA, pda) | 1209 FIELD_PREP(MPSM_PRA, pra) | 1210 FIELD_PREP(MPSM_POP, pop) | 1211 FIELD_PREP(MPSM_PRD, prd); 1212 iowrite32(val, etha->addr + MPSM); 1213 1214 ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0); 1215 if (ret) 1216 return ret; 1217 1218 if (read) { 1219 val = ioread32(etha->addr + MPSM); 1220 ret = FIELD_GET(MPSM_PRD, val); 1221 } 1222 1223 return ret; 1224 } 1225 1226 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, 1227 int regad) 1228 { 1229 struct rswitch_etha *etha = bus->priv; 1230 int ret; 1231 1232 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1233 MPSM_POP_ADDRESS, regad); 1234 if (ret) 1235 return ret; 1236 1237 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad, 1238 MPSM_POP_READ_C45, 0); 1239 } 1240 1241 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, 1242 int regad, u16 val) 1243 { 1244 struct rswitch_etha *etha = bus->priv; 1245 int ret; 1246 1247 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1248 MPSM_POP_ADDRESS, regad); 1249 if (ret) 1250 return ret; 1251 1252 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad, 1253 MPSM_POP_WRITE, val); 1254 } 1255 1256 static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad) 1257 { 1258 struct rswitch_etha *etha = bus->priv; 1259 1260 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad, 1261 MPSM_POP_READ_C22, 0); 1262 } 1263 1264 static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad, 1265 int regad, u16 val) 1266 { 1267 struct rswitch_etha *etha = bus->priv; 1268 1269 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad, 1270 
MPSM_POP_WRITE, val); 1271 } 1272 1273 /* Call of_node_put(port) after done */ 1274 static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) 1275 { 1276 struct device_node *ports, *port; 1277 int err = 0; 1278 u32 index; 1279 1280 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node, 1281 "ethernet-ports"); 1282 if (!ports) 1283 return NULL; 1284 1285 for_each_child_of_node(ports, port) { 1286 err = of_property_read_u32(port, "reg", &index); 1287 if (err < 0) { 1288 port = NULL; 1289 goto out; 1290 } 1291 if (index == rdev->etha->index) { 1292 if (!of_device_is_available(port)) 1293 port = NULL; 1294 break; 1295 } 1296 } 1297 1298 out: 1299 of_node_put(ports); 1300 1301 return port; 1302 } 1303 1304 static int rswitch_etha_get_params(struct rswitch_device *rdev) 1305 { 1306 u32 max_speed; 1307 int err; 1308 1309 if (!rdev->np_port) 1310 return 0; /* ignored */ 1311 1312 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface); 1313 if (err) 1314 return err; 1315 1316 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed); 1317 if (!err) { 1318 rdev->etha->speed = max_speed; 1319 return 0; 1320 } 1321 1322 /* if no "max-speed" property, let's use default speed */ 1323 switch (rdev->etha->phy_interface) { 1324 case PHY_INTERFACE_MODE_MII: 1325 rdev->etha->speed = SPEED_100; 1326 break; 1327 case PHY_INTERFACE_MODE_SGMII: 1328 rdev->etha->speed = SPEED_1000; 1329 break; 1330 case PHY_INTERFACE_MODE_USXGMII: 1331 rdev->etha->speed = SPEED_2500; 1332 break; 1333 default: 1334 return -EINVAL; 1335 } 1336 1337 return 0; 1338 } 1339 1340 static int rswitch_mii_register(struct rswitch_device *rdev) 1341 { 1342 struct device_node *mdio_np; 1343 struct mii_bus *mii_bus; 1344 int err; 1345 1346 mii_bus = mdiobus_alloc(); 1347 if (!mii_bus) 1348 return -ENOMEM; 1349 1350 mii_bus->name = "rswitch_mii"; 1351 sprintf(mii_bus->id, "etha%d", rdev->etha->index); 1352 mii_bus->priv = rdev->etha; 1353 mii_bus->read_c45 = rswitch_etha_mii_read_c45; 1354 mii_bus->write_c45 = rswitch_etha_mii_write_c45; 1355 mii_bus->read = rswitch_etha_mii_read_c22; 1356 mii_bus->write = rswitch_etha_mii_write_c22; 1357 mii_bus->parent = &rdev->priv->pdev->dev; 1358 1359 mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); 1360 err = of_mdiobus_register(mii_bus, mdio_np); 1361 if (err < 0) { 1362 mdiobus_free(mii_bus); 1363 goto out; 1364 } 1365 1366 rdev->etha->mii = mii_bus; 1367 1368 out: 1369 of_node_put(mdio_np); 1370 1371 return err; 1372 } 1373 1374 static void rswitch_mii_unregister(struct rswitch_device *rdev) 1375 { 1376 if (rdev->etha->mii) { 1377 mdiobus_unregister(rdev->etha->mii); 1378 mdiobus_free(rdev->etha->mii); 1379 rdev->etha->mii = NULL; 1380 } 1381 } 1382 1383 static void rswitch_adjust_link(struct net_device *ndev) 1384 { 1385 struct rswitch_device *rdev = netdev_priv(ndev); 1386 struct phy_device *phydev = ndev->phydev; 1387 1388 if (phydev->link != rdev->etha->link) { 1389 phy_print_status(phydev); 1390 if (phydev->link) 1391 phy_power_on(rdev->serdes); 1392 else if (rdev->serdes->power_count) 1393 phy_power_off(rdev->serdes); 1394 1395 rdev->etha->link = phydev->link; 1396 1397 if (!rdev->priv->etha_no_runtime_change && 1398 phydev->speed != rdev->etha->speed) { 1399 rdev->etha->speed = phydev->speed; 1400 1401 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); 1402 phy_set_speed(rdev->serdes, rdev->etha->speed); 1403 } 1404 } 1405 } 1406 1407 static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev, 1408 struct phy_device *phydev) 1409 
{ 1410 if (!rdev->priv->etha_no_runtime_change) 1411 return; 1412 1413 switch (rdev->etha->speed) { 1414 case SPEED_2500: 1415 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1416 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); 1417 break; 1418 case SPEED_1000: 1419 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); 1420 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); 1421 break; 1422 case SPEED_100: 1423 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT); 1424 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); 1425 break; 1426 default: 1427 break; 1428 } 1429 1430 phy_set_max_speed(phydev, rdev->etha->speed); 1431 } 1432 1433 static int rswitch_phy_device_init(struct rswitch_device *rdev) 1434 { 1435 struct phy_device *phydev; 1436 struct device_node *phy; 1437 int err = -ENOENT; 1438 1439 if (!rdev->np_port) 1440 return -ENODEV; 1441 1442 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0); 1443 if (!phy) 1444 return -ENODEV; 1445 1446 /* Set phydev->host_interfaces before calling of_phy_connect() to 1447 * configure the PHY with the information of host_interfaces. 1448 */ 1449 phydev = of_phy_find_device(phy); 1450 if (!phydev) 1451 goto out; 1452 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); 1453 phydev->mac_managed_pm = true; 1454 1455 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, 1456 rdev->etha->phy_interface); 1457 if (!phydev) 1458 goto out; 1459 1460 phy_set_max_speed(phydev, SPEED_2500); 1461 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); 1462 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); 1463 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); 1464 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 1465 rswitch_phy_remove_link_mode(rdev, phydev); 1466 1467 phy_attached_info(phydev); 1468 1469 err = 0; 1470 out: 1471 of_node_put(phy); 1472 1473 return err; 1474 } 1475 1476 static void rswitch_phy_device_deinit(struct rswitch_device *rdev) 1477 { 1478 if (rdev->ndev->phydev) 1479 phy_disconnect(rdev->ndev->phydev); 1480 } 1481 1482 static int rswitch_serdes_set_params(struct rswitch_device *rdev) 1483 { 1484 int err; 1485 1486 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET, 1487 rdev->etha->phy_interface); 1488 if (err < 0) 1489 return err; 1490 1491 return phy_set_speed(rdev->serdes, rdev->etha->speed); 1492 } 1493 1494 static int rswitch_ether_port_init_one(struct rswitch_device *rdev) 1495 { 1496 int err; 1497 1498 if (!rdev->etha->operated) { 1499 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); 1500 if (err < 0) 1501 return err; 1502 if (rdev->priv->etha_no_runtime_change) 1503 rdev->etha->operated = true; 1504 } 1505 1506 err = rswitch_mii_register(rdev); 1507 if (err < 0) 1508 return err; 1509 1510 err = rswitch_phy_device_init(rdev); 1511 if (err < 0) 1512 goto err_phy_device_init; 1513 1514 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL); 1515 if (IS_ERR(rdev->serdes)) { 1516 err = PTR_ERR(rdev->serdes); 1517 goto err_serdes_phy_get; 1518 } 1519 1520 err = rswitch_serdes_set_params(rdev); 1521 if (err < 0) 1522 goto err_serdes_set_params; 1523 1524 return 0; 1525 1526 err_serdes_set_params: 1527 err_serdes_phy_get: 1528 rswitch_phy_device_deinit(rdev); 1529 1530 err_phy_device_init: 1531 rswitch_mii_unregister(rdev); 1532 1533 return err; 1534 } 1535 1536 static void rswitch_ether_port_deinit_one(struct 
rswitch_device *rdev) 1537 { 1538 rswitch_phy_device_deinit(rdev); 1539 rswitch_mii_unregister(rdev); 1540 } 1541 1542 static int rswitch_ether_port_init_all(struct rswitch_private *priv) 1543 { 1544 unsigned int i; 1545 int err; 1546 1547 rswitch_for_each_enabled_port(priv, i) { 1548 err = rswitch_ether_port_init_one(priv->rdev[i]); 1549 if (err) 1550 goto err_init_one; 1551 } 1552 1553 rswitch_for_each_enabled_port(priv, i) { 1554 err = phy_init(priv->rdev[i]->serdes); 1555 if (err) 1556 goto err_serdes; 1557 } 1558 1559 return 0; 1560 1561 err_serdes: 1562 rswitch_for_each_enabled_port_continue_reverse(priv, i) 1563 phy_exit(priv->rdev[i]->serdes); 1564 i = RSWITCH_NUM_PORTS; 1565 1566 err_init_one: 1567 rswitch_for_each_enabled_port_continue_reverse(priv, i) 1568 rswitch_ether_port_deinit_one(priv->rdev[i]); 1569 1570 return err; 1571 } 1572 1573 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) 1574 { 1575 unsigned int i; 1576 1577 rswitch_for_each_enabled_port(priv, i) { 1578 phy_exit(priv->rdev[i]->serdes); 1579 rswitch_ether_port_deinit_one(priv->rdev[i]); 1580 } 1581 } 1582 1583 static int rswitch_open(struct net_device *ndev) 1584 { 1585 struct rswitch_device *rdev = netdev_priv(ndev); 1586 unsigned long flags; 1587 1588 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) 1589 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); 1590 1591 napi_enable(&rdev->napi); 1592 1593 spin_lock_irqsave(&rdev->priv->lock, flags); 1594 bitmap_set(rdev->priv->opened_ports, rdev->port, 1); 1595 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); 1596 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); 1597 spin_unlock_irqrestore(&rdev->priv->lock, flags); 1598 1599 phy_start(ndev->phydev); 1600 1601 netif_start_queue(ndev); 1602 1603 return 0; 1604 }; 1605 1606 static int rswitch_stop(struct net_device *ndev) 1607 { 1608 struct rswitch_device *rdev = netdev_priv(ndev); 1609 struct rswitch_gwca_ts_info *ts_info, *ts_info2; 1610 unsigned long flags; 1611 1612 netif_tx_stop_all_queues(ndev); 1613 1614 phy_stop(ndev->phydev); 1615 1616 spin_lock_irqsave(&rdev->priv->lock, flags); 1617 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); 1618 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); 1619 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); 1620 spin_unlock_irqrestore(&rdev->priv->lock, flags); 1621 1622 napi_disable(&rdev->napi); 1623 1624 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) 1625 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); 1626 1627 list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) { 1628 if (ts_info->port != rdev->port) 1629 continue; 1630 dev_kfree_skb_irq(ts_info->skb); 1631 list_del(&ts_info->list); 1632 kfree(ts_info); 1633 } 1634 1635 return 0; 1636 }; 1637 1638 static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev, 1639 struct sk_buff *skb, 1640 struct rswitch_ext_desc *desc) 1641 { 1642 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | 1643 INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT); 1644 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 1645 struct rswitch_gwca_ts_info *ts_info; 1646 1647 ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC); 1648 if (!ts_info) 1649 return false; 1650 1651 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1652 rdev->ts_tag++; 1653 desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC); 1654 1655 ts_info->skb = skb_get(skb); 1656 ts_info->port = rdev->port; 1657 
ts_info->tag = rdev->ts_tag; 1658 list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list); 1659 1660 skb_tx_timestamp(skb); 1661 } 1662 1663 return true; 1664 } 1665 1666 static bool rswitch_ext_desc_set(struct rswitch_device *rdev, 1667 struct sk_buff *skb, 1668 struct rswitch_ext_desc *desc, 1669 dma_addr_t dma_addr, u16 len, u8 die_dt) 1670 { 1671 rswitch_desc_set_dptr(&desc->desc, dma_addr); 1672 desc->desc.info_ds = cpu_to_le16(len); 1673 if (!rswitch_ext_desc_set_info1(rdev, skb, desc)) 1674 return false; 1675 1676 dma_wmb(); 1677 1678 desc->desc.die_dt = die_dt; 1679 1680 return true; 1681 } 1682 1683 static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index) 1684 { 1685 if (nr_desc == 1) 1686 return DT_FSINGLE | DIE; 1687 if (index == 0) 1688 return DT_FSTART; 1689 if (nr_desc - 1 == index) 1690 return DT_FEND | DIE; 1691 return DT_FMID; 1692 } 1693 1694 static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len) 1695 { 1696 switch (die_dt & DT_MASK) { 1697 case DT_FSINGLE: 1698 case DT_FEND: 1699 return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE; 1700 case DT_FSTART: 1701 case DT_FMID: 1702 return RSWITCH_DESC_BUF_SIZE; 1703 default: 1704 return 0; 1705 } 1706 } 1707 1708 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1709 { 1710 struct rswitch_device *rdev = netdev_priv(ndev); 1711 struct rswitch_gwca_queue *gq = rdev->tx_queue; 1712 dma_addr_t dma_addr, dma_addr_orig; 1713 netdev_tx_t ret = NETDEV_TX_OK; 1714 struct rswitch_ext_desc *desc; 1715 unsigned int i, nr_desc; 1716 u8 die_dt; 1717 u16 len; 1718 1719 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1; 1720 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { 1721 netif_stop_subqueue(ndev, 0); 1722 return NETDEV_TX_BUSY; 1723 } 1724 1725 if (skb_put_padto(skb, ETH_ZLEN)) 1726 return ret; 1727 1728 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); 1729 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) 1730 goto err_kfree; 1731 1732 /* Stored the skb at the last descriptor to avoid skb free before hardware completes send */ 1733 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; 1734 gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig; 1735 1736 dma_wmb(); 1737 1738 /* DT_FSTART should be set at last. So, this is reverse order. 
*/ 1739 for (i = nr_desc; i-- > 0; ) { 1740 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)]; 1741 die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i); 1742 dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE; 1743 len = rswitch_ext_desc_get_len(die_dt, skb->len); 1744 if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt)) 1745 goto err_unmap; 1746 } 1747 1748 gq->cur = rswitch_next_queue_index(gq, true, nr_desc); 1749 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); 1750 1751 return ret; 1752 1753 err_unmap: 1754 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; 1755 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); 1756 1757 err_kfree: 1758 dev_kfree_skb_any(skb); 1759 1760 return ret; 1761 } 1762 1763 static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) 1764 { 1765 return &ndev->stats; 1766 } 1767 1768 static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) 1769 { 1770 struct rswitch_device *rdev = netdev_priv(ndev); 1771 struct rcar_gen4_ptp_private *ptp_priv; 1772 struct hwtstamp_config config; 1773 1774 ptp_priv = rdev->priv->ptp_priv; 1775 1776 config.flags = 0; 1777 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : 1778 HWTSTAMP_TX_OFF; 1779 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { 1780 case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: 1781 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 1782 break; 1783 case RCAR_GEN4_RXTSTAMP_TYPE_ALL: 1784 config.rx_filter = HWTSTAMP_FILTER_ALL; 1785 break; 1786 default: 1787 config.rx_filter = HWTSTAMP_FILTER_NONE; 1788 break; 1789 } 1790 1791 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; 1792 } 1793 1794 static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) 1795 { 1796 struct rswitch_device *rdev = netdev_priv(ndev); 1797 u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; 1798 struct hwtstamp_config config; 1799 u32 tstamp_tx_ctrl; 1800 1801 if (copy_from_user(&config, req->ifr_data, sizeof(config))) 1802 return -EFAULT; 1803 1804 if (config.flags) 1805 return -EINVAL; 1806 1807 switch (config.tx_type) { 1808 case HWTSTAMP_TX_OFF: 1809 tstamp_tx_ctrl = 0; 1810 break; 1811 case HWTSTAMP_TX_ON: 1812 tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; 1813 break; 1814 default: 1815 return -ERANGE; 1816 } 1817 1818 switch (config.rx_filter) { 1819 case HWTSTAMP_FILTER_NONE: 1820 tstamp_rx_ctrl = 0; 1821 break; 1822 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1823 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; 1824 break; 1825 default: 1826 config.rx_filter = HWTSTAMP_FILTER_ALL; 1827 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; 1828 break; 1829 } 1830 1831 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; 1832 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; 1833 1834 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
									  -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1.
	 * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply
	 * both the numerator and the denominator by 10.
	 */
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
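
/* Note (added commentary, not from the original sources): a worked example of
 * the PSMCS formula above, assuming a hypothetical 320 MHz peripheral clock
 * and the 2.5 MHz MDC target:
 *
 *	psmcs = 320000000 / 100000 / (25 * 2) - 1
 *	      = 3200 / 50 - 1
 *	      = 63
 *
 * i.e. (320 MHz / (2.5 MHz * 2)) - 1 = 63, matching the MPIC.PSMCS definition
 * quoted in the comment.
 */
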
static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;
	ndev->max_mtu = RSWITCH_MAX_MTU;
	ndev->min_mtu = ETH_MIN_MTU;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

static int rswitch_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (; i-- > 0; )
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
				     clk_get_rate(priv->clk));
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err <
0) 2039 goto err_gwca_ts_request_irq; 2040 2041 err = rswitch_gwca_hw_init(priv); 2042 if (err < 0) 2043 goto err_gwca_hw_init; 2044 2045 err = rswitch_ether_port_init_all(priv); 2046 if (err) 2047 goto err_ether_port_init_all; 2048 2049 rswitch_for_each_enabled_port(priv, i) { 2050 err = register_netdev(priv->rdev[i]->ndev); 2051 if (err) { 2052 rswitch_for_each_enabled_port_continue_reverse(priv, i) 2053 unregister_netdev(priv->rdev[i]->ndev); 2054 goto err_register_netdev; 2055 } 2056 } 2057 2058 rswitch_for_each_enabled_port(priv, i) 2059 netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", 2060 priv->rdev[i]->ndev->dev_addr); 2061 2062 return 0; 2063 2064 err_register_netdev: 2065 rswitch_ether_port_deinit_all(priv); 2066 2067 err_ether_port_init_all: 2068 rswitch_gwca_hw_deinit(priv); 2069 2070 err_gwca_hw_init: 2071 err_gwca_ts_request_irq: 2072 err_gwca_request_irq: 2073 rcar_gen4_ptp_unregister(priv->ptp_priv); 2074 2075 err_ptp_register: 2076 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2077 rswitch_device_free(priv, i); 2078 2079 err_device_alloc: 2080 rswitch_gwca_ts_queue_free(priv); 2081 2082 err_ts_queue_alloc: 2083 rswitch_gwca_linkfix_free(priv); 2084 2085 return err; 2086 } 2087 2088 static const struct soc_device_attribute rswitch_soc_no_speed_change[] = { 2089 { .soc_id = "r8a779f0", .revision = "ES1.0" }, 2090 { /* Sentinel */ } 2091 }; 2092 2093 static int renesas_eth_sw_probe(struct platform_device *pdev) 2094 { 2095 const struct soc_device_attribute *attr; 2096 struct rswitch_private *priv; 2097 struct resource *res; 2098 int ret; 2099 2100 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base"); 2101 if (!res) { 2102 dev_err(&pdev->dev, "invalid resource\n"); 2103 return -EINVAL; 2104 } 2105 2106 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 2107 if (!priv) 2108 return -ENOMEM; 2109 spin_lock_init(&priv->lock); 2110 2111 priv->clk = devm_clk_get(&pdev->dev, NULL); 2112 if (IS_ERR(priv->clk)) 2113 return PTR_ERR(priv->clk); 2114 2115 attr = soc_device_match(rswitch_soc_no_speed_change); 2116 if (attr) 2117 priv->etha_no_runtime_change = true; 2118 2119 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); 2120 if (!priv->ptp_priv) 2121 return -ENOMEM; 2122 2123 platform_set_drvdata(pdev, priv); 2124 priv->pdev = pdev; 2125 priv->addr = devm_ioremap_resource(&pdev->dev, res); 2126 if (IS_ERR(priv->addr)) 2127 return PTR_ERR(priv->addr); 2128 2129 priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; 2130 2131 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 2132 if (ret < 0) { 2133 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2134 if (ret < 0) 2135 return ret; 2136 } 2137 2138 priv->gwca.index = AGENT_INDEX_GWCA; 2139 priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, 2140 RSWITCH_MAX_NUM_QUEUES); 2141 priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, 2142 sizeof(*priv->gwca.queues), GFP_KERNEL); 2143 if (!priv->gwca.queues) 2144 return -ENOMEM; 2145 2146 pm_runtime_enable(&pdev->dev); 2147 pm_runtime_get_sync(&pdev->dev); 2148 2149 ret = rswitch_init(priv); 2150 if (ret < 0) { 2151 pm_runtime_put(&pdev->dev); 2152 pm_runtime_disable(&pdev->dev); 2153 return ret; 2154 } 2155 2156 device_set_wakeup_capable(&pdev->dev, 1); 2157 2158 return ret; 2159 } 2160 2161 static void rswitch_deinit(struct rswitch_private *priv) 2162 { 2163 unsigned int i; 2164 2165 rswitch_gwca_hw_deinit(priv); 2166 rcar_gen4_ptp_unregister(priv->ptp_priv); 2167 2168 rswitch_for_each_enabled_port(priv, i) 
{ 2169 struct rswitch_device *rdev = priv->rdev[i]; 2170 2171 unregister_netdev(rdev->ndev); 2172 rswitch_ether_port_deinit_one(rdev); 2173 phy_exit(priv->rdev[i]->serdes); 2174 } 2175 2176 for (i = 0; i < RSWITCH_NUM_PORTS; i++) 2177 rswitch_device_free(priv, i); 2178 2179 rswitch_gwca_ts_queue_free(priv); 2180 rswitch_gwca_linkfix_free(priv); 2181 2182 rswitch_clock_disable(priv); 2183 } 2184 2185 static void renesas_eth_sw_remove(struct platform_device *pdev) 2186 { 2187 struct rswitch_private *priv = platform_get_drvdata(pdev); 2188 2189 rswitch_deinit(priv); 2190 2191 pm_runtime_put(&pdev->dev); 2192 pm_runtime_disable(&pdev->dev); 2193 2194 platform_set_drvdata(pdev, NULL); 2195 } 2196 2197 static int renesas_eth_sw_suspend(struct device *dev) 2198 { 2199 struct rswitch_private *priv = dev_get_drvdata(dev); 2200 struct net_device *ndev; 2201 unsigned int i; 2202 2203 rswitch_for_each_enabled_port(priv, i) { 2204 ndev = priv->rdev[i]->ndev; 2205 if (netif_running(ndev)) { 2206 netif_device_detach(ndev); 2207 rswitch_stop(ndev); 2208 } 2209 if (priv->rdev[i]->serdes->init_count) 2210 phy_exit(priv->rdev[i]->serdes); 2211 } 2212 2213 return 0; 2214 } 2215 2216 static int renesas_eth_sw_resume(struct device *dev) 2217 { 2218 struct rswitch_private *priv = dev_get_drvdata(dev); 2219 struct net_device *ndev; 2220 unsigned int i; 2221 2222 rswitch_for_each_enabled_port(priv, i) { 2223 phy_init(priv->rdev[i]->serdes); 2224 ndev = priv->rdev[i]->ndev; 2225 if (netif_running(ndev)) { 2226 rswitch_open(ndev); 2227 netif_device_attach(ndev); 2228 } 2229 } 2230 2231 return 0; 2232 } 2233 2234 static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, 2235 renesas_eth_sw_resume); 2236 2237 static struct platform_driver renesas_eth_sw_driver_platform = { 2238 .probe = renesas_eth_sw_probe, 2239 .remove = renesas_eth_sw_remove, 2240 .driver = { 2241 .name = "renesas_eth_sw", 2242 .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), 2243 .of_match_table = renesas_eth_sw_of_table, 2244 } 2245 }; 2246 module_platform_driver(renesas_eth_sw_driver_platform); 2247 MODULE_AUTHOR("Yoshihiro Shimoda"); 2248 MODULE_DESCRIPTION("Renesas Ethernet Switch device driver"); 2249 MODULE_LICENSE("GPL"); 2250