Lines matching +full:serdes +full:-clk

1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
45 iowrite32(RRC_RR, priv->addr + RRC); in rswitch_reset()
46 iowrite32(RRC_RR_CLR, priv->addr + RRC); in rswitch_reset()
51 iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC); in rswitch_clock_enable()
56 iowrite32(RCDC_RCD, priv->addr + RCDC); in rswitch_clock_disable()
88 val = ioread32(priv->addr + CABPIRM); in rswitch_bpool_config()
92 iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM); in rswitch_bpool_config()
94 return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR); in rswitch_bpool_config()
99 iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0); in rswitch_coma_init()
102 /* R-Switch-2 block (TOP) */
108 iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i)); in rswitch_top_init()
118 iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i)); in rswitch_fwd_init()
119 iowrite32(0, priv->addr + FWPBFC(i)); in rswitch_fwd_init()
123 iowrite32(priv->rdev[i]->rx_queue->index, in rswitch_fwd_init()
124 priv->addr + FWPBFCSDC(GWCA_INDEX, i)); in rswitch_fwd_init()
125 iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i)); in rswitch_fwd_init()
129 iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index)); in rswitch_fwd_init()
130 iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index)); in rswitch_fwd_init()
131 iowrite32(0, priv->addr + FWPBFC(priv->gwca.index)); in rswitch_fwd_init()
132 iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index)); in rswitch_fwd_init()
141 if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index)) in rswitch_gwca_change_mode()
142 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1); in rswitch_gwca_change_mode()
144 iowrite32(mode, priv->addr + GWMC); in rswitch_gwca_change_mode()
146 ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode); in rswitch_gwca_change_mode()
149 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0); in rswitch_gwca_change_mode()
156 iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM); in rswitch_gwca_mcast_table_reset()
158 return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR); in rswitch_gwca_mcast_table_reset()
163 iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM); in rswitch_gwca_axi_ram_reset()
165 return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR); in rswitch_gwca_axi_ram_reset()
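The reset helpers above follow a kick-then-poll idiom: write a trigger bit (GWMTIRM_MTIOG, GWARIRM_ARIOG), then spin until the hardware raises the matching done bit. A minimal userspace sketch of that polling loop, with purely hypothetical names standing in for ioread32() and rswitch_reg_wait():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_reg;                  /* stands in for a device register */

    static uint32_t reg_read(void) { return fake_reg; }

    /* Poll until (reg & mask) == expected or the retry budget runs out. */
    static int reg_wait(uint32_t mask, uint32_t expected, int retries)
    {
        while (retries-- > 0) {
            if ((reg_read() & mask) == expected)
                return 0;
            fake_reg |= mask;                  /* simulate the done bit being raised */
        }
        return -1;                             /* the driver would return -ETIMEDOUT */
    }

    int main(void)
    {
        fake_reg = 0;                          /* "kick" already written by the caller */
        printf("wait -> %d\n", reg_wait(0x2, 0x2, 10));
        return 0;
    }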
170 u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits; in rswitch_is_any_data_irq()
186 dis[i] = ioread32(priv->addr + GWDIS(i)); in rswitch_get_data_irq_status()
187 dis[i] &= ioread32(priv->addr + GWDIE(i)); in rswitch_get_data_irq_status()
196 iowrite32(BIT(index % 32), priv->addr + offs); in rswitch_enadis_data_irq()
204 iowrite32(BIT(index % 32), priv->addr + offs); in rswitch_ack_data_irq()
210 unsigned int index = cur ? gq->cur : gq->dirty; in rswitch_next_queue_index()
212 if (index + num >= gq->ring_size) in rswitch_next_queue_index()
213 index = (index + num) % gq->ring_size; in rswitch_next_queue_index()
222 if (gq->cur >= gq->dirty) in rswitch_get_num_cur_queues()
223 return gq->cur - gq->dirty; in rswitch_get_num_cur_queues()
225 return gq->ring_size - gq->dirty + gq->cur; in rswitch_get_num_cur_queues()
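These two helpers implement standard circular-ring bookkeeping: cur is the producer index, dirty the consumer index, and occupancy is the distance between them modulo ring_size. A standalone sketch of the same arithmetic, under invented names:

    #include <assert.h>

    struct ring { unsigned int cur, dirty, ring_size; };   /* illustrative */

    static unsigned int ring_next(unsigned int index, unsigned int num,
                                  unsigned int ring_size)
    {
        return (index + num) % ring_size;      /* wrap at the end of the ring */
    }

    static unsigned int ring_in_flight(const struct ring *r)
    {
        if (r->cur >= r->dirty)
            return r->cur - r->dirty;
        return r->ring_size - r->dirty + r->cur;   /* cur has wrapped past dirty */
    }

    int main(void)
    {
        struct ring r = { .cur = 2, .dirty = 1020, .ring_size = 1024 };

        assert(ring_in_flight(&r) == 6);       /* 1020..1023 plus 0..1 */
        assert(ring_next(1023, 2, 1024) == 1);
        return 0;
    }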
230 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; in rswitch_is_queue_rxed()
232 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) in rswitch_is_queue_rxed()
245 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_alloc_rx_buf()
246 if (gq->rx_bufs[index]) in rswitch_gwca_queue_alloc_rx_buf()
248 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE); in rswitch_gwca_queue_alloc_rx_buf()
249 if (!gq->rx_bufs[index]) in rswitch_gwca_queue_alloc_rx_buf()
256 for (; i-- > 0; ) { in rswitch_gwca_queue_alloc_rx_buf()
257 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_alloc_rx_buf()
258 skb_free_frag(gq->rx_bufs[index]); in rswitch_gwca_queue_alloc_rx_buf()
259 gq->rx_bufs[index] = NULL; in rswitch_gwca_queue_alloc_rx_buf()
262 return -ENOMEM; in rswitch_gwca_queue_alloc_rx_buf()
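The -ENOMEM path above uses the reverse-unwind idiom for (; i-- > 0; ): on the first failed allocation, walk i back down and free everything allocated so far. A simplified standalone sketch (it omits the driver's skip of already-populated slots); malloc() stands in for netdev_alloc_frag():

    #include <stdlib.h>

    static int alloc_all(void **bufs, unsigned int start, unsigned int n,
                         unsigned int ring_size)
    {
        unsigned int i, index;

        for (i = 0; i < n; i++) {
            index = (i + start) % ring_size;
            bufs[index] = malloc(2048);
            if (!bufs[index])
                goto err;
        }
        return 0;

    err:
        for (; i-- > 0; ) {                    /* unwind in reverse order */
            index = (i + start) % ring_size;
            free(bufs[index]);
            bufs[index] = NULL;
        }
        return -1;                             /* -ENOMEM in the driver */
    }

    int main(void)
    {
        void *bufs[8] = { 0 };
        int ret = alloc_all(bufs, 6, 4, 8);    /* wraps: slots 6, 7, 0, 1 */

        for (unsigned int i = 0; i < 8; i++)
            free(bufs[i]);
        return ret ? 1 : 0;
    }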
270 if (!gq->dir_tx) { in rswitch_gwca_queue_free()
271 dma_free_coherent(ndev->dev.parent, in rswitch_gwca_queue_free()
273 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma); in rswitch_gwca_queue_free()
274 gq->rx_ring = NULL; in rswitch_gwca_queue_free()
276 for (i = 0; i < gq->ring_size; i++) in rswitch_gwca_queue_free()
277 skb_free_frag(gq->rx_bufs[i]); in rswitch_gwca_queue_free()
278 kfree(gq->rx_bufs); in rswitch_gwca_queue_free()
279 gq->rx_bufs = NULL; in rswitch_gwca_queue_free()
281 dma_free_coherent(ndev->dev.parent, in rswitch_gwca_queue_free()
283 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma); in rswitch_gwca_queue_free()
284 gq->tx_ring = NULL; in rswitch_gwca_queue_free()
285 kfree(gq->skbs); in rswitch_gwca_queue_free()
286 gq->skbs = NULL; in rswitch_gwca_queue_free()
287 kfree(gq->unmap_addrs); in rswitch_gwca_queue_free()
288 gq->unmap_addrs = NULL; in rswitch_gwca_queue_free()
294 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_free()
296 dma_free_coherent(&priv->pdev->dev, in rswitch_gwca_ts_queue_free()
297 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1), in rswitch_gwca_ts_queue_free()
298 gq->ts_ring, gq->ring_dma); in rswitch_gwca_ts_queue_free()
299 gq->ts_ring = NULL; in rswitch_gwca_ts_queue_free()
309 gq->dir_tx = dir_tx; in rswitch_gwca_queue_alloc()
310 gq->ring_size = ring_size; in rswitch_gwca_queue_alloc()
311 gq->ndev = ndev; in rswitch_gwca_queue_alloc()
314 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
315 if (!gq->rx_bufs) in rswitch_gwca_queue_alloc()
316 return -ENOMEM; in rswitch_gwca_queue_alloc()
317 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) in rswitch_gwca_queue_alloc()
320 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, in rswitch_gwca_queue_alloc()
322 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_queue_alloc()
324 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
325 if (!gq->skbs) in rswitch_gwca_queue_alloc()
326 return -ENOMEM; in rswitch_gwca_queue_alloc()
327 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
328 if (!gq->unmap_addrs) in rswitch_gwca_queue_alloc()
330 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, in rswitch_gwca_queue_alloc()
332 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_queue_alloc()
335 if (!gq->rx_ring && !gq->tx_ring) in rswitch_gwca_queue_alloc()
338 i = gq->index / 32; in rswitch_gwca_queue_alloc()
339 bit = BIT(gq->index % 32); in rswitch_gwca_queue_alloc()
341 priv->gwca.tx_irq_bits[i] |= bit; in rswitch_gwca_queue_alloc()
343 priv->gwca.rx_irq_bits[i] |= bit; in rswitch_gwca_queue_alloc()
350 return -ENOMEM; in rswitch_gwca_queue_alloc()
355 desc->dptrl = cpu_to_le32(lower_32_bits(addr)); in rswitch_desc_set_dptr()
356 desc->dptrh = upper_32_bits(addr) & 0xff; in rswitch_desc_set_dptr()
361 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; in rswitch_desc_get_dptr()
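The descriptor pointer is a 40-bit DMA address split across a 32-bit dptrl field and an 8-bit dptrh field, which is why the probe code later requests DMA_BIT_MASK(40). A standalone round-trip sketch of the packing, with an invented struct layout and byte-order handling omitted:

    #include <assert.h>
    #include <stdint.h>

    struct sdesc { uint32_t dptrl; uint8_t dptrh; };

    static void set_dptr(struct sdesc *d, uint64_t addr)
    {
        d->dptrl = (uint32_t)addr;             /* lower_32_bits() */
        d->dptrh = (addr >> 32) & 0xff;        /* upper_32_bits() & 0xff */
    }

    static uint64_t get_dptr(const struct sdesc *d)
    {
        return d->dptrl | (uint64_t)d->dptrh << 32;
    }

    int main(void)
    {
        struct sdesc d;

        set_dptr(&d, 0x12abcdef01ULL);         /* a 40-bit address */
        assert(get_dptr(&d) == 0x12abcdef01ULL);
        return 0;
    }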
368 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; in rswitch_gwca_queue_format()
374 memset(gq->tx_ring, 0, ring_size); in rswitch_gwca_queue_format()
375 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { in rswitch_gwca_queue_format()
376 if (!gq->dir_tx) { in rswitch_gwca_queue_format()
377 dma_addr = dma_map_single(ndev->dev.parent, in rswitch_gwca_queue_format()
378 gq->rx_bufs[i] + RSWITCH_HEADROOM, in rswitch_gwca_queue_format()
381 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in rswitch_gwca_queue_format()
384 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); in rswitch_gwca_queue_format()
385 rswitch_desc_set_dptr(&desc->desc, dma_addr); in rswitch_gwca_queue_format()
386 desc->desc.die_dt = DT_FEMPTY | DIE; in rswitch_gwca_queue_format()
388 desc->desc.die_dt = DT_EEMPTY | DIE; in rswitch_gwca_queue_format()
391 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_queue_format()
392 desc->desc.die_dt = DT_LINKFIX; in rswitch_gwca_queue_format()
394 linkfix = &priv->gwca.linkfix_table[gq->index]; in rswitch_gwca_queue_format()
395 linkfix->die_dt = DT_LINKFIX; in rswitch_gwca_queue_format()
396 rswitch_desc_set_dptr(linkfix, gq->ring_dma); in rswitch_gwca_queue_format()
398 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, in rswitch_gwca_queue_format()
399 priv->addr + GWDCC_OFFS(gq->index)); in rswitch_gwca_queue_format()
404 if (!gq->dir_tx) { in rswitch_gwca_queue_format()
405 for (desc = gq->tx_ring; i-- > 0; desc++) { in rswitch_gwca_queue_format()
406 dma_addr = rswitch_desc_get_dptr(&desc->desc); in rswitch_gwca_queue_format()
407 dma_unmap_single(ndev->dev.parent, dma_addr, in rswitch_gwca_queue_format()
412 return -ENOMEM; in rswitch_gwca_queue_format()
419 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_fill()
424 index = (i + start_index) % gq->ring_size; in rswitch_gwca_ts_queue_fill()
425 desc = &gq->ts_ring[index]; in rswitch_gwca_ts_queue_fill()
426 desc->desc.die_dt = DT_FEMPTY_ND | DIE; in rswitch_gwca_ts_queue_fill()
441 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_ext_ts_fill()
442 desc = &gq->rx_ring[index]; in rswitch_gwca_queue_ext_ts_fill()
443 if (!gq->dir_tx) { in rswitch_gwca_queue_ext_ts_fill()
444 dma_addr = dma_map_single(ndev->dev.parent, in rswitch_gwca_queue_ext_ts_fill()
445 gq->rx_bufs[index] + RSWITCH_HEADROOM, in rswitch_gwca_queue_ext_ts_fill()
448 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in rswitch_gwca_queue_ext_ts_fill()
451 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); in rswitch_gwca_queue_ext_ts_fill()
452 rswitch_desc_set_dptr(&desc->desc, dma_addr); in rswitch_gwca_queue_ext_ts_fill()
454 desc->desc.die_dt = DT_FEMPTY | DIE; in rswitch_gwca_queue_ext_ts_fill()
455 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); in rswitch_gwca_queue_ext_ts_fill()
457 desc->desc.die_dt = DT_EEMPTY | DIE; in rswitch_gwca_queue_ext_ts_fill()
464 if (!gq->dir_tx) { in rswitch_gwca_queue_ext_ts_fill()
465 for (; i-- > 0; ) { in rswitch_gwca_queue_ext_ts_fill()
466 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_ext_ts_fill()
467 desc = &gq->rx_ring[index]; in rswitch_gwca_queue_ext_ts_fill()
468 dma_addr = rswitch_desc_get_dptr(&desc->desc); in rswitch_gwca_queue_ext_ts_fill()
469 dma_unmap_single(ndev->dev.parent, dma_addr, in rswitch_gwca_queue_ext_ts_fill()
474 return -ENOMEM; in rswitch_gwca_queue_ext_ts_fill()
481 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; in rswitch_gwca_queue_ext_ts_format()
486 memset(gq->rx_ring, 0, ring_size); in rswitch_gwca_queue_ext_ts_format()
487 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); in rswitch_gwca_queue_ext_ts_format()
491 desc = &gq->rx_ring[gq->ring_size]; /* Last */ in rswitch_gwca_queue_ext_ts_format()
492 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_queue_ext_ts_format()
493 desc->desc.die_dt = DT_LINKFIX; in rswitch_gwca_queue_ext_ts_format()
495 linkfix = &priv->gwca.linkfix_table[gq->index]; in rswitch_gwca_queue_ext_ts_format()
496 linkfix->die_dt = DT_LINKFIX; in rswitch_gwca_queue_ext_ts_format()
497 rswitch_desc_set_dptr(linkfix, gq->ring_dma); in rswitch_gwca_queue_ext_ts_format()
499 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | in rswitch_gwca_queue_ext_ts_format()
501 priv->addr + GWDCC_OFFS(gq->index)); in rswitch_gwca_queue_ext_ts_format()
508 unsigned int i, num_queues = priv->gwca.num_queues; in rswitch_gwca_linkfix_alloc()
509 struct rswitch_gwca *gwca = &priv->gwca; in rswitch_gwca_linkfix_alloc()
510 struct device *dev = &priv->pdev->dev; in rswitch_gwca_linkfix_alloc()
512 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; in rswitch_gwca_linkfix_alloc()
513 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size, in rswitch_gwca_linkfix_alloc()
514 &gwca->linkfix_table_dma, GFP_KERNEL); in rswitch_gwca_linkfix_alloc()
515 if (!gwca->linkfix_table) in rswitch_gwca_linkfix_alloc()
516 return -ENOMEM; in rswitch_gwca_linkfix_alloc()
518 gwca->linkfix_table[i].die_dt = DT_EOS; in rswitch_gwca_linkfix_alloc()
525 struct rswitch_gwca *gwca = &priv->gwca; in rswitch_gwca_linkfix_free()
527 if (gwca->linkfix_table) in rswitch_gwca_linkfix_free()
528 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size, in rswitch_gwca_linkfix_free()
529 gwca->linkfix_table, gwca->linkfix_table_dma); in rswitch_gwca_linkfix_free()
530 gwca->linkfix_table = NULL; in rswitch_gwca_linkfix_free()
535 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_alloc()
538 gq->ring_size = TS_RING_SIZE; in rswitch_gwca_ts_queue_alloc()
539 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, in rswitch_gwca_ts_queue_alloc()
541 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_ts_queue_alloc()
543 if (!gq->ts_ring) in rswitch_gwca_ts_queue_alloc()
544 return -ENOMEM; in rswitch_gwca_ts_queue_alloc()
547 desc = &gq->ts_ring[gq->ring_size]; in rswitch_gwca_ts_queue_alloc()
548 desc->desc.die_dt = DT_LINKFIX; in rswitch_gwca_ts_queue_alloc()
549 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_ts_queue_alloc()
550 INIT_LIST_HEAD(&priv->gwca.ts_info_list); in rswitch_gwca_ts_queue_alloc()
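The ring_size + 1 allocations throughout this file reserve one extra descriptor: it is written as DT_LINKFIX with its pointer set to the ring's own base (ring_dma), so the hardware wraps back to entry 0. A toy illustration with invented types and a hypothetical base address:

    #include <assert.h>
    #include <stdint.h>

    enum { DT_DATA_TOY, DT_LINKFIX_TOY };

    struct toy_desc { int die_dt; uint64_t dptr; };

    int main(void)
    {
        enum { N = 4 };
        struct toy_desc ring[N + 1] = { 0 };   /* N entries + terminator */
        uint64_t ring_dma = 0x1000;            /* hypothetical base address */

        ring[N].die_dt = DT_LINKFIX_TOY;
        ring[N].dptr = ring_dma;               /* hardware jumps back to entry 0 */
        assert(ring[N].dptr == ring_dma);
        return 0;
    }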
560 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues); in rswitch_gwca_get()
561 if (index >= priv->gwca.num_queues) in rswitch_gwca_get()
563 set_bit(index, priv->gwca.used); in rswitch_gwca_get()
564 gq = &priv->gwca.queues[index]; in rswitch_gwca_get()
566 gq->index = index; in rswitch_gwca_get()
574 clear_bit(gq->index, priv->gwca.used); in rswitch_gwca_put()
580 struct rswitch_private *priv = rdev->priv; in rswitch_txdmac_alloc()
583 rdev->tx_queue = rswitch_gwca_get(priv); in rswitch_txdmac_alloc()
584 if (!rdev->tx_queue) in rswitch_txdmac_alloc()
585 return -EBUSY; in rswitch_txdmac_alloc()
587 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE); in rswitch_txdmac_alloc()
589 rswitch_gwca_put(priv, rdev->tx_queue); in rswitch_txdmac_alloc()
600 rswitch_gwca_queue_free(ndev, rdev->tx_queue); in rswitch_txdmac_free()
601 rswitch_gwca_put(rdev->priv, rdev->tx_queue); in rswitch_txdmac_free()
606 struct rswitch_device *rdev = priv->rdev[index]; in rswitch_txdmac_init()
608 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue); in rswitch_txdmac_init()
614 struct rswitch_private *priv = rdev->priv; in rswitch_rxdmac_alloc()
617 rdev->rx_queue = rswitch_gwca_get(priv); in rswitch_rxdmac_alloc()
618 if (!rdev->rx_queue) in rswitch_rxdmac_alloc()
619 return -EBUSY; in rswitch_rxdmac_alloc()
621 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE); in rswitch_rxdmac_alloc()
623 rswitch_gwca_put(priv, rdev->rx_queue); in rswitch_rxdmac_alloc()
634 rswitch_gwca_queue_free(ndev, rdev->rx_queue); in rswitch_rxdmac_free()
635 rswitch_gwca_put(rdev->priv, rdev->rx_queue); in rswitch_rxdmac_free()
640 struct rswitch_device *rdev = priv->rdev[index]; in rswitch_rxdmac_init()
641 struct net_device *ndev = rdev->ndev; in rswitch_rxdmac_init()
643 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue); in rswitch_rxdmac_init()
665 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); in rswitch_gwca_hw_init()
666 iowrite32(0, priv->addr + GWTTFC); in rswitch_gwca_hw_init()
667 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); in rswitch_gwca_hw_init()
668 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); in rswitch_gwca_hw_init()
669 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); in rswitch_gwca_hw_init()
670 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); in rswitch_gwca_hw_init()
672 priv->addr + GWMDNC); in rswitch_gwca_hw_init()
673 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); in rswitch_gwca_hw_init()
675 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); in rswitch_gwca_hw_init()
710 priv->gwca_halt = true; in rswitch_gwca_halt()
712 dev_err(&priv->pdev->dev, "halted (%d)\n", err); in rswitch_gwca_halt()
721 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc); in rswitch_rx_handle_desc()
722 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; in rswitch_rx_handle_desc()
723 u8 die_dt = desc->desc.die_dt & DT_MASK; in rswitch_rx_handle_desc()
726 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE, in rswitch_rx_handle_desc()
730 * - FSINGLE in rswitch_rx_handle_desc()
731 * - FSTART -> FEND in rswitch_rx_handle_desc()
732 * - FSTART -> FMID -> FEND in rswitch_rx_handle_desc()
739 if (gq->skb_fstart) { in rswitch_rx_handle_desc()
740 dev_kfree_skb_any(gq->skb_fstart); in rswitch_rx_handle_desc()
741 gq->skb_fstart = NULL; in rswitch_rx_handle_desc()
742 ndev->stats.rx_dropped++; in rswitch_rx_handle_desc()
747 if (!gq->skb_fstart) { in rswitch_rx_handle_desc()
748 ndev->stats.rx_dropped++; in rswitch_rx_handle_desc()
760 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE); in rswitch_rx_handle_desc()
764 gq->pkt_len = pkt_len; in rswitch_rx_handle_desc()
766 gq->skb_fstart = skb; in rswitch_rx_handle_desc()
773 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags, in rswitch_rx_handle_desc()
774 virt_to_page(gq->rx_bufs[gq->cur]), in rswitch_rx_handle_desc()
775 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM, in rswitch_rx_handle_desc()
778 skb = gq->skb_fstart; in rswitch_rx_handle_desc()
779 gq->skb_fstart = NULL; in rswitch_rx_handle_desc()
781 gq->pkt_len += pkt_len; in rswitch_rx_handle_desc()
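Per the FSINGLE/FSTART/FMID/FEND comment above, a frame arrives either in one descriptor or as an FSTART ... FMID ... FEND chain, with gq->skb_fstart tracking whether reassembly is in progress. A rough standalone sketch of that sequencing check; names are illustrative and 1/0 stand for accept/drop:

    enum die_dt_toy { FSINGLE, FSTART, FMID, FEND };

    struct rx_state { int in_frame; };         /* models gq->skb_fstart != NULL */

    static int rx_step(struct rx_state *s, enum die_dt_toy dt)
    {
        switch (dt) {
        case FSINGLE:                          /* whole frame in one descriptor */
            return !s->in_frame;
        case FSTART:                           /* must not already be mid-frame */
            if (s->in_frame)
                return 0;
            s->in_frame = 1;
            return 1;
        case FMID:                             /* only legal inside a frame */
            return s->in_frame;
        case FEND:                             /* closes the frame */
            if (!s->in_frame)
                return 0;
            s->in_frame = 0;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct rx_state s = { 0 };

        return (rx_step(&s, FSTART) &&
                rx_step(&s, FMID) &&
                rx_step(&s, FEND)) ? 0 : 1;
    }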
794 struct rswitch_gwca_queue *gq = rdev->rx_queue; in rswitch_rx()
804 boguscnt = min_t(int, gq->ring_size, *quota); in rswitch_rx()
807 desc = &gq->rx_ring[gq->cur]; in rswitch_rx()
808 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { in rswitch_rx()
814 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; in rswitch_rx()
821 ts.tv_sec = __le32_to_cpu(desc->ts_sec); in rswitch_rx()
822 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); in rswitch_rx()
823 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); in rswitch_rx()
825 skb->protocol = eth_type_trans(skb, ndev); in rswitch_rx()
826 napi_gro_receive(&rdev->napi, skb); in rswitch_rx()
827 rdev->ndev->stats.rx_packets++; in rswitch_rx()
828 rdev->ndev->stats.rx_bytes += gq->pkt_len; in rswitch_rx()
831 gq->rx_bufs[gq->cur] = NULL; in rswitch_rx()
832 gq->cur = rswitch_next_queue_index(gq, true, 1); in rswitch_rx()
833 desc = &gq->rx_ring[gq->cur]; in rswitch_rx()
835 if (--boguscnt <= 0) in rswitch_rx()
840 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num); in rswitch_rx()
843 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); in rswitch_rx()
846 gq->dirty = rswitch_next_queue_index(gq, false, num); in rswitch_rx()
848 *quota -= limit - boguscnt; in rswitch_rx()
853 rswitch_gwca_halt(rdev->priv); in rswitch_rx()
861 struct rswitch_gwca_queue *gq = rdev->tx_queue; in rswitch_tx_free()
866 gq->dirty = rswitch_next_queue_index(gq, false, 1)) { in rswitch_tx_free()
867 desc = &gq->tx_ring[gq->dirty]; in rswitch_tx_free()
868 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) in rswitch_tx_free()
872 skb = gq->skbs[gq->dirty]; in rswitch_tx_free()
874 rdev->ndev->stats.tx_packets++; in rswitch_tx_free()
875 rdev->ndev->stats.tx_bytes += skb->len; in rswitch_tx_free()
876 dma_unmap_single(ndev->dev.parent, in rswitch_tx_free()
877 gq->unmap_addrs[gq->dirty], in rswitch_tx_free()
878 skb->len, DMA_TO_DEVICE); in rswitch_tx_free()
879 dev_kfree_skb_any(gq->skbs[gq->dirty]); in rswitch_tx_free()
880 gq->skbs[gq->dirty] = NULL; in rswitch_tx_free()
882 desc->desc.die_dt = DT_EEMPTY; in rswitch_tx_free()
888 struct net_device *ndev = napi->dev; in rswitch_poll()
895 priv = rdev->priv; in rswitch_poll()
902 else if (rdev->priv->gwca_halt) in rswitch_poll()
904 else if (rswitch_is_queue_rxed(rdev->rx_queue)) in rswitch_poll()
909 if (napi_complete_done(napi, budget - quota)) { in rswitch_poll()
910 spin_lock_irqsave(&priv->lock, flags); in rswitch_poll()
911 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); in rswitch_poll()
912 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); in rswitch_poll()
913 spin_unlock_irqrestore(&priv->lock, flags); in rswitch_poll()
917 return budget - quota; in rswitch_poll()
929 if (napi_schedule_prep(&rdev->napi)) { in rswitch_queue_interrupt()
930 spin_lock(&rdev->priv->lock); in rswitch_queue_interrupt()
931 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); in rswitch_queue_interrupt()
932 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); in rswitch_queue_interrupt()
933 spin_unlock(&rdev->priv->lock); in rswitch_queue_interrupt()
934 __napi_schedule(&rdev->napi); in rswitch_queue_interrupt()
943 for (i = 0; i < priv->gwca.num_queues; i++) { in rswitch_data_irq()
944 gq = &priv->gwca.queues[i]; in rswitch_data_irq()
945 index = gq->index / 32; in rswitch_data_irq()
946 bit = BIT(gq->index % 32); in rswitch_data_irq()
950 rswitch_ack_data_irq(priv, gq->index); in rswitch_data_irq()
951 rswitch_queue_interrupt(gq->ndev); in rswitch_data_irq()
980 return -ENOMEM; in rswitch_gwca_request_irqs()
982 irq = platform_get_irq_byname(priv->pdev, resource_name); in rswitch_gwca_request_irqs()
987 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, in rswitch_gwca_request_irqs()
990 return -ENOMEM; in rswitch_gwca_request_irqs()
992 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq, in rswitch_gwca_request_irqs()
1003 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_ts()
1011 desc = &gq->ts_ring[gq->cur]; in rswitch_ts()
1012 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { in rswitch_ts()
1015 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); in rswitch_ts()
1016 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); in rswitch_ts()
1018 list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) { in rswitch_ts()
1019 if (!(ts_info->port == port && ts_info->tag == tag)) in rswitch_ts()
1023 ts.tv_sec = __le32_to_cpu(desc->ts_sec); in rswitch_ts()
1024 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); in rswitch_ts()
1026 skb_tstamp_tx(ts_info->skb, &shhwtstamps); in rswitch_ts()
1027 dev_consume_skb_irq(ts_info->skb); in rswitch_ts()
1028 list_del(&ts_info->list); in rswitch_ts()
1033 gq->cur = rswitch_next_queue_index(gq, true, 1); in rswitch_ts()
1034 desc = &gq->ts_ring[gq->cur]; in rswitch_ts()
1038 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); in rswitch_ts()
1039 gq->dirty = rswitch_next_queue_index(gq, false, num); in rswitch_ts()
1046 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { in rswitch_gwca_ts_irq()
1047 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); in rswitch_gwca_ts_irq()
1060 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); in rswitch_gwca_ts_request_irqs()
1064 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq, in rswitch_gwca_ts_request_irqs()
1074 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index)) in rswitch_etha_change_mode()
1075 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1); in rswitch_etha_change_mode()
1077 iowrite32(mode, etha->addr + EAMC); in rswitch_etha_change_mode()
1079 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode); in rswitch_etha_change_mode()
1082 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0); in rswitch_etha_change_mode()
1089 u32 mrmac0 = ioread32(etha->addr + MRMAC0); in rswitch_etha_read_mac_address()
1090 u32 mrmac1 = ioread32(etha->addr + MRMAC1); in rswitch_etha_read_mac_address()
1091 u8 *mac = &etha->mac_addr[0]; in rswitch_etha_read_mac_address()
1103 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); in rswitch_etha_write_mac_address()
1105 etha->addr + MRMAC1); in rswitch_etha_write_mac_address()
1110 iowrite32(MLVC_PLV, etha->addr + MLVC); in rswitch_etha_wait_link_verification()
1112 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0); in rswitch_etha_wait_link_verification()
1121 switch (etha->speed) { in rswitch_rmac_setting()
1135 iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC); in rswitch_rmac_setting()
1140 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, in rswitch_etha_enable_mii()
1141 MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); in rswitch_etha_enable_mii()
1142 rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); in rswitch_etha_enable_mii()
1156 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); in rswitch_etha_hw_init()
1179 return -ENODEV; in rswitch_etha_set_access()
1181 writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1); in rswitch_etha_set_access()
1184 iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM); in rswitch_etha_set_access()
1186 ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS); in rswitch_etha_set_access()
1190 rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS); in rswitch_etha_set_access()
1193 writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM); in rswitch_etha_set_access()
1195 ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS); in rswitch_etha_set_access()
1199 ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16; in rswitch_etha_set_access()
1201 rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS); in rswitch_etha_set_access()
1204 etha->addr + MPSM); in rswitch_etha_set_access()
1206 ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS); in rswitch_etha_set_access()
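The MPSM writes above pack the Clause 45 MDIO fields as (regad << 16) | (devad << 8) | (phyad << 3) | op. A hypothetical helper showing just that bit layout, read off the iowrite32() calls shown rather than the hardware manual:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mpsm_pack(uint32_t regad, uint32_t devad,
                              uint32_t phyad, uint32_t op)
    {
        return (regad << 16) | (devad << 8) | (phyad << 3) | op;
    }

    int main(void)
    {
        /* e.g. register 0x1234 in MMD 7 of PHY address 1 */
        printf("MPSM = 0x%08" PRIx32 "\n", mpsm_pack(0x1234, 7, 1, 0));
        return 0;
    }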
1215 struct rswitch_etha *etha = bus->priv; in rswitch_etha_mii_read_c45()
1223 struct rswitch_etha *etha = bus->priv; in rswitch_etha_mii_write_c45()
1235 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node, in rswitch_get_port_node()
1236 "ethernet-ports"); in rswitch_get_port_node()
1246 if (index == rdev->etha->index) { in rswitch_get_port_node()
1264 if (!rdev->np_port) in rswitch_etha_get_params()
1267 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface); in rswitch_etha_get_params()
1271 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed); in rswitch_etha_get_params()
1273 rdev->etha->speed = max_speed; in rswitch_etha_get_params()
1277 /* if no "max-speed" property, let's use default speed */ in rswitch_etha_get_params()
1278 switch (rdev->etha->phy_interface) { in rswitch_etha_get_params()
1280 rdev->etha->speed = SPEED_100; in rswitch_etha_get_params()
1283 rdev->etha->speed = SPEED_1000; in rswitch_etha_get_params()
1286 rdev->etha->speed = SPEED_2500; in rswitch_etha_get_params()
1289 return -EINVAL; in rswitch_etha_get_params()
1303 return -ENOMEM; in rswitch_mii_register()
1305 mii_bus->name = "rswitch_mii"; in rswitch_mii_register()
1306 sprintf(mii_bus->id, "etha%d", rdev->etha->index); in rswitch_mii_register()
1307 mii_bus->priv = rdev->etha; in rswitch_mii_register()
1308 mii_bus->read_c45 = rswitch_etha_mii_read_c45; in rswitch_mii_register()
1309 mii_bus->write_c45 = rswitch_etha_mii_write_c45; in rswitch_mii_register()
1310 mii_bus->parent = &rdev->priv->pdev->dev; in rswitch_mii_register()
1312 mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); in rswitch_mii_register()
1319 rdev->etha->mii = mii_bus; in rswitch_mii_register()
1329 if (rdev->etha->mii) { in rswitch_mii_unregister()
1330 mdiobus_unregister(rdev->etha->mii); in rswitch_mii_unregister()
1331 mdiobus_free(rdev->etha->mii); in rswitch_mii_unregister()
1332 rdev->etha->mii = NULL; in rswitch_mii_unregister()
1339 struct phy_device *phydev = ndev->phydev; in rswitch_adjust_link()
1341 if (phydev->link != rdev->etha->link) { in rswitch_adjust_link()
1343 if (phydev->link) in rswitch_adjust_link()
1344 phy_power_on(rdev->serdes); in rswitch_adjust_link()
1345 else if (rdev->serdes->power_count) in rswitch_adjust_link()
1346 phy_power_off(rdev->serdes); in rswitch_adjust_link()
1348 rdev->etha->link = phydev->link; in rswitch_adjust_link()
1350 if (!rdev->priv->etha_no_runtime_change && in rswitch_adjust_link()
1351 phydev->speed != rdev->etha->speed) { in rswitch_adjust_link()
1352 rdev->etha->speed = phydev->speed; in rswitch_adjust_link()
1354 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); in rswitch_adjust_link()
1355 phy_set_speed(rdev->serdes, rdev->etha->speed); in rswitch_adjust_link()
1363 if (!rdev->priv->etha_no_runtime_change) in rswitch_phy_remove_link_mode()
1366 switch (rdev->etha->speed) { in rswitch_phy_remove_link_mode()
1383 phy_set_max_speed(phydev, rdev->etha->speed); in rswitch_phy_remove_link_mode()
1390 int err = -ENOENT; in rswitch_phy_device_init()
1392 if (!rdev->np_port) in rswitch_phy_device_init()
1393 return -ENODEV; in rswitch_phy_device_init()
1395 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0); in rswitch_phy_device_init()
1397 return -ENODEV; in rswitch_phy_device_init()
1399 /* Set phydev->host_interfaces before calling of_phy_connect() to in rswitch_phy_device_init()
1405 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); in rswitch_phy_device_init()
1406 phydev->mac_managed_pm = true; in rswitch_phy_device_init()
1408 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, in rswitch_phy_device_init()
1409 rdev->etha->phy_interface); in rswitch_phy_device_init()
1431 if (rdev->ndev->phydev) in rswitch_phy_device_deinit()
1432 phy_disconnect(rdev->ndev->phydev); in rswitch_phy_device_deinit()
1439 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET, in rswitch_serdes_set_params()
1440 rdev->etha->phy_interface); in rswitch_serdes_set_params()
1444 return phy_set_speed(rdev->serdes, rdev->etha->speed); in rswitch_serdes_set_params()
1451 if (!rdev->etha->operated) { in rswitch_ether_port_init_one()
1452 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); in rswitch_ether_port_init_one()
1455 if (rdev->priv->etha_no_runtime_change) in rswitch_ether_port_init_one()
1456 rdev->etha->operated = true; in rswitch_ether_port_init_one()
1467 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL); in rswitch_ether_port_init_one()
1468 if (IS_ERR(rdev->serdes)) { in rswitch_ether_port_init_one()
1469 err = PTR_ERR(rdev->serdes); in rswitch_ether_port_init_one()
1501 err = rswitch_ether_port_init_one(priv->rdev[i]); in rswitch_ether_port_init_all()
1507 err = phy_init(priv->rdev[i]->serdes); in rswitch_ether_port_init_all()
1516 phy_exit(priv->rdev[i]->serdes); in rswitch_ether_port_init_all()
1521 rswitch_ether_port_deinit_one(priv->rdev[i]); in rswitch_ether_port_init_all()
1531 phy_exit(priv->rdev[i]->serdes); in rswitch_ether_port_deinit_all()
1532 rswitch_ether_port_deinit_one(priv->rdev[i]); in rswitch_ether_port_deinit_all()
1541 phy_start(ndev->phydev); in rswitch_open()
1543 napi_enable(&rdev->napi); in rswitch_open()
1546 spin_lock_irqsave(&rdev->priv->lock, flags); in rswitch_open()
1547 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); in rswitch_open()
1548 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); in rswitch_open()
1549 spin_unlock_irqrestore(&rdev->priv->lock, flags); in rswitch_open()
1551 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) in rswitch_open()
1552 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); in rswitch_open()
1554 bitmap_set(rdev->priv->opened_ports, rdev->port, 1); in rswitch_open()
1566 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); in rswitch_stop()
1568 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) in rswitch_stop()
1569 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); in rswitch_stop()
1571 list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) { in rswitch_stop()
1572 if (ts_info->port != rdev->port) in rswitch_stop()
1574 dev_kfree_skb_irq(ts_info->skb); in rswitch_stop()
1575 list_del(&ts_info->list); in rswitch_stop()
1579 spin_lock_irqsave(&rdev->priv->lock, flags); in rswitch_stop()
1580 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); in rswitch_stop()
1581 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); in rswitch_stop()
1582 spin_unlock_irqrestore(&rdev->priv->lock, flags); in rswitch_stop()
1584 phy_stop(ndev->phydev); in rswitch_stop()
1585 napi_disable(&rdev->napi); in rswitch_stop()
1594 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | in rswitch_ext_desc_set_info1()
1596 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in rswitch_ext_desc_set_info1()
1603 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in rswitch_ext_desc_set_info1()
1604 rdev->ts_tag++; in rswitch_ext_desc_set_info1()
1605 desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC); in rswitch_ext_desc_set_info1()
1607 ts_info->skb = skb_get(skb); in rswitch_ext_desc_set_info1()
1608 ts_info->port = rdev->port; in rswitch_ext_desc_set_info1()
1609 ts_info->tag = rdev->ts_tag; in rswitch_ext_desc_set_info1()
1610 list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list); in rswitch_ext_desc_set_info1()
1623 rswitch_desc_set_dptr(&desc->desc, dma_addr); in rswitch_ext_desc_set()
1624 desc->desc.info_ds = cpu_to_le16(len); in rswitch_ext_desc_set()
1630 desc->desc.die_dt = die_dt; in rswitch_ext_desc_set()
1641 if (nr_desc - 1 == index) in rswitch_ext_desc_get_die_dt()
1663 struct rswitch_gwca_queue *gq = rdev->tx_queue; in rswitch_start_xmit()
1671 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1; in rswitch_start_xmit()
1672 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { in rswitch_start_xmit()
1680 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); in rswitch_start_xmit()
1681 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) in rswitch_start_xmit()
1684 gq->skbs[gq->cur] = skb; in rswitch_start_xmit()
1685 gq->unmap_addrs[gq->cur] = dma_addr_orig; in rswitch_start_xmit()
1688 for (i = nr_desc; i-- > 0; ) { in rswitch_start_xmit()
1689 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)]; in rswitch_start_xmit()
1692 len = rswitch_ext_desc_get_len(die_dt, skb->len); in rswitch_start_xmit()
1697 wmb(); /* gq->cur must be incremented after die_dt was set */ in rswitch_start_xmit()
1699 gq->cur = rswitch_next_queue_index(gq, true, nr_desc); in rswitch_start_xmit()
1700 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); in rswitch_start_xmit()
1705 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); in rswitch_start_xmit()
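The wmb() comment above states the TX ordering contract: every descriptor field, die_dt last, must be globally visible before gq->cur advances and the GWTRC doorbell is rung. A C11-atomics sketch of the same producer-side rule, purely illustrative and with an invented queue type:

    #include <stdatomic.h>
    #include <stdint.h>

    struct txq {
        uint8_t die_dt[16];                    /* descriptor "type" bytes */
        _Atomic unsigned int cur;              /* producer index */
    };

    static void publish(struct txq *q, unsigned int idx, uint8_t dt)
    {
        q->die_dt[idx] = dt;                   /* descriptor contents first */
        /* release: the die_dt store cannot be reordered past this,
         * analogous to the driver's wmb() before updating gq->cur */
        atomic_store_explicit(&q->cur, idx + 1, memory_order_release);
    }

    int main(void)
    {
        static struct txq q;                   /* zero-initialized */

        publish(&q, 0, 0x10);
        return 0;
    }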
1715 return &ndev->stats; in rswitch_get_stats()
1724 ptp_priv = rdev->priv->ptp_priv; in rswitch_hwstamp_get()
1727 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : in rswitch_hwstamp_get()
1729 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { in rswitch_hwstamp_get()
1741 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; in rswitch_hwstamp_get()
1751 if (copy_from_user(&config, req->ifr_data, sizeof(config))) in rswitch_hwstamp_set()
1752 return -EFAULT; in rswitch_hwstamp_set()
1755 return -EINVAL; in rswitch_hwstamp_set()
1765 return -ERANGE; in rswitch_hwstamp_set()
1781 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; in rswitch_hwstamp_set()
1782 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; in rswitch_hwstamp_set()
1784 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; in rswitch_hwstamp_set()
1790 return -EINVAL; in rswitch_eth_ioctl()
1798 return phy_mii_ioctl(ndev->phydev, req, cmd); in rswitch_eth_ioctl()
1816 info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock); in rswitch_get_ts_info()
1817 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | in rswitch_get_ts_info()
1821 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); in rswitch_get_ts_info()
1822 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); in rswitch_get_ts_info()
1834 { .compatible = "renesas,r8a779f0-ether-switch", },
1841 struct rswitch_etha *etha = &priv->etha[index]; in rswitch_etha_init()
1844 etha->index = index; in rswitch_etha_init()
1845 etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; in rswitch_etha_init()
1846 etha->coma_addr = priv->addr; in rswitch_etha_init()
1848 /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1. in rswitch_etha_init()
1852 etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; in rswitch_etha_init()
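Worked example of the PSMCS formula, assuming (hypothetically) a 320 MHz clock and the 2.5 MHz MDC rate the comment uses: 320 / (2.5 * 2) - 1 = 63. The /100000 and (25 * 2) factors are the comment's trick of multiplying numerator and denominator by 10 to stay in integer math:

    #include <assert.h>

    int main(void)
    {
        unsigned long clk_hz = 320000000UL;    /* hypothetical clk_get_rate() */
        unsigned long psmcs = clk_hz / 100000 / (25 * 2) - 1;

        assert(psmcs == 63);                   /* 320 / (2.5 * 2) - 1 = 63 */
        return 0;
    }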
1857 struct platform_device *pdev = priv->pdev; in rswitch_device_alloc()
1863 return -EINVAL; in rswitch_device_alloc()
1867 return -ENOMEM; in rswitch_device_alloc()
1869 SET_NETDEV_DEV(ndev, &pdev->dev); in rswitch_device_alloc()
1873 rdev->ndev = ndev; in rswitch_device_alloc()
1874 rdev->priv = priv; in rswitch_device_alloc()
1875 priv->rdev[index] = rdev; in rswitch_device_alloc()
1876 rdev->port = index; in rswitch_device_alloc()
1877 rdev->etha = &priv->etha[index]; in rswitch_device_alloc()
1878 rdev->addr = priv->addr; in rswitch_device_alloc()
1880 ndev->base_addr = (unsigned long)rdev->addr; in rswitch_device_alloc()
1881 snprintf(ndev->name, IFNAMSIZ, "tsn%d", index); in rswitch_device_alloc()
1882 ndev->netdev_ops = &rswitch_netdev_ops; in rswitch_device_alloc()
1883 ndev->ethtool_ops = &rswitch_ethtool_ops; in rswitch_device_alloc()
1884 ndev->max_mtu = RSWITCH_MAX_MTU; in rswitch_device_alloc()
1885 ndev->min_mtu = ETH_MIN_MTU; in rswitch_device_alloc()
1887 netif_napi_add(ndev, &rdev->napi, rswitch_poll); in rswitch_device_alloc()
1889 rdev->np_port = rswitch_get_port_node(rdev); in rswitch_device_alloc()
1890 rdev->disabled = !rdev->np_port; in rswitch_device_alloc()
1891 err = of_get_ethdev_address(rdev->np_port, ndev); in rswitch_device_alloc()
1892 of_node_put(rdev->np_port); in rswitch_device_alloc()
1894 if (is_valid_ether_addr(rdev->etha->mac_addr)) in rswitch_device_alloc()
1895 eth_hw_addr_set(ndev, rdev->etha->mac_addr); in rswitch_device_alloc()
1904 if (rdev->priv->gwca.speed < rdev->etha->speed) in rswitch_device_alloc()
1905 rdev->priv->gwca.speed = rdev->etha->speed; in rswitch_device_alloc()
1922 netif_napi_del(&rdev->napi); in rswitch_device_alloc()
1930 struct rswitch_device *rdev = priv->rdev[index]; in rswitch_device_free()
1931 struct net_device *ndev = rdev->ndev; in rswitch_device_free()
1935 netif_napi_del(&rdev->napi); in rswitch_device_free()
1949 rswitch_etha_read_mac_address(&priv->etha[i]); in rswitch_init()
1963 return -ENOMEM; in rswitch_init()
1972 for (; i-- > 0; ) in rswitch_init()
1980 err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT, in rswitch_init()
1981 clk_get_rate(priv->clk)); in rswitch_init()
2002 err = register_netdev(priv->rdev[i]->ndev); in rswitch_init()
2005 unregister_netdev(priv->rdev[i]->ndev); in rswitch_init()
2011 netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", in rswitch_init()
2012 priv->rdev[i]->ndev->dev_addr); in rswitch_init()
2025 rcar_gen4_ptp_unregister(priv->ptp_priv); in rswitch_init()
2054 dev_err(&pdev->dev, "invalid resource\n"); in renesas_eth_sw_probe()
2055 return -EINVAL; in renesas_eth_sw_probe()
2058 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); in renesas_eth_sw_probe()
2060 return -ENOMEM; in renesas_eth_sw_probe()
2061 spin_lock_init(&priv->lock); in renesas_eth_sw_probe()
2063 priv->clk = devm_clk_get(&pdev->dev, NULL); in renesas_eth_sw_probe()
2064 if (IS_ERR(priv->clk)) in renesas_eth_sw_probe()
2065 return PTR_ERR(priv->clk); in renesas_eth_sw_probe()
2069 priv->etha_no_runtime_change = true; in renesas_eth_sw_probe()
2071 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); in renesas_eth_sw_probe()
2072 if (!priv->ptp_priv) in renesas_eth_sw_probe()
2073 return -ENOMEM; in renesas_eth_sw_probe()
2076 priv->pdev = pdev; in renesas_eth_sw_probe()
2077 priv->addr = devm_ioremap_resource(&pdev->dev, res); in renesas_eth_sw_probe()
2078 if (IS_ERR(priv->addr)) in renesas_eth_sw_probe()
2079 return PTR_ERR(priv->addr); in renesas_eth_sw_probe()
2081 priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; in renesas_eth_sw_probe()
2083 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); in renesas_eth_sw_probe()
2085 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in renesas_eth_sw_probe()
2090 priv->gwca.index = AGENT_INDEX_GWCA; in renesas_eth_sw_probe()
2091 priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, in renesas_eth_sw_probe()
2093 priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, in renesas_eth_sw_probe()
2094 sizeof(*priv->gwca.queues), GFP_KERNEL); in renesas_eth_sw_probe()
2095 if (!priv->gwca.queues) in renesas_eth_sw_probe()
2096 return -ENOMEM; in renesas_eth_sw_probe()
2098 pm_runtime_enable(&pdev->dev); in renesas_eth_sw_probe()
2099 pm_runtime_get_sync(&pdev->dev); in renesas_eth_sw_probe()
2103 pm_runtime_put(&pdev->dev); in renesas_eth_sw_probe()
2104 pm_runtime_disable(&pdev->dev); in renesas_eth_sw_probe()
2108 device_set_wakeup_capable(&pdev->dev, 1); in renesas_eth_sw_probe()
2118 rcar_gen4_ptp_unregister(priv->ptp_priv); in rswitch_deinit()
2121 struct rswitch_device *rdev = priv->rdev[i]; in rswitch_deinit()
2123 unregister_netdev(rdev->ndev); in rswitch_deinit()
2125 phy_exit(priv->rdev[i]->serdes); in rswitch_deinit()
2143 pm_runtime_put(&pdev->dev); in renesas_eth_sw_remove()
2144 pm_runtime_disable(&pdev->dev); in renesas_eth_sw_remove()
2156 ndev = priv->rdev[i]->ndev; in renesas_eth_sw_suspend()
2161 if (priv->rdev[i]->serdes->init_count) in renesas_eth_sw_suspend()
2162 phy_exit(priv->rdev[i]->serdes); in renesas_eth_sw_suspend()
2175 phy_init(priv->rdev[i]->serdes); in renesas_eth_sw_resume()
2176 ndev = priv->rdev[i]->ndev; in renesas_eth_sw_resume()