Lines matching "r8a779f0-ether-serdes" (Renesas R-Switch Ethernet driver, drivers/net/ethernet/renesas/rswitch.c)
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/dma-mapping.h>
45 iowrite32(RRC_RR, priv->addr + RRC); in rswitch_reset()
46 iowrite32(RRC_RR_CLR, priv->addr + RRC); in rswitch_reset()
51 iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC); in rswitch_clock_enable()
56 iowrite32(RCDC_RCD, priv->addr + RCDC); in rswitch_clock_disable()
88 val = ioread32(priv->addr + CABPIRM); in rswitch_bpool_config()
92 iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM); in rswitch_bpool_config()
94 return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR); in rswitch_bpool_config()
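The CABPIRM lines above show the driver's recurring init pattern: write a "go" bit (CABPIRM_BPIOG), then poll until the hardware raises the matching "ready" bit (CABPIRM_BPR). A minimal sketch of what a rswitch_reg_wait()-style helper can look like, assuming the readl_poll_timeout() helper from <linux/iopoll.h> (the 10 ms budget below is illustrative, not taken from the driver):

#include <linux/iopoll.h>

/* Poll a register until (val & mask) == expected; returns 0 on success,
 * -ETIMEDOUT if the condition never becomes true within the budget.
 */
static int example_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
        u32 val;

        return readl_poll_timeout(addr + offs, val,
                                  (val & mask) == expected,
                                  10, 10000);
}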
99 iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0); in rswitch_coma_init()
102 /* R-Switch-2 block (TOP) */
108 iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i)); in rswitch_top_init()
114 u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0); in rswitch_fwd_init()
120 iowrite32(0, priv->addr + FWPC0(i)); in rswitch_fwd_init()
123 priv->addr + FWPC1(i)); in rswitch_fwd_init()
126 priv->addr + FWPC2(i)); in rswitch_fwd_init()
128 iowrite32(0, priv->addr + FWPBFC(i)); in rswitch_fwd_init()
134 rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV, in rswitch_fwd_init()
135 FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index))); in rswitch_fwd_init()
137 iowrite32(priv->rdev[i]->rx_queue->index, in rswitch_fwd_init()
138 priv->addr + FWPBFCSDC(GWCA_INDEX, i)); in rswitch_fwd_init()
142 rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE); in rswitch_fwd_init()
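FWPBFC_PBDV above is a multi-bit destination-vector field; FIELD_PREP() from <linux/bitfield.h> shifts a value into the position encoded by the field's mask, so BIT(priv->gwca.index) ends up naming the GWCA agent as the forwarding target. A self-contained illustration (the mask below is hypothetical, not the real FWPBFC_PBDV definition):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_PBDV    GENMASK(15, 0)  /* hypothetical destination-vector field */

/* Place "agent 1" into the field: FIELD_PREP(EXAMPLE_PBDV, BIT(1)) == 0x0002.
 * With a mask that does not start at bit 0, FIELD_PREP() would also apply
 * the required left shift.
 */
u32 pbdv = FIELD_PREP(EXAMPLE_PBDV, BIT(1));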
151 if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index)) in rswitch_gwca_change_mode()
152 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1); in rswitch_gwca_change_mode()
154 iowrite32(mode, priv->addr + GWMC); in rswitch_gwca_change_mode()
156 ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode); in rswitch_gwca_change_mode()
159 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0); in rswitch_gwca_change_mode()
166 iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM); in rswitch_gwca_mcast_table_reset()
168 return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR); in rswitch_gwca_mcast_table_reset()
173 iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM); in rswitch_gwca_axi_ram_reset()
175 return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR); in rswitch_gwca_axi_ram_reset()
180 u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits; in rswitch_is_any_data_irq()
196 dis[i] = ioread32(priv->addr + GWDIS(i)); in rswitch_get_data_irq_status()
197 dis[i] &= ioread32(priv->addr + GWDIE(i)); in rswitch_get_data_irq_status()
206 iowrite32(BIT(index % 32), priv->addr + offs); in rswitch_enadis_data_irq()
214 iowrite32(BIT(index % 32), priv->addr + offs); in rswitch_ack_data_irq()
220 unsigned int index = cur ? gq->cur : gq->dirty; in rswitch_next_queue_index()
222 if (index + num >= gq->ring_size) in rswitch_next_queue_index()
223 index = (index + num) % gq->ring_size; in rswitch_next_queue_index()
232 if (gq->cur >= gq->dirty) in rswitch_get_num_cur_queues()
233 return gq->cur - gq->dirty; in rswitch_get_num_cur_queues()
235 return gq->ring_size - gq->dirty + gq->cur; in rswitch_get_num_cur_queues()
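rswitch_get_num_cur_queues() is plain circular-ring occupancy: the distance from dirty (next descriptor to reclaim) to cur (next descriptor to use), with one wrap case. A worked example with a hypothetical ring_size of 8:

/* ring_size = 8 (hypothetical numbers) */
unsigned int cur = 6, dirty = 2;        /* no wrap:  6 - 2     = 4 in flight */
unsigned int cur2 = 2, dirty2 = 6;      /* wrapped:  8 - 6 + 2 = 4 in flight */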
240 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; in rswitch_is_queue_rxed()
242 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) in rswitch_is_queue_rxed()
255 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_alloc_rx_buf()
256 if (gq->rx_bufs[index]) in rswitch_gwca_queue_alloc_rx_buf()
258 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE); in rswitch_gwca_queue_alloc_rx_buf()
259 if (!gq->rx_bufs[index]) in rswitch_gwca_queue_alloc_rx_buf()
266 for (; i-- > 0; ) { in rswitch_gwca_queue_alloc_rx_buf()
267 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_alloc_rx_buf()
268 skb_free_frag(gq->rx_bufs[index]); in rswitch_gwca_queue_alloc_rx_buf()
269 gq->rx_bufs[index] = NULL; in rswitch_gwca_queue_alloc_rx_buf()
272 return -ENOMEM; in rswitch_gwca_queue_alloc_rx_buf()
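The "for (; i-- > 0; )" error path above is the usual partial-allocation unwind: walk back over the slots already filled (i-1 down to 0, skipping the slot that failed) and free them, so the caller sees either full success or no allocations at all. The same idiom in a generic sketch (alloc_one()/free_one() are placeholders):

for (i = 0; i < n; i++) {
        bufs[i] = alloc_one();
        if (!bufs[i])
                goto err;
}
return 0;

err:
for (; i-- > 0; ) {
        free_one(bufs[i]);
        bufs[i] = NULL;
}
return -ENOMEM;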
280 if (!gq->dir_tx) { in rswitch_gwca_queue_free()
281 dma_free_coherent(ndev->dev.parent, in rswitch_gwca_queue_free()
283 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma); in rswitch_gwca_queue_free()
284 gq->rx_ring = NULL; in rswitch_gwca_queue_free()
286 for (i = 0; i < gq->ring_size; i++) in rswitch_gwca_queue_free()
287 skb_free_frag(gq->rx_bufs[i]); in rswitch_gwca_queue_free()
288 kfree(gq->rx_bufs); in rswitch_gwca_queue_free()
289 gq->rx_bufs = NULL; in rswitch_gwca_queue_free()
291 dma_free_coherent(ndev->dev.parent, in rswitch_gwca_queue_free()
293 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma); in rswitch_gwca_queue_free()
294 gq->tx_ring = NULL; in rswitch_gwca_queue_free()
295 kfree(gq->skbs); in rswitch_gwca_queue_free()
296 gq->skbs = NULL; in rswitch_gwca_queue_free()
297 kfree(gq->unmap_addrs); in rswitch_gwca_queue_free()
298 gq->unmap_addrs = NULL; in rswitch_gwca_queue_free()
304 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_free()
306 dma_free_coherent(&priv->pdev->dev, in rswitch_gwca_ts_queue_free()
307 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1), in rswitch_gwca_ts_queue_free()
308 gq->ts_ring, gq->ring_dma); in rswitch_gwca_ts_queue_free()
309 gq->ts_ring = NULL; in rswitch_gwca_ts_queue_free()
319 gq->dir_tx = dir_tx; in rswitch_gwca_queue_alloc()
320 gq->ring_size = ring_size; in rswitch_gwca_queue_alloc()
321 gq->ndev = ndev; in rswitch_gwca_queue_alloc()
324 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
325 if (!gq->rx_bufs) in rswitch_gwca_queue_alloc()
326 return -ENOMEM; in rswitch_gwca_queue_alloc()
327 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) in rswitch_gwca_queue_alloc()
330 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, in rswitch_gwca_queue_alloc()
332 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_queue_alloc()
334 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
335 if (!gq->skbs) in rswitch_gwca_queue_alloc()
336 return -ENOMEM; in rswitch_gwca_queue_alloc()
337 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
338 if (!gq->unmap_addrs) in rswitch_gwca_queue_alloc()
340 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, in rswitch_gwca_queue_alloc()
342 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_queue_alloc()
345 if (!gq->rx_ring && !gq->tx_ring) in rswitch_gwca_queue_alloc()
348 i = gq->index / 32; in rswitch_gwca_queue_alloc()
349 bit = BIT(gq->index % 32); in rswitch_gwca_queue_alloc()
351 priv->gwca.tx_irq_bits[i] |= bit; in rswitch_gwca_queue_alloc()
353 priv->gwca.rx_irq_bits[i] |= bit; in rswitch_gwca_queue_alloc()
360 return -ENOMEM; in rswitch_gwca_queue_alloc()
365 desc->dptrl = cpu_to_le32(lower_32_bits(addr)); in rswitch_desc_set_dptr()
366 desc->dptrh = upper_32_bits(addr) & 0xff; in rswitch_desc_set_dptr()
371 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32; in rswitch_desc_get_dptr()
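rswitch_desc_set_dptr()/rswitch_desc_get_dptr() split a 40-bit bus address into a 32-bit dptrl plus an 8-bit dptrh, which is why probe requests DMA_BIT_MASK(40) first (line 2132 below). Worked round trip with a hypothetical address:

dma_addr_t addr = 0x123456789aULL;              /* hypothetical 40-bit address */
u32 dptrl = lower_32_bits(addr);                /* 0x3456789a */
u8  dptrh = upper_32_bits(addr) & 0xff;         /* 0x12 = bits 39..32 */
dma_addr_t back = (u64)dptrh << 32 | dptrl;     /* == 0x123456789a */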
378 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; in rswitch_gwca_queue_format()
384 memset(gq->tx_ring, 0, ring_size); in rswitch_gwca_queue_format()
385 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { in rswitch_gwca_queue_format()
386 if (!gq->dir_tx) { in rswitch_gwca_queue_format()
387 dma_addr = dma_map_single(ndev->dev.parent, in rswitch_gwca_queue_format()
388 gq->rx_bufs[i] + RSWITCH_HEADROOM, in rswitch_gwca_queue_format()
391 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in rswitch_gwca_queue_format()
394 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); in rswitch_gwca_queue_format()
395 rswitch_desc_set_dptr(&desc->desc, dma_addr); in rswitch_gwca_queue_format()
396 desc->desc.die_dt = DT_FEMPTY | DIE; in rswitch_gwca_queue_format()
398 desc->desc.die_dt = DT_EEMPTY | DIE; in rswitch_gwca_queue_format()
401 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_queue_format()
402 desc->desc.die_dt = DT_LINKFIX; in rswitch_gwca_queue_format()
404 linkfix = &priv->gwca.linkfix_table[gq->index]; in rswitch_gwca_queue_format()
405 linkfix->die_dt = DT_LINKFIX; in rswitch_gwca_queue_format()
406 rswitch_desc_set_dptr(linkfix, gq->ring_dma); in rswitch_gwca_queue_format()
408 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, in rswitch_gwca_queue_format()
409 priv->addr + GWDCC_OFFS(gq->index)); in rswitch_gwca_queue_format()
414 if (!gq->dir_tx) { in rswitch_gwca_queue_format()
415 for (desc = gq->tx_ring; i-- > 0; desc++) { in rswitch_gwca_queue_format()
416 dma_addr = rswitch_desc_get_dptr(&desc->desc); in rswitch_gwca_queue_format()
417 dma_unmap_single(ndev->dev.parent, dma_addr, in rswitch_gwca_queue_format()
422 return -ENOMEM; in rswitch_gwca_queue_format()
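The rings are allocated with ring_size + 1 entries: slots 0..ring_size-1 hold data descriptors, and the extra last slot is a DT_LINKFIX descriptor pointing back at the ring's own DMA base, so the hardware wraps to entry 0 on its own. A second DT_LINKFIX entry in the per-queue linkfix table gives the GWCA the ring's start address. Sketch of the layout (names as in the fragments above):

/*
 * linkfix_table[q]  --DT_LINKFIX-->  ring[0] ... ring[N-1]
 *                                    ring[N] --DT_LINKFIX--> ring[0]
 */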
429 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_fill()
434 index = (i + start_index) % gq->ring_size; in rswitch_gwca_ts_queue_fill()
435 desc = &gq->ts_ring[index]; in rswitch_gwca_ts_queue_fill()
436 desc->desc.die_dt = DT_FEMPTY_ND | DIE; in rswitch_gwca_ts_queue_fill()
451 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_ext_ts_fill()
452 desc = &gq->rx_ring[index]; in rswitch_gwca_queue_ext_ts_fill()
453 if (!gq->dir_tx) { in rswitch_gwca_queue_ext_ts_fill()
454 dma_addr = dma_map_single(ndev->dev.parent, in rswitch_gwca_queue_ext_ts_fill()
455 gq->rx_bufs[index] + RSWITCH_HEADROOM, in rswitch_gwca_queue_ext_ts_fill()
458 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in rswitch_gwca_queue_ext_ts_fill()
461 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE); in rswitch_gwca_queue_ext_ts_fill()
462 rswitch_desc_set_dptr(&desc->desc, dma_addr); in rswitch_gwca_queue_ext_ts_fill()
464 desc->desc.die_dt = DT_FEMPTY | DIE; in rswitch_gwca_queue_ext_ts_fill()
465 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index)); in rswitch_gwca_queue_ext_ts_fill()
467 desc->desc.die_dt = DT_EEMPTY | DIE; in rswitch_gwca_queue_ext_ts_fill()
474 if (!gq->dir_tx) { in rswitch_gwca_queue_ext_ts_fill()
475 for (; i-- > 0; ) { in rswitch_gwca_queue_ext_ts_fill()
476 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_ext_ts_fill()
477 desc = &gq->rx_ring[index]; in rswitch_gwca_queue_ext_ts_fill()
478 dma_addr = rswitch_desc_get_dptr(&desc->desc); in rswitch_gwca_queue_ext_ts_fill()
479 dma_unmap_single(ndev->dev.parent, dma_addr, in rswitch_gwca_queue_ext_ts_fill()
484 return -ENOMEM; in rswitch_gwca_queue_ext_ts_fill()
491 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; in rswitch_gwca_queue_ext_ts_format()
496 memset(gq->rx_ring, 0, ring_size); in rswitch_gwca_queue_ext_ts_format()
497 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); in rswitch_gwca_queue_ext_ts_format()
501 desc = &gq->rx_ring[gq->ring_size]; /* Last */ in rswitch_gwca_queue_ext_ts_format()
502 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_queue_ext_ts_format()
503 desc->desc.die_dt = DT_LINKFIX; in rswitch_gwca_queue_ext_ts_format()
505 linkfix = &priv->gwca.linkfix_table[gq->index]; in rswitch_gwca_queue_ext_ts_format()
506 linkfix->die_dt = DT_LINKFIX; in rswitch_gwca_queue_ext_ts_format()
507 rswitch_desc_set_dptr(linkfix, gq->ring_dma); in rswitch_gwca_queue_ext_ts_format()
509 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | in rswitch_gwca_queue_ext_ts_format()
511 priv->addr + GWDCC_OFFS(gq->index)); in rswitch_gwca_queue_ext_ts_format()
518 unsigned int i, num_queues = priv->gwca.num_queues; in rswitch_gwca_linkfix_alloc()
519 struct rswitch_gwca *gwca = &priv->gwca; in rswitch_gwca_linkfix_alloc()
520 struct device *dev = &priv->pdev->dev; in rswitch_gwca_linkfix_alloc()
522 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues; in rswitch_gwca_linkfix_alloc()
523 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size, in rswitch_gwca_linkfix_alloc()
524 &gwca->linkfix_table_dma, GFP_KERNEL); in rswitch_gwca_linkfix_alloc()
525 if (!gwca->linkfix_table) in rswitch_gwca_linkfix_alloc()
526 return -ENOMEM; in rswitch_gwca_linkfix_alloc()
528 gwca->linkfix_table[i].die_dt = DT_EOS; in rswitch_gwca_linkfix_alloc()
535 struct rswitch_gwca *gwca = &priv->gwca; in rswitch_gwca_linkfix_free()
537 if (gwca->linkfix_table) in rswitch_gwca_linkfix_free()
538 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size, in rswitch_gwca_linkfix_free()
539 gwca->linkfix_table, gwca->linkfix_table_dma); in rswitch_gwca_linkfix_free()
540 gwca->linkfix_table = NULL; in rswitch_gwca_linkfix_free()
545 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_alloc()
548 gq->ring_size = TS_RING_SIZE; in rswitch_gwca_ts_queue_alloc()
549 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, in rswitch_gwca_ts_queue_alloc()
551 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_ts_queue_alloc()
553 if (!gq->ts_ring) in rswitch_gwca_ts_queue_alloc()
554 return -ENOMEM; in rswitch_gwca_ts_queue_alloc()
557 desc = &gq->ts_ring[gq->ring_size]; in rswitch_gwca_ts_queue_alloc()
558 desc->desc.die_dt = DT_LINKFIX; in rswitch_gwca_ts_queue_alloc()
559 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_ts_queue_alloc()
569 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues); in rswitch_gwca_get()
570 if (index >= priv->gwca.num_queues) in rswitch_gwca_get()
572 set_bit(index, priv->gwca.used); in rswitch_gwca_get()
573 gq = &priv->gwca.queues[index]; in rswitch_gwca_get()
575 gq->index = index; in rswitch_gwca_get()
583 clear_bit(gq->index, priv->gwca.used); in rswitch_gwca_put()
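rswitch_gwca_get()/rswitch_gwca_put() implement a small bitmap allocator: find_first_zero_bit() locates a free queue index, set_bit() claims it, clear_bit() releases it. A standalone sketch of the same shape (capacity and names are illustrative; note the find/set pair is not atomic, so real callers must serialize, e.g. by only allocating during probe):

#include <linux/bitmap.h>

static DECLARE_BITMAP(slots_used, 64);  /* hypothetical capacity */

static int slot_get(void)
{
        unsigned int idx = find_first_zero_bit(slots_used, 64);

        if (idx >= 64)
                return -EBUSY;  /* all slots in use */
        set_bit(idx, slots_used);
        return idx;
}

static void slot_put(unsigned int idx)
{
        clear_bit(idx, slots_used);
}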
589 struct rswitch_private *priv = rdev->priv; in rswitch_txdmac_alloc()
592 rdev->tx_queue = rswitch_gwca_get(priv); in rswitch_txdmac_alloc()
593 if (!rdev->tx_queue) in rswitch_txdmac_alloc()
594 return -EBUSY; in rswitch_txdmac_alloc()
596 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE); in rswitch_txdmac_alloc()
598 rswitch_gwca_put(priv, rdev->tx_queue); in rswitch_txdmac_alloc()
609 rswitch_gwca_queue_free(ndev, rdev->tx_queue); in rswitch_txdmac_free()
610 rswitch_gwca_put(rdev->priv, rdev->tx_queue); in rswitch_txdmac_free()
615 struct rswitch_device *rdev = priv->rdev[index]; in rswitch_txdmac_init()
617 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue); in rswitch_txdmac_init()
623 struct rswitch_private *priv = rdev->priv; in rswitch_rxdmac_alloc()
626 rdev->rx_queue = rswitch_gwca_get(priv); in rswitch_rxdmac_alloc()
627 if (!rdev->rx_queue) in rswitch_rxdmac_alloc()
628 return -EBUSY; in rswitch_rxdmac_alloc()
630 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE); in rswitch_rxdmac_alloc()
632 rswitch_gwca_put(priv, rdev->rx_queue); in rswitch_rxdmac_alloc()
643 rswitch_gwca_queue_free(ndev, rdev->rx_queue); in rswitch_rxdmac_free()
644 rswitch_gwca_put(rdev->priv, rdev->rx_queue); in rswitch_rxdmac_free()
649 struct rswitch_device *rdev = priv->rdev[index]; in rswitch_rxdmac_init()
650 struct net_device *ndev = rdev->ndev; in rswitch_rxdmac_init()
652 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue); in rswitch_rxdmac_init()
674 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC); in rswitch_gwca_hw_init()
675 iowrite32(0, priv->addr + GWTTFC); in rswitch_gwca_hw_init()
676 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1); in rswitch_gwca_hw_init()
677 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0); in rswitch_gwca_hw_init()
678 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10); in rswitch_gwca_hw_init()
679 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00); in rswitch_gwca_hw_init()
681 priv->addr + GWMDNC); in rswitch_gwca_hw_init()
682 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0); in rswitch_gwca_hw_init()
684 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0); in rswitch_gwca_hw_init()
719 priv->gwca_halt = true; in rswitch_gwca_halt()
721 dev_err(&priv->pdev->dev, "halted (%d)\n", err); in rswitch_gwca_halt()
730 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc); in rswitch_rx_handle_desc()
731 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; in rswitch_rx_handle_desc()
732 u8 die_dt = desc->desc.die_dt & DT_MASK; in rswitch_rx_handle_desc()
735 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE, in rswitch_rx_handle_desc()
739 * - FSINGLE in rswitch_rx_handle_desc()
740 * - FSTART -> FEND in rswitch_rx_handle_desc()
741 * - FSTART -> FMID -> FEND in rswitch_rx_handle_desc()
748 if (gq->skb_fstart) { in rswitch_rx_handle_desc()
749 dev_kfree_skb_any(gq->skb_fstart); in rswitch_rx_handle_desc()
750 gq->skb_fstart = NULL; in rswitch_rx_handle_desc()
751 ndev->stats.rx_dropped++; in rswitch_rx_handle_desc()
756 if (!gq->skb_fstart) { in rswitch_rx_handle_desc()
757 ndev->stats.rx_dropped++; in rswitch_rx_handle_desc()
769 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE); in rswitch_rx_handle_desc()
773 gq->pkt_len = pkt_len; in rswitch_rx_handle_desc()
775 gq->skb_fstart = skb; in rswitch_rx_handle_desc()
782 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags, in rswitch_rx_handle_desc()
783 virt_to_page(gq->rx_bufs[gq->cur]), in rswitch_rx_handle_desc()
784 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM, in rswitch_rx_handle_desc()
787 skb = gq->skb_fstart; in rswitch_rx_handle_desc()
788 gq->skb_fstart = NULL; in rswitch_rx_handle_desc()
790 gq->pkt_len += pkt_len; in rswitch_rx_handle_desc()
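rswitch_rx_handle_desc() reassembles the three descriptor patterns listed in the comment above: a DT_FSINGLE frame becomes an skb directly, while FSTART/FMID/FEND chains open a head skb and attach each continuation buffer as a page fragment. Condensed sketch of the two paths, reusing the headroom/size constants from the fragments (error handling elided):

/* Single descriptor: wrap the DMA buffer without copying. */
skb = build_skb(buf, RSWITCH_BUF_SIZE);
skb_reserve(skb, RSWITCH_HEADROOM);
skb_put(skb, pkt_len);

/* FMID/FEND continuation: append the buffer as a paged fragment. */
skb_add_rx_frag(head_skb, skb_shinfo(head_skb)->nr_frags,
                virt_to_page(buf),
                offset_in_page(buf) + RSWITCH_HEADROOM,
                pkt_len, RSWITCH_BUF_SIZE);     /* last arg: truesize */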
803 struct rswitch_gwca_queue *gq = rdev->rx_queue; in rswitch_rx()
813 boguscnt = min_t(int, gq->ring_size, *quota); in rswitch_rx()
816 desc = &gq->rx_ring[gq->cur]; in rswitch_rx()
817 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { in rswitch_rx()
823 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; in rswitch_rx()
830 ts.tv_sec = __le32_to_cpu(desc->ts_sec); in rswitch_rx()
831 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); in rswitch_rx()
832 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); in rswitch_rx()
834 skb->protocol = eth_type_trans(skb, ndev); in rswitch_rx()
835 napi_gro_receive(&rdev->napi, skb); in rswitch_rx()
836 rdev->ndev->stats.rx_packets++; in rswitch_rx()
837 rdev->ndev->stats.rx_bytes += gq->pkt_len; in rswitch_rx()
840 gq->rx_bufs[gq->cur] = NULL; in rswitch_rx()
841 gq->cur = rswitch_next_queue_index(gq, true, 1); in rswitch_rx()
842 desc = &gq->rx_ring[gq->cur]; in rswitch_rx()
844 if (--boguscnt <= 0) in rswitch_rx()
849 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num); in rswitch_rx()
852 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); in rswitch_rx()
855 gq->dirty = rswitch_next_queue_index(gq, false, num); in rswitch_rx()
857 *quota -= limit - boguscnt; in rswitch_rx()
862 rswitch_gwca_halt(rdev->priv); in rswitch_rx()
870 struct rswitch_gwca_queue *gq = rdev->tx_queue; in rswitch_tx_free()
874 desc = &gq->tx_ring[gq->dirty]; in rswitch_tx_free()
875 while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) { in rswitch_tx_free()
878 skb = gq->skbs[gq->dirty]; in rswitch_tx_free()
880 rdev->ndev->stats.tx_packets++; in rswitch_tx_free()
881 rdev->ndev->stats.tx_bytes += skb->len; in rswitch_tx_free()
882 dma_unmap_single(ndev->dev.parent, in rswitch_tx_free()
883 gq->unmap_addrs[gq->dirty], in rswitch_tx_free()
884 skb->len, DMA_TO_DEVICE); in rswitch_tx_free()
885 dev_kfree_skb_any(gq->skbs[gq->dirty]); in rswitch_tx_free()
886 gq->skbs[gq->dirty] = NULL; in rswitch_tx_free()
889 desc->desc.die_dt = DT_EEMPTY; in rswitch_tx_free()
890 gq->dirty = rswitch_next_queue_index(gq, false, 1); in rswitch_tx_free()
891 desc = &gq->tx_ring[gq->dirty]; in rswitch_tx_free()
897 struct net_device *ndev = napi->dev; in rswitch_poll()
904 priv = rdev->priv; in rswitch_poll()
911 else if (rdev->priv->gwca_halt) in rswitch_poll()
913 else if (rswitch_is_queue_rxed(rdev->rx_queue)) in rswitch_poll()
918 if (napi_complete_done(napi, budget - quota)) { in rswitch_poll()
919 spin_lock_irqsave(&priv->lock, flags); in rswitch_poll()
920 if (test_bit(rdev->port, priv->opened_ports)) { in rswitch_poll()
921 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); in rswitch_poll()
922 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); in rswitch_poll()
924 spin_unlock_irqrestore(&priv->lock, flags); in rswitch_poll()
928 return budget - quota; in rswitch_poll()
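rswitch_poll() follows the standard NAPI contract: process up to budget packets with the data interrupts masked, and only if the budget was not exhausted complete NAPI and unmask the per-queue interrupts (here under priv->lock, and only while the port is still open). The skeleton of that contract, with the driver-specific work behind placeholder helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
        int done = do_rx_tx_work(napi, budget); /* placeholder for rswitch_rx()/rswitch_tx_free() */

        if (done < budget && napi_complete_done(napi, done))
                reenable_data_irqs(napi);       /* placeholder for rswitch_enadis_data_irq(..., true) */

        return done;
}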
940 if (napi_schedule_prep(&rdev->napi)) { in rswitch_queue_interrupt()
941 spin_lock(&rdev->priv->lock); in rswitch_queue_interrupt()
942 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); in rswitch_queue_interrupt()
943 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); in rswitch_queue_interrupt()
944 spin_unlock(&rdev->priv->lock); in rswitch_queue_interrupt()
945 __napi_schedule(&rdev->napi); in rswitch_queue_interrupt()
954 for (i = 0; i < priv->gwca.num_queues; i++) { in rswitch_data_irq()
955 gq = &priv->gwca.queues[i]; in rswitch_data_irq()
956 index = gq->index / 32; in rswitch_data_irq()
957 bit = BIT(gq->index % 32); in rswitch_data_irq()
961 rswitch_ack_data_irq(priv, gq->index); in rswitch_data_irq()
962 rswitch_queue_interrupt(gq->ndev); in rswitch_data_irq()
991 return -ENOMEM; in rswitch_gwca_request_irqs()
993 irq = platform_get_irq_byname(priv->pdev, resource_name); in rswitch_gwca_request_irqs()
998 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, in rswitch_gwca_request_irqs()
1001 return -ENOMEM; in rswitch_gwca_request_irqs()
1003 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq, in rswitch_gwca_request_irqs()
1014 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_ts()
1023 desc = &gq->ts_ring[gq->cur]; in rswitch_ts()
1024 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) { in rswitch_ts()
1027 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl)); in rswitch_ts()
1030 rdev = priv->rdev[port]; in rswitch_ts()
1032 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl)); in rswitch_ts()
1035 ts_skb = xchg(&rdev->ts_skb[tag], NULL); in rswitch_ts()
1036 smp_mb(); /* order rdev->ts_skb[] read before bitmap update */ in rswitch_ts()
1037 clear_bit(tag, rdev->ts_skb_used); in rswitch_ts()
1043 ts.tv_sec = __le32_to_cpu(desc->ts_sec); in rswitch_ts()
1044 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); in rswitch_ts()
1050 gq->cur = rswitch_next_queue_index(gq, true, 1); in rswitch_ts()
1051 desc = &gq->ts_ring[gq->cur]; in rswitch_ts()
1055 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); in rswitch_ts()
1056 gq->dirty = rswitch_next_queue_index(gq, false, num); in rswitch_ts()
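The TX timestamp path here pairs with the allocation in rswitch_ext_desc_set_info1() (lines 1651-1656 below): xmit finds a free tag, publishes the skb into ts_skb[tag], then sets the bit; completion xchg()es the skb out before clearing the bit, and each side's smp_mb() keeps the slot access and the bitmap update in that order. Side by side, as a sketch:

/* xmit (allocate tag)                     completion/stop (release tag)
 *  tag = find_first_zero_bit(used, N);     skb = xchg(&ts_skb[tag], NULL);
 *  smp_mb();  // bitmap read first         smp_mb();  // slot read first
 *  ts_skb[tag] = skb_get(skb);             clear_bit(tag, used);
 *  set_bit(tag, used);
 */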
1063 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) { in rswitch_gwca_ts_irq()
1064 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS); in rswitch_gwca_ts_irq()
1077 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME); in rswitch_gwca_ts_request_irqs()
1081 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq, in rswitch_gwca_ts_request_irqs()
1091 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index)) in rswitch_etha_change_mode()
1092 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1); in rswitch_etha_change_mode()
1094 iowrite32(mode, etha->addr + EAMC); in rswitch_etha_change_mode()
1096 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode); in rswitch_etha_change_mode()
1099 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0); in rswitch_etha_change_mode()
1106 u32 mrmac0 = ioread32(etha->addr + MRMAC0); in rswitch_etha_read_mac_address()
1107 u32 mrmac1 = ioread32(etha->addr + MRMAC1); in rswitch_etha_read_mac_address()
1108 u8 *mac = &etha->mac_addr[0]; in rswitch_etha_read_mac_address()
1120 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0); in rswitch_etha_write_mac_address()
1122 etha->addr + MRMAC1); in rswitch_etha_write_mac_address()
1127 iowrite32(MLVC_PLV, etha->addr + MLVC); in rswitch_etha_wait_link_verification()
1129 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0); in rswitch_etha_wait_link_verification()
1138 switch (etha->phy_interface) { in rswitch_rmac_setting()
1147 pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC)); in rswitch_rmac_setting()
1151 switch (etha->speed) { in rswitch_rmac_setting()
1162 lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC)); in rswitch_rmac_setting()
1166 rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC, in rswitch_rmac_setting()
1172 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT, in rswitch_etha_enable_mii()
1173 FIELD_PREP(MPIC_PSMCS, etha->psmcs) | in rswitch_etha_enable_mii()
1188 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC); in rswitch_etha_hw_init()
1217 iowrite32(val, etha->addr + MPSM); in rswitch_etha_mpsm_op()
1219 ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0); in rswitch_etha_mpsm_op()
1224 val = ioread32(etha->addr + MPSM); in rswitch_etha_mpsm_op()
1234 struct rswitch_etha *etha = bus->priv; in rswitch_etha_mii_read_c45()
1249 struct rswitch_etha *etha = bus->priv; in rswitch_etha_mii_write_c45()
1263 struct rswitch_etha *etha = bus->priv; in rswitch_etha_mii_read_c22()
1272 struct rswitch_etha *etha = bus->priv; in rswitch_etha_mii_write_c22()
1285 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node, in rswitch_get_port_node()
1286 "ethernet-ports"); in rswitch_get_port_node()
1296 if (index == rdev->etha->index) in rswitch_get_port_node()
1311 if (!rdev->np_port) in rswitch_etha_get_params()
1314 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface); in rswitch_etha_get_params()
1318 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed); in rswitch_etha_get_params()
1320 rdev->etha->speed = max_speed; in rswitch_etha_get_params()
1324 /* if no "max-speed" property, let's use default speed */ in rswitch_etha_get_params()
1325 switch (rdev->etha->phy_interface) { in rswitch_etha_get_params()
1327 rdev->etha->speed = SPEED_100; in rswitch_etha_get_params()
1330 rdev->etha->speed = SPEED_1000; in rswitch_etha_get_params()
1333 rdev->etha->speed = SPEED_2500; in rswitch_etha_get_params()
1336 return -EINVAL; in rswitch_etha_get_params()
1350 return -ENOMEM; in rswitch_mii_register()
1352 mii_bus->name = "rswitch_mii"; in rswitch_mii_register()
1353 sprintf(mii_bus->id, "etha%d", rdev->etha->index); in rswitch_mii_register()
1354 mii_bus->priv = rdev->etha; in rswitch_mii_register()
1355 mii_bus->read_c45 = rswitch_etha_mii_read_c45; in rswitch_mii_register()
1356 mii_bus->write_c45 = rswitch_etha_mii_write_c45; in rswitch_mii_register()
1357 mii_bus->read = rswitch_etha_mii_read_c22; in rswitch_mii_register()
1358 mii_bus->write = rswitch_etha_mii_write_c22; in rswitch_mii_register()
1359 mii_bus->parent = &rdev->priv->pdev->dev; in rswitch_mii_register()
1361 mdio_np = of_get_child_by_name(rdev->np_port, "mdio"); in rswitch_mii_register()
1368 rdev->etha->mii = mii_bus; in rswitch_mii_register()
1378 if (rdev->etha->mii) { in rswitch_mii_unregister()
1379 mdiobus_unregister(rdev->etha->mii); in rswitch_mii_unregister()
1380 mdiobus_free(rdev->etha->mii); in rswitch_mii_unregister()
1381 rdev->etha->mii = NULL; in rswitch_mii_unregister()
1388 struct phy_device *phydev = ndev->phydev; in rswitch_adjust_link()
1390 if (phydev->link != rdev->etha->link) { in rswitch_adjust_link()
1392 if (phydev->link) in rswitch_adjust_link()
1393 phy_power_on(rdev->serdes); in rswitch_adjust_link()
1394 else if (rdev->serdes->power_count) in rswitch_adjust_link()
1395 phy_power_off(rdev->serdes); in rswitch_adjust_link()
1397 rdev->etha->link = phydev->link; in rswitch_adjust_link()
1399 if (!rdev->priv->etha_no_runtime_change && in rswitch_adjust_link()
1400 phydev->speed != rdev->etha->speed) { in rswitch_adjust_link()
1401 rdev->etha->speed = phydev->speed; in rswitch_adjust_link()
1403 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); in rswitch_adjust_link()
1404 phy_set_speed(rdev->serdes, rdev->etha->speed); in rswitch_adjust_link()
1412 if (!rdev->priv->etha_no_runtime_change) in rswitch_phy_remove_link_mode()
1415 switch (rdev->etha->speed) { in rswitch_phy_remove_link_mode()
1432 phy_set_max_speed(phydev, rdev->etha->speed); in rswitch_phy_remove_link_mode()
1439 int err = -ENOENT; in rswitch_phy_device_init()
1441 if (!rdev->np_port) in rswitch_phy_device_init()
1442 return -ENODEV; in rswitch_phy_device_init()
1444 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0); in rswitch_phy_device_init()
1446 return -ENODEV; in rswitch_phy_device_init()
1448 /* Set phydev->host_interfaces before calling of_phy_connect() to in rswitch_phy_device_init()
1454 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); in rswitch_phy_device_init()
1455 phydev->mac_managed_pm = true; in rswitch_phy_device_init()
1457 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, in rswitch_phy_device_init()
1458 rdev->etha->phy_interface); in rswitch_phy_device_init()
1480 if (rdev->ndev->phydev) in rswitch_phy_device_deinit()
1481 phy_disconnect(rdev->ndev->phydev); in rswitch_phy_device_deinit()
1488 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET, in rswitch_serdes_set_params()
1489 rdev->etha->phy_interface); in rswitch_serdes_set_params()
1493 return phy_set_speed(rdev->serdes, rdev->etha->speed); in rswitch_serdes_set_params()
1500 if (!rdev->etha->operated) { in rswitch_ether_port_init_one()
1501 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); in rswitch_ether_port_init_one()
1504 if (rdev->priv->etha_no_runtime_change) in rswitch_ether_port_init_one()
1505 rdev->etha->operated = true; in rswitch_ether_port_init_one()
1516 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL); in rswitch_ether_port_init_one()
1517 if (IS_ERR(rdev->serdes)) { in rswitch_ether_port_init_one()
1518 err = PTR_ERR(rdev->serdes); in rswitch_ether_port_init_one()
1550 err = rswitch_ether_port_init_one(priv->rdev[i]); in rswitch_ether_port_init_all()
1556 err = phy_init(priv->rdev[i]->serdes); in rswitch_ether_port_init_all()
1565 phy_exit(priv->rdev[i]->serdes); in rswitch_ether_port_init_all()
1570 rswitch_ether_port_deinit_one(priv->rdev[i]); in rswitch_ether_port_init_all()
1580 phy_exit(priv->rdev[i]->serdes); in rswitch_ether_port_deinit_all()
1581 rswitch_ether_port_deinit_one(priv->rdev[i]); in rswitch_ether_port_deinit_all()
1590 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) in rswitch_open()
1591 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); in rswitch_open()
1593 napi_enable(&rdev->napi); in rswitch_open()
1595 spin_lock_irqsave(&rdev->priv->lock, flags); in rswitch_open()
1596 bitmap_set(rdev->priv->opened_ports, rdev->port, 1); in rswitch_open()
1597 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); in rswitch_open()
1598 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); in rswitch_open()
1599 spin_unlock_irqrestore(&rdev->priv->lock, flags); in rswitch_open()
1601 phy_start(ndev->phydev); in rswitch_open()
1617 phy_stop(ndev->phydev); in rswitch_stop()
1619 spin_lock_irqsave(&rdev->priv->lock, flags); in rswitch_stop()
1620 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); in rswitch_stop()
1621 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); in rswitch_stop()
1622 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); in rswitch_stop()
1623 spin_unlock_irqrestore(&rdev->priv->lock, flags); in rswitch_stop()
1625 napi_disable(&rdev->napi); in rswitch_stop()
1627 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) in rswitch_stop()
1628 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); in rswitch_stop()
1630 for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); in rswitch_stop()
1632 tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) { in rswitch_stop()
1633 ts_skb = xchg(&rdev->ts_skb[tag], NULL); in rswitch_stop()
1634 clear_bit(tag, rdev->ts_skb_used); in rswitch_stop()
1646 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | in rswitch_ext_desc_set_info1()
1648 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in rswitch_ext_desc_set_info1()
1651 tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT); in rswitch_ext_desc_set_info1()
1654 smp_mb(); /* order bitmap read before rdev->ts_skb[] write */ in rswitch_ext_desc_set_info1()
1655 rdev->ts_skb[tag] = skb_get(skb); in rswitch_ext_desc_set_info1()
1656 set_bit(tag, rdev->ts_skb_used); in rswitch_ext_desc_set_info1()
1658 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in rswitch_ext_desc_set_info1()
1659 desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC); in rswitch_ext_desc_set_info1()
1672 rswitch_desc_set_dptr(&desc->desc, dma_addr); in rswitch_ext_desc_set()
1673 desc->desc.info_ds = cpu_to_le16(len); in rswitch_ext_desc_set()
1679 desc->desc.die_dt = die_dt; in rswitch_ext_desc_set()
1690 if (nr_desc - 1 == index) in rswitch_ext_desc_get_die_dt()
1712 struct rswitch_gwca_queue *gq = rdev->tx_queue; in rswitch_start_xmit()
1720 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1; in rswitch_start_xmit()
1721 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { in rswitch_start_xmit()
1729 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); in rswitch_start_xmit()
1730 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) in rswitch_start_xmit()
1734 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; in rswitch_start_xmit()
1735 gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig; in rswitch_start_xmit()
1740 for (i = nr_desc; i-- > 0; ) { in rswitch_start_xmit()
1741 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)]; in rswitch_start_xmit()
1744 len = rswitch_ext_desc_get_len(die_dt, skb->len); in rswitch_start_xmit()
1749 gq->cur = rswitch_next_queue_index(gq, true, nr_desc); in rswitch_start_xmit()
1750 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); in rswitch_start_xmit()
1755 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; in rswitch_start_xmit()
1756 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); in rswitch_start_xmit()
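The descriptor count at line 1720 above is DIV_ROUND_UP in disguise: (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1 splits the frame into full-size chunks plus a remainder. With a hypothetical 2048-byte RSWITCH_DESC_BUF_SIZE, a 3000-byte skb needs (3000 - 1) / 2048 + 1 = 2 descriptors. The skb and unmap address are recorded at the slot of the last descriptor, (gq->cur + nr_desc - 1) % gq->ring_size, because that entry's completion marks the whole frame as transmitted, and it is the slot rswitch_tx_free() inspects when reclaiming.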
1766 return &ndev->stats; in rswitch_get_stats()
1775 ptp_priv = rdev->priv->ptp_priv; in rswitch_hwstamp_get()
1778 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : in rswitch_hwstamp_get()
1780 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { in rswitch_hwstamp_get()
1792 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; in rswitch_hwstamp_get()
1802 if (copy_from_user(&config, req->ifr_data, sizeof(config))) in rswitch_hwstamp_set()
1803 return -EFAULT; in rswitch_hwstamp_set()
1806 return -EINVAL; in rswitch_hwstamp_set()
1816 return -ERANGE; in rswitch_hwstamp_set()
1832 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; in rswitch_hwstamp_set()
1833 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; in rswitch_hwstamp_set()
1835 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; in rswitch_hwstamp_set()
1841 return -EINVAL; in rswitch_eth_ioctl()
1849 return phy_mii_ioctl(ndev->phydev, req, cmd); in rswitch_eth_ioctl()
1867 info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock); in rswitch_get_ts_info()
1868 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | in rswitch_get_ts_info()
1872 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); in rswitch_get_ts_info()
1873 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); in rswitch_get_ts_info()
1885 { .compatible = "renesas,r8a779f0-ether-switch", },
1892 struct rswitch_etha *etha = &priv->etha[index]; in rswitch_etha_init()
1895 etha->index = index; in rswitch_etha_init()
1896 etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; in rswitch_etha_init()
1897 etha->coma_addr = priv->addr; in rswitch_etha_init()
1899 /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1. in rswitch_etha_init()
1903 etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; in rswitch_etha_init()
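The scaling in the comment keeps this in integer math: dividing the clock rate by 100000 yields tenths of MHz, and the denominator 25 * 2 is the 2.5 MHz MDC frequency times two, also scaled by ten. Worked example with a hypothetical 320 MHz peripheral clock: 320000000 / 100000 = 3200, 3200 / 50 = 64, minus 1 gives PSMCS = 63, i.e. 320 MHz / (2.5 MHz * 2) - 1.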
1908 struct platform_device *pdev = priv->pdev; in rswitch_device_alloc()
1914 return -EINVAL; in rswitch_device_alloc()
1918 return -ENOMEM; in rswitch_device_alloc()
1920 SET_NETDEV_DEV(ndev, &pdev->dev); in rswitch_device_alloc()
1924 rdev->ndev = ndev; in rswitch_device_alloc()
1925 rdev->priv = priv; in rswitch_device_alloc()
1926 priv->rdev[index] = rdev; in rswitch_device_alloc()
1927 rdev->port = index; in rswitch_device_alloc()
1928 rdev->etha = &priv->etha[index]; in rswitch_device_alloc()
1929 rdev->addr = priv->addr; in rswitch_device_alloc()
1931 ndev->base_addr = (unsigned long)rdev->addr; in rswitch_device_alloc()
1932 snprintf(ndev->name, IFNAMSIZ, "tsn%d", index); in rswitch_device_alloc()
1933 ndev->netdev_ops = &rswitch_netdev_ops; in rswitch_device_alloc()
1934 ndev->ethtool_ops = &rswitch_ethtool_ops; in rswitch_device_alloc()
1935 ndev->max_mtu = RSWITCH_MAX_MTU; in rswitch_device_alloc()
1936 ndev->min_mtu = ETH_MIN_MTU; in rswitch_device_alloc()
1938 netif_napi_add(ndev, &rdev->napi, rswitch_poll); in rswitch_device_alloc()
1940 rdev->np_port = rswitch_get_port_node(rdev); in rswitch_device_alloc()
1941 rdev->disabled = !rdev->np_port; in rswitch_device_alloc()
1942 err = of_get_ethdev_address(rdev->np_port, ndev); in rswitch_device_alloc()
1944 if (is_valid_ether_addr(rdev->etha->mac_addr)) in rswitch_device_alloc()
1945 eth_hw_addr_set(ndev, rdev->etha->mac_addr); in rswitch_device_alloc()
1969 of_node_put(rdev->np_port); in rswitch_device_alloc()
1970 netif_napi_del(&rdev->napi); in rswitch_device_alloc()
1978 struct rswitch_device *rdev = priv->rdev[index]; in rswitch_device_free()
1979 struct net_device *ndev = rdev->ndev; in rswitch_device_free()
1983 of_node_put(rdev->np_port); in rswitch_device_free()
1984 netif_napi_del(&rdev->napi); in rswitch_device_free()
1998 rswitch_etha_read_mac_address(&priv->etha[i]); in rswitch_init()
2012 return -ENOMEM; in rswitch_init()
2021 for (; i-- > 0; ) in rswitch_init()
2029 err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT, in rswitch_init()
2030 clk_get_rate(priv->clk)); in rswitch_init()
2051 err = register_netdev(priv->rdev[i]->ndev); in rswitch_init()
2054 unregister_netdev(priv->rdev[i]->ndev); in rswitch_init()
2060 netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", in rswitch_init()
2061 priv->rdev[i]->ndev->dev_addr); in rswitch_init()
2074 rcar_gen4_ptp_unregister(priv->ptp_priv); in rswitch_init()
2090 { .soc_id = "r8a779f0", .revision = "ES1.0" },
2103 dev_err(&pdev->dev, "invalid resource\n"); in renesas_eth_sw_probe()
2104 return -EINVAL; in renesas_eth_sw_probe()
2107 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); in renesas_eth_sw_probe()
2109 return -ENOMEM; in renesas_eth_sw_probe()
2110 spin_lock_init(&priv->lock); in renesas_eth_sw_probe()
2112 priv->clk = devm_clk_get(&pdev->dev, NULL); in renesas_eth_sw_probe()
2113 if (IS_ERR(priv->clk)) in renesas_eth_sw_probe()
2114 return PTR_ERR(priv->clk); in renesas_eth_sw_probe()
2118 priv->etha_no_runtime_change = true; in renesas_eth_sw_probe()
2120 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); in renesas_eth_sw_probe()
2121 if (!priv->ptp_priv) in renesas_eth_sw_probe()
2122 return -ENOMEM; in renesas_eth_sw_probe()
2125 priv->pdev = pdev; in renesas_eth_sw_probe()
2126 priv->addr = devm_ioremap_resource(&pdev->dev, res); in renesas_eth_sw_probe()
2127 if (IS_ERR(priv->addr)) in renesas_eth_sw_probe()
2128 return PTR_ERR(priv->addr); in renesas_eth_sw_probe()
2130 priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; in renesas_eth_sw_probe()
2132 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); in renesas_eth_sw_probe()
2134 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in renesas_eth_sw_probe()
2139 priv->gwca.index = AGENT_INDEX_GWCA; in renesas_eth_sw_probe()
2140 priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, in renesas_eth_sw_probe()
2142 priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, in renesas_eth_sw_probe()
2143 sizeof(*priv->gwca.queues), GFP_KERNEL); in renesas_eth_sw_probe()
2144 if (!priv->gwca.queues) in renesas_eth_sw_probe()
2145 return -ENOMEM; in renesas_eth_sw_probe()
2147 pm_runtime_enable(&pdev->dev); in renesas_eth_sw_probe()
2148 pm_runtime_get_sync(&pdev->dev); in renesas_eth_sw_probe()
2152 pm_runtime_put(&pdev->dev); in renesas_eth_sw_probe()
2153 pm_runtime_disable(&pdev->dev); in renesas_eth_sw_probe()
2157 device_set_wakeup_capable(&pdev->dev, 1); in renesas_eth_sw_probe()
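The DMA mask setup at lines 2132-2134 above is the standard wide-then-narrow fallback: first ask for 40-bit addressing to match the 40-bit descriptor pointers, and settle for 32-bit if the platform cannot provide it. Fleshed out with the error check the excerpt elides (a sketch; the surrounding probe code is as above):

ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret) {
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;     /* no usable DMA addressing at all */
}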
2167 rcar_gen4_ptp_unregister(priv->ptp_priv); in rswitch_deinit()
2170 struct rswitch_device *rdev = priv->rdev[i]; in rswitch_deinit()
2172 unregister_netdev(rdev->ndev); in rswitch_deinit()
2174 phy_exit(priv->rdev[i]->serdes); in rswitch_deinit()
2192 pm_runtime_put(&pdev->dev); in renesas_eth_sw_remove()
2193 pm_runtime_disable(&pdev->dev); in renesas_eth_sw_remove()
2205 ndev = priv->rdev[i]->ndev; in renesas_eth_sw_suspend()
2210 if (priv->rdev[i]->serdes->init_count) in renesas_eth_sw_suspend()
2211 phy_exit(priv->rdev[i]->serdes); in renesas_eth_sw_suspend()
2224 phy_init(priv->rdev[i]->serdes); in renesas_eth_sw_resume()
2225 ndev = priv->rdev[i]->ndev; in renesas_eth_sw_resume()