Lines Matching full:lp
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i) in axienet_get_rx_desc() argument
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)]; in axienet_get_rx_desc()
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i) in axienet_get_tx_desc() argument
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)]; in axienet_get_tx_desc()
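Note: both accessors index their skb rings with `i & (size - 1)`, which is valid only because RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX are powers of two; the ring head/tail counters can then run free and the mask performs the modulo. A minimal userspace sketch of the same idiom (the entry type and RING_SIZE below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64    /* must be a power of two for the mask trick */

struct entry { int payload; };
static struct entry ring[RING_SIZE];

/* Free-running index: the mask keeps only the low bits, so it wraps. */
static struct entry *ring_at(uint32_t i)
{
        return &ring[i & (RING_SIZE - 1)];
}

int main(void)
{
        uint32_t head = 0;

        for (int n = 0; n < 200; n++)
                ring_at(head++)->payload = n;   /* overwrites old slots after 64 */

        printf("slot 0 now holds %d\n", ring_at(0)->payload);  /* 192 */
        return 0;
}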
145 * @lp: Pointer to axienet local structure
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) in axienet_dma_in32() argument
154 return ioread32(lp->dma_regs + reg); in axienet_dma_in32()
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, in desc_set_phys_addr() argument
161 if (lp->features & XAE_FEATURE_DMA_64BIT) in desc_set_phys_addr()
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, in desc_get_phys_addr() argument
170 if (lp->features & XAE_FEATURE_DMA_64BIT) in desc_get_phys_addr()
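Note: desc_set_phys_addr()/desc_get_phys_addr() store a dma_addr_t across two 32-bit descriptor words and only touch the high word when XAE_FEATURE_DMA_64BIT is set. A hedged userspace sketch of that split and reassembly (struct demo_bd is illustrative; the field names echo the phys/phys_msb words seen elsewhere in this listing):

#include <stdint.h>
#include <stdio.h>

/* Illustrative descriptor: one 64-bit address split across two 32-bit words. */
struct demo_bd {
        uint32_t phys;          /* low 32 bits */
        uint32_t phys_msb;      /* high 32 bits, valid only with 64-bit DMA */
};

static void demo_set_phys(struct demo_bd *bd, uint64_t addr, int has_64bit)
{
        bd->phys = (uint32_t)addr;                      /* lower_32_bits() */
        if (has_64bit)
                bd->phys_msb = (uint32_t)(addr >> 32);  /* upper_32_bits() */
}

static uint64_t demo_get_phys(const struct demo_bd *bd, int has_64bit)
{
        uint64_t addr = bd->phys;

        if (has_64bit)
                addr |= (uint64_t)bd->phys_msb << 32;
        return addr;
}

int main(void)
{
        struct demo_bd bd = { 0 };

        demo_set_phys(&bd, 0x1234567890ULL, 1);
        printf("round trip: 0x%llx\n",
               (unsigned long long)demo_get_phys(&bd, 1));
        return 0;
}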
187 struct axienet_local *lp = netdev_priv(ndev); in axienet_dma_bd_release() local
190 dma_free_coherent(lp->dev, in axienet_dma_bd_release()
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, in axienet_dma_bd_release()
192 lp->tx_bd_v, in axienet_dma_bd_release()
193 lp->tx_bd_p); in axienet_dma_bd_release()
195 if (!lp->rx_bd_v) in axienet_dma_bd_release()
198 for (i = 0; i < lp->rx_bd_num; i++) { in axienet_dma_bd_release()
204 if (!lp->rx_bd_v[i].skb) in axienet_dma_bd_release()
207 dev_kfree_skb(lp->rx_bd_v[i].skb); in axienet_dma_bd_release()
213 if (lp->rx_bd_v[i].cntrl) { in axienet_dma_bd_release()
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); in axienet_dma_bd_release()
215 dma_unmap_single(lp->dev, phys, in axienet_dma_bd_release()
216 lp->max_frm_size, DMA_FROM_DEVICE); in axienet_dma_bd_release()
220 dma_free_coherent(lp->dev, in axienet_dma_bd_release()
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, in axienet_dma_bd_release()
222 lp->rx_bd_v, in axienet_dma_bd_release()
223 lp->rx_bd_p); in axienet_dma_bd_release()
226 static u64 axienet_dma_rate(struct axienet_local *lp) in axienet_dma_rate() argument
228 if (lp->axi_clk) in axienet_dma_rate()
229 return clk_get_rate(lp->axi_clk); in axienet_dma_rate()
235 * @lp: Device private data
244 static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec) in axienet_calc_cr() argument
254 u64 clk_rate = axienet_dma_rate(lp); in axienet_calc_cr()
271 * @lp: Device private data
276 static void axienet_coalesce_params(struct axienet_local *lp, u32 cr, in axienet_coalesce_params() argument
279 u64 clk_rate = axienet_dma_rate(lp); in axienet_coalesce_params()
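Note: axienet_calc_cr() converts a packet count plus a microsecond delay into a DMA control-register value using the clock rate from axienet_dma_rate(), and axienet_coalesce_params() performs the inverse. The register field layout and the hardware delay-timer prescaler are not visible in these matched lines, so the sketch below only shows the usec-to-tick arithmetic, with PRESCALE as an assumed divisor for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * PRESCALE is an assumed divisor between the AXI clock and the DMA
 * delay timer, used here only to show the shape of the conversion.
 */
#define PRESCALE 125ULL

static uint32_t usec_to_timer(uint64_t clk_rate, uint32_t usec)
{
        /* round up so a nonzero request never collapses to zero ticks */
        return (uint32_t)(((uint64_t)usec * clk_rate + PRESCALE * 1000000 - 1) /
                          (PRESCALE * 1000000));
}

static uint32_t timer_to_usec(uint64_t clk_rate, uint32_t timer)
{
        return (uint32_t)((uint64_t)timer * PRESCALE * 1000000 / clk_rate);
}

int main(void)
{
        uint64_t clk = 100000000ULL;    /* e.g. a 100 MHz AXI clock */
        uint32_t ticks = usec_to_timer(clk, 50);

        printf("50 us -> %u ticks -> %u us\n", ticks, timer_to_usec(clk, ticks));
        return 0;
}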
288 * @lp: Pointer to the axienet_local structure
290 static void axienet_dma_start(struct axienet_local *lp) in axienet_dma_start() argument
292 spin_lock_irq(&lp->rx_cr_lock); in axienet_dma_start()
295 lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
296 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); in axienet_dma_start()
301 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); in axienet_dma_start()
302 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
303 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); in axienet_dma_start()
304 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + in axienet_dma_start()
305 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); in axienet_dma_start()
306 lp->rx_dma_started = true; in axienet_dma_start()
308 spin_unlock_irq(&lp->rx_cr_lock); in axienet_dma_start()
309 spin_lock_irq(&lp->tx_cr_lock); in axienet_dma_start()
312 lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
313 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); in axienet_dma_start()
319 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); in axienet_dma_start()
320 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
321 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); in axienet_dma_start()
322 lp->tx_dma_started = true; in axienet_dma_start()
324 spin_unlock_irq(&lp->tx_cr_lock); in axienet_dma_start()
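Note: for each channel, axienet_dma_start() follows a fixed bring-up order under the CR spinlock: write the control register with RUNSTOP cleared, program the current-descriptor pointer, set RUNSTOP, then write the tail-descriptor pointer (the last RX BD) so the engine starts fetching. A schematic restatement of the RX sequence, using a hypothetical reg_write() and made-up offsets in place of the driver's MMIO helpers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical offsets and mask, standing in for the XAXIDMA_* constants. */
#define RX_CR       0x30
#define RX_CDESC    0x38
#define RX_TDESC    0x40
#define CR_RUNSTOP  0x1u

/* Stand-in for the driver's MMIO write helper; it only logs the access. */
static void reg_write(unsigned int off, uint64_t val)
{
        printf("write reg 0x%02x = 0x%llx\n", off, (unsigned long long)val);
}

static void demo_rx_dma_start(uint32_t *rx_cr, uint64_t bd_base,
                              unsigned int bd_size, unsigned int bd_num)
{
        /* 1. configure the channel with RUNSTOP clear */
        *rx_cr &= ~CR_RUNSTOP;
        reg_write(RX_CR, *rx_cr);

        /* 2. point the engine at the first buffer descriptor */
        reg_write(RX_CDESC, bd_base);

        /* 3. set RUNSTOP, then kick it by writing the last (tail) descriptor */
        *rx_cr |= CR_RUNSTOP;
        reg_write(RX_CR, *rx_cr);
        reg_write(RX_TDESC, bd_base + (uint64_t)bd_size * (bd_num - 1));
}

int main(void)
{
        uint32_t cr = 0;

        demo_rx_dma_start(&cr, 0x10000000ULL, 64, 128);
        return 0;
}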
341 struct axienet_local *lp = netdev_priv(ndev); in axienet_dma_bd_init() local
344 lp->tx_bd_ci = 0; in axienet_dma_bd_init()
345 lp->tx_bd_tail = 0; in axienet_dma_bd_init()
346 lp->rx_bd_ci = 0; in axienet_dma_bd_init()
349 lp->tx_bd_v = dma_alloc_coherent(lp->dev, in axienet_dma_bd_init()
350 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, in axienet_dma_bd_init()
351 &lp->tx_bd_p, GFP_KERNEL); in axienet_dma_bd_init()
352 if (!lp->tx_bd_v) in axienet_dma_bd_init()
355 lp->rx_bd_v = dma_alloc_coherent(lp->dev, in axienet_dma_bd_init()
356 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, in axienet_dma_bd_init()
357 &lp->rx_bd_p, GFP_KERNEL); in axienet_dma_bd_init()
358 if (!lp->rx_bd_v) in axienet_dma_bd_init()
361 for (i = 0; i < lp->tx_bd_num; i++) { in axienet_dma_bd_init()
362 dma_addr_t addr = lp->tx_bd_p + in axienet_dma_bd_init()
363 sizeof(*lp->tx_bd_v) * in axienet_dma_bd_init()
364 ((i + 1) % lp->tx_bd_num); in axienet_dma_bd_init()
366 lp->tx_bd_v[i].next = lower_32_bits(addr); in axienet_dma_bd_init()
367 if (lp->features & XAE_FEATURE_DMA_64BIT) in axienet_dma_bd_init()
368 lp->tx_bd_v[i].next_msb = upper_32_bits(addr); in axienet_dma_bd_init()
371 for (i = 0; i < lp->rx_bd_num; i++) { in axienet_dma_bd_init()
374 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * in axienet_dma_bd_init()
375 ((i + 1) % lp->rx_bd_num); in axienet_dma_bd_init()
376 lp->rx_bd_v[i].next = lower_32_bits(addr); in axienet_dma_bd_init()
377 if (lp->features & XAE_FEATURE_DMA_64BIT) in axienet_dma_bd_init()
378 lp->rx_bd_v[i].next_msb = upper_32_bits(addr); in axienet_dma_bd_init()
380 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); in axienet_dma_bd_init()
384 lp->rx_bd_v[i].skb = skb; in axienet_dma_bd_init()
385 addr = dma_map_single(lp->dev, skb->data, in axienet_dma_bd_init()
386 lp->max_frm_size, DMA_FROM_DEVICE); in axienet_dma_bd_init()
387 if (dma_mapping_error(lp->dev, addr)) { in axienet_dma_bd_init()
391 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); in axienet_dma_bd_init()
393 lp->rx_bd_v[i].cntrl = lp->max_frm_size; in axienet_dma_bd_init()
396 axienet_dma_start(lp); in axienet_dma_bd_init()
415 struct axienet_local *lp = netdev_priv(ndev); in axienet_set_mac_address() local
423 axienet_iow(lp, XAE_UAW0_OFFSET, in axienet_set_mac_address()
428 axienet_iow(lp, XAE_UAW1_OFFSET, in axienet_set_mac_address()
429 (((axienet_ior(lp, XAE_UAW1_OFFSET)) & in axienet_set_mac_address()
469 struct axienet_local *lp = netdev_priv(ndev); in axienet_set_multicast_list() local
471 reg = axienet_ior(lp, XAE_FMI_OFFSET); in axienet_set_multicast_list()
477 axienet_iow(lp, XAE_FMI_OFFSET, reg); in axienet_set_multicast_list()
482 axienet_iow(lp, XAE_FMI_OFFSET, reg); in axienet_set_multicast_list()
483 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */ in axienet_set_multicast_list()
484 axienet_iow(lp, XAE_AF1_OFFSET, 0); in axienet_set_multicast_list()
485 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */ in axienet_set_multicast_list()
486 axienet_iow(lp, XAE_AM1_OFFSET, 0); in axienet_set_multicast_list()
487 axienet_iow(lp, XAE_FFE_OFFSET, 1); in axienet_set_multicast_list()
507 axienet_iow(lp, XAE_FMI_OFFSET, reg); in axienet_set_multicast_list()
508 axienet_iow(lp, XAE_AF0_OFFSET, af0reg); in axienet_set_multicast_list()
509 axienet_iow(lp, XAE_AF1_OFFSET, af1reg); in axienet_set_multicast_list()
510 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff); in axienet_set_multicast_list()
511 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff); in axienet_set_multicast_list()
512 axienet_iow(lp, XAE_FFE_OFFSET, 1); in axienet_set_multicast_list()
520 axienet_iow(lp, XAE_FMI_OFFSET, reg); in axienet_set_multicast_list()
521 axienet_iow(lp, XAE_FFE_OFFSET, 0); in axienet_set_multicast_list()
539 struct axienet_local *lp = netdev_priv(ndev); in axienet_setoptions() local
543 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); in axienet_setoptions()
546 axienet_iow(lp, tp->reg, reg); in axienet_setoptions()
550 lp->options |= options; in axienet_setoptions()
553 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat) in axienet_stat() argument
557 if (lp->reset_in_progress) in axienet_stat()
558 return lp->hw_stat_base[stat]; in axienet_stat()
560 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); in axienet_stat()
561 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]); in axienet_stat()
564 static void axienet_stats_update(struct axienet_local *lp, bool reset) in axienet_stats_update() argument
568 write_seqcount_begin(&lp->hw_stats_seqcount); in axienet_stats_update()
569 lp->reset_in_progress = reset; in axienet_stats_update()
571 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); in axienet_stats_update()
573 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat]; in axienet_stats_update()
574 lp->hw_last_counter[stat] = counter; in axienet_stats_update()
576 write_seqcount_end(&lp->hw_stats_seqcount); in axienet_stats_update()
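Note: axienet_stat() and axienet_stats_update() build 64-bit software counters on top of 32-bit hardware counters that wrap: each refresh adds (counter - last) in 32-bit arithmetic to a 64-bit base, and readers retry under hw_stats_seqcount. The wrap-safe accumulation on its own looks like the userspace sketch below (seqcount and locking omitted):

#include <stdint.h>
#include <stdio.h>

struct stat64 {
        uint64_t base;  /* accumulated value as of the last refresh */
        uint32_t last;  /* hardware counter value seen at that refresh */
};

/*
 * Fold the current hardware reading into the 64-bit counter. Unsigned
 * 32-bit subtraction yields the correct delta even across a wrap.
 */
static void stat_refresh(struct stat64 *s, uint32_t hw_now)
{
        s->base += (uint32_t)(hw_now - s->last);
        s->last = hw_now;
}

static uint64_t stat_read(const struct stat64 *s, uint32_t hw_now)
{
        return s->base + (uint32_t)(hw_now - s->last);
}

int main(void)
{
        struct stat64 s = { .base = 0, .last = 0xfffffff0u };

        /* the hardware counter wrapped from 0xfffffff0 to 0x10: delta is 0x20 */
        stat_refresh(&s, 0x10);
        printf("after wrap: %llu\n", (unsigned long long)stat_read(&s, 0x10));
        return 0;
}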
581 struct axienet_local *lp = container_of(work, struct axienet_local, in axienet_refresh_stats() local
584 mutex_lock(&lp->stats_lock); in axienet_refresh_stats()
585 axienet_stats_update(lp, false); in axienet_refresh_stats()
586 mutex_unlock(&lp->stats_lock); in axienet_refresh_stats()
589 schedule_delayed_work(&lp->stats_work, 13 * HZ); in axienet_refresh_stats()
592 static int __axienet_device_reset(struct axienet_local *lp) in __axienet_device_reset() argument
598 mutex_lock(&lp->stats_lock); in __axienet_device_reset()
599 if (lp->features & XAE_FEATURE_STATS) in __axienet_device_reset()
600 axienet_stats_update(lp, true); in __axienet_device_reset()
609 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); in __axienet_device_reset()
612 DELAY_OF_ONE_MILLISEC, 50000, false, lp, in __axienet_device_reset()
615 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); in __axienet_device_reset()
622 DELAY_OF_ONE_MILLISEC, 50000, false, lp, in __axienet_device_reset()
625 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); in __axienet_device_reset()
630 if (lp->features & XAE_FEATURE_STATS) { in __axienet_device_reset()
633 write_seqcount_begin(&lp->hw_stats_seqcount); in __axienet_device_reset()
634 lp->reset_in_progress = false; in __axienet_device_reset()
637 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); in __axienet_device_reset()
639 lp->hw_stat_base[stat] += in __axienet_device_reset()
640 lp->hw_last_counter[stat] - counter; in __axienet_device_reset()
641 lp->hw_last_counter[stat] = counter; in __axienet_device_reset()
643 write_seqcount_end(&lp->hw_stats_seqcount); in __axienet_device_reset()
647 mutex_unlock(&lp->stats_lock); in __axienet_device_reset()
653 * @lp: Pointer to the axienet_local structure
655 static void axienet_dma_stop(struct axienet_local *lp) in axienet_dma_stop() argument
660 spin_lock_irq(&lp->rx_cr_lock); in axienet_dma_stop()
662 cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); in axienet_dma_stop()
663 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); in axienet_dma_stop()
664 lp->rx_dma_started = false; in axienet_dma_stop()
666 spin_unlock_irq(&lp->rx_cr_lock); in axienet_dma_stop()
667 synchronize_irq(lp->rx_irq); in axienet_dma_stop()
669 spin_lock_irq(&lp->tx_cr_lock); in axienet_dma_stop()
671 cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); in axienet_dma_stop()
672 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); in axienet_dma_stop()
673 lp->tx_dma_started = false; in axienet_dma_stop()
675 spin_unlock_irq(&lp->tx_cr_lock); in axienet_dma_stop()
676 synchronize_irq(lp->tx_irq); in axienet_dma_stop()
679 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); in axienet_dma_stop()
682 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); in axienet_dma_stop()
685 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); in axienet_dma_stop()
688 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); in axienet_dma_stop()
692 axienet_lock_mii(lp); in axienet_dma_stop()
693 __axienet_device_reset(lp); in axienet_dma_stop()
694 axienet_unlock_mii(lp); in axienet_dma_stop()
713 struct axienet_local *lp = netdev_priv(ndev); in axienet_device_reset() local
716 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; in axienet_device_reset()
717 lp->options |= XAE_OPTION_VLAN; in axienet_device_reset()
718 lp->options &= (~XAE_OPTION_JUMBO); in axienet_device_reset()
721 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + in axienet_device_reset()
724 if (lp->max_frm_size <= lp->rxmem) in axienet_device_reset()
725 lp->options |= XAE_OPTION_JUMBO; in axienet_device_reset()
728 if (!lp->use_dmaengine) { in axienet_device_reset()
729 ret = __axienet_device_reset(lp); in axienet_device_reset()
741 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); in axienet_device_reset()
743 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); in axienet_device_reset()
745 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); in axienet_device_reset()
747 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); in axienet_device_reset()
748 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? in axienet_device_reset()
751 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); in axienet_device_reset()
756 axienet_setoptions(ndev, lp->options & in axienet_device_reset()
760 axienet_setoptions(ndev, lp->options); in axienet_device_reset()
769 * @lp: Pointer to the axienet_local structure
782 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, in axienet_free_tx_chain() argument
791 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; in axienet_free_tx_chain()
802 phys = desc_get_phys_addr(lp, cur_p); in axienet_free_tx_chain()
803 dma_unmap_single(lp->dev, phys, in axienet_free_tx_chain()
827 lp->tx_bd_ci += i; in axienet_free_tx_chain()
828 if (lp->tx_bd_ci >= lp->tx_bd_num) in axienet_free_tx_chain()
829 lp->tx_bd_ci %= lp->tx_bd_num; in axienet_free_tx_chain()
837 * @lp: Pointer to the axienet_local structure
848 static inline int axienet_check_tx_bd_space(struct axienet_local *lp, in axienet_check_tx_bd_space() argument
855 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % in axienet_check_tx_bd_space()
856 lp->tx_bd_num]; in axienet_check_tx_bd_space()
872 struct axienet_local *lp = data; in axienet_dma_tx_cb() local
876 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++); in axienet_dma_tx_cb()
878 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb); in axienet_dma_tx_cb()
879 u64_stats_update_begin(&lp->tx_stat_sync); in axienet_dma_tx_cb()
880 u64_stats_add(&lp->tx_bytes, len); in axienet_dma_tx_cb()
881 u64_stats_add(&lp->tx_packets, 1); in axienet_dma_tx_cb()
882 u64_stats_update_end(&lp->tx_stat_sync); in axienet_dma_tx_cb()
883 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE); in axienet_dma_tx_cb()
886 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), in axienet_dma_tx_cb()
909 struct axienet_local *lp = netdev_priv(ndev); in axienet_start_xmit_dmaengine() local
919 dma_dev = lp->tx_chan->device; in axienet_start_xmit_dmaengine()
921 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) { in axienet_start_xmit_dmaengine()
928 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head); in axienet_start_xmit_dmaengine()
932 lp->tx_ring_head++; in axienet_start_xmit_dmaengine()
938 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); in axienet_start_xmit_dmaengine()
944 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { in axienet_start_xmit_dmaengine()
947 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { in axienet_start_xmit_dmaengine()
958 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl, in axienet_start_xmit_dmaengine()
966 dma_tx_desc->callback_param = lp; in axienet_start_xmit_dmaengine()
968 txq = skb_get_tx_queue(lp->ndev, skb); in axienet_start_xmit_dmaengine()
970 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), in axienet_start_xmit_dmaengine()
974 dma_async_issue_pending(lp->tx_chan); in axienet_start_xmit_dmaengine()
978 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); in axienet_start_xmit_dmaengine()
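Note: the dmaengine TX path gates transmission on CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX): free slots between a producer index advanced only by the xmit path and a consumer index advanced only by the completion callback. A standalone sketch of that accounting (circ_space() below reimplements the kernel macro from linux/circ_buf.h for illustration):

#include <stdio.h>

#define RING_SIZE 128   /* power of two, like TX_BD_NUM_MAX */

/*
 * Same formula as the kernel's CIRC_SPACE(): free slots, keeping one
 * slot unused so that head == tail always means "ring empty".
 */
static unsigned int circ_space(unsigned int head, unsigned int tail)
{
        return (tail - head - 1) & (RING_SIZE - 1);
}

int main(void)
{
        unsigned int head = 0, tail = 0;

        printf("empty ring: %u free\n", circ_space(head, tail));       /* 127 */

        head += 100;    /* producer (xmit path) queued 100 buffers */
        printf("after 100 submissions: %u free\n", circ_space(head, tail));

        tail += 40;     /* consumer (completion callback) retired 40 */
        printf("after 40 completions: %u free\n", circ_space(head, tail));
        return 0;
}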
1000 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); in axienet_tx_poll() local
1001 struct net_device *ndev = lp->ndev; in axienet_tx_poll()
1005 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false, in axienet_tx_poll()
1010 u64_stats_update_begin(&lp->tx_stat_sync); in axienet_tx_poll()
1011 u64_stats_add(&lp->tx_packets, packets); in axienet_tx_poll()
1012 u64_stats_add(&lp->tx_bytes, size); in axienet_tx_poll()
1013 u64_stats_update_end(&lp->tx_stat_sync); in axienet_tx_poll()
1018 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) in axienet_tx_poll()
1027 spin_lock_irq(&lp->tx_cr_lock); in axienet_tx_poll()
1028 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); in axienet_tx_poll()
1029 spin_unlock_irq(&lp->tx_cr_lock); in axienet_tx_poll()
1057 struct axienet_local *lp = netdev_priv(ndev); in axienet_start_xmit() local
1060 orig_tail_ptr = lp->tx_bd_tail; in axienet_start_xmit()
1064 cur_p = &lp->tx_bd_v[orig_tail_ptr]; in axienet_start_xmit()
1066 if (axienet_check_tx_bd_space(lp, num_frag + 1)) { in axienet_start_xmit()
1078 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { in axienet_start_xmit()
1081 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { in axienet_start_xmit()
1092 phys = dma_map_single(lp->dev, skb->data, in axienet_start_xmit()
1094 if (unlikely(dma_mapping_error(lp->dev, phys))) { in axienet_start_xmit()
1101 desc_set_phys_addr(lp, phys, cur_p); in axienet_start_xmit()
1105 if (++new_tail_ptr >= lp->tx_bd_num) in axienet_start_xmit()
1107 cur_p = &lp->tx_bd_v[new_tail_ptr]; in axienet_start_xmit()
1109 phys = dma_map_single(lp->dev, in axienet_start_xmit()
1113 if (unlikely(dma_mapping_error(lp->dev, phys))) { in axienet_start_xmit()
1117 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, in axienet_start_xmit()
1122 desc_set_phys_addr(lp, phys, cur_p); in axienet_start_xmit()
1129 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; in axienet_start_xmit()
1130 if (++new_tail_ptr >= lp->tx_bd_num) in axienet_start_xmit()
1132 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); in axienet_start_xmit()
1136 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); in axienet_start_xmit()
1139 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { in axienet_start_xmit()
1146 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) in axienet_start_xmit()
1164 struct axienet_local *lp = data; in axienet_dma_rx_cb() local
1169 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++); in axienet_dma_rx_cb()
1173 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size, in axienet_dma_rx_cb()
1178 netdev_err(lp->ndev, "Failed to get RX metadata pointer\n"); in axienet_dma_rx_cb()
1180 lp->ndev->stats.rx_dropped++; in axienet_dma_rx_cb()
1187 skb->protocol = eth_type_trans(skb, lp->ndev); in axienet_dma_rx_cb()
1191 u64_stats_update_begin(&lp->rx_stat_sync); in axienet_dma_rx_cb()
1192 u64_stats_add(&lp->rx_packets, 1); in axienet_dma_rx_cb()
1193 u64_stats_add(&lp->rx_bytes, rx_len); in axienet_dma_rx_cb()
1194 u64_stats_update_end(&lp->rx_stat_sync); in axienet_dma_rx_cb()
1197 for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail, in axienet_dma_rx_cb()
1199 axienet_rx_submit_desc(lp->ndev); in axienet_dma_rx_cb()
1200 dma_async_issue_pending(lp->rx_chan); in axienet_dma_rx_cb()
1219 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); in axienet_rx_poll() local
1221 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; in axienet_rx_poll()
1240 phys = desc_get_phys_addr(lp, cur_p); in axienet_rx_poll()
1241 dma_unmap_single(lp->dev, phys, lp->max_frm_size, in axienet_rx_poll()
1245 skb->protocol = eth_type_trans(skb, lp->ndev); in axienet_rx_poll()
1250 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { in axienet_rx_poll()
1257 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { in axienet_rx_poll()
1268 new_skb = napi_alloc_skb(napi, lp->max_frm_size); in axienet_rx_poll()
1272 phys = dma_map_single(lp->dev, new_skb->data, in axienet_rx_poll()
1273 lp->max_frm_size, in axienet_rx_poll()
1275 if (unlikely(dma_mapping_error(lp->dev, phys))) { in axienet_rx_poll()
1277 netdev_err(lp->ndev, "RX DMA mapping error\n"); in axienet_rx_poll()
1281 desc_set_phys_addr(lp, phys, cur_p); in axienet_rx_poll()
1283 cur_p->cntrl = lp->max_frm_size; in axienet_rx_poll()
1290 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; in axienet_rx_poll()
1292 if (++lp->rx_bd_ci >= lp->rx_bd_num) in axienet_rx_poll()
1293 lp->rx_bd_ci = 0; in axienet_rx_poll()
1294 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; in axienet_rx_poll()
1297 u64_stats_update_begin(&lp->rx_stat_sync); in axienet_rx_poll()
1298 u64_stats_add(&lp->rx_packets, packets); in axienet_rx_poll()
1299 u64_stats_add(&lp->rx_bytes, size); in axienet_rx_poll()
1300 u64_stats_update_end(&lp->rx_stat_sync); in axienet_rx_poll()
1303 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); in axienet_rx_poll()
1306 if (READ_ONCE(lp->rx_dim_enabled)) { in axienet_rx_poll()
1310 .pkt_ctr = u64_stats_read(&lp->rx_packets), in axienet_rx_poll()
1311 .byte_ctr = u64_stats_read(&lp->rx_bytes), in axienet_rx_poll()
1312 .event_ctr = READ_ONCE(lp->rx_irqs), in axienet_rx_poll()
1315 net_dim(&lp->rx_dim, &sample); in axienet_rx_poll()
1322 spin_lock_irq(&lp->rx_cr_lock); in axienet_rx_poll()
1323 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); in axienet_rx_poll()
1324 spin_unlock_irq(&lp->rx_cr_lock); in axienet_rx_poll()
1343 struct axienet_local *lp = netdev_priv(ndev); in axienet_tx_irq() local
1345 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); in axienet_tx_irq()
1350 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); in axienet_tx_irq()
1355 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, in axienet_tx_irq()
1356 (lp->tx_bd_v[lp->tx_bd_ci]).phys); in axienet_tx_irq()
1357 schedule_work(&lp->dma_err_task); in axienet_tx_irq()
1362 if (napi_schedule_prep(&lp->napi_tx)) { in axienet_tx_irq()
1365 spin_lock(&lp->tx_cr_lock); in axienet_tx_irq()
1366 cr = lp->tx_dma_cr; in axienet_tx_irq()
1368 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); in axienet_tx_irq()
1369 spin_unlock(&lp->tx_cr_lock); in axienet_tx_irq()
1370 __napi_schedule(&lp->napi_tx); in axienet_tx_irq()
1391 struct axienet_local *lp = netdev_priv(ndev); in axienet_rx_irq() local
1393 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); in axienet_rx_irq()
1398 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); in axienet_rx_irq()
1403 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, in axienet_rx_irq()
1404 (lp->rx_bd_v[lp->rx_bd_ci]).phys); in axienet_rx_irq()
1405 schedule_work(&lp->dma_err_task); in axienet_rx_irq()
1410 WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1); in axienet_rx_irq()
1411 if (napi_schedule_prep(&lp->napi_rx)) { in axienet_rx_irq()
1414 spin_lock(&lp->rx_cr_lock); in axienet_rx_irq()
1415 cr = lp->rx_dma_cr; in axienet_rx_irq()
1417 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); in axienet_rx_irq()
1418 spin_unlock(&lp->rx_cr_lock); in axienet_rx_irq()
1420 __napi_schedule(&lp->napi_rx); in axienet_rx_irq()
1439 struct axienet_local *lp = netdev_priv(ndev); in axienet_eth_irq() local
1442 pending = axienet_ior(lp, XAE_IP_OFFSET); in axienet_eth_irq()
1452 axienet_iow(lp, XAE_IS_OFFSET, pending); in axienet_eth_irq()
1469 struct axienet_local *lp = netdev_priv(ndev); in axienet_rx_submit_desc() local
1474 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head); in axienet_rx_submit_desc()
1478 skb = netdev_alloc_skb(ndev, lp->max_frm_size); in axienet_rx_submit_desc()
1483 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); in axienet_rx_submit_desc()
1484 if (unlikely(dma_mapping_error(lp->dev, addr))) { in axienet_rx_submit_desc()
1490 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size; in axienet_rx_submit_desc()
1491 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl, in axienet_rx_submit_desc()
1500 dma_rx_desc->callback_param = lp; in axienet_rx_submit_desc()
1502 lp->rx_ring_head++; in axienet_rx_submit_desc()
1508 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE); in axienet_rx_submit_desc()
1524 struct axienet_local *lp = netdev_priv(ndev); in axienet_init_dmaengine() local
1528 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0"); in axienet_init_dmaengine()
1529 if (IS_ERR(lp->tx_chan)) { in axienet_init_dmaengine()
1530 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n"); in axienet_init_dmaengine()
1531 return PTR_ERR(lp->tx_chan); in axienet_init_dmaengine()
1534 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0"); in axienet_init_dmaengine()
1535 if (IS_ERR(lp->rx_chan)) { in axienet_init_dmaengine()
1536 ret = PTR_ERR(lp->rx_chan); in axienet_init_dmaengine()
1537 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n"); in axienet_init_dmaengine()
1541 lp->tx_ring_tail = 0; in axienet_init_dmaengine()
1542 lp->tx_ring_head = 0; in axienet_init_dmaengine()
1543 lp->rx_ring_tail = 0; in axienet_init_dmaengine()
1544 lp->rx_ring_head = 0; in axienet_init_dmaengine()
1545 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring), in axienet_init_dmaengine()
1547 if (!lp->tx_skb_ring) { in axienet_init_dmaengine()
1557 lp->tx_skb_ring[i] = skbuf_dma; in axienet_init_dmaengine()
1560 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring), in axienet_init_dmaengine()
1562 if (!lp->rx_skb_ring) { in axienet_init_dmaengine()
1572 lp->rx_skb_ring[i] = skbuf_dma; in axienet_init_dmaengine()
1577 dma_async_issue_pending(lp->rx_chan); in axienet_init_dmaengine()
1583 kfree(lp->rx_skb_ring[i]); in axienet_init_dmaengine()
1584 kfree(lp->rx_skb_ring); in axienet_init_dmaengine()
1587 kfree(lp->tx_skb_ring[i]); in axienet_init_dmaengine()
1588 kfree(lp->tx_skb_ring); in axienet_init_dmaengine()
1590 dma_release_channel(lp->rx_chan); in axienet_init_dmaengine()
1592 dma_release_channel(lp->tx_chan); in axienet_init_dmaengine()
1610 struct axienet_local *lp = netdev_priv(ndev); in axienet_init_legacy_dma() local
1613 lp->stopping = false; in axienet_init_legacy_dma()
1614 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); in axienet_init_legacy_dma()
1616 napi_enable(&lp->napi_rx); in axienet_init_legacy_dma()
1617 napi_enable(&lp->napi_tx); in axienet_init_legacy_dma()
1620 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, in axienet_init_legacy_dma()
1625 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, in axienet_init_legacy_dma()
1630 if (lp->eth_irq > 0) { in axienet_init_legacy_dma()
1631 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, in axienet_init_legacy_dma()
1640 free_irq(lp->rx_irq, ndev); in axienet_init_legacy_dma()
1642 free_irq(lp->tx_irq, ndev); in axienet_init_legacy_dma()
1644 napi_disable(&lp->napi_tx); in axienet_init_legacy_dma()
1645 napi_disable(&lp->napi_rx); in axienet_init_legacy_dma()
1646 cancel_work_sync(&lp->dma_err_task); in axienet_init_legacy_dma()
1647 dev_err(lp->dev, "request_irq() failed\n"); in axienet_init_legacy_dma()
1667 struct axienet_local *lp = netdev_priv(ndev); in axienet_open() local
1673 axienet_lock_mii(lp); in axienet_open()
1675 axienet_unlock_mii(lp); in axienet_open()
1677 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); in axienet_open()
1679 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); in axienet_open()
1683 phylink_start(lp->phylink); in axienet_open()
1686 schedule_delayed_work(&lp->stats_work, 0); in axienet_open()
1688 if (lp->use_dmaengine) { in axienet_open()
1690 if (lp->eth_irq > 0) { in axienet_open()
1691 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, in axienet_open()
1709 if (lp->eth_irq > 0) in axienet_open()
1710 free_irq(lp->eth_irq, ndev); in axienet_open()
1712 cancel_work_sync(&lp->rx_dim.work); in axienet_open()
1713 cancel_delayed_work_sync(&lp->stats_work); in axienet_open()
1714 phylink_stop(lp->phylink); in axienet_open()
1715 phylink_disconnect_phy(lp->phylink); in axienet_open()
1731 struct axienet_local *lp = netdev_priv(ndev); in axienet_stop() local
1734 if (!lp->use_dmaengine) { in axienet_stop()
1735 WRITE_ONCE(lp->stopping, true); in axienet_stop()
1736 flush_work(&lp->dma_err_task); in axienet_stop()
1738 napi_disable(&lp->napi_tx); in axienet_stop()
1739 napi_disable(&lp->napi_rx); in axienet_stop()
1742 cancel_work_sync(&lp->rx_dim.work); in axienet_stop()
1743 cancel_delayed_work_sync(&lp->stats_work); in axienet_stop()
1745 phylink_stop(lp->phylink); in axienet_stop()
1746 phylink_disconnect_phy(lp->phylink); in axienet_stop()
1748 axienet_setoptions(ndev, lp->options & in axienet_stop()
1751 if (!lp->use_dmaengine) { in axienet_stop()
1752 axienet_dma_stop(lp); in axienet_stop()
1753 cancel_work_sync(&lp->dma_err_task); in axienet_stop()
1754 free_irq(lp->tx_irq, ndev); in axienet_stop()
1755 free_irq(lp->rx_irq, ndev); in axienet_stop()
1758 dmaengine_terminate_sync(lp->tx_chan); in axienet_stop()
1759 dmaengine_synchronize(lp->tx_chan); in axienet_stop()
1760 dmaengine_terminate_sync(lp->rx_chan); in axienet_stop()
1761 dmaengine_synchronize(lp->rx_chan); in axienet_stop()
1764 kfree(lp->tx_skb_ring[i]); in axienet_stop()
1765 kfree(lp->tx_skb_ring); in axienet_stop()
1767 kfree(lp->rx_skb_ring[i]); in axienet_stop()
1768 kfree(lp->rx_skb_ring); in axienet_stop()
1770 dma_release_channel(lp->rx_chan); in axienet_stop()
1771 dma_release_channel(lp->tx_chan); in axienet_stop()
1775 axienet_iow(lp, XAE_IE_OFFSET, 0); in axienet_stop()
1777 if (lp->eth_irq > 0) in axienet_stop()
1778 free_irq(lp->eth_irq, ndev); in axienet_stop()
1795 struct axienet_local *lp = netdev_priv(ndev); in axienet_change_mtu() local
1801 XAE_TRL_SIZE) > lp->rxmem) in axienet_change_mtu()
1819 struct axienet_local *lp = netdev_priv(ndev); in axienet_poll_controller() local
1821 disable_irq(lp->tx_irq); in axienet_poll_controller()
1822 disable_irq(lp->rx_irq); in axienet_poll_controller()
1823 axienet_rx_irq(lp->tx_irq, ndev); in axienet_poll_controller()
1824 axienet_tx_irq(lp->rx_irq, ndev); in axienet_poll_controller()
1825 enable_irq(lp->tx_irq); in axienet_poll_controller()
1826 enable_irq(lp->rx_irq); in axienet_poll_controller()
1832 struct axienet_local *lp = netdev_priv(dev); in axienet_ioctl() local
1837 return phylink_mii_ioctl(lp->phylink, rq, cmd); in axienet_ioctl()
1843 struct axienet_local *lp = netdev_priv(dev); in axienet_get_stats64() local
1849 start = u64_stats_fetch_begin(&lp->rx_stat_sync); in axienet_get_stats64()
1850 stats->rx_packets = u64_stats_read(&lp->rx_packets); in axienet_get_stats64()
1851 stats->rx_bytes = u64_stats_read(&lp->rx_bytes); in axienet_get_stats64()
1852 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); in axienet_get_stats64()
1855 start = u64_stats_fetch_begin(&lp->tx_stat_sync); in axienet_get_stats64()
1856 stats->tx_packets = u64_stats_read(&lp->tx_packets); in axienet_get_stats64()
1857 stats->tx_bytes = u64_stats_read(&lp->tx_bytes); in axienet_get_stats64()
1858 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); in axienet_get_stats64()
1860 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_get_stats64()
1864 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_get_stats64()
1866 axienet_stat(lp, STAT_RX_LENGTH_ERRORS); in axienet_get_stats64()
1867 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS); in axienet_get_stats64()
1869 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); in axienet_get_stats64()
1870 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) + in axienet_get_stats64()
1871 axienet_stat(lp, STAT_FRAGMENT_FRAMES) + in axienet_get_stats64()
1875 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); in axienet_get_stats64()
1878 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); in axienet_get_stats64()
1880 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS); in axienet_get_stats64()
1882 axienet_stat(lp, STAT_TX_LATE_COLLISIONS); in axienet_get_stats64()
1883 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) + in axienet_get_stats64()
1887 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_get_stats64()
1962 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_get_regs() local
1968 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); in axienet_ethtools_get_regs()
1969 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); in axienet_ethtools_get_regs()
1970 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); in axienet_ethtools_get_regs()
1971 data[3] = axienet_ior(lp, XAE_IS_OFFSET); in axienet_ethtools_get_regs()
1972 data[4] = axienet_ior(lp, XAE_IP_OFFSET); in axienet_ethtools_get_regs()
1973 data[5] = axienet_ior(lp, XAE_IE_OFFSET); in axienet_ethtools_get_regs()
1974 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); in axienet_ethtools_get_regs()
1975 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); in axienet_ethtools_get_regs()
1976 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); in axienet_ethtools_get_regs()
1977 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); in axienet_ethtools_get_regs()
1978 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); in axienet_ethtools_get_regs()
1979 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); in axienet_ethtools_get_regs()
1980 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); in axienet_ethtools_get_regs()
1981 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); in axienet_ethtools_get_regs()
1982 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); in axienet_ethtools_get_regs()
1983 data[15] = axienet_ior(lp, XAE_TC_OFFSET); in axienet_ethtools_get_regs()
1984 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); in axienet_ethtools_get_regs()
1985 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); in axienet_ethtools_get_regs()
1986 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); in axienet_ethtools_get_regs()
1987 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); in axienet_ethtools_get_regs()
1988 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); in axienet_ethtools_get_regs()
1989 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); in axienet_ethtools_get_regs()
1990 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); in axienet_ethtools_get_regs()
1991 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); in axienet_ethtools_get_regs()
1992 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); in axienet_ethtools_get_regs()
1993 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); in axienet_ethtools_get_regs()
1994 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); in axienet_ethtools_get_regs()
1995 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); in axienet_ethtools_get_regs()
1996 if (!lp->use_dmaengine) { in axienet_ethtools_get_regs()
1997 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); in axienet_ethtools_get_regs()
1998 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); in axienet_ethtools_get_regs()
1999 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); in axienet_ethtools_get_regs()
2000 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); in axienet_ethtools_get_regs()
2001 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); in axienet_ethtools_get_regs()
2002 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); in axienet_ethtools_get_regs()
2003 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); in axienet_ethtools_get_regs()
2004 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); in axienet_ethtools_get_regs()
2014 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_get_ringparam() local
2020 ering->rx_pending = lp->rx_bd_num; in axienet_ethtools_get_ringparam()
2023 ering->tx_pending = lp->tx_bd_num; in axienet_ethtools_get_ringparam()
2032 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_set_ringparam() local
2044 lp->rx_bd_num = ering->rx_pending; in axienet_ethtools_set_ringparam()
2045 lp->tx_bd_num = ering->tx_pending; in axienet_ethtools_set_ringparam()
2062 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_get_pauseparam() local
2064 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); in axienet_ethtools_get_pauseparam()
2083 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_set_pauseparam() local
2085 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); in axienet_ethtools_set_pauseparam()
2090 * @lp: Device private data
2094 static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr, in axienet_update_coalesce_rx() argument
2097 spin_lock_irq(&lp->rx_cr_lock); in axienet_update_coalesce_rx()
2098 lp->rx_dma_cr &= ~mask; in axienet_update_coalesce_rx()
2099 lp->rx_dma_cr |= cr; in axienet_update_coalesce_rx()
2103 if (lp->rx_dma_started) { in axienet_update_coalesce_rx()
2104 u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); in axienet_update_coalesce_rx()
2108 cr = lp->rx_dma_cr; in axienet_update_coalesce_rx()
2110 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; in axienet_update_coalesce_rx()
2111 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); in axienet_update_coalesce_rx()
2113 spin_unlock_irq(&lp->rx_cr_lock); in axienet_update_coalesce_rx()
2118 * @lp: Device private data
2122 static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp) in axienet_dim_coalesce_count_rx() argument
2124 return min(1 << (lp->rx_dim.profile_ix << 1), 255); in axienet_dim_coalesce_count_rx()
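Note: axienet_dim_coalesce_count_rx() maps the DIM profile index straight to an RX coalesce frame count, min(1 << (ix << 1), 255), so successive profiles request 1, 4, 16, 64 and then saturate at 255 frames. A tiny standalone check of that mapping:

#include <stdio.h>

/*
 * Reproduces the mapping shown above: min(1 << (ix << 1), 255)
 * gives 1, 4, 16, 64 frames and then saturates at 255.
 */
int main(void)
{
        for (unsigned int ix = 0; ix < 6; ix++) {
                unsigned int count = 1u << (ix << 1);

                if (count > 255)
                        count = 255;
                printf("profile %u -> %u frames\n", ix, count);
        }
        return 0;
}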
2133 struct axienet_local *lp = in axienet_rx_dim_work() local
2135 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0); in axienet_rx_dim_work()
2139 axienet_update_coalesce_rx(lp, cr, mask); in axienet_rx_dim_work()
2140 lp->rx_dim.state = DIM_START_MEASURE; in axienet_rx_dim_work()
2145 * @lp: Device private data
2149 static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr, in axienet_update_coalesce_tx() argument
2152 spin_lock_irq(&lp->tx_cr_lock); in axienet_update_coalesce_tx()
2153 lp->tx_dma_cr &= ~mask; in axienet_update_coalesce_tx()
2154 lp->tx_dma_cr |= cr; in axienet_update_coalesce_tx()
2158 if (lp->tx_dma_started) { in axienet_update_coalesce_tx()
2159 u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); in axienet_update_coalesce_tx()
2163 cr = lp->tx_dma_cr; in axienet_update_coalesce_tx()
2165 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; in axienet_update_coalesce_tx()
2166 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); in axienet_update_coalesce_tx()
2168 spin_unlock_irq(&lp->tx_cr_lock); in axienet_update_coalesce_tx()
2190 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_get_coalesce() local
2193 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled; in axienet_ethtools_get_coalesce()
2195 spin_lock_irq(&lp->rx_cr_lock); in axienet_ethtools_get_coalesce()
2196 cr = lp->rx_dma_cr; in axienet_ethtools_get_coalesce()
2197 spin_unlock_irq(&lp->rx_cr_lock); in axienet_ethtools_get_coalesce()
2198 axienet_coalesce_params(lp, cr, in axienet_ethtools_get_coalesce()
2202 spin_lock_irq(&lp->tx_cr_lock); in axienet_ethtools_get_coalesce()
2203 cr = lp->tx_dma_cr; in axienet_ethtools_get_coalesce()
2204 spin_unlock_irq(&lp->tx_cr_lock); in axienet_ethtools_get_coalesce()
2205 axienet_coalesce_params(lp, cr, in axienet_ethtools_get_coalesce()
2230 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_set_coalesce() local
2232 bool old_dim = lp->rx_dim_enabled; in axienet_ethtools_set_coalesce()
2257 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), in axienet_ethtools_set_coalesce()
2261 WRITE_ONCE(lp->rx_dim_enabled, false); in axienet_ethtools_set_coalesce()
2262 napi_synchronize(&lp->napi_rx); in axienet_ethtools_set_coalesce()
2263 flush_work(&lp->rx_dim.work); in axienet_ethtools_set_coalesce()
2266 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames, in axienet_ethtools_set_coalesce()
2270 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs); in axienet_ethtools_set_coalesce()
2274 axienet_update_coalesce_rx(lp, cr, mask); in axienet_ethtools_set_coalesce()
2276 WRITE_ONCE(lp->rx_dim_enabled, true); in axienet_ethtools_set_coalesce()
2278 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames, in axienet_ethtools_set_coalesce()
2280 axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK); in axienet_ethtools_set_coalesce()
2288 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_get_link_ksettings() local
2290 return phylink_ethtool_ksettings_get(lp->phylink, cmd); in axienet_ethtools_get_link_ksettings()
2297 struct axienet_local *lp = netdev_priv(ndev); in axienet_ethtools_set_link_ksettings() local
2299 return phylink_ethtool_ksettings_set(lp->phylink, cmd); in axienet_ethtools_set_link_ksettings()
2304 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtools_nway_reset() local
2306 return phylink_ethtool_nway_reset(lp->phylink); in axienet_ethtools_nway_reset()
2313 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtools_get_ethtool_stats() local
2317 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtools_get_ethtool_stats()
2318 data[0] = axienet_stat(lp, STAT_RX_BYTES); in axienet_ethtools_get_ethtool_stats()
2319 data[1] = axienet_stat(lp, STAT_TX_BYTES); in axienet_ethtools_get_ethtool_stats()
2320 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES); in axienet_ethtools_get_ethtool_stats()
2321 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES); in axienet_ethtools_get_ethtool_stats()
2322 data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES); in axienet_ethtools_get_ethtool_stats()
2323 data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES); in axienet_ethtools_get_ethtool_stats()
2324 data[8] = axienet_stat(lp, STAT_USER_DEFINED0); in axienet_ethtools_get_ethtool_stats()
2325 data[9] = axienet_stat(lp, STAT_USER_DEFINED1); in axienet_ethtools_get_ethtool_stats()
2326 data[10] = axienet_stat(lp, STAT_USER_DEFINED2); in axienet_ethtools_get_ethtool_stats()
2327 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtools_get_ethtool_stats()
2354 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtools_get_sset_count() local
2358 if (lp->features & XAE_FEATURE_STATS) in axienet_ethtools_get_sset_count()
2370 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtools_get_pause_stats() local
2373 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtools_get_pause_stats()
2377 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtools_get_pause_stats()
2379 axienet_stat(lp, STAT_TX_PAUSE_FRAMES); in axienet_ethtools_get_pause_stats()
2381 axienet_stat(lp, STAT_RX_PAUSE_FRAMES); in axienet_ethtools_get_pause_stats()
2382 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtools_get_pause_stats()
2389 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtool_get_eth_mac_stats() local
2392 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtool_get_eth_mac_stats()
2396 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtool_get_eth_mac_stats()
2398 axienet_stat(lp, STAT_TX_GOOD_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2400 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2402 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2404 axienet_stat(lp, STAT_RX_GOOD_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2406 axienet_stat(lp, STAT_RX_FCS_ERRORS); in axienet_ethtool_get_eth_mac_stats()
2408 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); in axienet_ethtool_get_eth_mac_stats()
2410 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2412 axienet_stat(lp, STAT_TX_LATE_COLLISIONS); in axienet_ethtool_get_eth_mac_stats()
2414 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); in axienet_ethtool_get_eth_mac_stats()
2416 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2418 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2420 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL); in axienet_ethtool_get_eth_mac_stats()
2422 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2424 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES); in axienet_ethtool_get_eth_mac_stats()
2426 axienet_stat(lp, STAT_RX_LENGTH_ERRORS); in axienet_ethtool_get_eth_mac_stats()
2427 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtool_get_eth_mac_stats()
2434 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtool_get_eth_ctrl_stats() local
2437 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtool_get_eth_ctrl_stats()
2441 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtool_get_eth_ctrl_stats()
2443 axienet_stat(lp, STAT_TX_CONTROL_FRAMES); in axienet_ethtool_get_eth_ctrl_stats()
2445 axienet_stat(lp, STAT_RX_CONTROL_FRAMES); in axienet_ethtool_get_eth_ctrl_stats()
2447 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS); in axienet_ethtool_get_eth_ctrl_stats()
2448 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtool_get_eth_ctrl_stats()
2467 struct axienet_local *lp = netdev_priv(dev); in axienet_ethtool_get_rmon_stats() local
2470 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtool_get_rmon_stats()
2474 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtool_get_rmon_stats()
2476 axienet_stat(lp, STAT_UNDERSIZE_FRAMES); in axienet_ethtool_get_rmon_stats()
2478 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES); in axienet_ethtool_get_rmon_stats()
2480 axienet_stat(lp, STAT_FRAGMENT_FRAMES); in axienet_ethtool_get_rmon_stats()
2483 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2485 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2487 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2489 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2491 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2493 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2498 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2500 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2502 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2504 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2506 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2508 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES); in axienet_ethtool_get_rmon_stats()
2510 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES); in axienet_ethtool_get_rmon_stats()
2511 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtool_get_rmon_stats()
2570 struct axienet_local *lp = netdev_priv(ndev); in axienet_pcs_config() local
2573 if (lp->switch_x_sgmii) { in axienet_pcs_config()
2603 struct axienet_local *lp = netdev_priv(ndev); in axienet_mac_select_pcs() local
2607 return &lp->pcs; in axienet_mac_select_pcs()
2632 struct axienet_local *lp = netdev_priv(ndev); in axienet_mac_link_up() local
2635 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); in axienet_mac_link_up()
2654 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); in axienet_mac_link_up()
2656 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); in axienet_mac_link_up()
2665 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); in axienet_mac_link_up()
2687 struct axienet_local *lp = container_of(work, struct axienet_local, in axienet_dma_err_handler() local
2689 struct net_device *ndev = lp->ndev; in axienet_dma_err_handler()
2692 if (READ_ONCE(lp->stopping)) in axienet_dma_err_handler()
2695 napi_disable(&lp->napi_tx); in axienet_dma_err_handler()
2696 napi_disable(&lp->napi_rx); in axienet_dma_err_handler()
2698 axienet_setoptions(ndev, lp->options & in axienet_dma_err_handler()
2701 axienet_dma_stop(lp); in axienet_dma_err_handler()
2704 for (i = 0; i < lp->tx_bd_num; i++) { in axienet_dma_err_handler()
2705 cur_p = &lp->tx_bd_v[i]; in axienet_dma_err_handler()
2707 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); in axienet_dma_err_handler()
2709 dma_unmap_single(lp->dev, addr, in axienet_dma_err_handler()
2728 for (i = 0; i < lp->rx_bd_num; i++) { in axienet_dma_err_handler()
2729 cur_p = &lp->rx_bd_v[i]; in axienet_dma_err_handler()
2738 lp->tx_bd_ci = 0; in axienet_dma_err_handler()
2739 lp->tx_bd_tail = 0; in axienet_dma_err_handler()
2740 lp->rx_bd_ci = 0; in axienet_dma_err_handler()
2742 axienet_dma_start(lp); in axienet_dma_err_handler()
2744 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); in axienet_dma_err_handler()
2746 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); in axienet_dma_err_handler()
2748 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); in axienet_dma_err_handler()
2750 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); in axienet_dma_err_handler()
2751 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? in axienet_dma_err_handler()
2753 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); in axienet_dma_err_handler()
2758 axienet_setoptions(ndev, lp->options & in axienet_dma_err_handler()
2762 napi_enable(&lp->napi_rx); in axienet_dma_err_handler()
2763 napi_enable(&lp->napi_tx); in axienet_dma_err_handler()
2764 axienet_setoptions(ndev, lp->options); in axienet_dma_err_handler()
2783 struct axienet_local *lp; in axienet_probe() local
2790 ndev = alloc_etherdev(sizeof(*lp)); in axienet_probe()
2804 lp = netdev_priv(ndev); in axienet_probe()
2805 lp->ndev = ndev; in axienet_probe()
2806 lp->dev = &pdev->dev; in axienet_probe()
2807 lp->options = XAE_OPTION_DEFAULTS; in axienet_probe()
2808 lp->rx_bd_num = RX_BD_NUM_DEFAULT; in axienet_probe()
2809 lp->tx_bd_num = TX_BD_NUM_DEFAULT; in axienet_probe()
2811 u64_stats_init(&lp->rx_stat_sync); in axienet_probe()
2812 u64_stats_init(&lp->tx_stat_sync); in axienet_probe()
2814 mutex_init(&lp->stats_lock); in axienet_probe()
2815 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock); in axienet_probe()
2816 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats); in axienet_probe()
2818 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); in axienet_probe()
2819 if (!lp->axi_clk) { in axienet_probe()
2823 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); in axienet_probe()
2825 if (IS_ERR(lp->axi_clk)) { in axienet_probe()
2826 ret = PTR_ERR(lp->axi_clk); in axienet_probe()
2829 ret = clk_prepare_enable(lp->axi_clk); in axienet_probe()
2835 lp->misc_clks[0].id = "axis_clk"; in axienet_probe()
2836 lp->misc_clks[1].id = "ref_clk"; in axienet_probe()
2837 lp->misc_clks[2].id = "mgt_clk"; in axienet_probe()
2839 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_probe()
2843 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_probe()
2848 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ðres); in axienet_probe()
2849 if (IS_ERR(lp->regs)) { in axienet_probe()
2850 ret = PTR_ERR(lp->regs); in axienet_probe()
2853 lp->regs_start = ethres->start; in axienet_probe()
2856 lp->features = 0; in axienet_probe()
2858 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS) in axienet_probe()
2859 lp->features |= XAE_FEATURE_STATS; in axienet_probe()
2865 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; in axienet_probe()
2870 lp->features |= XAE_FEATURE_FULL_TX_CSUM; in axienet_probe()
2880 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; in axienet_probe()
2884 lp->features |= XAE_FEATURE_FULL_RX_CSUM; in axienet_probe()
2895 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); in axienet_probe()
2897 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, in axienet_probe()
2906 lp->phy_mode = PHY_INTERFACE_MODE_MII; in axienet_probe()
2909 lp->phy_mode = PHY_INTERFACE_MODE_GMII; in axienet_probe()
2912 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; in axienet_probe()
2915 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; in axienet_probe()
2918 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; in axienet_probe()
2925 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); in axienet_probe()
2929 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && in axienet_probe()
2930 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { in axienet_probe()
2950 lp->dma_regs = devm_ioremap_resource(&pdev->dev, in axienet_probe()
2952 lp->rx_irq = irq_of_parse_and_map(np, 1); in axienet_probe()
2953 lp->tx_irq = irq_of_parse_and_map(np, 0); in axienet_probe()
2955 lp->eth_irq = platform_get_irq_optional(pdev, 0); in axienet_probe()
2958 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); in axienet_probe()
2959 lp->rx_irq = platform_get_irq(pdev, 1); in axienet_probe()
2960 lp->tx_irq = platform_get_irq(pdev, 0); in axienet_probe()
2961 lp->eth_irq = platform_get_irq_optional(pdev, 2); in axienet_probe()
2963 if (IS_ERR(lp->dma_regs)) { in axienet_probe()
2965 ret = PTR_ERR(lp->dma_regs); in axienet_probe()
2968 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { in axienet_probe()
2975 ret = __axienet_device_reset(lp); in axienet_probe()
2987 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { in axienet_probe()
2988 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; in axienet_probe()
2994 lp->features |= XAE_FEATURE_DMA_64BIT; in axienet_probe()
3002 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { in axienet_probe()
3013 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); in axienet_probe()
3014 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); in axienet_probe()
3019 lp->eth_irq = platform_get_irq_optional(pdev, 0); in axienet_probe()
3020 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { in axienet_probe()
3021 ret = lp->eth_irq; in axienet_probe()
3024 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); in axienet_probe()
3027 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); in axienet_probe()
3041 lp->use_dmaengine = 1; in axienet_probe()
3044 if (lp->use_dmaengine) in axienet_probe()
3049 if (lp->eth_irq <= 0) in axienet_probe()
3062 spin_lock_init(&lp->rx_cr_lock); in axienet_probe()
3063 spin_lock_init(&lp->tx_cr_lock); in axienet_probe()
3064 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work); in axienet_probe()
3065 lp->rx_dim_enabled = true; in axienet_probe()
3066 lp->rx_dim.profile_ix = 1; in axienet_probe()
3067 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), in axienet_probe()
3069 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD, in axienet_probe()
3072 ret = axienet_mdio_setup(lp); in axienet_probe()
3077 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || in axienet_probe()
3078 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { in axienet_probe()
3092 lp->pcs_phy = of_mdio_find_device(np); in axienet_probe()
3093 if (!lp->pcs_phy) { in axienet_probe()
3099 lp->pcs.ops = &axienet_pcs_ops; in axienet_probe()
3100 lp->pcs.poll = true; in axienet_probe()
3103 lp->phylink_config.dev = &ndev->dev; in axienet_probe()
3104 lp->phylink_config.type = PHYLINK_NETDEV; in axienet_probe()
3105 lp->phylink_config.mac_managed_pm = true; in axienet_probe()
3106 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | in axienet_probe()
3109 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); in axienet_probe()
3110 if (lp->switch_x_sgmii) { in axienet_probe()
3112 lp->phylink_config.supported_interfaces); in axienet_probe()
3114 lp->phylink_config.supported_interfaces); in axienet_probe()
3117 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, in axienet_probe()
3118 lp->phy_mode, in axienet_probe()
3120 if (IS_ERR(lp->phylink)) { in axienet_probe()
3121 ret = PTR_ERR(lp->phylink); in axienet_probe()
3126 ret = register_netdev(lp->ndev); in axienet_probe()
3128 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); in axienet_probe()
3135 phylink_destroy(lp->phylink); in axienet_probe()
3138 if (lp->pcs_phy) in axienet_probe()
3139 put_device(&lp->pcs_phy->dev); in axienet_probe()
3140 if (lp->mii_bus) in axienet_probe()
3141 axienet_mdio_teardown(lp); in axienet_probe()
3143 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_probe()
3144 clk_disable_unprepare(lp->axi_clk); in axienet_probe()
3155 struct axienet_local *lp = netdev_priv(ndev); in axienet_remove() local
3159 if (lp->phylink) in axienet_remove()
3160 phylink_destroy(lp->phylink); in axienet_remove()
3162 if (lp->pcs_phy) in axienet_remove()
3163 put_device(&lp->pcs_phy->dev); in axienet_remove()
3165 axienet_mdio_teardown(lp); in axienet_remove()
3167 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_remove()
3168 clk_disable_unprepare(lp->axi_clk); in axienet_remove()