Lines matching "pps", "channel" — drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
3 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
20 #include "xgbe-common.h"
67 for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) { in xgbe_free_channels()
68 if (!pdata->channel[i]) in xgbe_free_channels()
71 kfree(pdata->channel[i]->rx_ring); in xgbe_free_channels()
72 kfree(pdata->channel[i]->tx_ring); in xgbe_free_channels()
73 kfree(pdata->channel[i]); in xgbe_free_channels()
75 pdata->channel[i] = NULL; in xgbe_free_channels()
78 pdata->channel_count = 0; in xgbe_free_channels()
83 struct xgbe_channel *channel; in xgbe_alloc_channels() local
89 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); in xgbe_alloc_channels()
92 cpu = cpumask_local_spread(i, dev_to_node(pdata->dev)); in xgbe_alloc_channels()
97 channel = xgbe_alloc_node(sizeof(*channel), node); in xgbe_alloc_channels()
98 if (!channel) in xgbe_alloc_channels()
100 pdata->channel[i] = channel; in xgbe_alloc_channels()
102 snprintf(channel->name, sizeof(channel->name), "channel-%u", i); in xgbe_alloc_channels()
103 channel->pdata = pdata; in xgbe_alloc_channels()
104 channel->queue_index = i; in xgbe_alloc_channels()
105 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + in xgbe_alloc_channels()
107 channel->node = node; in xgbe_alloc_channels()
108 cpumask_set_cpu(cpu, &channel->affinity_mask); in xgbe_alloc_channels()
110 if (pdata->per_channel_irq) in xgbe_alloc_channels()
111 channel->dma_irq = pdata->channel_irq[i]; in xgbe_alloc_channels()
113 if (i < pdata->tx_ring_count) { in xgbe_alloc_channels()
118 spin_lock_init(&ring->lock); in xgbe_alloc_channels()
119 ring->node = node; in xgbe_alloc_channels()
121 channel->tx_ring = ring; in xgbe_alloc_channels()
124 if (i < pdata->rx_ring_count) { in xgbe_alloc_channels()
129 spin_lock_init(&ring->lock); in xgbe_alloc_channels()
130 ring->node = node; in xgbe_alloc_channels()
132 channel->rx_ring = ring; in xgbe_alloc_channels()
135 netif_dbg(pdata, drv, pdata->netdev, in xgbe_alloc_channels()
136 "%s: cpu=%u, node=%d\n", channel->name, cpu, node); in xgbe_alloc_channels()
138 netif_dbg(pdata, drv, pdata->netdev, in xgbe_alloc_channels()
140 channel->name, channel->dma_regs, channel->dma_irq, in xgbe_alloc_channels()
141 channel->tx_ring, channel->rx_ring); in xgbe_alloc_channels()
144 pdata->channel_count = count; in xgbe_alloc_channels()
151 return -ENOMEM; in xgbe_alloc_channels()
156 return (ring->rdesc_count - (ring->cur - ring->dirty)); in xgbe_tx_avail_desc()
161 return (ring->cur - ring->dirty); in xgbe_rx_dirty_desc()
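The two helpers above rely on cur and dirty being free-running unsigned counters: plain unsigned subtraction yields the in-flight descriptor count correctly even after the counters wrap past UINT_MAX. A minimal userspace model of that arithmetic (the names and ring size here are illustrative, not the driver's):

#include <assert.h>

struct ring { unsigned int rdesc_count, cur, dirty; };

static unsigned int tx_avail(const struct ring *r)
{
    return r->rdesc_count - (r->cur - r->dirty);
}

int main(void)
{
    struct ring r = { .rdesc_count = 512,
                      .cur = 4294967290u, .dirty = 4294967280u };

    assert(r.cur - r.dirty == 10);   /* 10 descriptors in flight */
    assert(tx_avail(&r) == 502);

    r.cur += 20;                     /* cur wraps past zero */
    assert(r.cur - r.dirty == 30);   /* subtraction still correct */
    assert(tx_avail(&r) == 482);
    return 0;
}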
164 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, in xgbe_maybe_stop_tx_queue() argument
167 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_maybe_stop_tx_queue()
170 netif_info(pdata, drv, pdata->netdev, in xgbe_maybe_stop_tx_queue()
172 netif_stop_subqueue(pdata->netdev, channel->queue_index); in xgbe_maybe_stop_tx_queue()
173 ring->tx.queue_stopped = 1; in xgbe_maybe_stop_tx_queue()
178 if (ring->tx.xmit_more) in xgbe_maybe_stop_tx_queue()
179 pdata->hw_if.tx_start_xmit(channel, ring); in xgbe_maybe_stop_tx_queue()
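xgbe_maybe_stop_tx_queue() stops the subqueue when a worst-case packet might not fit, and (per the tx_poll fragments further down) the queue is only woken once enough descriptors have been reclaimed. A self-contained sketch of that stop/wake hysteresis, with an arbitrary wake watermark standing in for the driver's actual threshold:

#include <stdbool.h>
#include <stdio.h>

#define DESC_COUNT 8u

static unsigned int cur, dirty;
static bool stopped;

static unsigned int avail(void) { return DESC_COUNT - (cur - dirty); }

static bool xmit(unsigned int ndesc)
{
    if (ndesc > avail()) {
        stopped = true;              /* netif_stop_subqueue() */
        return false;                /* caller returns NETDEV_TX_BUSY */
    }
    cur += ndesc;
    return true;
}

static void tx_complete(unsigned int ndesc)
{
    dirty += ndesc;
    if (stopped && avail() > DESC_COUNT / 2)
        stopped = false;             /* netif_tx_wake_queue() */
}

int main(void)
{
    xmit(6);        printf("avail=%u stopped=%d\n", avail(), stopped);
    xmit(4);        printf("avail=%u stopped=%d\n", avail(), stopped);
    tx_complete(6); printf("avail=%u stopped=%d\n", avail(), stopped);
    return 0;
}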
194 rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & in xgbe_calc_rx_buf_size()
195 ~(XGBE_RX_BUF_ALIGN - 1); in xgbe_calc_rx_buf_size()
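The buffer-size calculation uses the standard round-up-to-power-of-two mask trick: (x + a - 1) & ~(a - 1). Assuming a 64-byte alignment for illustration (the driver's actual XGBE_RX_BUF_ALIGN is defined in xgbe.h), a 1522-byte frame rounds up to 1536:

#include <assert.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    assert(ALIGN_UP(1522u, 64u) == 1536u);
    assert(ALIGN_UP(1536u, 64u) == 1536u); /* already aligned: unchanged */
    return 0;
}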
201 struct xgbe_channel *channel) in xgbe_enable_rx_tx_int() argument
203 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_enable_rx_tx_int()
206 if (channel->tx_ring && channel->rx_ring) in xgbe_enable_rx_tx_int()
208 else if (channel->tx_ring) in xgbe_enable_rx_tx_int()
210 else if (channel->rx_ring) in xgbe_enable_rx_tx_int()
215 hw_if->enable_int(channel, int_id); in xgbe_enable_rx_tx_int()
222 for (i = 0; i < pdata->channel_count; i++) in xgbe_enable_rx_tx_ints()
223 xgbe_enable_rx_tx_int(pdata, pdata->channel[i]); in xgbe_enable_rx_tx_ints()
227 struct xgbe_channel *channel) in xgbe_disable_rx_tx_int() argument
229 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_disable_rx_tx_int()
232 if (channel->tx_ring && channel->rx_ring) in xgbe_disable_rx_tx_int()
234 else if (channel->tx_ring) in xgbe_disable_rx_tx_int()
236 else if (channel->rx_ring) in xgbe_disable_rx_tx_int()
241 hw_if->disable_int(channel, int_id); in xgbe_disable_rx_tx_int()
248 for (i = 0; i < pdata->channel_count; i++) in xgbe_disable_rx_tx_ints()
249 xgbe_disable_rx_tx_int(pdata, pdata->channel[i]); in xgbe_disable_rx_tx_ints()
263 dev_warn_once(pdata->dev, in xgbe_ecc_sec()
268 dev_warn_once(pdata->dev, in xgbe_ecc_sec()
288 netdev_alert(pdata->netdev, in xgbe_ecc_ded()
306 netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr); in xgbe_ecc_isr_bh_work()
309 stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period, in xgbe_ecc_isr_bh_work()
310 &pdata->tx_ded_count, "TX fifo"); in xgbe_ecc_isr_bh_work()
314 stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period, in xgbe_ecc_isr_bh_work()
315 &pdata->rx_ded_count, "RX fifo"); in xgbe_ecc_isr_bh_work()
319 stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period, in xgbe_ecc_isr_bh_work()
320 &pdata->desc_ded_count, in xgbe_ecc_isr_bh_work()
325 pdata->hw_if.disable_ecc_ded(pdata); in xgbe_ecc_isr_bh_work()
326 schedule_work(&pdata->stopdev_work); in xgbe_ecc_isr_bh_work()
331 if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period, in xgbe_ecc_isr_bh_work()
332 &pdata->tx_sec_count, "TX fifo")) in xgbe_ecc_isr_bh_work()
333 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX); in xgbe_ecc_isr_bh_work()
337 if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period, in xgbe_ecc_isr_bh_work()
338 &pdata->rx_sec_count, "RX fifo")) in xgbe_ecc_isr_bh_work()
339 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX); in xgbe_ecc_isr_bh_work()
342 if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period, in xgbe_ecc_isr_bh_work()
343 &pdata->desc_sec_count, "descriptor cache")) in xgbe_ecc_isr_bh_work()
344 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC); in xgbe_ecc_isr_bh_work()
351 if (pdata->vdata->irq_reissue_support) in xgbe_ecc_isr_bh_work()
359 if (pdata->isr_as_bh_work) in xgbe_ecc_isr()
360 queue_work(system_bh_wq, &pdata->ecc_bh_work); in xgbe_ecc_isr()
362 xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work); in xgbe_ecc_isr()
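The SEC and DED handlers above share one pattern: an error inside the current window bumps a counter, the first error after the window expires restarts it, and crossing a threshold triggers the warn/disable action. A portable model of that windowed counter (the period and threshold here are arbitrary; the driver takes its values from module parameters):

#include <stdbool.h>
#include <stdio.h>

struct ecc_window { unsigned long deadline; unsigned int count; };

/* now/period in arbitrary ticks; true once count crosses threshold */
static bool ecc_event(struct ecc_window *w, unsigned long now,
                      unsigned long period, unsigned int threshold)
{
    if (now < w->deadline) {
        w->count++;
    } else {
        w->deadline = now + period;  /* window expired: restart it */
        w->count = 1;
    }
    return w->count > threshold;
}

int main(void)
{
    struct ecc_window w = { 0, 0 };

    for (unsigned long t = 0; t < 10; t++)
        if (ecc_event(&w, t, 5, 3))
            printf("tick %lu: threshold exceeded, disable reporting\n", t);
    return 0;
}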
370 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_isr_bh_work()
371 struct xgbe_channel *channel; in xgbe_isr_bh_work() local
378 * this register to be non-zero in xgbe_isr_bh_work()
384 netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); in xgbe_isr_bh_work()
386 for (i = 0; i < pdata->channel_count; i++) { in xgbe_isr_bh_work()
390 channel = pdata->channel[i]; in xgbe_isr_bh_work()
392 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); in xgbe_isr_bh_work()
393 netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", in xgbe_isr_bh_work()
397 * per channel DMA interrupts. Check to be sure those are not in xgbe_isr_bh_work()
400 if (!pdata->per_channel_irq && in xgbe_isr_bh_work()
403 if (napi_schedule_prep(&pdata->napi)) { in xgbe_isr_bh_work()
408 __napi_schedule(&pdata->napi); in xgbe_isr_bh_work()
411 /* Don't clear Rx/Tx status if doing per channel DMA in xgbe_isr_bh_work()
413 * per channel DMA interrupts. in xgbe_isr_bh_work()
420 pdata->ext_stats.rx_buffer_unavailable++; in xgbe_isr_bh_work()
424 schedule_work(&pdata->restart_work); in xgbe_isr_bh_work()
427 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); in xgbe_isr_bh_work()
433 netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n", in xgbe_isr_bh_work()
437 hw_if->tx_mmc_int(pdata); in xgbe_isr_bh_work()
440 hw_if->rx_mmc_int(pdata); in xgbe_isr_bh_work()
445 netif_dbg(pdata, intr, pdata->netdev, in xgbe_isr_bh_work()
450 pdata->tx_tstamp = in xgbe_isr_bh_work()
452 queue_work(pdata->dev_workqueue, in xgbe_isr_bh_work()
453 &pdata->tx_tstamp_work); in xgbe_isr_bh_work()
460 netif_dbg(pdata, intr, pdata->netdev, in xgbe_isr_bh_work()
465 complete(&pdata->mdio_complete); in xgbe_isr_bh_work()
471 if (pdata->dev_irq == pdata->an_irq) in xgbe_isr_bh_work()
472 pdata->phy_if.an_isr(pdata); in xgbe_isr_bh_work()
475 if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq)) in xgbe_isr_bh_work()
476 xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work); in xgbe_isr_bh_work()
479 if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq)) in xgbe_isr_bh_work()
480 pdata->i2c_if.i2c_isr(pdata); in xgbe_isr_bh_work()
483 if (pdata->vdata->irq_reissue_support) { in xgbe_isr_bh_work()
487 if (!pdata->per_channel_irq) in xgbe_isr_bh_work()
498 if (pdata->isr_as_bh_work) in xgbe_isr()
499 queue_work(system_bh_wq, &pdata->dev_bh_work); in xgbe_isr()
501 xgbe_isr_bh_work(&pdata->dev_bh_work); in xgbe_isr()
508 struct xgbe_channel *channel = data; in xgbe_dma_isr() local
509 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_dma_isr()
512 /* Per channel DMA interrupts are enabled, so we use the per in xgbe_dma_isr()
513 * channel napi structure and not the private data napi structure in xgbe_dma_isr()
515 if (napi_schedule_prep(&channel->napi)) { in xgbe_dma_isr()
517 if (pdata->channel_irq_mode) in xgbe_dma_isr()
518 xgbe_disable_rx_tx_int(pdata, channel); in xgbe_dma_isr()
520 disable_irq_nosync(channel->dma_irq); in xgbe_dma_isr()
523 __napi_schedule_irqoff(&channel->napi); in xgbe_dma_isr()
530 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status); in xgbe_dma_isr()
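Condensed from xgbe_dma_isr() above, this is the standard per-channel IRQ-to-NAPI handoff: mask the source, schedule the poll, and let the poll routine re-enable the interrupt when it completes under budget. A sketch only, with the channel_irq_mode branch elided, not a drop-in replacement:

static irqreturn_t xgbe_dma_isr_sketch(int irq, void *data)
{
	struct xgbe_channel *channel = data;

	/* napi_schedule_prep() fails if the poll is already scheduled;
	 * either way the IRQ stays disabled until xgbe_one_poll()
	 * finishes and re-enables it.
	 */
	if (napi_schedule_prep(&channel->napi)) {
		disable_irq_nosync(channel->dma_irq);
		__napi_schedule_irqoff(&channel->napi);
	}

	/* write-1-to-clear the channel status so the line deasserts */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
			  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

	return IRQ_HANDLED;
}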
537 struct xgbe_channel *channel = timer_container_of(channel, t, in xgbe_tx_timer() local
539 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_timer()
542 DBGPR("-->xgbe_tx_timer\n"); in xgbe_tx_timer()
544 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xgbe_tx_timer()
548 if (pdata->per_channel_irq) in xgbe_tx_timer()
549 if (pdata->channel_irq_mode) in xgbe_tx_timer()
550 xgbe_disable_rx_tx_int(pdata, channel); in xgbe_tx_timer()
552 disable_irq_nosync(channel->dma_irq); in xgbe_tx_timer()
560 channel->tx_timer_active = 0; in xgbe_tx_timer()
562 DBGPR("<--xgbe_tx_timer\n"); in xgbe_tx_timer()
571 pdata->phy_if.phy_status(pdata); in xgbe_service()
578 struct xgbe_channel *channel; in xgbe_service_timer() local
581 queue_work(pdata->dev_workqueue, &pdata->service_work); in xgbe_service_timer()
583 mod_timer(&pdata->service_timer, jiffies + HZ); in xgbe_service_timer()
585 if (!pdata->tx_usecs) in xgbe_service_timer()
588 for (i = 0; i < pdata->channel_count; i++) { in xgbe_service_timer()
589 channel = pdata->channel[i]; in xgbe_service_timer()
590 if (!channel->tx_ring || channel->tx_timer_active) in xgbe_service_timer()
592 channel->tx_timer_active = 1; in xgbe_service_timer()
593 mod_timer(&channel->tx_timer, in xgbe_service_timer()
594 jiffies + usecs_to_jiffies(pdata->tx_usecs)); in xgbe_service_timer()
600 struct xgbe_channel *channel; in xgbe_init_timers() local
603 timer_setup(&pdata->service_timer, xgbe_service_timer, 0); in xgbe_init_timers()
605 for (i = 0; i < pdata->channel_count; i++) { in xgbe_init_timers()
606 channel = pdata->channel[i]; in xgbe_init_timers()
607 if (!channel->tx_ring) in xgbe_init_timers()
610 timer_setup(&channel->tx_timer, xgbe_tx_timer, 0); in xgbe_init_timers()
616 mod_timer(&pdata->service_timer, jiffies + HZ); in xgbe_start_timers()
621 struct xgbe_channel *channel; in xgbe_stop_timers() local
624 timer_delete_sync(&pdata->service_timer); in xgbe_stop_timers()
626 for (i = 0; i < pdata->channel_count; i++) { in xgbe_stop_timers()
627 channel = pdata->channel[i]; in xgbe_stop_timers()
628 if (!channel->tx_ring) in xgbe_stop_timers()
632 timer_delete_sync(&channel->tx_timer); in xgbe_stop_timers()
633 channel->tx_timer_active = 0; in xgbe_stop_timers()
640 struct xgbe_hw_features *hw_feat = &pdata->hw_feat; in xgbe_get_all_hw_features()
648 hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); in xgbe_get_all_hw_features()
651 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); in xgbe_get_all_hw_features()
652 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); in xgbe_get_all_hw_features()
653 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); in xgbe_get_all_hw_features()
654 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); in xgbe_get_all_hw_features()
655 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); in xgbe_get_all_hw_features()
656 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); in xgbe_get_all_hw_features()
657 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); in xgbe_get_all_hw_features()
658 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); in xgbe_get_all_hw_features()
659 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); in xgbe_get_all_hw_features()
660 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); in xgbe_get_all_hw_features()
661 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); in xgbe_get_all_hw_features()
662 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, in xgbe_get_all_hw_features()
664 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); in xgbe_get_all_hw_features()
665 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); in xgbe_get_all_hw_features()
666 hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN); in xgbe_get_all_hw_features()
669 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
671 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
673 hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD); in xgbe_get_all_hw_features()
674 hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); in xgbe_get_all_hw_features()
675 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); in xgbe_get_all_hw_features()
676 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); in xgbe_get_all_hw_features()
677 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); in xgbe_get_all_hw_features()
678 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); in xgbe_get_all_hw_features()
679 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); in xgbe_get_all_hw_features()
680 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); in xgbe_get_all_hw_features()
681 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
683 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, in xgbe_get_all_hw_features()
687 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); in xgbe_get_all_hw_features()
688 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); in xgbe_get_all_hw_features()
689 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); in xgbe_get_all_hw_features()
690 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); in xgbe_get_all_hw_features()
691 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); in xgbe_get_all_hw_features()
692 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); in xgbe_get_all_hw_features()
695 if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) { in xgbe_get_all_hw_features()
696 dev_warn(pdata->dev, in xgbe_get_all_hw_features()
697 "Hardware reports %u PPS outputs, limiting to %u\n", in xgbe_get_all_hw_features()
698 hw_feat->pps_out_num, XGBE_MAX_PPS_OUT); in xgbe_get_all_hw_features()
699 hw_feat->pps_out_num = XGBE_MAX_PPS_OUT; in xgbe_get_all_hw_features()
702 if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) { in xgbe_get_all_hw_features()
703 dev_warn(pdata->dev, in xgbe_get_all_hw_features()
705 hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP); in xgbe_get_all_hw_features()
706 hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP; in xgbe_get_all_hw_features()
710 switch (hw_feat->hash_table_size) { in xgbe_get_all_hw_features()
714 hw_feat->hash_table_size = 64; in xgbe_get_all_hw_features()
717 hw_feat->hash_table_size = 128; in xgbe_get_all_hw_features()
720 hw_feat->hash_table_size = 256; in xgbe_get_all_hw_features()
725 switch (hw_feat->dma_width) { in xgbe_get_all_hw_features()
727 hw_feat->dma_width = 32; in xgbe_get_all_hw_features()
730 hw_feat->dma_width = 40; in xgbe_get_all_hw_features()
733 hw_feat->dma_width = 48; in xgbe_get_all_hw_features()
736 hw_feat->dma_width = 32; in xgbe_get_all_hw_features()
739 /* The Queue, Channel and TC counts are zero based so increment them in xgbe_get_all_hw_features()
742 hw_feat->rx_q_cnt++; in xgbe_get_all_hw_features()
743 hw_feat->tx_q_cnt++; in xgbe_get_all_hw_features()
744 hw_feat->rx_ch_cnt++; in xgbe_get_all_hw_features()
745 hw_feat->tx_ch_cnt++; in xgbe_get_all_hw_features()
746 hw_feat->tc_cnt++; in xgbe_get_all_hw_features()
749 hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); in xgbe_get_all_hw_features()
750 hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); in xgbe_get_all_hw_features()
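Two decode details worth noting: the HWF1R FIFO-size fields encode log2(bytes) - 7, hence the `1 << (n + 7)` at lines 749-750, and the queue/channel/TC counts are zero-based raw values, hence the increments at lines 742-746. A quick runnable check of both:

#include <assert.h>

int main(void)
{
    unsigned int fifo_field = 6;             /* log2(bytes) - 7 */
    assert((1u << (fifo_field + 7)) == 8192); /* field 6 -> 8 KB FIFO */

    unsigned int raw_rxqcnt = 15;            /* zero-based count */
    assert(raw_rxqcnt + 1 == 16);            /* raw 15 -> 16 Rx queues */
    return 0;
}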
753 dev_dbg(pdata->dev, "Hardware features:\n"); in xgbe_get_all_hw_features()
756 dev_dbg(pdata->dev, " 1GbE support : %s\n", in xgbe_get_all_hw_features()
757 hw_feat->gmii ? "yes" : "no"); in xgbe_get_all_hw_features()
758 dev_dbg(pdata->dev, " VLAN hash filter : %s\n", in xgbe_get_all_hw_features()
759 hw_feat->vlhash ? "yes" : "no"); in xgbe_get_all_hw_features()
760 dev_dbg(pdata->dev, " MDIO interface : %s\n", in xgbe_get_all_hw_features()
761 hw_feat->sma ? "yes" : "no"); in xgbe_get_all_hw_features()
762 dev_dbg(pdata->dev, " Wake-up packet support : %s\n", in xgbe_get_all_hw_features()
763 hw_feat->rwk ? "yes" : "no"); in xgbe_get_all_hw_features()
764 dev_dbg(pdata->dev, " Magic packet support : %s\n", in xgbe_get_all_hw_features()
765 hw_feat->mgk ? "yes" : "no"); in xgbe_get_all_hw_features()
766 dev_dbg(pdata->dev, " Management counters : %s\n", in xgbe_get_all_hw_features()
767 hw_feat->mmc ? "yes" : "no"); in xgbe_get_all_hw_features()
768 dev_dbg(pdata->dev, " ARP offload : %s\n", in xgbe_get_all_hw_features()
769 hw_feat->aoe ? "yes" : "no"); in xgbe_get_all_hw_features()
770 dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n", in xgbe_get_all_hw_features()
771 hw_feat->ts ? "yes" : "no"); in xgbe_get_all_hw_features()
772 dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n", in xgbe_get_all_hw_features()
773 hw_feat->eee ? "yes" : "no"); in xgbe_get_all_hw_features()
774 dev_dbg(pdata->dev, " TX checksum offload : %s\n", in xgbe_get_all_hw_features()
775 hw_feat->tx_coe ? "yes" : "no"); in xgbe_get_all_hw_features()
776 dev_dbg(pdata->dev, " RX checksum offload : %s\n", in xgbe_get_all_hw_features()
777 hw_feat->rx_coe ? "yes" : "no"); in xgbe_get_all_hw_features()
778 dev_dbg(pdata->dev, " Additional MAC addresses : %u\n", in xgbe_get_all_hw_features()
779 hw_feat->addn_mac); in xgbe_get_all_hw_features()
780 dev_dbg(pdata->dev, " Timestamp source : %s\n", in xgbe_get_all_hw_features()
781 (hw_feat->ts_src == 1) ? "internal" : in xgbe_get_all_hw_features()
782 (hw_feat->ts_src == 2) ? "external" : in xgbe_get_all_hw_features()
783 (hw_feat->ts_src == 3) ? "internal/external" : "n/a"); in xgbe_get_all_hw_features()
784 dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n", in xgbe_get_all_hw_features()
785 hw_feat->sa_vlan_ins ? "yes" : "no"); in xgbe_get_all_hw_features()
786 dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n", in xgbe_get_all_hw_features()
787 hw_feat->vxn ? "yes" : "no"); in xgbe_get_all_hw_features()
790 dev_dbg(pdata->dev, " RX fifo size : %u\n", in xgbe_get_all_hw_features()
791 hw_feat->rx_fifo_size); in xgbe_get_all_hw_features()
792 dev_dbg(pdata->dev, " TX fifo size : %u\n", in xgbe_get_all_hw_features()
793 hw_feat->tx_fifo_size); in xgbe_get_all_hw_features()
794 dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n", in xgbe_get_all_hw_features()
795 hw_feat->adv_ts_hi ? "yes" : "no"); in xgbe_get_all_hw_features()
796 dev_dbg(pdata->dev, " DMA width : %u\n", in xgbe_get_all_hw_features()
797 hw_feat->dma_width); in xgbe_get_all_hw_features()
798 dev_dbg(pdata->dev, " Data Center Bridging : %s\n", in xgbe_get_all_hw_features()
799 hw_feat->dcb ? "yes" : "no"); in xgbe_get_all_hw_features()
800 dev_dbg(pdata->dev, " Split header : %s\n", in xgbe_get_all_hw_features()
801 hw_feat->sph ? "yes" : "no"); in xgbe_get_all_hw_features()
802 dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n", in xgbe_get_all_hw_features()
803 hw_feat->tso ? "yes" : "no"); in xgbe_get_all_hw_features()
804 dev_dbg(pdata->dev, " Debug memory interface : %s\n", in xgbe_get_all_hw_features()
805 hw_feat->dma_debug ? "yes" : "no"); in xgbe_get_all_hw_features()
806 dev_dbg(pdata->dev, " Receive Side Scaling : %s\n", in xgbe_get_all_hw_features()
807 hw_feat->rss ? "yes" : "no"); in xgbe_get_all_hw_features()
808 dev_dbg(pdata->dev, " Traffic Class count : %u\n", in xgbe_get_all_hw_features()
809 hw_feat->tc_cnt); in xgbe_get_all_hw_features()
810 dev_dbg(pdata->dev, " Hash table size : %u\n", in xgbe_get_all_hw_features()
811 hw_feat->hash_table_size); in xgbe_get_all_hw_features()
812 dev_dbg(pdata->dev, " L3/L4 Filters : %u\n", in xgbe_get_all_hw_features()
813 hw_feat->l3l4_filter_num); in xgbe_get_all_hw_features()
816 dev_dbg(pdata->dev, " RX queue count : %u\n", in xgbe_get_all_hw_features()
817 hw_feat->rx_q_cnt); in xgbe_get_all_hw_features()
818 dev_dbg(pdata->dev, " TX queue count : %u\n", in xgbe_get_all_hw_features()
819 hw_feat->tx_q_cnt); in xgbe_get_all_hw_features()
820 dev_dbg(pdata->dev, " RX DMA channel count : %u\n", in xgbe_get_all_hw_features()
821 hw_feat->rx_ch_cnt); in xgbe_get_all_hw_features()
822 dev_dbg(pdata->dev, " TX DMA channel count : %u\n", in xgbe_get_all_hw_features()
823 hw_feat->tx_ch_cnt); in xgbe_get_all_hw_features()
824 dev_dbg(pdata->dev, " PPS outputs : %u\n", in xgbe_get_all_hw_features()
825 hw_feat->pps_out_num); in xgbe_get_all_hw_features()
826 dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n", in xgbe_get_all_hw_features()
827 hw_feat->aux_snap_num); in xgbe_get_all_hw_features()
836 pdata->vxlan_port = be16_to_cpu(ti->port); in xgbe_vxlan_set_port()
837 pdata->hw_if.enable_vxlan(pdata); in xgbe_vxlan_set_port()
847 pdata->hw_if.disable_vxlan(pdata); in xgbe_vxlan_unset_port()
848 pdata->vxlan_port = 0; in xgbe_vxlan_unset_port()
869 struct xgbe_channel *channel; in xgbe_napi_enable() local
872 if (pdata->per_channel_irq) { in xgbe_napi_enable()
873 for (i = 0; i < pdata->channel_count; i++) { in xgbe_napi_enable()
874 channel = pdata->channel[i]; in xgbe_napi_enable()
876 netif_napi_add(pdata->netdev, &channel->napi, in xgbe_napi_enable()
879 napi_enable(&channel->napi); in xgbe_napi_enable()
883 netif_napi_add(pdata->netdev, &pdata->napi, in xgbe_napi_enable()
886 napi_enable(&pdata->napi); in xgbe_napi_enable()
892 struct xgbe_channel *channel; in xgbe_napi_disable() local
895 if (pdata->per_channel_irq) { in xgbe_napi_disable()
896 for (i = 0; i < pdata->channel_count; i++) { in xgbe_napi_disable()
897 channel = pdata->channel[i]; in xgbe_napi_disable()
898 napi_disable(&channel->napi); in xgbe_napi_disable()
901 netif_napi_del(&channel->napi); in xgbe_napi_disable()
904 napi_disable(&pdata->napi); in xgbe_napi_disable()
907 netif_napi_del(&pdata->napi); in xgbe_napi_disable()
913 struct xgbe_channel *channel; in xgbe_request_irqs() local
914 struct net_device *netdev = pdata->netdev; in xgbe_request_irqs()
918 INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work); in xgbe_request_irqs()
919 INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work); in xgbe_request_irqs()
921 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, in xgbe_request_irqs()
925 pdata->dev_irq); in xgbe_request_irqs()
929 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) { in xgbe_request_irqs()
930 ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr, in xgbe_request_irqs()
931 0, pdata->ecc_name, pdata); in xgbe_request_irqs()
934 pdata->ecc_irq); in xgbe_request_irqs()
939 if (!pdata->per_channel_irq) in xgbe_request_irqs()
942 for (i = 0; i < pdata->channel_count; i++) { in xgbe_request_irqs()
943 channel = pdata->channel[i]; in xgbe_request_irqs()
944 snprintf(channel->dma_irq_name, in xgbe_request_irqs()
945 sizeof(channel->dma_irq_name) - 1, in xgbe_request_irqs()
946 "%s-TxRx-%u", netdev_name(netdev), in xgbe_request_irqs()
947 channel->queue_index); in xgbe_request_irqs()
949 ret = devm_request_irq(pdata->dev, channel->dma_irq, in xgbe_request_irqs()
951 channel->dma_irq_name, channel); in xgbe_request_irqs()
954 channel->dma_irq); in xgbe_request_irqs()
958 irq_set_affinity_hint(channel->dma_irq, in xgbe_request_irqs()
959 &channel->affinity_mask); in xgbe_request_irqs()
966 for (i--; i < pdata->channel_count; i--) { in xgbe_request_irqs()
967 channel = pdata->channel[i]; in xgbe_request_irqs()
969 irq_set_affinity_hint(channel->dma_irq, NULL); in xgbe_request_irqs()
970 devm_free_irq(pdata->dev, channel->dma_irq, channel); in xgbe_request_irqs()
973 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) in xgbe_request_irqs()
974 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); in xgbe_request_irqs()
977 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); in xgbe_request_irqs()
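The unwind loop at line 966 looks odd but is a common kernel idiom: i is unsigned, so decrementing past zero wraps to UINT_MAX and the `i < pdata->channel_count` test terminates the loop, letting one loop walk from i-1 down to 0. A standalone demonstration:

#include <stdio.h>

int main(void)
{
    unsigned int count = 4, i = 2;  /* failure hit while requesting IRQ 2 */

    for (i--; i < count; i--)       /* visits 1, 0; wraps and stops */
        printf("freeing channel %u\n", i);
    return 0;
}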
984 struct xgbe_channel *channel; in xgbe_free_irqs() local
987 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); in xgbe_free_irqs()
989 cancel_work_sync(&pdata->dev_bh_work); in xgbe_free_irqs()
990 cancel_work_sync(&pdata->ecc_bh_work); in xgbe_free_irqs()
992 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) in xgbe_free_irqs()
993 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); in xgbe_free_irqs()
995 if (!pdata->per_channel_irq) in xgbe_free_irqs()
998 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_irqs()
999 channel = pdata->channel[i]; in xgbe_free_irqs()
1001 irq_set_affinity_hint(channel->dma_irq, NULL); in xgbe_free_irqs()
1002 devm_free_irq(pdata->dev, channel->dma_irq, channel); in xgbe_free_irqs()
1008 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_init_tx_coalesce()
1010 DBGPR("-->xgbe_init_tx_coalesce\n"); in xgbe_init_tx_coalesce()
1012 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; in xgbe_init_tx_coalesce()
1013 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; in xgbe_init_tx_coalesce()
1015 hw_if->config_tx_coalesce(pdata); in xgbe_init_tx_coalesce()
1017 DBGPR("<--xgbe_init_tx_coalesce\n"); in xgbe_init_tx_coalesce()
1022 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_init_rx_coalesce()
1024 DBGPR("-->xgbe_init_rx_coalesce\n"); in xgbe_init_rx_coalesce()
1026 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); in xgbe_init_rx_coalesce()
1027 pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS; in xgbe_init_rx_coalesce()
1028 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; in xgbe_init_rx_coalesce()
1030 hw_if->config_rx_coalesce(pdata); in xgbe_init_rx_coalesce()
1032 DBGPR("<--xgbe_init_rx_coalesce\n"); in xgbe_init_rx_coalesce()
1037 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_tx_data()
1042 DBGPR("-->xgbe_free_tx_data\n"); in xgbe_free_tx_data()
1044 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_tx_data()
1045 ring = pdata->channel[i]->tx_ring; in xgbe_free_tx_data()
1049 for (j = 0; j < ring->rdesc_count; j++) { in xgbe_free_tx_data()
1051 desc_if->unmap_rdata(pdata, rdata); in xgbe_free_tx_data()
1055 DBGPR("<--xgbe_free_tx_data\n"); in xgbe_free_tx_data()
1060 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_rx_data()
1065 DBGPR("-->xgbe_free_rx_data\n"); in xgbe_free_rx_data()
1067 for (i = 0; i < pdata->channel_count; i++) { in xgbe_free_rx_data()
1068 ring = pdata->channel[i]->rx_ring; in xgbe_free_rx_data()
1072 for (j = 0; j < ring->rdesc_count; j++) { in xgbe_free_rx_data()
1074 desc_if->unmap_rdata(pdata, rdata); in xgbe_free_rx_data()
1078 DBGPR("<--xgbe_free_rx_data\n"); in xgbe_free_rx_data()
1083 pdata->phy_speed = SPEED_UNKNOWN; in xgbe_phy_reset()
1085 return pdata->phy_if.phy_reset(pdata); in xgbe_phy_reset()
1091 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_powerdown()
1094 DBGPR("-->xgbe_powerdown\n"); in xgbe_powerdown()
1097 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) { in xgbe_powerdown()
1099 DBGPR("<--xgbe_powerdown\n"); in xgbe_powerdown()
1100 return -EINVAL; in xgbe_powerdown()
1103 spin_lock_irqsave(&pdata->lock, flags); in xgbe_powerdown()
1111 flush_workqueue(pdata->dev_workqueue); in xgbe_powerdown()
1113 hw_if->powerdown_tx(pdata); in xgbe_powerdown()
1114 hw_if->powerdown_rx(pdata); in xgbe_powerdown()
1118 pdata->power_down = 1; in xgbe_powerdown()
1120 spin_unlock_irqrestore(&pdata->lock, flags); in xgbe_powerdown()
1122 DBGPR("<--xgbe_powerdown\n"); in xgbe_powerdown()
1130 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_powerup()
1133 DBGPR("-->xgbe_powerup\n"); in xgbe_powerup()
1136 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) { in xgbe_powerup()
1138 DBGPR("<--xgbe_powerup\n"); in xgbe_powerup()
1139 return -EINVAL; in xgbe_powerup()
1142 spin_lock_irqsave(&pdata->lock, flags); in xgbe_powerup()
1144 pdata->power_down = 0; in xgbe_powerup()
1148 hw_if->powerup_tx(pdata); in xgbe_powerup()
1149 hw_if->powerup_rx(pdata); in xgbe_powerup()
1158 spin_unlock_irqrestore(&pdata->lock, flags); in xgbe_powerup()
1160 DBGPR("<--xgbe_powerup\n"); in xgbe_powerup()
1167 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_memory()
1170 desc_if->free_ring_resources(pdata); in xgbe_free_memory()
1172 /* Free the channel and ring structures */ in xgbe_free_memory()
1178 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_alloc_memory()
1179 struct net_device *netdev = pdata->netdev; in xgbe_alloc_memory()
1182 if (pdata->new_tx_ring_count) { in xgbe_alloc_memory()
1183 pdata->tx_ring_count = pdata->new_tx_ring_count; in xgbe_alloc_memory()
1184 pdata->tx_q_count = pdata->tx_ring_count; in xgbe_alloc_memory()
1185 pdata->new_tx_ring_count = 0; in xgbe_alloc_memory()
1188 if (pdata->new_rx_ring_count) { in xgbe_alloc_memory()
1189 pdata->rx_ring_count = pdata->new_rx_ring_count; in xgbe_alloc_memory()
1190 pdata->new_rx_ring_count = 0; in xgbe_alloc_memory()
1194 pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu); in xgbe_alloc_memory()
1196 /* Allocate the channel and ring structures */ in xgbe_alloc_memory()
1202 ret = desc_if->alloc_ring_resources(pdata); in xgbe_alloc_memory()
1219 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_start()
1220 struct xgbe_phy_if *phy_if = &pdata->phy_if; in xgbe_start()
1221 struct net_device *netdev = pdata->netdev; in xgbe_start()
1226 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); in xgbe_start()
1232 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); in xgbe_start()
1240 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, in xgbe_start()
1241 i % pdata->rx_ring_count); in xgbe_start()
1243 ret = hw_if->init(pdata); in xgbe_start()
1253 ret = phy_if->phy_start(pdata); in xgbe_start()
1257 hw_if->enable_tx(pdata); in xgbe_start()
1258 hw_if->enable_rx(pdata); in xgbe_start()
1265 queue_work(pdata->dev_workqueue, &pdata->service_work); in xgbe_start()
1267 clear_bit(XGBE_STOPPED, &pdata->dev_state); in xgbe_start()
1277 hw_if->exit(pdata); in xgbe_start()
1284 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_stop()
1285 struct xgbe_phy_if *phy_if = &pdata->phy_if; in xgbe_stop()
1286 struct xgbe_channel *channel; in xgbe_stop() local
1287 struct net_device *netdev = pdata->netdev; in xgbe_stop()
1291 DBGPR("-->xgbe_stop\n"); in xgbe_stop()
1293 if (test_bit(XGBE_STOPPED, &pdata->dev_state)) in xgbe_stop()
1297 netif_carrier_off(pdata->netdev); in xgbe_stop()
1300 flush_workqueue(pdata->dev_workqueue); in xgbe_stop()
1304 hw_if->disable_tx(pdata); in xgbe_stop()
1305 hw_if->disable_rx(pdata); in xgbe_stop()
1307 phy_if->phy_stop(pdata); in xgbe_stop()
1313 hw_if->exit(pdata); in xgbe_stop()
1315 for (i = 0; i < pdata->channel_count; i++) { in xgbe_stop()
1316 channel = pdata->channel[i]; in xgbe_stop()
1317 if (!channel->tx_ring) in xgbe_stop()
1320 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_stop()
1324 set_bit(XGBE_STOPPED, &pdata->dev_state); in xgbe_stop()
1326 DBGPR("<--xgbe_stop\n"); in xgbe_stop()
1344 netdev_alert(pdata->netdev, "device stopped\n"); in xgbe_stopdev()
1350 if (!netif_running(pdata->netdev)) in xgbe_full_restart_dev()
1364 if (!netif_running(pdata->netdev)) in xgbe_restart_dev()
1391 packet->vlan_ctag = skb_vlan_tag_get(skb); in xgbe_prep_vlan()
1398 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_prep_tso()
1406 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) { in xgbe_prep_tso()
1407 packet->header_len = skb_inner_tcp_all_headers(skb); in xgbe_prep_tso()
1408 packet->tcp_header_len = inner_tcp_hdrlen(skb); in xgbe_prep_tso()
1410 packet->header_len = skb_tcp_all_headers(skb); in xgbe_prep_tso()
1411 packet->tcp_header_len = tcp_hdrlen(skb); in xgbe_prep_tso()
1413 packet->tcp_payload_len = skb->len - packet->header_len; in xgbe_prep_tso()
1414 packet->mss = skb_shinfo(skb)->gso_size; in xgbe_prep_tso()
1416 DBGPR(" packet->header_len=%u\n", packet->header_len); in xgbe_prep_tso()
1417 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", in xgbe_prep_tso()
1418 packet->tcp_header_len, packet->tcp_payload_len); in xgbe_prep_tso()
1419 DBGPR(" packet->mss=%u\n", packet->mss); in xgbe_prep_tso()
1424 packet->tx_packets = skb_shinfo(skb)->gso_segs; in xgbe_prep_tso()
1425 packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; in xgbe_prep_tso()
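For TSO, skb->len counts the protocol headers once, but the hardware replicates them in front of every segment; that is what the `tx_bytes += (tx_packets - 1) * header_len` adjustment at line 1425 accounts for. Worked numbers, using illustrative Ethernet/IPv4/TCP sizes:

#include <assert.h>

int main(void)
{
    unsigned int header_len = 54;           /* 14 eth + 20 ip + 20 tcp */
    unsigned int mss = 1460;
    unsigned int skb_len = 54 + 44 * 1460;  /* headers + 44 full segments */

    unsigned int segs = (skb_len - header_len + mss - 1) / mss;
    unsigned int tx_bytes = skb_len + (segs - 1) * header_len;

    assert(segs == 44);
    assert(tx_bytes == 44 * (header_len + mss)); /* what hits the wire */
    return 0;
}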
1432 if (!skb->encapsulation) in xgbe_is_vxlan()
1435 if (skb->ip_summed != CHECKSUM_PARTIAL) in xgbe_is_vxlan()
1438 switch (skb->protocol) { in xgbe_is_vxlan()
1440 if (ip_hdr(skb)->protocol != IPPROTO_UDP) in xgbe_is_vxlan()
1445 if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP) in xgbe_is_vxlan()
1453 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || in xgbe_is_vxlan()
1454 skb->inner_protocol != htons(ETH_P_TEB) || in xgbe_is_vxlan()
1455 (skb_inner_mac_header(skb) - skb_transport_header(skb) != in xgbe_is_vxlan()
1464 if (skb->ip_summed != CHECKSUM_PARTIAL) in xgbe_is_tso()
1484 packet->skb = skb; in xgbe_packet_info()
1487 packet->rdesc_count = 0; in xgbe_packet_info()
1489 packet->tx_packets = 1; in xgbe_packet_info()
1490 packet->tx_bytes = skb->len; in xgbe_packet_info()
1494 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { in xgbe_packet_info()
1496 packet->rdesc_count++; in xgbe_packet_info()
1500 packet->rdesc_count++; in xgbe_packet_info()
1502 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1504 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1506 } else if (skb->ip_summed == CHECKSUM_PARTIAL) in xgbe_packet_info()
1507 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1511 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1516 if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) in xgbe_packet_info()
1520 packet->rdesc_count++; in xgbe_packet_info()
1523 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1527 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in xgbe_packet_info()
1528 (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) in xgbe_packet_info()
1529 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_packet_info()
1533 packet->rdesc_count++; in xgbe_packet_info()
1534 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); in xgbe_packet_info()
1537 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in xgbe_packet_info()
1538 frag = &skb_shinfo(skb)->frags[i]; in xgbe_packet_info()
1540 packet->rdesc_count++; in xgbe_packet_info()
1541 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); in xgbe_packet_info()
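The rdesc_count estimate above charges one descriptor per XGBE_TX_MAX_BUF_SIZE chunk of the linear area, and again for each fragment. A runnable model of that loop (the buffer cap below is illustrative; the real constant lives in xgbe.h):

#include <assert.h>

#define TX_MAX_BUF 16320u   /* illustrative stand-in for XGBE_TX_MAX_BUF_SIZE */

static unsigned int descs_for(unsigned int len)
{
    unsigned int n = 0;

    while (len) {
        n++;
        len -= (len < TX_MAX_BUF) ? len : TX_MAX_BUF;
    }
    return n;
}

int main(void)
{
    assert(descs_for(1514)  == 1);
    assert(descs_for(16320) == 1);
    assert(descs_for(16321) == 2);  /* one byte over -> second descriptor */
    assert(descs_for(0)     == 0);
    return 0;
}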
1552 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", in xgbe_open()
1555 snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc", in xgbe_open()
1558 snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c", in xgbe_open()
1562 pdata->dev_workqueue = in xgbe_open()
1564 if (!pdata->dev_workqueue) { in xgbe_open()
1566 return -ENOMEM; in xgbe_open()
1569 pdata->an_workqueue = in xgbe_open()
1570 create_singlethread_workqueue(pdata->an_name); in xgbe_open()
1571 if (!pdata->an_workqueue) { in xgbe_open()
1573 ret = -ENOMEM; in xgbe_open()
1583 ret = clk_prepare_enable(pdata->sysclk); in xgbe_open()
1589 ret = clk_prepare_enable(pdata->ptpclk); in xgbe_open()
1595 INIT_WORK(&pdata->service_work, xgbe_service); in xgbe_open()
1596 INIT_WORK(&pdata->restart_work, xgbe_restart); in xgbe_open()
1597 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); in xgbe_open()
1598 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); in xgbe_open()
1611 clear_bit(XGBE_DOWN, &pdata->dev_state); in xgbe_open()
1619 clk_disable_unprepare(pdata->ptpclk); in xgbe_open()
1622 clk_disable_unprepare(pdata->sysclk); in xgbe_open()
1625 destroy_workqueue(pdata->an_workqueue); in xgbe_open()
1628 destroy_workqueue(pdata->dev_workqueue); in xgbe_open()
1643 clk_disable_unprepare(pdata->ptpclk); in xgbe_close()
1644 clk_disable_unprepare(pdata->sysclk); in xgbe_close()
1646 destroy_workqueue(pdata->an_workqueue); in xgbe_close()
1648 destroy_workqueue(pdata->dev_workqueue); in xgbe_close()
1650 set_bit(XGBE_DOWN, &pdata->dev_state); in xgbe_close()
1658 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_xmit()
1659 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_xmit()
1660 struct xgbe_channel *channel; in xgbe_xmit() local
1666 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); in xgbe_xmit()
1668 channel = pdata->channel[skb->queue_mapping]; in xgbe_xmit()
1669 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_xmit()
1670 ring = channel->tx_ring; in xgbe_xmit()
1671 packet = &ring->packet_data; in xgbe_xmit()
1675 if (skb->len == 0) { in xgbe_xmit()
1687 ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); in xgbe_xmit()
1700 if (!desc_if->map_tx_skb(channel, skb)) { in xgbe_xmit()
1708 netdev_tx_sent_queue(txq, packet->tx_bytes); in xgbe_xmit()
1711 hw_if->dev_xmit(channel); in xgbe_xmit()
1717 xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); in xgbe_xmit()
1728 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_rx_mode()
1730 DBGPR("-->xgbe_set_rx_mode\n"); in xgbe_set_rx_mode()
1732 hw_if->config_rx_mode(pdata); in xgbe_set_rx_mode()
1734 DBGPR("<--xgbe_set_rx_mode\n"); in xgbe_set_rx_mode()
1740 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_mac_address()
1743 DBGPR("-->xgbe_set_mac_address\n"); in xgbe_set_mac_address()
1745 if (!is_valid_ether_addr(saddr->sa_data)) in xgbe_set_mac_address()
1746 return -EADDRNOTAVAIL; in xgbe_set_mac_address()
1748 eth_hw_addr_set(netdev, saddr->sa_data); in xgbe_set_mac_address()
1750 hw_if->set_mac_address(pdata, netdev->dev_addr); in xgbe_set_mac_address()
1752 DBGPR("<--xgbe_set_mac_address\n"); in xgbe_set_mac_address()
1772 ret = -EOPNOTSUPP; in xgbe_ioctl()
1783 DBGPR("-->xgbe_change_mtu\n"); in xgbe_change_mtu()
1789 pdata->rx_buf_size = ret; in xgbe_change_mtu()
1790 WRITE_ONCE(netdev->mtu, mtu); in xgbe_change_mtu()
1794 DBGPR("<--xgbe_change_mtu\n"); in xgbe_change_mtu()
1804 schedule_work(&pdata->restart_work); in xgbe_tx_timeout()
1811 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; in xgbe_get_stats64()
1813 DBGPR("-->%s\n", __func__); in xgbe_get_stats64()
1815 pdata->hw_if.read_mmc_stats(pdata); in xgbe_get_stats64()
1817 s->rx_packets = pstats->rxframecount_gb; in xgbe_get_stats64()
1818 s->rx_bytes = pstats->rxoctetcount_gb; in xgbe_get_stats64()
1819 s->rx_errors = pstats->rxframecount_gb - in xgbe_get_stats64()
1820 pstats->rxbroadcastframes_g - in xgbe_get_stats64()
1821 pstats->rxmulticastframes_g - in xgbe_get_stats64()
1822 pstats->rxunicastframes_g; in xgbe_get_stats64()
1823 s->multicast = pstats->rxmulticastframes_g; in xgbe_get_stats64()
1824 s->rx_length_errors = pstats->rxlengtherror; in xgbe_get_stats64()
1825 s->rx_crc_errors = pstats->rxcrcerror; in xgbe_get_stats64()
1826 s->rx_fifo_errors = pstats->rxfifooverflow; in xgbe_get_stats64()
1828 s->tx_packets = pstats->txframecount_gb; in xgbe_get_stats64()
1829 s->tx_bytes = pstats->txoctetcount_gb; in xgbe_get_stats64()
1830 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; in xgbe_get_stats64()
1831 s->tx_dropped = netdev->stats.tx_dropped; in xgbe_get_stats64()
1833 DBGPR("<--%s\n", __func__); in xgbe_get_stats64()
1840 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_vlan_rx_add_vid()
1842 DBGPR("-->%s\n", __func__); in xgbe_vlan_rx_add_vid()
1844 set_bit(vid, pdata->active_vlans); in xgbe_vlan_rx_add_vid()
1845 hw_if->update_vlan_hash_table(pdata); in xgbe_vlan_rx_add_vid()
1847 DBGPR("<--%s\n", __func__); in xgbe_vlan_rx_add_vid()
1856 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_vlan_rx_kill_vid()
1858 DBGPR("-->%s\n", __func__); in xgbe_vlan_rx_kill_vid()
1860 clear_bit(vid, pdata->active_vlans); in xgbe_vlan_rx_kill_vid()
1861 hw_if->update_vlan_hash_table(pdata); in xgbe_vlan_rx_kill_vid()
1863 DBGPR("<--%s\n", __func__); in xgbe_vlan_rx_kill_vid()
1872 struct xgbe_channel *channel; in xgbe_poll_controller() local
1875 DBGPR("-->xgbe_poll_controller\n"); in xgbe_poll_controller()
1877 if (pdata->per_channel_irq) { in xgbe_poll_controller()
1878 for (i = 0; i < pdata->channel_count; i++) { in xgbe_poll_controller()
1879 channel = pdata->channel[i]; in xgbe_poll_controller()
1880 xgbe_dma_isr(channel->dma_irq, channel); in xgbe_poll_controller()
1883 disable_irq(pdata->dev_irq); in xgbe_poll_controller()
1884 xgbe_isr(pdata->dev_irq, pdata); in xgbe_poll_controller()
1885 enable_irq(pdata->dev_irq); in xgbe_poll_controller()
1888 DBGPR("<--xgbe_poll_controller\n"); in xgbe_poll_controller()
1900 return -EOPNOTSUPP; in xgbe_setup_tc()
1902 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in xgbe_setup_tc()
1903 tc = mqprio->num_tc; in xgbe_setup_tc()
1905 if (tc > pdata->hw_feat.tc_cnt) in xgbe_setup_tc()
1906 return -EINVAL; in xgbe_setup_tc()
1908 pdata->num_tcs = tc; in xgbe_setup_tc()
1909 pdata->hw_if.config_tc(pdata); in xgbe_setup_tc()
1922 if (!pdata->hw_feat.vxn) in xgbe_fix_features()
1961 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_features()
1965 rxhash = pdata->netdev_features & NETIF_F_RXHASH; in xgbe_set_features()
1966 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; in xgbe_set_features()
1967 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; in xgbe_set_features()
1968 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; in xgbe_set_features()
1971 ret = hw_if->enable_rss(pdata); in xgbe_set_features()
1973 ret = hw_if->disable_rss(pdata); in xgbe_set_features()
1978 hw_if->enable_sph(pdata); in xgbe_set_features()
1979 hw_if->enable_vxlan(pdata); in xgbe_set_features()
1980 hw_if->enable_rx_csum(pdata); in xgbe_set_features()
1981 schedule_work(&pdata->restart_work); in xgbe_set_features()
1983 hw_if->disable_sph(pdata); in xgbe_set_features()
1984 hw_if->disable_vxlan(pdata); in xgbe_set_features()
1985 hw_if->disable_rx_csum(pdata); in xgbe_set_features()
1986 schedule_work(&pdata->restart_work); in xgbe_set_features()
1990 hw_if->enable_rx_vlan_stripping(pdata); in xgbe_set_features()
1992 hw_if->disable_rx_vlan_stripping(pdata); in xgbe_set_features()
1995 hw_if->enable_rx_vlan_filtering(pdata); in xgbe_set_features()
1997 hw_if->disable_rx_vlan_filtering(pdata); in xgbe_set_features()
1999 pdata->netdev_features = features; in xgbe_set_features()
2001 DBGPR("<--xgbe_set_features\n"); in xgbe_set_features()
2043 static void xgbe_rx_refresh(struct xgbe_channel *channel) in xgbe_rx_refresh() argument
2045 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_refresh()
2046 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_rx_refresh()
2047 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_rx_refresh()
2048 struct xgbe_ring *ring = channel->rx_ring; in xgbe_rx_refresh()
2051 while (ring->dirty != ring->cur) { in xgbe_rx_refresh()
2052 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); in xgbe_rx_refresh()
2055 desc_if->unmap_rdata(pdata, rdata); in xgbe_rx_refresh()
2057 if (desc_if->map_rx_buffer(pdata, ring, rdata)) in xgbe_rx_refresh()
2060 hw_if->rx_desc_reset(pdata, rdata, ring->dirty); in xgbe_rx_refresh()
2062 ring->dirty++; in xgbe_rx_refresh()
2070 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); in xgbe_rx_refresh()
2071 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, in xgbe_rx_refresh()
2072 lower_32_bits(rdata->rdesc_dma)); in xgbe_rx_refresh()
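After refilling, xgbe_rx_refresh() tells the DMA engine how far it may advance by writing the address of the last refilled descriptor (index ring->dirty - 1) to the tail register. That index is safe even when dirty has just wrapped to zero, because XGBE_GET_DESC_DATA masks the free-running index by the ring size, which is a power of two:

#include <assert.h>

#define RING_SIZE 512u
#define RING_IDX(i) ((i) & (RING_SIZE - 1))

int main(void)
{
    unsigned int dirty = 0;              /* free-running counter just wrapped */
    assert(RING_IDX(dirty - 1) == 511);  /* last refilled slot: final entry */
    return 0;
}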
2083 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); in xgbe_create_skb()
2090 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, in xgbe_create_skb()
2091 rdata->rx.hdr.dma_off, in xgbe_create_skb()
2092 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); in xgbe_create_skb()
2094 packet = page_address(rdata->rx.hdr.pa.pages) + in xgbe_create_skb()
2095 rdata->rx.hdr.pa.pages_offset; in xgbe_create_skb()
2106 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) in xgbe_rx_buf1_len()
2110 if (rdata->rx.hdr_len) in xgbe_rx_buf1_len()
2111 return rdata->rx.hdr_len; in xgbe_rx_buf1_len()
2116 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) in xgbe_rx_buf1_len()
2117 return rdata->rx.hdr.dma_len; in xgbe_rx_buf1_len()
2122 return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); in xgbe_rx_buf1_len()
2130 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) in xgbe_rx_buf2_len()
2131 return rdata->rx.buf.dma_len; in xgbe_rx_buf2_len()
2136 return rdata->rx.len - len; in xgbe_rx_buf2_len()
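With split header enabled, buf1 receives the protocol headers and buf2 the payload: every non-LAST descriptor contributes its full buf2 dma_len, and the LAST descriptor contributes whatever remains of the total packet length (rx.len minus the running total, line 2136). Worked numbers, with illustrative sizes:

#include <assert.h>

int main(void)
{
    /* 3000-byte frame, 64 header bytes in buf1, 2048-byte buf2 buffers */
    unsigned int pkt = 3000, hdr = 64, buf2_dma = 2048, got = 0;

    got += hdr;               /* desc 0 (FIRST): buf1 carries the header */
    got += buf2_dma;          /* desc 0: full buf2, since it is not LAST */
    assert(pkt - got == 888); /* desc 1 (LAST): buf2 = rx.len - len */
    return 0;
}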
2139 static int xgbe_tx_poll(struct xgbe_channel *channel) in xgbe_tx_poll() argument
2141 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_poll()
2142 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_tx_poll()
2143 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_tx_poll()
2144 struct xgbe_ring *ring = channel->tx_ring; in xgbe_tx_poll()
2147 struct net_device *netdev = pdata->netdev; in xgbe_tx_poll()
2153 DBGPR("-->xgbe_tx_poll\n"); in xgbe_tx_poll()
2155 /* Nothing to do if there isn't a Tx ring for this channel */ in xgbe_tx_poll()
2159 cur = ring->cur; in xgbe_tx_poll()
2161 /* Be sure we get ring->cur before accessing descriptor data */ in xgbe_tx_poll()
2164 txq = netdev_get_tx_queue(netdev, channel->queue_index); in xgbe_tx_poll()
2167 (ring->dirty != cur)) { in xgbe_tx_poll()
2168 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); in xgbe_tx_poll()
2169 rdesc = rdata->rdesc; in xgbe_tx_poll()
2171 if (!hw_if->tx_complete(rdesc)) in xgbe_tx_poll()
2179 xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); in xgbe_tx_poll()
2181 if (hw_if->is_last_desc(rdesc)) { in xgbe_tx_poll()
2182 tx_packets += rdata->tx.packets; in xgbe_tx_poll()
2183 tx_bytes += rdata->tx.bytes; in xgbe_tx_poll()
2186 /* Free the SKB and reset the descriptor for re-use */ in xgbe_tx_poll()
2187 desc_if->unmap_rdata(pdata, rdata); in xgbe_tx_poll()
2188 hw_if->tx_desc_reset(rdata); in xgbe_tx_poll()
2191 ring->dirty++; in xgbe_tx_poll()
2199 if ((ring->tx.queue_stopped == 1) && in xgbe_tx_poll()
2201 ring->tx.queue_stopped = 0; in xgbe_tx_poll()
2205 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); in xgbe_tx_poll()
2210 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) in xgbe_rx_poll() argument
2212 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_poll()
2213 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_rx_poll()
2214 struct xgbe_ring *ring = channel->rx_ring; in xgbe_rx_poll()
2217 struct net_device *netdev = pdata->netdev; in xgbe_rx_poll()
2226 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); in xgbe_rx_poll()
2228 /* Nothing to do if there isn't a Rx ring for this channel */ in xgbe_rx_poll()
2235 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xgbe_rx_poll()
2237 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_rx_poll()
2238 packet = &ring->packet_data; in xgbe_rx_poll()
2240 DBGPR(" cur = %d\n", ring->cur); in xgbe_rx_poll()
2243 if (!received && rdata->state_saved) { in xgbe_rx_poll()
2244 skb = rdata->state.skb; in xgbe_rx_poll()
2245 error = rdata->state.error; in xgbe_rx_poll()
2246 len = rdata->state.len; in xgbe_rx_poll()
2255 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_rx_poll()
2258 xgbe_rx_refresh(channel); in xgbe_rx_poll()
2260 if (hw_if->dev_read(channel)) in xgbe_rx_poll()
2264 ring->cur++; in xgbe_rx_poll()
2266 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_rx_poll()
2268 context_next = XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2271 context = XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2279 if (error || packet->errors) { in xgbe_rx_poll()
2280 if (packet->errors) in xgbe_rx_poll()
2294 if (buf2_len > rdata->rx.buf.dma_len) { in xgbe_rx_poll()
2312 dma_sync_single_range_for_cpu(pdata->dev, in xgbe_rx_poll()
2313 rdata->rx.buf.dma_base, in xgbe_rx_poll()
2314 rdata->rx.buf.dma_off, in xgbe_rx_poll()
2315 rdata->rx.buf.dma_len, in xgbe_rx_poll()
2318 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in xgbe_rx_poll()
2319 rdata->rx.buf.pa.pages, in xgbe_rx_poll()
2320 rdata->rx.buf.pa.pages_offset, in xgbe_rx_poll()
2322 rdata->rx.buf.dma_len); in xgbe_rx_poll()
2323 rdata->rx.buf.pa.pages = NULL; in xgbe_rx_poll()
2337 max_len = netdev->mtu + ETH_HLEN; in xgbe_rx_poll()
2338 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && in xgbe_rx_poll()
2339 (skb->protocol == htons(ETH_P_8021Q))) in xgbe_rx_poll()
2342 if (skb->len > max_len) { in xgbe_rx_poll()
2353 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2355 skb->ip_summed = CHECKSUM_UNNECESSARY; in xgbe_rx_poll()
2357 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2359 skb->encapsulation = 1; in xgbe_rx_poll()
2361 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2363 skb->csum_level = 1; in xgbe_rx_poll()
2366 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2369 packet->vlan_ctag); in xgbe_rx_poll()
2371 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2374 hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp); in xgbe_rx_poll()
2377 if (XGMAC_GET_BITS(packet->attributes, in xgbe_rx_poll()
2379 skb_set_hash(skb, packet->rss_hash, in xgbe_rx_poll()
2380 packet->rss_hash_type); in xgbe_rx_poll()
2382 skb->dev = netdev; in xgbe_rx_poll()
2383 skb->protocol = eth_type_trans(skb, netdev); in xgbe_rx_poll()
2384 skb_record_rx_queue(skb, channel->queue_index); in xgbe_rx_poll()
2394 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_rx_poll()
2395 rdata->state_saved = 1; in xgbe_rx_poll()
2396 rdata->state.skb = skb; in xgbe_rx_poll()
2397 rdata->state.len = len; in xgbe_rx_poll()
2398 rdata->state.error = error; in xgbe_rx_poll()
2401 DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); in xgbe_rx_poll()
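xgbe_rx_poll() may exhaust its budget in the middle of a multi-descriptor frame. The partially assembled skb is parked in the next descriptor's rdata and picked back up at the top of the next poll; condensed from the fragments above, the two ends of that handoff look roughly like:

/* entry: resume a frame interrupted by the previous poll */
if (!received && rdata->state_saved) {
	skb = rdata->state.skb;
	error = rdata->state.error;
	len = rdata->state.len;
}

/* ... per-descriptor processing ... */

/* exit: out of budget with a frame still in flight */
if (skb) {
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdata->state_saved = 1;
	rdata->state.skb = skb;
	rdata->state.len = len;
	rdata->state.error = error;
}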
2408 struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, in xgbe_one_poll() local
2410 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_one_poll()
2413 DBGPR("-->xgbe_one_poll: budget=%d\n", budget); in xgbe_one_poll()
2416 xgbe_tx_poll(channel); in xgbe_one_poll()
2419 processed = xgbe_rx_poll(channel, budget); in xgbe_one_poll()
2424 if (pdata->channel_irq_mode) in xgbe_one_poll()
2425 xgbe_enable_rx_tx_int(pdata, channel); in xgbe_one_poll()
2427 enable_irq(channel->dma_irq); in xgbe_one_poll()
2430 DBGPR("<--xgbe_one_poll: received = %d\n", processed); in xgbe_one_poll()
2439 struct xgbe_channel *channel; in xgbe_all_poll() local
2444 DBGPR("-->xgbe_all_poll: budget=%d\n", budget); in xgbe_all_poll()
2447 ring_budget = budget / pdata->rx_ring_count; in xgbe_all_poll()
2451 for (i = 0; i < pdata->channel_count; i++) { in xgbe_all_poll()
2452 channel = pdata->channel[i]; in xgbe_all_poll()
2455 xgbe_tx_poll(channel); in xgbe_all_poll()
2458 if (ring_budget > (budget - processed)) in xgbe_all_poll()
2459 ring_budget = budget - processed; in xgbe_all_poll()
2460 processed += xgbe_rx_poll(channel, ring_budget); in xgbe_all_poll()
2470 DBGPR("<--xgbe_all_poll: received = %d\n", processed); in xgbe_all_poll()
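When one NAPI instance services every channel, xgbe_all_poll() divides the budget evenly across the Rx rings and clamps each share so the running total never exceeds what was granted (lines 2447-2460). A runnable model of that split:

#include <stdio.h>

int main(void)
{
    unsigned int budget = 64, rx_rings = 3, processed = 0;
    unsigned int ring_budget = budget / rx_rings;  /* 21 per ring */

    for (unsigned int i = 0; i < rx_rings; i++) {
        if (ring_budget > budget - processed)
            ring_budget = budget - processed;      /* never overshoot */
        processed += ring_budget;                  /* worst case: all used */
        printf("ring %u got %u, total %u\n", i, ring_budget, processed);
    }
    return 0;
}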
2481 while (count--) { in xgbe_dump_tx_desc()
2483 rdesc = rdata->rdesc; in xgbe_dump_tx_desc()
2484 netdev_dbg(pdata->netdev, in xgbe_dump_tx_desc()
2487 le32_to_cpu(rdesc->desc0), in xgbe_dump_tx_desc()
2488 le32_to_cpu(rdesc->desc1), in xgbe_dump_tx_desc()
2489 le32_to_cpu(rdesc->desc2), in xgbe_dump_tx_desc()
2490 le32_to_cpu(rdesc->desc3)); in xgbe_dump_tx_desc()
2502 rdesc = rdata->rdesc; in xgbe_dump_rx_desc()
2503 netdev_dbg(pdata->netdev, in xgbe_dump_rx_desc()
2505 idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), in xgbe_dump_rx_desc()
2506 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); in xgbe_dump_rx_desc()
2511 struct ethhdr *eth = (struct ethhdr *)skb->data; in xgbe_print_pkt()
2518 (tx_rx ? "TX" : "RX"), skb->len); in xgbe_print_pkt()
2520 netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); in xgbe_print_pkt()
2521 netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); in xgbe_print_pkt()
2522 netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto)); in xgbe_print_pkt()
2524 for (i = 0; i < skb->len; i += 32) { in xgbe_print_pkt()
2525 unsigned int len = min(skb->len - i, 32U); in xgbe_print_pkt()
2527 hex_dump_to_buffer(&skb->data[i], len, 32, 1, in xgbe_print_pkt()