Lines matching "multi", "-", "gmac"

1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/dma-mapping.h>
54 static int debug = -1;
73 * sxgbe_verify_args - verify the driver parameters.
86 if (!priv->tx_path_in_lpi_mode) in sxgbe_enable_eee_mode()
87 priv->hw->mac->set_eee_mode(priv->ioaddr); in sxgbe_enable_eee_mode()
93 priv->hw->mac->reset_eee_mode(priv->ioaddr); in sxgbe_disable_eee_mode()
94 del_timer_sync(&priv->eee_ctrl_timer); in sxgbe_disable_eee_mode()
95 priv->tx_path_in_lpi_mode = false; in sxgbe_disable_eee_mode()
110 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); in sxgbe_eee_ctrl_timer()
118 * if the GMAC actually supports EEE (from the HW capability register) and the
124 struct net_device *ndev = priv->dev; in sxgbe_eee_init()
128 if (priv->hw_cap.eee) { in sxgbe_eee_init()
130 if (phy_init_eee(ndev->phydev, true)) in sxgbe_eee_init()
133 timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0); in sxgbe_eee_init()
134 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); in sxgbe_eee_init()
135 add_timer(&priv->eee_ctrl_timer); in sxgbe_eee_init()
137 priv->hw->mac->set_eee_timer(priv->ioaddr, in sxgbe_eee_init()
139 priv->tx_lpi_timer); in sxgbe_eee_init()
141 pr_info("Energy-Efficient Ethernet initialized\n"); in sxgbe_eee_init()
151 struct net_device *ndev = priv->dev; in sxgbe_eee_adjust()
157 if (priv->eee_enabled) in sxgbe_eee_adjust()
158 priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link); in sxgbe_eee_adjust()
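
The EEE fragments above show the pattern: sxgbe_eee_init() arms eee_ctrl_timer, sxgbe_enable_eee_mode() drops the MAC into LPI only while the TX path is idle, and transmit activity re-arms the timer. A sketch of how the timer callback around line 110 most likely ties these together, reconstructed from the matched lines rather than quoted from the full source:

static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
	struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);

	/* enter LPI only while the TX path is idle, then re-arm the timer */
	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}
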
162 * sxgbe_clk_csr_set - dynamically set the MDC clock
169 u32 clk_rate = clk_get_rate(priv->sxgbe_clk); in sxgbe_clk_csr_set()
175 priv->clk_csr = SXGBE_CSR_100_150M; in sxgbe_clk_csr_set()
177 priv->clk_csr = SXGBE_CSR_150_250M; in sxgbe_clk_csr_set()
179 priv->clk_csr = SXGBE_CSR_250_300M; in sxgbe_clk_csr_set()
181 priv->clk_csr = SXGBE_CSR_300_350M; in sxgbe_clk_csr_set()
183 priv->clk_csr = SXGBE_CSR_350_400M; in sxgbe_clk_csr_set()
185 priv->clk_csr = SXGBE_CSR_400_500M; in sxgbe_clk_csr_set()
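
The clk_csr assignments above bucket the CSR clock into MDC-divider ranges named after the frequency window they cover. A self-contained sketch of that selection, with cut-off values inferred from the constant names only (the driver's real thresholds and its handling of rates outside 100-500 MHz are not shown in this listing):

#include <stdio.h>

/* Illustrative stand-ins for the SXGBE_CSR_* divider selectors. */
enum csr_sel { CSR_100_150M, CSR_150_250M, CSR_250_300M,
	       CSR_300_350M, CSR_350_400M, CSR_400_500M };

static enum csr_sel pick_csr(unsigned long hz)
{
	if (hz < 150000000UL)
		return CSR_100_150M;
	else if (hz < 250000000UL)
		return CSR_150_250M;
	else if (hz < 300000000UL)
		return CSR_250_300M;
	else if (hz < 350000000UL)
		return CSR_300_350M;
	else if (hz < 400000000UL)
		return CSR_350_400M;
	return CSR_400_500M;
}

int main(void)
{
	printf("selector for 200 MHz: %d\n", pick_csr(200000000UL)); /* CSR_150_250M */
	return 0;
}
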
189 #define SXGBE_TX_THRESH(x) (x->dma_tx_size/4)
193 return queue->dirty_tx + tx_qsize - queue->cur_tx - 1; in sxgbe_tx_avail()
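
SXGBE_TX_THRESH() and sxgbe_tx_avail() above are plain ring-buffer arithmetic: cur_tx is the producer counter, dirty_tx the consumer counter, and one slot is left unused so a full ring can be told apart from an empty one. A minimal self-contained check of that formula (sizes and counter values are made up):

#include <stdio.h>

/* Free descriptors for a ring of qsize slots with producer cur and
 * consumer dirty (both monotonically increasing counters, as in the driver).
 */
static unsigned int tx_avail(unsigned int dirty, unsigned int cur,
			     unsigned int qsize)
{
	return dirty + qsize - cur - 1;
}

int main(void)
{
	printf("%u\n", tx_avail(0, 0, 512));	/* empty ring: 511 usable slots */
	printf("%u\n", tx_avail(0, 511, 512));	/* producer one short of wrap: 0 */
	return 0;
}
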
204 struct phy_device *phydev = dev->phydev; in sxgbe_adjust_link()
211 /* SXGBE does not support auto-negotiation and in sxgbe_adjust_link()
215 if (phydev->link) { in sxgbe_adjust_link()
216 if (phydev->speed != priv->speed) { in sxgbe_adjust_link()
218 switch (phydev->speed) { in sxgbe_adjust_link()
231 phydev->speed); in sxgbe_adjust_link()
234 priv->speed = phydev->speed; in sxgbe_adjust_link()
235 priv->hw->mac->set_speed(priv->ioaddr, speed); in sxgbe_adjust_link()
238 if (!priv->oldlink) { in sxgbe_adjust_link()
240 priv->oldlink = 1; in sxgbe_adjust_link()
242 } else if (priv->oldlink) { in sxgbe_adjust_link()
244 priv->oldlink = 0; in sxgbe_adjust_link()
245 priv->speed = SPEED_UNKNOWN; in sxgbe_adjust_link()
256 * sxgbe_init_phy - PHY initialization
269 int phy_iface = priv->plat->interface; in sxgbe_init_phy()
272 priv->oldlink = 0; in sxgbe_init_phy()
273 priv->speed = SPEED_UNKNOWN; in sxgbe_init_phy()
274 priv->oldduplex = DUPLEX_UNKNOWN; in sxgbe_init_phy()
276 if (priv->plat->phy_bus_name) in sxgbe_init_phy()
277 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", in sxgbe_init_phy()
278 priv->plat->phy_bus_name, priv->plat->bus_id); in sxgbe_init_phy()
280 snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x", in sxgbe_init_phy()
281 priv->plat->bus_id); in sxgbe_init_phy()
284 priv->plat->phy_addr); in sxgbe_init_phy()
299 if (phydev->phy_id == 0) { in sxgbe_init_phy()
301 return -ENODEV; in sxgbe_init_phy()
305 __func__, phydev->phy_id, phydev->link); in sxgbe_init_phy()
319 unsigned int txsize = priv->dma_tx_size; in sxgbe_clear_descriptors()
320 unsigned int rxsize = priv->dma_rx_size; in sxgbe_clear_descriptors()
325 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i], in sxgbe_clear_descriptors()
326 priv->use_riwt, priv->mode, in sxgbe_clear_descriptors()
327 (i == rxsize - 1)); in sxgbe_clear_descriptors()
332 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); in sxgbe_clear_descriptors()
346 return -ENOMEM; in sxgbe_init_rx_buffers()
348 rx_ring->rx_skbuff[i] = skb; in sxgbe_init_rx_buffers()
349 rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, in sxgbe_init_rx_buffers()
352 if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) { in sxgbe_init_rx_buffers()
355 return -EINVAL; in sxgbe_init_rx_buffers()
358 p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i]; in sxgbe_init_rx_buffers()
364 * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
380 kfree_skb(rx_ring->rx_skbuff[i]); in sxgbe_free_rx_buffers()
381 dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i], in sxgbe_free_rx_buffers()
386 * init_tx_ring - init the TX descriptor ring
399 return -ENOMEM; in init_tx_ring()
403 tx_ring->dma_tx = dma_alloc_coherent(dev, in init_tx_ring()
405 &tx_ring->dma_tx_phy, GFP_KERNEL); in init_tx_ring()
406 if (!tx_ring->dma_tx) in init_tx_ring()
407 return -ENOMEM; in init_tx_ring()
410 tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize, in init_tx_ring()
412 if (!tx_ring->tx_skbuff_dma) in init_tx_ring()
415 tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize, in init_tx_ring()
418 if (!tx_ring->tx_skbuff) in init_tx_ring()
422 tx_ring->queue_no = queue_no; in init_tx_ring()
425 tx_ring->dirty_tx = 0; in init_tx_ring()
426 tx_ring->cur_tx = 0; in init_tx_ring()
432 tx_ring->dma_tx, tx_ring->dma_tx_phy); in init_tx_ring()
433 return -ENOMEM; in init_tx_ring()
437 * free_rx_ring - free the RX descriptor ring
447 rx_ring->dma_rx, rx_ring->dma_rx_phy); in free_rx_ring()
448 kfree(rx_ring->rx_skbuff_dma); in free_rx_ring()
449 kfree(rx_ring->rx_skbuff); in free_rx_ring()
453 * init_rx_ring - init the RX descriptor ring
469 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8); in init_rx_ring()
476 return -ENOMEM; in init_rx_ring()
480 rx_ring->queue_no = queue_no; in init_rx_ring()
483 rx_ring->dma_rx = dma_alloc_coherent(priv->device, in init_rx_ring()
485 &rx_ring->dma_rx_phy, GFP_KERNEL); in init_rx_ring()
487 if (rx_ring->dma_rx == NULL) in init_rx_ring()
488 return -ENOMEM; in init_rx_ring()
491 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, in init_rx_ring()
493 if (!rx_ring->rx_skbuff_dma) { in init_rx_ring()
494 ret = -ENOMEM; in init_rx_ring()
498 rx_ring->rx_skbuff = kmalloc_array(rx_rsize, in init_rx_ring()
500 if (!rx_ring->rx_skbuff) { in init_rx_ring()
501 ret = -ENOMEM; in init_rx_ring()
508 p = rx_ring->dma_rx + desc_index; in init_rx_ring()
516 rx_ring->cur_rx = 0; in init_rx_ring()
517 rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize); in init_rx_ring()
518 priv->dma_buf_sz = bfsize; in init_rx_ring()
523 while (--desc_index >= 0) { in init_rx_ring()
526 p = rx_ring->dma_rx + desc_index; in init_rx_ring()
529 kfree(rx_ring->rx_skbuff); in init_rx_ring()
531 kfree(rx_ring->rx_skbuff_dma); in init_rx_ring()
533 dma_free_coherent(priv->device, in init_rx_ring()
535 rx_ring->dma_rx, rx_ring->dma_rx_phy); in init_rx_ring()
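
The bfsize computation in init_rx_ring() above (line 469) sizes each RX buffer as MTU plus Ethernet header, FCS and the IP-alignment pad, rounded up to 8 bytes. A quick self-contained check for a standard 1500-byte MTU, using the usual constant values (ETH_HLEN 14, ETH_FCS_LEN 4, NET_IP_ALIGN 2 on most architectures, 0 on x86):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int bfsize = ALIGN_UP(mtu + 14 /* ETH_HLEN */
					   + 4 /* ETH_FCS_LEN */
					   + 2 /* NET_IP_ALIGN */, 8U);

	printf("bfsize for MTU %u: %u\n", mtu, bfsize);	/* 1520 */
	return 0;
}
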
540 * free_tx_ring - free the TX descriptor ring
550 tx_ring->dma_tx, tx_ring->dma_tx_phy); in free_tx_ring()
554 * init_dma_desc_rings - init the RX/TX descriptor rings
564 int tx_rsize = priv->dma_tx_size; in init_dma_desc_rings()
565 int rx_rsize = priv->dma_rx_size; in init_dma_desc_rings()
569 ret = init_tx_ring(priv->device, queue_num, in init_dma_desc_rings()
570 priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
572 dev_err(&netd->dev, "TX DMA ring allocation failed!\n"); in init_dma_desc_rings()
579 priv->txq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
585 priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
594 priv->rxq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
602 while (queue_num--) in init_dma_desc_rings()
603 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
607 while (queue_num--) in init_dma_desc_rings()
608 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
615 struct sxgbe_priv_data *priv = txqueue->priv_ptr; in tx_free_ring_skbufs()
616 int tx_rsize = priv->dma_tx_size; in tx_free_ring_skbufs()
619 struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc; in tx_free_ring_skbufs()
621 if (txqueue->tx_skbuff_dma[dma_desc]) in tx_free_ring_skbufs()
622 dma_unmap_single(priv->device, in tx_free_ring_skbufs()
623 txqueue->tx_skbuff_dma[dma_desc], in tx_free_ring_skbufs()
624 priv->hw->desc->get_tx_len(tdesc), in tx_free_ring_skbufs()
627 dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]); in tx_free_ring_skbufs()
628 txqueue->tx_skbuff[dma_desc] = NULL; in tx_free_ring_skbufs()
629 txqueue->tx_skbuff_dma[dma_desc] = 0; in tx_free_ring_skbufs()
639 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in dma_free_tx_skbufs()
647 int tx_rsize = priv->dma_tx_size; in free_dma_desc_resources()
648 int rx_rsize = priv->dma_rx_size; in free_dma_desc_resources()
655 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in free_dma_desc_resources()
660 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); in free_dma_desc_resources()
669 priv->txq[queue_num] = devm_kmalloc(priv->device, in txring_mem_alloc()
671 if (!priv->txq[queue_num]) in txring_mem_alloc()
672 return -ENOMEM; in txring_mem_alloc()
683 priv->rxq[queue_num] = devm_kmalloc(priv->device, in rxring_mem_alloc()
685 if (!priv->rxq[queue_num]) in rxring_mem_alloc()
686 return -ENOMEM; in rxring_mem_alloc()
693 * sxgbe_mtl_operation_mode - HW MTL operation mode
696 * or Store-And-Forward capability.
703 if (likely(priv->plat->force_sf_dma_mode)) { in sxgbe_mtl_operation_mode()
705 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
706 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
708 priv->tx_tc = SXGBE_MTL_SFMODE; in sxgbe_mtl_operation_mode()
711 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
712 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
714 priv->rx_tc = SXGBE_MTL_SFMODE; in sxgbe_mtl_operation_mode()
715 } else if (unlikely(priv->plat->force_thresh_dma_mode)) { in sxgbe_mtl_operation_mode()
717 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
718 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
719 priv->tx_tc); in sxgbe_mtl_operation_mode()
721 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
722 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
723 priv->rx_tc); in sxgbe_mtl_operation_mode()
736 struct sxgbe_priv_data *priv = tqueue->priv_ptr; in sxgbe_tx_queue_clean()
737 unsigned int tx_rsize = priv->dma_tx_size; in sxgbe_tx_queue_clean()
739 u8 queue_no = tqueue->queue_no; in sxgbe_tx_queue_clean()
741 dev_txq = netdev_get_tx_queue(priv->dev, queue_no); in sxgbe_tx_queue_clean()
745 priv->xstats.tx_clean++; in sxgbe_tx_queue_clean()
746 while (tqueue->dirty_tx != tqueue->cur_tx) { in sxgbe_tx_queue_clean()
747 unsigned int entry = tqueue->dirty_tx % tx_rsize; in sxgbe_tx_queue_clean()
748 struct sk_buff *skb = tqueue->tx_skbuff[entry]; in sxgbe_tx_queue_clean()
751 p = tqueue->dma_tx + entry; in sxgbe_tx_queue_clean()
754 if (priv->hw->desc->get_tx_owner(p)) in sxgbe_tx_queue_clean()
759 __func__, tqueue->cur_tx, tqueue->dirty_tx); in sxgbe_tx_queue_clean()
761 if (likely(tqueue->tx_skbuff_dma[entry])) { in sxgbe_tx_queue_clean()
762 dma_unmap_single(priv->device, in sxgbe_tx_queue_clean()
763 tqueue->tx_skbuff_dma[entry], in sxgbe_tx_queue_clean()
764 priv->hw->desc->get_tx_len(p), in sxgbe_tx_queue_clean()
766 tqueue->tx_skbuff_dma[entry] = 0; in sxgbe_tx_queue_clean()
771 tqueue->tx_skbuff[entry] = NULL; in sxgbe_tx_queue_clean()
774 priv->hw->desc->release_tx_desc(p); in sxgbe_tx_queue_clean()
776 tqueue->dirty_tx++; in sxgbe_tx_queue_clean()
800 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in sxgbe_tx_all_clean()
805 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { in sxgbe_tx_all_clean()
807 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); in sxgbe_tx_all_clean()
820 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; in sxgbe_restart_tx_queue()
821 struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev, in sxgbe_restart_tx_queue()
828 priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num); in sxgbe_restart_tx_queue()
834 tx_ring->cur_tx = 0; in sxgbe_restart_tx_queue()
835 tx_ring->dirty_tx = 0; in sxgbe_restart_tx_queue()
838 priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num); in sxgbe_restart_tx_queue()
840 priv->dev->stats.tx_errors++; in sxgbe_restart_tx_queue()
867 * new GMAC chip generations have a new register to indicate the
870 * platform and necessary for old MAC10/100 and GMAC chips.
875 struct sxgbe_hw_features *features = &priv->hw_cap; in sxgbe_get_hw_features()
878 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0); in sxgbe_get_hw_features()
880 features->pmt_remote_wake_up = in sxgbe_get_hw_features()
882 features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval); in sxgbe_get_hw_features()
883 features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval); in sxgbe_get_hw_features()
884 features->tx_csum_offload = in sxgbe_get_hw_features()
886 features->rx_csum_offload = in sxgbe_get_hw_features()
888 features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval); in sxgbe_get_hw_features()
889 features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval); in sxgbe_get_hw_features()
890 features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval); in sxgbe_get_hw_features()
891 features->eee = SXGBE_HW_FEAT_EEE(rval); in sxgbe_get_hw_features()
895 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1); in sxgbe_get_hw_features()
897 features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval); in sxgbe_get_hw_features()
898 features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); in sxgbe_get_hw_features()
899 features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); in sxgbe_get_hw_features()
900 features->dcb_enable = SXGBE_HW_FEAT_DCB(rval); in sxgbe_get_hw_features()
901 features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval); in sxgbe_get_hw_features()
902 features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval); in sxgbe_get_hw_features()
903 features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval); in sxgbe_get_hw_features()
904 features->rss_enable = SXGBE_HW_FEAT_RSS(rval); in sxgbe_get_hw_features()
905 features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval); in sxgbe_get_hw_features()
906 features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval); in sxgbe_get_hw_features()
910 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2); in sxgbe_get_hw_features()
912 features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval); in sxgbe_get_hw_features()
913 features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval); in sxgbe_get_hw_features()
914 features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval); in sxgbe_get_hw_features()
915 features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval); in sxgbe_get_hw_features()
916 features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval); in sxgbe_get_hw_features()
917 features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval); in sxgbe_get_hw_features()
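
Each SXGBE_HW_FEAT_* accessor used above simply masks and shifts one field out of the three hardware feature registers read via get_hw_feature(). A generic illustration of that unpacking pattern with invented bit positions (the real layouts live in the driver headers and are not part of this listing):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout, for illustration only:
 * bit 13 = EEE capable, bits 6..11 = RX FIFO size code.
 */
#define FEAT_EEE(cap)		(((cap) >> 13) & 0x1)
#define FEAT_RX_FIFO(cap)	(((cap) >> 6) & 0x3f)

int main(void)
{
	uint32_t cap = (1u << 13) | (0x7u << 6);	/* pretend register value */

	printf("eee=%u rx_fifo_code=%u\n",
	       (unsigned)FEAT_EEE(cap), (unsigned)FEAT_RX_FIFO(cap));
	return 0;
}
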
932 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in sxgbe_check_ether_addr()
935 priv->hw->mac->get_umac_addr((void __iomem *) in sxgbe_check_ether_addr()
936 priv->ioaddr, addr, 0); in sxgbe_check_ether_addr()
938 eth_hw_addr_set(priv->dev, addr); in sxgbe_check_ether_addr()
940 eth_hw_addr_random(priv->dev); in sxgbe_check_ether_addr()
942 dev_info(priv->device, "device MAC address %pM\n", in sxgbe_check_ether_addr()
943 priv->dev->dev_addr); in sxgbe_check_ether_addr()
952 * in case these are not passed, a default is kept for the MAC or GMAC.
959 if (priv->plat->dma_cfg) { in sxgbe_init_dma_engine()
960 pbl = priv->plat->dma_cfg->pbl; in sxgbe_init_dma_engine()
961 fixed_burst = priv->plat->dma_cfg->fixed_burst; in sxgbe_init_dma_engine()
962 burst_map = priv->plat->dma_cfg->burst_map; in sxgbe_init_dma_engine()
966 priv->hw->dma->cha_init(priv->ioaddr, queue_num, in sxgbe_init_dma_engine()
968 (priv->txq[queue_num])->dma_tx_phy, in sxgbe_init_dma_engine()
969 (priv->rxq[queue_num])->dma_rx_phy, in sxgbe_init_dma_engine()
970 priv->dma_tx_size, priv->dma_rx_size); in sxgbe_init_dma_engine()
972 return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map); in sxgbe_init_dma_engine()
986 priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num, in sxgbe_init_mtl_engine()
987 priv->hw_cap.tx_mtl_qsize); in sxgbe_init_mtl_engine()
988 priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num); in sxgbe_init_mtl_engine()
1003 priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num); in sxgbe_disable_mtl_engine()
1032 struct sxgbe_tx_queue *p = priv->txq[queue_num]; in sxgbe_tx_init_coalesce()
1033 p->tx_coal_frames = SXGBE_TX_FRAMES; in sxgbe_tx_init_coalesce()
1034 p->tx_coal_timer = SXGBE_COAL_TX_TIMER; in sxgbe_tx_init_coalesce()
1035 timer_setup(&p->txtimer, sxgbe_tx_timer, 0); in sxgbe_tx_init_coalesce()
1036 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); in sxgbe_tx_init_coalesce()
1037 add_timer(&p->txtimer); in sxgbe_tx_init_coalesce()
1046 struct sxgbe_tx_queue *p = priv->txq[queue_num]; in sxgbe_tx_del_timer()
1047 del_timer_sync(&p->txtimer); in sxgbe_tx_del_timer()
1052 * sxgbe_open - open entry point of the driver
1057 * 0 on success and an appropriate negative error code as defined in errno.h
1065 clk_prepare_enable(priv->sxgbe_clk); in sxgbe_open()
1078 priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE); in sxgbe_open()
1079 priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE); in sxgbe_open()
1080 priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE); in sxgbe_open()
1081 priv->tx_tc = TC_DEFAULT; in sxgbe_open()
1082 priv->rx_tc = TC_DEFAULT; in sxgbe_open()
1096 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); in sxgbe_open()
1099 priv->hw->mac->core_init(priv->ioaddr); in sxgbe_open()
1101 priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); in sxgbe_open()
1105 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, in sxgbe_open()
1106 IRQF_SHARED, dev->name, dev); in sxgbe_open()
1109 __func__, priv->irq, ret); in sxgbe_open()
1116 if (priv->lpi_irq != dev->irq) { in sxgbe_open()
1117 ret = devm_request_irq(priv->device, priv->lpi_irq, in sxgbe_open()
1119 IRQF_SHARED, dev->name, dev); in sxgbe_open()
1122 __func__, priv->lpi_irq, ret); in sxgbe_open()
1129 ret = devm_request_irq(priv->device, in sxgbe_open()
1130 (priv->txq[queue_num])->irq_no, in sxgbe_open()
1132 dev->name, priv->txq[queue_num]); in sxgbe_open()
1135 __func__, priv->irq, ret); in sxgbe_open()
1142 ret = devm_request_irq(priv->device, in sxgbe_open()
1143 (priv->rxq[queue_num])->irq_no, in sxgbe_open()
1145 dev->name, priv->rxq[queue_num]); in sxgbe_open()
1148 __func__, priv->irq, ret); in sxgbe_open()
1154 priv->hw->mac->enable_tx(priv->ioaddr, true); in sxgbe_open()
1155 priv->hw->mac->enable_rx(priv->ioaddr, true); in sxgbe_open()
1161 memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats)); in sxgbe_open()
1163 priv->xstats.tx_threshold = priv->tx_tc; in sxgbe_open()
1164 priv->xstats.rx_threshold = priv->rx_tc; in sxgbe_open()
1168 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_open()
1169 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_open()
1171 if (dev->phydev) in sxgbe_open()
1172 phy_start(dev->phydev); in sxgbe_open()
1177 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { in sxgbe_open()
1178 priv->rx_riwt = SXGBE_MAX_DMA_RIWT; in sxgbe_open()
1179 priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT); in sxgbe_open()
1182 priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER; in sxgbe_open()
1183 priv->eee_enabled = sxgbe_eee_init(priv); in sxgbe_open()
1185 napi_enable(&priv->napi); in sxgbe_open()
1192 if (dev->phydev) in sxgbe_open()
1193 phy_disconnect(dev->phydev); in sxgbe_open()
1195 clk_disable_unprepare(priv->sxgbe_clk); in sxgbe_open()
1201 * sxgbe_release - close entry point of the driver
1210 if (priv->eee_enabled) in sxgbe_release()
1211 del_timer_sync(&priv->eee_ctrl_timer); in sxgbe_release()
1214 if (dev->phydev) { in sxgbe_release()
1215 phy_stop(dev->phydev); in sxgbe_release()
1216 phy_disconnect(dev->phydev); in sxgbe_release()
1221 napi_disable(&priv->napi); in sxgbe_release()
1227 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_release()
1228 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_release()
1237 priv->hw->mac->enable_tx(priv->ioaddr, false); in sxgbe_release()
1238 priv->hw->mac->enable_rx(priv->ioaddr, false); in sxgbe_release()
1240 clk_disable_unprepare(priv->sxgbe_clk); in sxgbe_release()
1255 first_desc->tdes01 = dma_map_single(priv->device, skb->data, in sxgbe_tso_prepare()
1257 if (dma_mapping_error(priv->device, first_desc->tdes01)) in sxgbe_tso_prepare()
1260 first_desc->tdes23.tx_rd_des23.first_desc = 1; in sxgbe_tso_prepare()
1261 priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len, in sxgbe_tso_prepare()
1263 skb->len - total_hdr_len); in sxgbe_tso_prepare()
1281 unsigned int tx_rsize = priv->dma_tx_size; in sxgbe_xmit()
1282 struct sxgbe_tx_queue *tqueue = priv->txq[txq_index]; in sxgbe_xmit()
1285 int nr_frags = skb_shinfo(skb)->nr_frags; in sxgbe_xmit()
1288 u16 cur_mss = skb_shinfo(skb)->gso_size; in sxgbe_xmit()
1294 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) in sxgbe_xmit()
1298 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in sxgbe_xmit()
1299 tqueue->hwts_tx_en))) in sxgbe_xmit()
1302 if (priv->tx_path_in_lpi_mode) in sxgbe_xmit()
1314 entry = tqueue->cur_tx % tx_rsize; in sxgbe_xmit()
1315 tx_desc = tqueue->dma_tx + entry; in sxgbe_xmit()
1322 tqueue->tx_skbuff[entry] = skb; in sxgbe_xmit()
1327 if (unlikely(tqueue->prev_mss != cur_mss)) { in sxgbe_xmit()
1328 priv->hw->desc->tx_ctxt_desc_set_mss( in sxgbe_xmit()
1330 priv->hw->desc->tx_ctxt_desc_set_tcmssv( in sxgbe_xmit()
1332 priv->hw->desc->tx_ctxt_desc_reset_ostc( in sxgbe_xmit()
1334 priv->hw->desc->tx_ctxt_desc_set_ctxt( in sxgbe_xmit()
1336 priv->hw->desc->tx_ctxt_desc_set_owner( in sxgbe_xmit()
1339 entry = (++tqueue->cur_tx) % tx_rsize; in sxgbe_xmit()
1340 first_desc = tqueue->dma_tx + entry; in sxgbe_xmit()
1342 tqueue->prev_mss = cur_mss; in sxgbe_xmit()
1346 tx_desc->tdes01 = dma_map_single(priv->device, in sxgbe_xmit()
1347 skb->data, no_pagedlen, DMA_TO_DEVICE); in sxgbe_xmit()
1348 if (dma_mapping_error(priv->device, tx_desc->tdes01)) in sxgbe_xmit()
1352 priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen, in sxgbe_xmit()
1358 const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; in sxgbe_xmit()
1361 entry = (++tqueue->cur_tx) % tx_rsize; in sxgbe_xmit()
1362 tx_desc = tqueue->dma_tx + entry; in sxgbe_xmit()
1363 tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len, in sxgbe_xmit()
1366 tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01; in sxgbe_xmit()
1367 tqueue->tx_skbuff[entry] = NULL; in sxgbe_xmit()
1370 priv->hw->desc->prepare_tx_desc(tx_desc, 0, len, in sxgbe_xmit()
1376 priv->hw->desc->set_tx_owner(tx_desc); in sxgbe_xmit()
1380 priv->hw->desc->close_tx_desc(tx_desc); in sxgbe_xmit()
1385 tqueue->tx_count_frames += nr_frags + 1; in sxgbe_xmit()
1386 if (tqueue->tx_count_frames > tqueue->tx_coal_frames) { in sxgbe_xmit()
1387 priv->hw->desc->clear_tx_ic(tx_desc); in sxgbe_xmit()
1388 priv->xstats.tx_reset_ic_bit++; in sxgbe_xmit()
1389 mod_timer(&tqueue->txtimer, in sxgbe_xmit()
1390 SXGBE_COAL_TIMER(tqueue->tx_coal_timer)); in sxgbe_xmit()
1392 tqueue->tx_count_frames = 0; in sxgbe_xmit()
1396 priv->hw->desc->set_tx_owner(first_desc); in sxgbe_xmit()
1401 tqueue->cur_tx++; in sxgbe_xmit()
1405 __func__, tqueue->cur_tx % tx_rsize, in sxgbe_xmit()
1406 tqueue->dirty_tx % tx_rsize, entry, in sxgbe_xmit()
1415 dev->stats.tx_bytes += skb->len; in sxgbe_xmit()
1417 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in sxgbe_xmit()
1418 tqueue->hwts_tx_en)) { in sxgbe_xmit()
1420 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in sxgbe_xmit()
1421 priv->hw->desc->tx_enable_tstamp(first_desc); in sxgbe_xmit()
1426 priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); in sxgbe_xmit()
1435 * that is based on zero-copy.
1439 unsigned int rxsize = priv->dma_rx_size; in sxgbe_rx_refill()
1440 int bfsize = priv->dma_buf_sz; in sxgbe_rx_refill()
1441 u8 qnum = priv->cur_rx_qnum; in sxgbe_rx_refill()
1443 for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; in sxgbe_rx_refill()
1444 priv->rxq[qnum]->dirty_rx++) { in sxgbe_rx_refill()
1445 unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; in sxgbe_rx_refill()
1448 p = priv->rxq[qnum]->dma_rx + entry; in sxgbe_rx_refill()
1450 if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { in sxgbe_rx_refill()
1453 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); in sxgbe_rx_refill()
1458 priv->rxq[qnum]->rx_skbuff[entry] = skb; in sxgbe_rx_refill()
1459 priv->rxq[qnum]->rx_skbuff_dma[entry] = in sxgbe_rx_refill()
1460 dma_map_single(priv->device, skb->data, bfsize, in sxgbe_rx_refill()
1463 p->rdes23.rx_rd_des23.buf2_addr = in sxgbe_rx_refill()
1464 priv->rxq[qnum]->rx_skbuff_dma[entry]; in sxgbe_rx_refill()
1469 priv->hw->desc->set_rx_owner(p); in sxgbe_rx_refill()
1470 priv->hw->desc->set_rx_int_on_com(p); in sxgbe_rx_refill()
1485 u8 qnum = priv->cur_rx_qnum; in sxgbe_rx()
1486 unsigned int rxsize = priv->dma_rx_size; in sxgbe_rx()
1487 unsigned int entry = priv->rxq[qnum]->cur_rx; in sxgbe_rx()
1498 p = priv->rxq[qnum]->dma_rx + entry; in sxgbe_rx()
1500 if (priv->hw->desc->get_rx_owner(p)) in sxgbe_rx()
1505 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; in sxgbe_rx()
1506 prefetch(priv->rxq[qnum]->dma_rx + next_entry); in sxgbe_rx()
1512 status = priv->hw->desc->rx_wbstatus(p, &priv->xstats, in sxgbe_rx()
1518 if (unlikely(!priv->rxcsum_insertion)) in sxgbe_rx()
1521 skb = priv->rxq[qnum]->rx_skbuff[entry]; in sxgbe_rx()
1524 netdev_err(priv->dev, "rx descriptor is not consistent\n"); in sxgbe_rx()
1526 prefetch(skb->data - NET_IP_ALIGN); in sxgbe_rx()
1527 priv->rxq[qnum]->rx_skbuff[entry] = NULL; in sxgbe_rx()
1529 frame_len = priv->hw->desc->get_rx_frame_len(p); in sxgbe_rx()
1533 skb->ip_summed = checksum; in sxgbe_rx()
1537 napi_gro_receive(&priv->napi, skb); in sxgbe_rx()
1548 * sxgbe_poll - sxgbe poll method (NAPI)
1560 u8 qnum = priv->cur_rx_qnum; in sxgbe_poll()
1562 priv->xstats.napi_poll++; in sxgbe_poll()
1569 priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); in sxgbe_poll()
1592 * sxgbe_common_interrupt - main ISR
1605 status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats); in sxgbe_common_interrupt()
1608 priv->xstats.tx_lpi_entry_n++; in sxgbe_common_interrupt()
1609 priv->tx_path_in_lpi_mode = true; in sxgbe_common_interrupt()
1612 priv->xstats.tx_lpi_exit_n++; in sxgbe_common_interrupt()
1613 priv->tx_path_in_lpi_mode = false; in sxgbe_common_interrupt()
1616 priv->xstats.rx_lpi_entry_n++; in sxgbe_common_interrupt()
1618 priv->xstats.rx_lpi_exit_n++; in sxgbe_common_interrupt()
1624 * sxgbe_tx_interrupt - TX DMA ISR
1633 struct sxgbe_priv_data *priv = txq->priv_ptr; in sxgbe_tx_interrupt()
1636 status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no, in sxgbe_tx_interrupt()
1637 &priv->xstats); in sxgbe_tx_interrupt()
1640 napi_schedule(&priv->napi); in sxgbe_tx_interrupt()
1644 sxgbe_restart_tx_queue(priv, txq->queue_no); in sxgbe_tx_interrupt()
1648 (priv->tx_tc != SXGBE_MTL_SFMODE) && in sxgbe_tx_interrupt()
1649 (priv->tx_tc < 512))) { in sxgbe_tx_interrupt()
1651 priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64; in sxgbe_tx_interrupt()
1652 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, in sxgbe_tx_interrupt()
1653 txq->queue_no, priv->tx_tc); in sxgbe_tx_interrupt()
1654 priv->xstats.tx_threshold = priv->tx_tc; in sxgbe_tx_interrupt()
1661 * sxgbe_rx_interrupt - RX DMA ISR
1670 struct sxgbe_priv_data *priv = rxq->priv_ptr; in sxgbe_rx_interrupt()
1673 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no, in sxgbe_rx_interrupt()
1674 &priv->xstats); in sxgbe_rx_interrupt()
1676 if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) { in sxgbe_rx_interrupt()
1677 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no); in sxgbe_rx_interrupt()
1678 __napi_schedule(&priv->napi); in sxgbe_rx_interrupt()
1683 (priv->rx_tc != SXGBE_MTL_SFMODE) && in sxgbe_rx_interrupt()
1684 (priv->rx_tc < 128))) { in sxgbe_rx_interrupt()
1686 priv->rx_tc += 32; in sxgbe_rx_interrupt()
1687 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, in sxgbe_rx_interrupt()
1688 rxq->queue_no, priv->rx_tc); in sxgbe_rx_interrupt()
1689 priv->xstats.rx_threshold = priv->rx_tc; in sxgbe_rx_interrupt()
1705 /* sxgbe_get_stats64 - entry point to see statistical information of device
1717 void __iomem *ioaddr = priv->ioaddr; in sxgbe_get_stats64()
1720 spin_lock(&priv->stats_lock); in sxgbe_get_stats64()
1726 stats->rx_bytes = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1730 stats->rx_packets = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1734 stats->multicast = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1738 stats->rx_crc_errors = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1742 stats->rx_length_errors = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1746 stats->rx_missed_errors = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1750 stats->tx_bytes = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1757 stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG, in sxgbe_get_stats64()
1759 stats->tx_errors = count - stats->tx_errors; in sxgbe_get_stats64()
1760 stats->tx_packets = count; in sxgbe_get_stats64()
1761 stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG, in sxgbe_get_stats64()
1764 spin_unlock(&priv->stats_lock); in sxgbe_get_stats64()
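
The tx statistics above are derived rather than read one-to-one: judging by the MMC register names, the "GB" counters count good-plus-bad frames while the "G" counter counts only good ones, so tx_errors is computed as total minus good. A trivial restatement of that arithmetic with made-up readings:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t total_frames = 1000000;	/* good + bad (GB counter) */
	uint64_t good_frames  =  999750;	/* good only  (G counter)  */

	printf("tx_packets=%llu tx_errors=%llu\n",
	       (unsigned long long)total_frames,
	       (unsigned long long)(total_frames - good_frames));	/* 1000000, 250 */
	return 0;
}
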
1767 /* sxgbe_set_features - entry point to set offload features of the device.
1780 netdev_features_t changed = dev->features ^ features; in sxgbe_set_features()
1784 priv->hw->mac->enable_rx_csum(priv->ioaddr); in sxgbe_set_features()
1785 priv->rxcsum_insertion = true; in sxgbe_set_features()
1787 priv->hw->mac->disable_rx_csum(priv->ioaddr); in sxgbe_set_features()
1788 priv->rxcsum_insertion = false; in sxgbe_set_features()
1795 /* sxgbe_change_mtu - entry point to change MTU size for the device.
1802 * 0 on success and an appropriate negative error code as defined in errno.h
1807 WRITE_ONCE(dev->mtu, new_mtu); in sxgbe_change_mtu()
1836 * sxgbe_set_rx_mode - entry point for setting different receive mode of
1849 void __iomem *ioaddr = (void __iomem *)priv->ioaddr; in sxgbe_set_rx_mode()
1858 if (dev->flags & IFF_PROMISC) { in sxgbe_set_rx_mode()
1862 (dev->flags & IFF_ALLMULTI)) { in sxgbe_set_rx_mode()
1863 value = SXGBE_FRAME_FILTER_PM; /* pass all multi */ in sxgbe_set_rx_mode()
1876 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; in sxgbe_set_rx_mode()
1896 sxgbe_set_umac_addr(ioaddr, ha->addr, reg); in sxgbe_set_rx_mode()
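
The bit_nr line above (line 1876) is the standard Ethernet multicast hash-filter computation: CRC-32 of the MAC address, bit-reversed, with the top six bits selecting one of 64 hash-table bits. A self-contained sketch of how such a 6-bit index is typically split into a register number and a bit mask (the exact sxgbe hash-register layout is not shown in this listing, so treat it as an assumption):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bit_nr = 45;			/* pretend top-6-bit hash value, 0..63 */
	uint32_t mc_filter[2] = { 0, 0 };	/* 64-bit hash table as two 32-bit registers */

	/* bit 5 of the index picks the register, the low 5 bits the bit within it */
	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);

	printf("reg=%u value=0x%08x\n",
	       (unsigned)(bit_nr >> 5), (unsigned)mc_filter[bit_nr >> 5]);
	return 0;
}
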
1914 * sxgbe_poll_controller - entry point for polling receive by device
1926 disable_irq(priv->irq); in sxgbe_poll_controller()
1927 sxgbe_rx_interrupt(priv->irq, dev); in sxgbe_poll_controller()
1928 enable_irq(priv->irq); in sxgbe_poll_controller()
1932 /* sxgbe_ioctl - Entry point for the Ioctl
1942 int ret = -EOPNOTSUPP; in sxgbe_ioctl()
1945 return -EINVAL; in sxgbe_ioctl()
1979 ops_ptr->mac = sxgbe_get_core_ops(); in sxgbe_get_ops()
1980 ops_ptr->desc = sxgbe_get_desc_ops(); in sxgbe_get_ops()
1981 ops_ptr->dma = sxgbe_get_dma_ops(); in sxgbe_get_ops()
1982 ops_ptr->mtl = sxgbe_get_mtl_ops(); in sxgbe_get_ops()
1985 ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG; in sxgbe_get_ops()
1986 ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG; in sxgbe_get_ops()
1992 ops_ptr->link.port = 0; in sxgbe_get_ops()
1993 ops_ptr->link.duplex = 0; in sxgbe_get_ops()
1994 ops_ptr->link.speed = SXGBE_SPEED_10G; in sxgbe_get_ops()
1998 * sxgbe_hw_init - Init the GMAC device
2007 priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL); in sxgbe_hw_init()
2008 if (!priv->hw) in sxgbe_hw_init()
2009 return -ENOMEM; in sxgbe_hw_init()
2012 sxgbe_get_ops(priv->hw); in sxgbe_hw_init()
2015 ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr); in sxgbe_hw_init()
2016 priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16; in sxgbe_hw_init()
2017 priv->hw->ctrl_id = (ctrl_ids & 0x000000ff); in sxgbe_hw_init()
2019 priv->hw->ctrl_uid, priv->hw->ctrl_id); in sxgbe_hw_init()
2025 if (priv->hw_cap.tx_csum_offload) in sxgbe_hw_init()
2028 if (priv->hw_cap.rx_csum_offload) in sxgbe_hw_init()
2039 while (retry_count--) { in sxgbe_sw_reset()
2047 return -EBUSY; in sxgbe_sw_reset()
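
sxgbe_sw_reset() above follows the common write-then-poll soft-reset shape: request the reset, poll the same register until the self-clearing bit drops, and give up with -EBUSY after a bounded number of retries. A hedged sketch of that shape only; the register and bit names below are placeholders, not the driver's real macros:

/* Sketch: SOFT_RESET_REG / SOFT_RESET_BIT are illustrative names. */
static int soft_reset(void __iomem *ioaddr)
{
	int retry_count = 10;

	writel(SOFT_RESET_BIT, ioaddr + SOFT_RESET_REG);
	while (retry_count--) {
		if (!(readl(ioaddr + SOFT_RESET_REG) & SOFT_RESET_BIT))
			return 0;	/* bit self-cleared: reset complete */
		mdelay(10);
	}

	return -EBUSY;			/* hardware never acknowledged the reset */
}
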
2077 priv->device = device; in sxgbe_drv_probe()
2078 priv->dev = ndev; in sxgbe_drv_probe()
2081 priv->plat = plat_dat; in sxgbe_drv_probe()
2082 priv->ioaddr = addr; in sxgbe_drv_probe()
2084 ret = sxgbe_sw_reset(priv->ioaddr); in sxgbe_drv_probe()
2105 ndev->netdev_ops = &sxgbe_netdev_ops; in sxgbe_drv_probe()
2107 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in sxgbe_drv_probe()
2110 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in sxgbe_drv_probe()
2111 ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO); in sxgbe_drv_probe()
2114 ndev->priv_flags |= IFF_UNICAST_FLT; in sxgbe_drv_probe()
2116 /* MTU range: 68 - 9000 */ in sxgbe_drv_probe()
2117 ndev->min_mtu = MIN_MTU; in sxgbe_drv_probe()
2118 ndev->max_mtu = MAX_MTU; in sxgbe_drv_probe()
2120 priv->msg_enable = netif_msg_init(debug, default_msg_level); in sxgbe_drv_probe()
2123 if (priv->hw_cap.tcpseg_offload) { in sxgbe_drv_probe()
2125 priv->hw->dma->enable_tso(priv->ioaddr, queue_num); in sxgbe_drv_probe()
2130 if (priv->hw_cap.rx_csum_offload) { in sxgbe_drv_probe()
2131 priv->hw->mac->enable_rx_csum(priv->ioaddr); in sxgbe_drv_probe()
2132 priv->rxcsum_insertion = true; in sxgbe_drv_probe()
2136 priv->rx_pause = 1; in sxgbe_drv_probe()
2137 priv->tx_pause = 1; in sxgbe_drv_probe()
2140 if (!priv->plat->riwt_off) { in sxgbe_drv_probe()
2141 priv->use_riwt = 1; in sxgbe_drv_probe()
2145 netif_napi_add(ndev, &priv->napi, sxgbe_poll); in sxgbe_drv_probe()
2147 spin_lock_init(&priv->stats_lock); in sxgbe_drv_probe()
2149 priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME); in sxgbe_drv_probe()
2150 if (IS_ERR(priv->sxgbe_clk)) { in sxgbe_drv_probe()
2158 * changed at run-time and it is fixed. Otherwise, the driver will try to in sxgbe_drv_probe()
2162 if (!priv->plat->clk_csr) in sxgbe_drv_probe()
2165 priv->clk_csr = priv->plat->clk_csr; in sxgbe_drv_probe()
2171 __func__, priv->plat->bus_id); in sxgbe_drv_probe()
2188 clk_put(priv->sxgbe_clk); in sxgbe_drv_probe()
2190 netif_napi_del(&priv->napi); in sxgbe_drv_probe()
2192 kfree(priv->hw); in sxgbe_drv_probe()
2213 priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); in sxgbe_drv_remove()
2216 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_drv_remove()
2217 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_drv_remove()
2219 priv->hw->mac->enable_tx(priv->ioaddr, false); in sxgbe_drv_remove()
2220 priv->hw->mac->enable_rx(priv->ioaddr, false); in sxgbe_drv_remove()
2226 clk_put(priv->sxgbe_clk); in sxgbe_drv_remove()
2228 netif_napi_del(&priv->napi); in sxgbe_drv_remove()
2230 kfree(priv->hw); in sxgbe_drv_remove()
2248 return -ENOSYS; in sxgbe_freeze()
2253 return -ENOSYS; in sxgbe_restore()
2306 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2307 MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");