Lines Matching +full:tx +full:- +full:device
18 * ------------------------------
21 * controllers which support up to 2.5 GbE and generally only supports BASE-T
22 * copper phys. This device is yet another variant on the venerable Intel 1 GbE
38 * 1) We believe that the device only supports up to 4 RX and TX queues.
39 * 2) There is only one TX context for each TX queue and it is mapped to the
42 * 4) This does otherwise support both the MSI-X and MSI/INTx interrupt
43 * management which are shaped very differently in the device.
44 * 5) The 2500BASE-T PHY support is unique, but the other PHY settings are
50 * ------------
52 * ------------
63 * of functionality related to the device. In particular:
90 * igc_ring.c This implements the core I/O routines of the device
92 * down as well as DMA, descriptor ring, and per-frame
104 * +---------------+
106 * | -|-+
108 * +---------------+ v
109 * +------------------------------+ +---------------------+
111 * | per-instance primary | +---->| |
112 * | structure | |+--->| Notes a MAC address | ...
114 * | igc_addr_t *igc_ucast -|--+| +---------------------+
115 * | igc_addr_t *igc_mcast -|---+ +---------------------------+
116 * | struct igc_hw *igc_hw -|--------->| struct igc_hw (core code) |
117 * | igc_tx_ring_t *igc_tx_rings -|--+ | |
118 * | igc_rx_ring_t *igc_rx_rings -|--|---+ | igc_mac_info mac |
119 * +------------------------------+ | | | igc_fc_info fc |
121 * +----------------------------------+ | | igc_nvm_info nvm |
122 * | v +---------------------------+
123 * | +--------------------------------------+
126 * | | igc_adv_rx_desc *irr_ring ---|--> rx hw descriptor ring
127 * | | uint32_t irr_next ---|--> next entry to look for data
128 * | | igc_rx_buffer_t **irr_work_list ---|--> corresponds to ring entries
129 * | | uint32_t irr_nfree ---|--> number of free list entries
130 * | | igc_rx_buffer_t **irr_free_list ---|--> set of buffers free for bind
131 * | | igc_rx_buffer_t *irr_arena ---|-+> array of all rx buffers
132 * | +--------------------------------------+ |
134 * | +----------------------------+ |
135 * | | igc_rx_buffer_t |<--+
 137 * | mblk_t *irb_mp -|---> mblk_t for rx buffer
138 * | | igc_dma_buffer_t irb_dma -|---> DMA memory for rx buffer
139 * | +----------------------------+
141 * | +------------------------------------+
142 * +-->| igc_tx_ring_t |
 144 * | igc_adv_tx_desc *itr_ring --|--> tx hw descriptor ring
 145 * | uint32_t itr_ring_head --|--> next descriptor to recycle
 146 * | uint32_t itr_ring_tail --|--> next descriptor to place
 147 * | uint32_t itr_ring_free --|--> free descriptors in ring
148 * | igc_tx_buffer_t **itr_work_list |--> corresponds to ring entries
149 * | list_t itr_free_list --|--> available tx buffers
150 * | igc_tx_buffer_t *itr_arena --|-+> array of all tx buffers
151 * +------------------------------------+ |
153 * +---------------------------------+ |
154 * | igc_tx_buffer_t |<-+
156 * | mblk_t *itb_mp --|--> mblk to tx (only in first)
157 * | igc_dma_buffer_t itb_dma --|--> tx DMA buffer for copy
158 * | ddi_dma_handle_t itb_bind_hdl --|--> DMA handle for bind
159 * +---------------------------------+
165 * access to the device's registers and it embeds the
170 * information that the device uses. In general, this
178 * See the 'TX Data Path Design' section for more
187 * igc_tx_buffer_t This represents a single tx buffer in the driver. A tx
190 * can be used to bind a specific mblk_t to it. tx buffers
193 * will end up with a 2 KiB buffer due to the device's
215 * igc_addr_t This represents a 48-bit Ethernet MAC address slot in
219 * --------------------
221 * --------------------
223 * The I225/226 controller like the I210 supports up to 4 rx and tx rings. Due
226 * different sets of interrupt modes. One where MSI-X is used and a mode where
228 * the MSI-X mode as that gives us more flexibility and due to the fact that the
233 * Each rx queue and tx queue is mapped to a specific bit position in the IVAR
235 * state changes. While the IVAR register allows for several bits for MSI-X
239 * MSI-X mode causes the device's various interrupt registers to be split into
241 * extended ones all start with 'E'. When in MSI-X mode, the EICR (cause), EICS
242 * (cause set), EIAC (auto-clear), EIMS (mask set) registers all operate with
243 * indexes that refer to the MSI-X. The primary way to temporarily disable
244 * interrupts for polling is to remove the given MSI-X from the auto-clear set
248 * for polling on a per-MSI-X basis. This generally means that the design for
249 * interrupts and rings is that all the tx rings and the link state change
250 * events share interrupt 0, while rx rings use interrupts 1-4. Because the x86
252 * only supporting a single rx and tx ring for the time being, though the driver
255 * -------------------
257 * -------------------
303 * +-------------+ +-----------+
304 * | Work List |<---*-------------------| Free List |
306 * +-------------+ loaned buffers +-----------+
309 * | +-+ copy is done . . . Returned to driver via
313 * +-------------------+ |
314 * | Loaned |------------------------+
316 * +-------------------+
321 * cost. It is possible to design this to be more like the tx subsystem where we
325 * -------------------
326 * TX Data Path Design
327 * -------------------
329 * The tx data path is a bit different in design from the rx data path. When the
330 * system wants to tx data there are two fundamental building blocks that we
336 * 2) We utilize the DMA handle that is in the tx buffer (but not the buffer's
340 * Because a given tx buffer may end up using more than one descriptor and we
343 * number of transmit buffers equal to the ring size. In addition, the tx data
348 * The tx descriptor ring is used in a bit of a different way. While part of the
359 * into the current tx buffer. A given tx buffer can be used to copy multiple
361 * packet split into 125 byte chunks, this would end up using a single tx data
363 * spread across several mblk_t's so we may end up leveraging multiple tx data
379 * hardware has processed internal to the driver) due to a tx interrupt or
386 * 2) This will then be turned into descriptors in the ring. Each tx data buffer
410 * tx buffer. There will always be a tx buffer in the same index in the
420 * freemsgchain() at the end. The fact that we won't free any tx buffers
439 * -------
441 * -------
451 * 1) One should not hold locks for both the rx rings and tx rings at the same
457 * -------------------
459 * -------------------
466 * - Multiple ring, RSS support: As the OS changes towards offering more
469 * rx group with multiple rings and leverage the tx pseudo-group support.
471 * - TCP segmentation offload support: Right now the driver does not support
473 * information for TSO is in the tx data path right now.
475 * - FMA Support: Currently the driver does not rig up support for FMA.
476 * Participating in that and more generally being able to reset the device
479 * - TX stall detection: Related to the above, carefully designing a tx stall
480 * detection and resetting the device when that happens would probably be
483 * - UFM support: Exposing the NVM and PBA (printed board assembly) through the
486 * - Dynamic MTU changing: Right now the driver takes advantage of the
487 * simplification of not allowing the MTU to change once the device has been
516 ASSERT3U(reg, <, igc->igc_regs_size); in igc_read32()
517 addr = (uint32_t *)(igc->igc_regs_base + reg); in igc_read32()
518 return (ddi_get32(igc->igc_regs_hdl, addr)); in igc_read32()
525 ASSERT3U(reg, <, igc->igc_regs_size); in igc_write32()
526 addr = (uint32_t *)(igc->igc_regs_base + reg); in igc_write32()
527 ddi_put32(igc->igc_regs_hdl, addr, val); in igc_write32()
533 * this looks like for non-copper PHYs if that ever becomes relevant.
538 ASSERT(MUTEX_HELD(&igc->igc_lock)); in igc_link_up()
544 (void) igc_check_for_link(&igc->igc_hw); in igc_link_up()
545 return (!igc->igc_hw.mac.get_link_status); in igc_link_up()
554 mutex_enter(&igc->igc_lock); in igc_intr_lsc()
555 orig_state = igc->igc_link_state; in igc_intr_lsc()
560 igc->igc_hw.mac.get_link_status = true; in igc_intr_lsc()
564 (void) igc_get_speed_and_duplex(&igc->igc_hw, in igc_intr_lsc()
565 &igc->igc_link_speed, &duplex); in igc_intr_lsc()
569 igc->igc_link_duplex = LINK_DUPLEX_HALF; in igc_intr_lsc()
572 igc->igc_link_duplex = LINK_DUPLEX_FULL; in igc_intr_lsc()
575 igc->igc_link_duplex = LINK_DUPLEX_UNKNOWN; in igc_intr_lsc()
578 igc->igc_link_state = LINK_STATE_UP; in igc_intr_lsc()
580 igc->igc_link_state = LINK_STATE_DOWN; in igc_intr_lsc()
581 igc->igc_link_speed = 0; in igc_intr_lsc()
582 igc->igc_link_duplex = LINK_DUPLEX_UNKNOWN; in igc_intr_lsc()
584 new_state = igc->igc_link_state; in igc_intr_lsc()
589 (void) igc_read_phy_reg(&igc->igc_hw, PHY_CONTROL, &igc->igc_phy_ctrl); in igc_intr_lsc()
590 (void) igc_read_phy_reg(&igc->igc_hw, PHY_STATUS, &igc->igc_phy_status); in igc_intr_lsc()
591 (void) igc_read_phy_reg(&igc->igc_hw, PHY_AUTONEG_ADV, in igc_intr_lsc()
592 &igc->igc_phy_an_adv); in igc_intr_lsc()
593 (void) igc_read_phy_reg(&igc->igc_hw, PHY_LP_ABILITY, in igc_intr_lsc()
594 &igc->igc_phy_lp); in igc_intr_lsc()
595 (void) igc_read_phy_reg(&igc->igc_hw, PHY_AUTONEG_EXP, in igc_intr_lsc()
596 &igc->igc_phy_an_exp); in igc_intr_lsc()
597 (void) igc_read_phy_reg(&igc->igc_hw, PHY_1000T_CTRL, in igc_intr_lsc()
598 &igc->igc_phy_1000t_ctrl); in igc_intr_lsc()
599 (void) igc_read_phy_reg(&igc->igc_hw, PHY_1000T_STATUS, in igc_intr_lsc()
600 &igc->igc_phy_1000t_status); in igc_intr_lsc()
601 (void) igc_read_phy_reg(&igc->igc_hw, PHY_EXT_STATUS, in igc_intr_lsc()
602 &igc->igc_phy_ext_status); in igc_intr_lsc()
603 (void) igc_read_phy_reg(&igc->igc_hw, PHY_EXT_STATUS, in igc_intr_lsc()
604 &igc->igc_phy_ext_status); in igc_intr_lsc()
607 (void) igc_read_phy_reg(&igc->igc_hw, mmd_base | ANEG_MULTIGBT_AN_CTRL, in igc_intr_lsc()
608 &igc->igc_phy_mmd_ctrl); in igc_intr_lsc()
609 (void) igc_read_phy_reg(&igc->igc_hw, mmd_base | ANEG_MULTIGBT_AN_STS1, in igc_intr_lsc()
610 &igc->igc_phy_mmd_sts); in igc_intr_lsc()
611 mutex_exit(&igc->igc_lock); in igc_intr_lsc()
614 mac_link_update(igc->igc_mac_hdl, new_state); in igc_intr_lsc()
626 ASSERT3U(queue, <, igc->igc_nrx_rings); in igc_intr_rx_queue()
627 ring = &igc->igc_rx_rings[queue]; in igc_intr_rx_queue()
629 mutex_enter(&ring->irr_lock); in igc_intr_rx_queue()
630 if ((ring->irr_flags & IGC_RXR_F_POLL) == 0) { in igc_intr_rx_queue()
633 mutex_exit(&ring->irr_lock); in igc_intr_rx_queue()
636 mac_rx_ring(igc->igc_mac_hdl, ring->irr_rh, mp, ring->irr_gen); in igc_intr_rx_queue()
648 igc_tx_recycle(igc, &igc->igc_tx_rings[0]); in igc_intr_tx_other()
663 if (pci_config_setup(igc->igc_dip, &igc->igc_cfgspace) != DDI_SUCCESS) { in igc_setup_regs()
664 dev_err(igc->igc_dip, CE_WARN, "failed to map config space"); in igc_setup_regs()
668 if (ddi_dev_regsize(igc->igc_dip, IGC_PCI_BAR, &igc->igc_regs_size) != in igc_setup_regs()
670 dev_err(igc->igc_dip, CE_WARN, "failed to get BAR %u size", in igc_setup_regs()
671 IGC_PCI_BAR - 1); in igc_setup_regs()
681 if ((ret = ddi_regs_map_setup(igc->igc_dip, IGC_PCI_BAR, in igc_setup_regs()
682 &igc->igc_regs_base, 0, igc->igc_regs_size, &da, in igc_setup_regs()
683 &igc->igc_regs_hdl)) != DDI_SUCCESS) { in igc_setup_regs()
684 dev_err(igc->igc_dip, CE_WARN, "failed to map registers: %d", in igc_setup_regs()
705 igc->igc_hw.back = igc; in igc_core_code_init()
706 igc->igc_hw.vendor_id = pci_config_get16(igc->igc_cfgspace, in igc_core_code_init()
708 igc->igc_hw.device_id = pci_config_get16(igc->igc_cfgspace, in igc_core_code_init()
710 igc->igc_hw.revision_id = pci_config_get8(igc->igc_cfgspace, in igc_core_code_init()
712 igc->igc_hw.subsystem_vendor_id = pci_config_get16(igc->igc_cfgspace, in igc_core_code_init()
714 igc->igc_hw.subsystem_device_id = pci_config_get16(igc->igc_cfgspace, in igc_core_code_init()
717 if ((ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, igc->igc_dip, in igc_core_code_init()
719 dev_err(igc->igc_dip, CE_WARN, "failed to look up 'reg' " in igc_core_code_init()
731 igc->igc_hw.bus.func = PCI_REG_FUNC_G(regs[0]); in igc_core_code_init()
732 igc->igc_hw.bus.pci_cmd_word = pci_config_get16(igc->igc_cfgspace, in igc_core_code_init()
740 igc->igc_hw.hw_addr = (uint8_t *)igc->igc_regs_base; in igc_core_code_init()
742 if ((ret = igc_set_mac_type(&igc->igc_hw)) != IGC_SUCCESS) { in igc_core_code_init()
743 dev_err(igc->igc_dip, CE_WARN, "failed to set mac type: %d", in igc_core_code_init()
748 if ((ret = igc_setup_init_funcs(&igc->igc_hw, true)) != IGC_SUCCESS) { in igc_core_code_init()
749 dev_err(igc->igc_dip, CE_WARN, "failed to setup core code " in igc_core_code_init()
758 if ((ret = igc_get_bus_info(&igc->igc_hw)) != IGC_SUCCESS) { in igc_core_code_init()
759 dev_err(igc->igc_dip, CE_WARN, "core code failed to get bus " in igc_core_code_init()
770 switch (igc->igc_hw.mac.type) { in igc_limits_init()
772 igc->igc_limits.il_max_rx_rings = IGC_MAX_RX_RINGS_I225; in igc_limits_init()
773 igc->igc_limits.il_max_tx_rings = IGC_MAX_RX_RINGS_I225; in igc_limits_init()
774 igc->igc_limits.il_max_mtu = IGC_MAX_MTU_I225; in igc_limits_init()
777 dev_err(igc->igc_dip, CE_WARN, "unknown MAC type: %u", in igc_limits_init()
778 igc->igc_hw.mac.type); in igc_limits_init()
790 * 1) The hardware requires that the rx and tx sizes all be 1 KiB (0x400) byte
792 * 2) Our tx engine can handle copying across multiple descriptors, so we cap
793 * the maximum tx buffer size at one page.
803 unsigned long pagesize = ddi_ptob(igc->igc_dip, 1); in igc_hw_buf_update()
806 igc->igc_max_frame = igc->igc_mtu + sizeof (struct ether_vlan_header) + in igc_hw_buf_update()
808 igc->igc_rx_buf_size = P2ROUNDUP_TYPED(igc->igc_max_frame + in igc_hw_buf_update()
810 tx_mtu = P2ROUNDUP_TYPED(igc->igc_max_frame, IGC_BUF_ALIGN, uint32_t); in igc_hw_buf_update()
811 igc->igc_tx_buf_size = MIN(tx_mtu, pagesize); in igc_hw_buf_update()
820 if ((ret = ddi_intr_get_supported_types(igc->igc_dip, &types)) != in igc_intr_init()
822 dev_err(igc->igc_dip, CE_WARN, "failed to get supported " in igc_intr_init()
828 * For now, we simplify our lives and device support by only supporting in igc_intr_init()
829 * MSI-X interrupts. When we find versions of this without MSI-X in igc_intr_init()
833 dev_err(igc->igc_dip, CE_WARN, "device does not support MSI-X, " in igc_intr_init()
838 if ((ret = ddi_intr_get_nintrs(igc->igc_dip, DDI_INTR_TYPE_MSIX, in igc_intr_init()
840 dev_err(igc->igc_dip, CE_WARN, "failed to get number of " in igc_intr_init()
841 "supported MSI-X interrupts: %d", ret); in igc_intr_init()
846 dev_err(igc->igc_dip, CE_WARN, "igc driver currently requires " in igc_intr_init()
847 "%d MSI-X interrupts be supported, found %d", min_nintrs, in igc_intr_init()
852 if ((ret = ddi_intr_get_navail(igc->igc_dip, DDI_INTR_TYPE_MSIX, in igc_intr_init()
854 dev_err(igc->igc_dip, CE_WARN, "failed to get number of " in igc_intr_init()
855 "available MSI-X interrupts: %d", ret); in igc_intr_init()
860 dev_err(igc->igc_dip, CE_WARN, "igc driver currently requires " in igc_intr_init()
861 "%d MSI-X interrupts be available, found %d", min_nintrs, in igc_intr_init()
868 * device supports, but for now it's limited to two. See 'Rings and in igc_intr_init()
873 igc->igc_intr_size = req * sizeof (ddi_intr_handle_t); in igc_intr_init()
874 igc->igc_intr_handles = kmem_alloc(igc->igc_intr_size, KM_SLEEP); in igc_intr_init()
876 if ((ret = ddi_intr_alloc(igc->igc_dip, igc->igc_intr_handles, in igc_intr_init()
877 DDI_INTR_TYPE_MSIX, 0, req, &igc->igc_nintrs, in igc_intr_init()
879 dev_err(igc->igc_dip, CE_WARN, "failed to allocate interrupts: " in igc_intr_init()
884 igc->igc_intr_type = DDI_INTR_TYPE_MSIX; in igc_intr_init()
885 igc->igc_attach |= IGC_ATTACH_INTR_ALLOC; in igc_intr_init()
886 if (igc->igc_nintrs < min_nintrs) { in igc_intr_init()
887 dev_err(igc->igc_dip, CE_WARN, "received %d interrupts, but " in igc_intr_init()
888 "needed at least %d", igc->igc_nintrs, min_nintrs); in igc_intr_init()
892 if ((ret = ddi_intr_get_pri(igc->igc_intr_handles[0], in igc_intr_init()
893 &igc->igc_intr_pri)) != DDI_SUCCESS) { in igc_intr_init()
894 dev_err(igc->igc_dip, CE_WARN, "failed to get interrupt " in igc_intr_init()
899 if ((ret = ddi_intr_get_cap(igc->igc_intr_handles[0], in igc_intr_init()
900 &igc->igc_intr_cap)) != DDI_SUCCESS) { in igc_intr_init()
901 dev_err(igc->igc_dip, CE_WARN, "failed to get interrupt " in igc_intr_init()
911 * interrupt assignments. All tx rings share interrupt 0. All rx rings have
913 * in the face of actual multi-ring support
919 igc->igc_tx_rings = kmem_zalloc(sizeof (igc_tx_ring_t) * in igc_rings_alloc()
920 igc->igc_ntx_rings, KM_SLEEP); in igc_rings_alloc()
922 for (uint32_t i = 0; i < igc->igc_ntx_rings; i++) { in igc_rings_alloc()
923 igc->igc_tx_rings[i].itr_igc = igc; in igc_rings_alloc()
924 igc->igc_tx_rings[i].itr_idx = i; in igc_rings_alloc()
925 igc->igc_tx_rings[i].itr_intr_idx = intr; in igc_rings_alloc()
926 mutex_init(&igc->igc_tx_rings[i].itr_lock, NULL, MUTEX_DRIVER, in igc_rings_alloc()
927 DDI_INTR_PRI(igc->igc_intr_pri)); in igc_rings_alloc()
928 if (!igc_tx_ring_stats_init(igc, &igc->igc_tx_rings[i])) { in igc_rings_alloc()
933 igc->igc_rx_rings = kmem_zalloc(sizeof (igc_rx_ring_t) * in igc_rings_alloc()
934 igc->igc_nrx_rings, KM_SLEEP); in igc_rings_alloc()
937 for (uint32_t i = 0; i < igc->igc_nrx_rings; i++, intr++) { in igc_rings_alloc()
938 igc->igc_rx_rings[i].irr_igc = igc; in igc_rings_alloc()
939 igc->igc_rx_rings[i].irr_idx = i; in igc_rings_alloc()
940 igc->igc_rx_rings[i].irr_intr_idx = intr; in igc_rings_alloc()
941 mutex_init(&igc->igc_rx_rings[i].irr_lock, NULL, MUTEX_DRIVER, in igc_rings_alloc()
942 DDI_INTR_PRI(igc->igc_intr_pri)); in igc_rings_alloc()
943 mutex_init(&igc->igc_rx_rings[i].irr_free_lock, NULL, in igc_rings_alloc()
944 MUTEX_DRIVER, DDI_INTR_PRI(igc->igc_intr_pri)); in igc_rings_alloc()
945 cv_init(&igc->igc_rx_rings[i].irr_free_cv, NULL, CV_DRIVER, in igc_rings_alloc()
947 if (!igc_rx_ring_stats_init(igc, &igc->igc_rx_rings[i])) { in igc_rings_alloc()
952 ASSERT3U(intr, ==, igc->igc_nintrs); in igc_rings_alloc()
958 * Allocate our interrupts. Note, we have more or less constrained the device
967 if ((ret = ddi_intr_add_handler(igc->igc_intr_handles[0], in igc_intr_hdlr_init()
969 dev_err(igc->igc_dip, CE_WARN, "failed to add tx/other " in igc_intr_hdlr_init()
974 if ((ret = ddi_intr_add_handler(igc->igc_intr_handles[1], in igc_intr_hdlr_init()
976 dev_err(igc->igc_dip, CE_WARN, "failed to add rx interrupt " in igc_intr_hdlr_init()
978 if ((ret = ddi_intr_remove_handler(igc->igc_intr_handles[0])) != in igc_intr_hdlr_init()
980 dev_err(igc->igc_dip, CE_WARN, "failed to remove " in igc_intr_hdlr_init()
981 "tx/other interrupt handler"); in igc_intr_hdlr_init()
1004 * Basic device initialization and sanity check. This covers that we can
1005 * properly reset the device, validate its checksum, and get a valid MAC
1014 if ((ret = igc_reset_hw(&igc->igc_hw)) != IGC_SUCCESS) { in igc_hw_init()
1015 dev_err(igc->igc_dip, CE_WARN, "failed to reset device: %d", in igc_hw_init()
 1026 * Check the NVM validity if a device is present. in igc_hw_init()
1030 if ((ret = igc_validate_nvm_checksum(&igc->igc_hw)) != in igc_hw_init()
1032 dev_err(igc->igc_dip, CE_WARN, "failed to validate " in igc_hw_init()
1038 if ((ret = igc_read_mac_addr(&igc->igc_hw)) != IGC_SUCCESS) { in igc_hw_init()
1039 dev_err(igc->igc_dip, CE_WARN, "failed to read MAC address: %d", in igc_hw_init()
1044 if ((ret = igc_get_phy_id(&igc->igc_hw)) != IGC_SUCCESS) { in igc_hw_init()
1045 dev_err(igc->igc_dip, CE_WARN, "failed to get PHY id: %d", ret); in igc_hw_init()
1054 * that back to the defaults we got when we started up the device.
1059 igc_write32(igc, IGC_LEDCTL, igc->igc_ledctl); in igc_led_fini()
1122 * by default (though the NVM would likely be better). We then create pre-canned
1124 * the caveats in definitions here. Note, we only tweak the non-activity LEDs
1133 igc->igc_ledctl = led; in igc_led_init()
1134 igc->igc_ledctl_on = led; in igc_led_init()
1135 igc->igc_ledctl_off = led; in igc_led_init()
1136 igc->igc_ledctl_blink = led; in igc_led_init()
1147 igc->igc_ledctl_on = igc_led_set_mode(i, in igc_led_init()
1148 igc->igc_ledctl_on, I225_LED_M_OFF); in igc_led_init()
1149 igc->igc_ledctl_off = igc_led_set_mode(i, in igc_led_init()
1150 igc->igc_ledctl_off, I225_LED_M_ON); in igc_led_init()
1151 igc->igc_ledctl_blink = igc_led_set_mode(i, in igc_led_init()
1152 igc->igc_ledctl_blink, I225_LED_M_OFF); in igc_led_init()
1154 igc->igc_ledctl_on = igc_led_set_mode(i, in igc_led_init()
1155 igc->igc_ledctl_on, I225_LED_M_ON); in igc_led_init()
1156 igc->igc_ledctl_off = igc_led_set_mode(i, in igc_led_init()
1157 igc->igc_ledctl_off, I225_LED_M_OFF); in igc_led_init()
1158 igc->igc_ledctl_blink = igc_led_set_mode(i, in igc_led_init()
1159 igc->igc_ledctl_blink, I225_LED_M_ON); in igc_led_init()
1163 igc->igc_ledctl_blink = igc_led_set_blink(i, in igc_led_init()
1164 igc->igc_ledctl_blink, true); in igc_led_init()
1167 igc->igc_led_mode = MAC_LED_DEFAULT; in igc_led_init()
1191 bitend = bitoff + IGC_IVAR_ENT_LEN - 1; in igc_write_ivar()
1196 igc->igc_eims |= 1 << msix; in igc_write_ivar()
1201 * interrupts are mapped to causes. The device must be specifically enabled for
1202 * MSI-X and then this is also where we go ensure that all of our interrupt
1204 * to enable MSI-X settings otherwise later settings won't do anything.
1223 igc->igc_eims = 1; in igc_hw_intr_init()
1227 * register handles mapping a given queue to an MSI-X. Each IVAR handles in igc_hw_intr_init()
1230 for (uint32_t i = 0; i < igc->igc_ntx_rings; i++) { in igc_hw_intr_init()
1232 igc->igc_tx_rings[i].itr_intr_idx); in igc_hw_intr_init()
1235 for (uint32_t i = 0; i < igc->igc_nrx_rings; i++) { in igc_hw_intr_init()
1236 igc_write_ivar(igc, i, true, igc->igc_rx_rings[i].irr_intr_idx); in igc_hw_intr_init()
1239 for (uint32_t i = 0; i < igc->igc_nintrs; i++) { in igc_hw_intr_init()
1240 igc_write32(igc, IGC_EITR(i), igc->igc_eitr); in igc_hw_intr_init()
1245 * Synchronize our sense of the unicast table over to the device. If this is the
1252 ASSERT(MUTEX_HELD(&igc->igc_lock)); in igc_unicast_sync()
1254 if (igc->igc_ucast == NULL) { in igc_unicast_sync()
1255 igc->igc_nucast = igc->igc_hw.mac.rar_entry_count; in igc_unicast_sync()
1256 igc->igc_ucast = kmem_zalloc(sizeof (igc_addr_t) * in igc_unicast_sync()
1257 igc->igc_nucast, KM_SLEEP); in igc_unicast_sync()
1260 for (uint16_t i = 0; i < igc->igc_nucast; i++) { in igc_unicast_sync()
1261 int ret = igc_rar_set(&igc->igc_hw, igc->igc_ucast[i].ia_mac, in igc_unicast_sync()
1285 ASSERT(MUTEX_HELD(&igc->igc_lock)); in igc_multicast_sync()
1287 if (igc->igc_mcast == NULL) { in igc_multicast_sync()
1288 igc->igc_nmcast = igc->igc_hw.mac.mta_reg_count; in igc_multicast_sync()
1289 igc->igc_mcast = kmem_zalloc(sizeof (igc_addr_t) * in igc_multicast_sync()
1290 igc->igc_nmcast, KM_SLEEP); in igc_multicast_sync()
1291 igc->igc_mcast_raw = kmem_alloc(sizeof (ether_addr_t) * in igc_multicast_sync()
1292 igc->igc_nmcast, KM_SLEEP); in igc_multicast_sync()
1295 bzero(igc->igc_mcast_raw, sizeof (ether_addr_t) * igc->igc_nmcast); in igc_multicast_sync()
1297 for (uint16_t i = 0; i < igc->igc_nmcast; i++) { in igc_multicast_sync()
1298 ether_addr_t *targ = &igc->igc_mcast_raw[nvalid]; in igc_multicast_sync()
1300 if (!igc->igc_mcast[i].ia_valid) in igc_multicast_sync()
1302 bcopy(igc->igc_mcast[i].ia_mac, targ, sizeof (ether_addr_t)); in igc_multicast_sync()
1306 igc_update_mc_addr_list(&igc->igc_hw, (uint8_t *)igc->igc_mcast_raw, in igc_multicast_sync()
1320 struct igc_hw *hw = &igc->igc_hw; in igc_hw_common_init()
1326 * currently leave the RXPBS and TXPBS at their power-on-reset defaults. in igc_hw_common_init()
1329 * have 16-byte granularity. The general guidelines from there was that in igc_hw_common_init()
1332 * - After an XOFF, you want to receive at least two frames. We use in igc_hw_common_init()
1334 * - The low water mark apparently wants to be closer to the high water in igc_hw_common_init()
1342 hwm2x = (pba << 10) - 2 * igc->igc_max_frame; in igc_hw_common_init()
1345 hw->fc.high_water = hwm & 0xfffffff0; in igc_hw_common_init()
1346 hw->fc.low_water = igc->igc_hw.fc.high_water - 16; in igc_hw_common_init()
1351 hw->fc.pause_time = IGC_FC_PAUSE_TIME; in igc_hw_common_init()
1352 hw->fc.send_xon = true; in igc_hw_common_init()
1355 dev_err(igc->igc_dip, CE_WARN, "failed to reset device: %d", in igc_hw_common_init()
1361 dev_err(igc->igc_dip, CE_WARN, "failed to init hardware: %d", in igc_hw_common_init()
1373 dev_err(igc->igc_dip, CE_WARN, "failed to set D0 LPLU mode: %d", in igc_hw_common_init()
1384 dev_err(igc->igc_dip, CE_WARN, "failed to set EEE mode: %d", in igc_hw_common_init()
1391 mutex_enter(&igc->igc_lock); in igc_hw_common_init()
1395 igc->igc_hw.mac.get_link_status = true; in igc_hw_common_init()
1398 mutex_exit(&igc->igc_lock); in igc_hw_common_init()
1408 if ((igc->igc_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) { in igc_intr_en()
1409 ret = ddi_intr_block_enable(igc->igc_intr_handles, in igc_intr_en()
1410 igc->igc_nintrs); in igc_intr_en()
1412 dev_err(igc->igc_dip, CE_WARN, "failed to block " in igc_intr_en()
1417 for (int i = 0; i < igc->igc_nintrs; i++) { in igc_intr_en()
1418 ret = ddi_intr_enable(igc->igc_intr_handles[i]); in igc_intr_en()
1420 dev_err(igc->igc_dip, CE_WARN, "failed to " in igc_intr_en()
1424 igc->igc_intr_handles[clean]); in igc_intr_en()
1426 dev_err(igc->igc_dip, CE_WARN, in igc_intr_en()
1459 * on the device itself.
1472 * The hardware has extended and non-extended interrupt masks and in igc_hw_intr_enable()
1473 * auto-clear registers. We always disable auto-clear for the in igc_hw_intr_enable()
1474 * non-extended portions. See the I210 datasheet 'Setting Interrupt in igc_hw_intr_enable()
1478 * device reset assertions. in igc_hw_intr_enable()
1482 igc_write32(igc, IGC_EIAC, igc->igc_eims); in igc_hw_intr_enable()
1483 igc_write32(igc, IGC_EIMS, igc->igc_eims); in igc_hw_intr_enable()
1491 if (igc->igc_mcast != NULL) { in igc_cleanup()
1492 ASSERT3U(igc->igc_nmcast, !=, 0); in igc_cleanup()
1493 kmem_free(igc->igc_mcast_raw, sizeof (ether_addr_t) * in igc_cleanup()
1494 igc->igc_nmcast); in igc_cleanup()
1495 kmem_free(igc->igc_mcast, sizeof (igc_addr_t) * in igc_cleanup()
1496 igc->igc_nmcast); in igc_cleanup()
1497 igc->igc_nmcast = 0; in igc_cleanup()
1498 igc->igc_mcast = NULL; in igc_cleanup()
1501 if (igc->igc_ucast != NULL) { in igc_cleanup()
1502 ASSERT3U(igc->igc_nucast, !=, 0); in igc_cleanup()
1503 kmem_free(igc->igc_ucast, sizeof (igc_addr_t) * in igc_cleanup()
1504 igc->igc_nucast); in igc_cleanup()
1505 igc->igc_nucast = 0; in igc_cleanup()
1506 igc->igc_ucast = NULL; in igc_cleanup()
1509 if ((igc->igc_attach & IGC_ATTACH_INTR_EN) != 0) { in igc_cleanup()
1511 if ((igc->igc_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) { in igc_cleanup()
1512 ret = ddi_intr_block_disable(igc->igc_intr_handles, in igc_cleanup()
1513 igc->igc_nintrs); in igc_cleanup()
1515 dev_err(igc->igc_dip, CE_WARN, "failed to " in igc_cleanup()
1519 for (int i = 0; i < igc->igc_nintrs; i++) { in igc_cleanup()
1521 igc->igc_intr_handles[i]); in igc_cleanup()
1523 dev_err(igc->igc_dip, CE_WARN, "failed " in igc_cleanup()
1529 igc->igc_attach &= ~IGC_ATTACH_INTR_EN; in igc_cleanup()
1532 if ((igc->igc_attach & IGC_ATTACH_MAC) != 0) { in igc_cleanup()
1533 int ret = mac_unregister(igc->igc_mac_hdl); in igc_cleanup()
1535 dev_err(igc->igc_dip, CE_WARN, "failed to unregister " in igc_cleanup()
1538 igc->igc_attach &= ~IGC_ATTACH_MAC; in igc_cleanup()
1541 if ((igc->igc_attach & IGC_ATTACH_STATS) != 0) { in igc_cleanup()
1543 igc->igc_attach &= ~IGC_ATTACH_STATS; in igc_cleanup()
1546 if ((igc->igc_attach & IGC_ATTACH_LED) != 0) { in igc_cleanup()
1548 igc->igc_attach &= ~IGC_ATTACH_LED; in igc_cleanup()
1551 if ((igc->igc_attach & IGC_ATTACH_INTR_HANDLER) != 0) { in igc_cleanup()
1552 for (int i = 0; i < igc->igc_nintrs; i++) { in igc_cleanup()
1554 ddi_intr_remove_handler(igc->igc_intr_handles[i]); in igc_cleanup()
1556 dev_err(igc->igc_dip, CE_WARN, "failed to " in igc_cleanup()
1560 igc->igc_attach &= ~IGC_ATTACH_INTR_HANDLER; in igc_cleanup()
1563 if (igc->igc_tx_rings != NULL) { in igc_cleanup()
1564 for (uint32_t i = 0; i < igc->igc_ntx_rings; i++) { in igc_cleanup()
1565 igc_tx_ring_stats_fini(&igc->igc_tx_rings[i]); in igc_cleanup()
1566 mutex_destroy(&igc->igc_tx_rings[i].itr_lock); in igc_cleanup()
1568 kmem_free(igc->igc_tx_rings, sizeof (igc_tx_ring_t) * in igc_cleanup()
1569 igc->igc_ntx_rings); in igc_cleanup()
1570 igc->igc_tx_rings = NULL; in igc_cleanup()
1573 if (igc->igc_rx_rings != NULL) { in igc_cleanup()
1574 for (uint32_t i = 0; i < igc->igc_nrx_rings; i++) { in igc_cleanup()
1575 igc_rx_ring_stats_fini(&igc->igc_rx_rings[i]); in igc_cleanup()
1576 cv_destroy(&igc->igc_rx_rings[i].irr_free_cv); in igc_cleanup()
1577 mutex_destroy(&igc->igc_rx_rings[i].irr_free_lock); in igc_cleanup()
1578 mutex_destroy(&igc->igc_rx_rings[i].irr_lock); in igc_cleanup()
1580 kmem_free(igc->igc_rx_rings, sizeof (igc_rx_ring_t) * in igc_cleanup()
1581 igc->igc_nrx_rings); in igc_cleanup()
1582 igc->igc_rx_rings = NULL; in igc_cleanup()
1585 if ((igc->igc_attach & IGC_ATTACH_MUTEX) != 0) { in igc_cleanup()
1586 mutex_destroy(&igc->igc_lock); in igc_cleanup()
1587 igc->igc_attach &= ~IGC_ATTACH_MUTEX; in igc_cleanup()
1590 if ((igc->igc_attach & IGC_ATTACH_INTR_ALLOC) != 0) { in igc_cleanup()
1591 for (int i = 0; i < igc->igc_nintrs; i++) { in igc_cleanup()
1592 int ret = ddi_intr_free(igc->igc_intr_handles[i]); in igc_cleanup()
1594 dev_err(igc->igc_dip, CE_WARN, "unexpected " in igc_cleanup()
1598 igc->igc_attach &= ~IGC_ATTACH_INTR_ALLOC; in igc_cleanup()
1601 if (igc->igc_intr_handles != NULL) { in igc_cleanup()
1602 ASSERT3U(igc->igc_intr_size, !=, 0); in igc_cleanup()
1603 kmem_free(igc->igc_intr_handles, igc->igc_intr_size); in igc_cleanup()
1612 if (igc->igc_regs_hdl != NULL) { in igc_cleanup()
1613 ddi_regs_map_free(&igc->igc_regs_hdl); in igc_cleanup()
1614 igc->igc_regs_base = NULL; in igc_cleanup()
1617 if (igc->igc_cfgspace != NULL) { in igc_cleanup()
1618 pci_config_teardown(&igc->igc_cfgspace); in igc_cleanup()
1620 igc->igc_attach &= ~IGC_ATTACH_REGS; in igc_cleanup()
1622 ddi_set_driver_private(igc->igc_dip, NULL); in igc_cleanup()
1623 igc->igc_dip = NULL; in igc_cleanup()
1625 VERIFY0(igc->igc_attach); in igc_cleanup()
1641 igc->igc_dip = dip; in igc_attach()
1644 * Initialize a few members that are not zero-based. in igc_attach()
1646 igc->igc_link_duplex = LINK_DUPLEX_UNKNOWN; in igc_attach()
1647 igc->igc_link_state = LINK_STATE_UNKNOWN; in igc_attach()
1655 igc->igc_attach |= IGC_ATTACH_REGS; in igc_attach()
1676 * Initialize our main mutex for the device now that we have an in igc_attach()
1679 mutex_init(&igc->igc_lock, NULL, MUTEX_DRIVER, in igc_attach()
1680 DDI_INTR_PRI(igc->igc_intr_pri)); in igc_attach()
1681 igc->igc_attach |= IGC_ATTACH_MUTEX; in igc_attach()
1684 * We now want to determine the total number of rx and tx rings that we in igc_attach()
1686 * perform the rest of the device setup that is required. The various in igc_attach()
1687 * queues that we have are mapped to a given MSI-X through the IVAR in igc_attach()
1688 * registers in the device. There is also an IVAR_MISC register that in igc_attach()
1691 * There isn't strictly per-queue interrupt generation control. Instead, in igc_attach()
1692 * when in MSI-X mode, the device has an extended interrupt cause and in igc_attach()
1697 * use to 2 for now: 1 for tx and 1 for rx. Interrupt 0 is for tx/other in igc_attach()
1700 igc->igc_nrx_rings = 1; in igc_attach()
1701 igc->igc_ntx_rings = 1; in igc_attach()
1706 igc->igc_mtu = ETHERMTU; in igc_attach()
1712 * 1/3rd of them. We allocate an even number of tx descriptors. in igc_attach()
1714 igc->igc_rx_ndesc = IGC_DEF_RX_RING_SIZE; in igc_attach()
1715 igc->igc_tx_ndesc = IGC_DEF_TX_RING_SIZE; in igc_attach()
1716 igc->igc_rx_nbuf = igc->igc_rx_ndesc + (igc->igc_rx_ndesc >> 1); in igc_attach()
1717 igc->igc_tx_nbuf = igc->igc_tx_ndesc; in igc_attach()
1718 igc->igc_rx_nfree = igc->igc_rx_nbuf - igc->igc_rx_ndesc; in igc_attach()
1719 igc->igc_rx_intr_nframes = IGC_DEF_RX_RING_INTR_LIMIT; in igc_attach()
1720 igc->igc_rx_bind_thresh = IGC_DEF_RX_BIND; in igc_attach()
1721 igc->igc_tx_bind_thresh = IGC_DEF_TX_BIND; in igc_attach()
1722 igc->igc_tx_notify_thresh = IGC_DEF_TX_NOTIFY_MIN; in igc_attach()
1723 igc->igc_tx_recycle_thresh = IGC_DEF_TX_RECYCLE_MIN; in igc_attach()
1724 igc->igc_tx_gap = IGC_DEF_TX_GAP; in igc_attach()
1725 igc->igc_eitr = IGC_DEF_EITR; in igc_attach()
1734 igc->igc_attach |= IGC_ATTACH_INTR_HANDLER; in igc_attach()
1737 * Next reset the device before we begin initializing anything else. As in igc_attach()
1739 * initialization that we would only do once per device. Other in igc_attach()
1748 igc->igc_attach |= IGC_ATTACH_LED; in igc_attach()
1752 * device. We start with always enabling auto-negotiation and in igc_attach()
1755 * doesn't maintain a proper inter-packet gap. Despite that, we default in igc_attach()
1761 igc->igc_hw.mac.autoneg = true; in igc_attach()
1762 igc->igc_hw.phy.autoneg_wait_to_complete = false; in igc_attach()
1763 igc->igc_hw.phy.autoneg_advertised = IGC_DEFAULT_ADV; in igc_attach()
1764 igc->igc_hw.fc.requested_mode = igc_fc_default; in igc_attach()
1765 igc->igc_hw.fc.current_mode = igc_fc_default; in igc_attach()
1774 igc->igc_attach |= IGC_ATTACH_STATS; in igc_attach()
1782 igc->igc_attach |= IGC_ATTACH_MAC; in igc_attach()
1790 igc->igc_attach |= IGC_ATTACH_INTR_EN; in igc_attach()