/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"

/**
 *  igc_acquire_phy_base - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  Acquire access rights to the correct PHY.
 **/
s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_acquire_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 *  igc_release_phy_base - Release rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to release access rights to the correct PHY.
 **/
void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_release_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

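/*
 * Usage sketch (illustrative only, not part of the upstream sources): raw
 * PHY register access must be bracketed by the acquire/release pair above
 * so software and firmware do not race on the SW/FW semaphore.  A
 * hypothetical locked-read helper built on the raw MDIC accessor from
 * igc_phy.c would look like:
 *
 *	s32
 *	igc_read_phy_reg_locked_example(struct igc_hw *hw, u32 offset,
 *	    u16 *data)
 *	{
 *		s32 ret_val;
 *
 *		ret_val = igc_acquire_phy_base(hw);
 *		if (ret_val != IGC_SUCCESS)
 *			return (ret_val);
 *
 *		ret_val = igc_read_phy_reg_mdic(hw, offset, data);
 *		igc_release_phy_base(hw);
 *
 *		return (ret_val);
 *	}
 */
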
/**
 *  igc_init_hw_base - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This initializes the hardware, readying it for operation.
 **/
s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("igc_init_hw_base");

	/* Set up the receive address */
	igc_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0);

	/* Set up link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base_generic(hw);

	return ret_val;
}

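/*
 * Sketch (an assumption about the surrounding wiring, mirroring how the
 * other base routines are exposed): MAC parameter initialization installs
 * this function as the init_hw callback, so a driver's attach path would
 * reach it through the ops vector after a reset, e.g.:
 *
 *	ret_val = hw->mac.ops.reset_hw(hw);
 *	if (ret_val == IGC_SUCCESS)
 *		ret_val = hw->mac.ops.init_hw(hw);
 */
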
/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * Removes the link when the PHY is powered down to save power, or when
 * link is turned off during a driver unload and Wake on LAN is not
 * enabled.
 **/
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;

	if (phy->ops.check_reset_block == NULL)
		return;

	/* If the management interface is not enabled, then power down */
	if (phy->ops.check_reset_block(hw))
		igc_power_down_phy_copper(hw);
}

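/*
 * Sketch (an assumption, by analogy with the ops wiring above): PHY
 * parameter initialization would install this as the power-down callback,
 * letting MAC-independent code simply do:
 *
 *	if (hw->phy.ops.power_down != NULL)
 *		hw->phy.ops.power_down(hw);
 */
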
/**
 *  igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in while
 *  Rx was being enabled.
 **/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("igc_rx_fifo_flush_base");

	/* Disable IPv6 options as per hardware errata */
	rfctl = IGC_READ_REG(hw, IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
		IGC_WRITE_REG(hw, IGC_RXDCTL(i),
				rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}

	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, and RFCTL.LEF, and set RCTL.LPE so that
	 * all incoming packets are rejected.  Then set RCTL.EN and wait
	 * 2ms so that any packet that was arriving while RCTL.EN was being
	 * set is flushed.
	 */
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = IGC_READ_REG(hw, IGC_RLPML);
	IGC_WRITE_REG(hw, IGC_RLPML, 0);

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	IGC_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);

	IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	/* Flush the receive error counters bumped by this workaround
	 * (they are clear on read)
	 */
	IGC_READ_REG(hw, IGC_ROC);
	IGC_READ_REG(hw, IGC_RNBC);
	IGC_READ_REG(hw, IGC_MPC);
}
186