/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "e1000_hw.h"
#include "e1000_82575.h"
#include "e1000_mac.h"
#include "e1000_base.h"
#include "e1000_manage.h"

/**
 *  e1000_acquire_phy_base - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  Acquire access rights to the correct PHY.
 **/
s32 e1000_acquire_phy_base(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	DEBUGFUNC("e1000_acquire_phy_base");

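	/* Each PCI function owns its own PHY; pick the SW/FW semaphore bit
	 * that corresponds to this function (function 0 keeps the PHY0 mask
	 * set above).
	 */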
	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}
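
/*
 * Illustrative usage sketch (not part of the driver): callers are expected
 * to bracket PHY register access with this acquire/release pair.  The
 * E1000_SUCCESS check and the PHY access in the middle stand in for
 * whatever the caller actually does.
 *
 *	if (e1000_acquire_phy_base(hw) == E1000_SUCCESS) {
 *		... read or write PHY registers via hw->phy.ops ...
 *		e1000_release_phy_base(hw);
 *	}
 */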

/**
 *  e1000_release_phy_base - Release rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to release access rights to the correct PHY.
 **/
void e1000_release_phy_base(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	DEBUGFUNC("e1000_release_phy_base");

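	/* Same per-function semaphore selection as e1000_acquire_phy_base(). */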
	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 *  e1000_init_hw_base - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This initializes the hardware, readying it for operation.
 **/
s32 e1000_init_hw_base(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_base");

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_base_generic(hw);

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, to turn off link during a
 * driver unload, or when wake on LAN is not enabled, remove the link.
 **/
void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;

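	/* Some MAC types never install a check_reset_block handler, so bail
	 * out rather than dereference a NULL function pointer below.
	 */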
	if (!(phy->ops.check_reset_block))
		return;

	/* If the management interface is not enabled, then power down */
	if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

/**
 *  e1000_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in as Rx was
 *  being enabled.
 **/
void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("e1000_rx_fifo_flush_base");

	/* disable IPv6 options as per hardware errata */
	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

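	/* The flush workaround below is only needed on 82575 parts with
	 * manageability (TCO) receive enabled; for everything else the
	 * RFCTL errata write above is sufficient.
	 */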
	if (hw->mac.type != e1000_82575 ||
	    !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
				rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in while RCTL.EN was being set is flushed.
	 */
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = E1000_READ_REG(hw, E1000_RLPML);
	E1000_WRITE_REG(hw, E1000_RLPML, 0);

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	E1000_READ_REG(hw, E1000_ROC);
	E1000_READ_REG(hw, E1000_RNBC);
	E1000_READ_REG(hw, E1000_MPC);
}
222