xref: /freebsd/sys/dev/igc/igc_base.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <sys/cdefs.h>
#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"

/**
 *  igc_acquire_phy_base - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  Acquire access rights to the correct PHY.
 **/
s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_acquire_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 *  igc_release_phy_base - Release rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to release access rights to the correct PHY.
 **/
void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_release_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}
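
/*
 * Illustrative usage sketch (not part of the original driver): callers are
 * expected to bracket low-level PHY register access with the acquire/release
 * wrappers above so that software and firmware do not contend for the PHY
 * semaphore.  The helper below is hypothetical; it assumes the non-acquiring
 * igc_read_phy_reg_mdic() routine and the PHY_CONTROL offset from the igc
 * headers.
 *
 *	static s32 my_read_phy_control(struct igc_hw *hw, u16 *data)
 *	{
 *		s32 ret_val;
 *
 *		ret_val = igc_acquire_phy_base(hw);
 *		if (ret_val)
 *			return ret_val;
 *
 *		ret_val = igc_read_phy_reg_mdic(hw, PHY_CONTROL, data);
 *		igc_release_phy_base(hw);
 *
 *		return ret_val;
 *	}
 */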

/**
 *  igc_init_hw_base - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This initializes the hardware, readying it for operation.
 **/
s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("igc_init_hw_base");

	/* Setup the receive address */
	igc_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base_generic(hw);

	return ret_val;
}
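
/*
 * Illustrative call sequence (not part of the original driver): during
 * attach, a driver typically resets the MAC and then runs this routine
 * through the mac ops table, checking the result before bringing the
 * interface up.  A minimal sketch, assuming the reset_hw/init_hw methods
 * have already been populated:
 *
 *	hw->mac.ops.reset_hw(hw);
 *	if (hw->mac.ops.init_hw(hw) != IGC_SUCCESS)
 *		DEBUGOUT("Hardware initialization failed\n");
 */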

/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * Remove the link when the PHY is powered down to save power, when link is
 * turned off during a driver unload, or when wake on LAN is not enabled.
 **/
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;

	if (!(phy->ops.check_reset_block))
		return;

	/* If the management interface is not enabled, then power down */
	if (phy->ops.check_reset_block(hw))
		igc_power_down_phy_copper(hw);

	return;
}

/**
 *  igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in as Rx was
 *  being enabled.
 **/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("igc_rx_fifo_flush_base");

	/* disable IPv6 options as per hardware errata */
	rfctl = IGC_READ_REG(hw, IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
		IGC_WRITE_REG(hw, IGC_RXDCTL(i),
				rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set RCTL.EN and wait 2ms so that
	 * any packet that was coming in while RCTL.EN was being set is
	 * flushed.
	 */
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = IGC_READ_REG(hw, IGC_RLPML);
	IGC_WRITE_REG(hw, IGC_RLPML, 0);

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	IGC_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable the Rx queues that were previously enabled and restore our
	 * previous state.
	 */
	for (i = 0; i < 4; i++)
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);

	IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	/* Flush receive errors generated by this workaround */
	IGC_READ_REG(hw, IGC_ROC);
	IGC_READ_REG(hw, IGC_RNBC);
	IGC_READ_REG(hw, IGC_MPC);
}
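
/*
 * Illustrative call site (not part of the original driver): the flush above
 * is meant to run right after receives are enabled, so that stale
 * manageability traffic that slipped in while RCTL.EN was being set is
 * discarded before real traffic is processed.  A minimal sketch, assuming a
 * hypothetical bring-up helper:
 *
 *	static void my_rx_enable(struct igc_hw *hw)
 *	{
 *		u32 rctl = IGC_READ_REG(hw, IGC_RCTL);
 *
 *		IGC_WRITE_REG(hw, IGC_RCTL, rctl | IGC_RCTL_EN);
 *		igc_rx_fifo_flush_base(hw);
 *	}
 */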