/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *      engineered documentation written by Carl-Daniel Hailfinger
 *      and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
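
/*
 * These DEV_* flags are OR'ed together in each PCI device table entry's
 * driver_data and tested at runtime through np->driver_data.
 */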

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
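
/*
 * A descriptor's flaglen word packs the status flags into its high bits
 * and the buffer/packet length into its low bits; the LEN_MASK values
 * above extract the length part (see nv_descr_getlength()).
 */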

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBTRACT1		(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2

#define NV_TX_LIMIT_COUNT     16

#define NV_DYNAMIC_THRESHOLD        4
#define NV_DYNAMIC_MAX_QUIET_COUNT  2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets; /* should be ifconfig->rx_packets */
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
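
/*
 * The counts follow from the layout of struct nv_ethtool_stats above:
 * V3 covers every u64 member, V2 omits the 3 version-3 members, and V1
 * additionally omits the 6 version-2 members.
 */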

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */
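
/*
 * Illustrative reader pattern for the software stats (a sketch only --
 * the exact u64_stats fetch variant depends on the kernel version):
 *
 *	do {
 *		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
 *		storage->rx_packets = np->stat_rx_packets;
 *		storage->rx_bytes   = np->stat_rx_bytes;
 *	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
 *
 * The single writer on each path brackets its updates with
 * u64_stats_update_begin()/u64_stats_update_end() on the same syncp.
 */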

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be throughput mode, cpu mode, or dynamic mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 * Dynamic Mode: The driver switches between throughput and cpu mode
 * based on load (see NV_DYNAMIC_THRESHOLD and quiet_count).
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
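/*
 * Worked example: for a 1 ms interval, time_in_micro_secs = 1000, so
 * the register value is (1000 * 100) / (2^10) = 97 -- matching the
 * NVREG_POLL_DEFAULT comment next to NvRegPollingInterval above.
 */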
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

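/*
 * reg_delay: poll the register at @offset in steps of @delay usec until
 * (value & @mask) == @target, giving up after @delaymax usec.
 * Returns 0 on success, 1 on timeout.
 */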
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

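/*
 * Split a dma_addr_t into the 32-bit halves expected by the
 * ...PhysAddr/...PhysAddrHigh registers. dma_high() shifts in two steps
 * (>>31>>1) so the expression stays well-defined even when dma_addr_t
 * is only 32 bits wide, where a plain >>32 would be undefined.
 */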
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}

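/*
 * The rx and tx descriptor rings share one contiguous DMA allocation,
 * rx ring first; hence the tx ring base programmed below is
 * np->ring_addr plus the size of the rx ring (see also free_rings()).
 */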
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}

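/*
 * True only when MSI-X is enabled with more than one vector, i.e. when
 * dedicated RX, TX and OTHER vectors are in use (see nv_enable_irq());
 * legacy, MSI and single-vector MSI-X setups share one interrupt.
 */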
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
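
/*
 * Usage sketch: pass MII_READ as the value to read, e.g.
 *
 *	val = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *
 * which returns the register contents or -1 on failure; a write returns
 * 0 on success and -1 on failure, so callers test the return value for
 * truth to detect errors.
 */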
1175 
1176 static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1177 {
1178 	struct fe_priv *np = netdev_priv(dev);
1179 	u32 miicontrol;
1180 	unsigned int tries = 0;
1181 
1182 	miicontrol = BMCR_RESET | bmcr_setup;
1183 	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1184 		return -1;
1185 
1186 	/* wait for 500ms */
1187 	msleep(500);
1188 
1189 	/* must wait till reset is deasserted */
1190 	while (miicontrol & BMCR_RESET) {
1191 		usleep_range(10000, 20000);
1192 		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1193 		/* FIXME: 100 tries seem excessive */
1194 		if (tries++ > 100)
1195 			return -1;
1196 	}
1197 	return 0;
1198 }
1199 
1200 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1201 {
1202 	static const struct {
1203 		int reg;
1204 		int init;
1205 	} ri[] = {
1206 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1207 		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1208 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1209 		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1210 		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1211 		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1212 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1213 	};
1214 	int i;
1215 
1216 	for (i = 0; i < ARRAY_SIZE(ri); i++) {
1217 		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1218 			return PHY_ERROR;
1219 	}
1220 
1221 	return 0;
1222 }
1223 
1224 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1225 {
1226 	u32 reg;
1227 	u8 __iomem *base = get_hwbase(dev);
1228 	u32 powerstate = readl(base + NvRegPowerState2);
1229 
1230 	/* need to perform hw phy reset */
1231 	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1232 	writel(powerstate, base + NvRegPowerState2);
1233 	msleep(25);
1234 
1235 	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1236 	writel(powerstate, base + NvRegPowerState2);
1237 	msleep(25);
1238 
1239 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1240 	reg |= PHY_REALTEK_INIT9;
1241 	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1242 		return PHY_ERROR;
1243 	if (mii_rw(dev, np->phyaddr,
1244 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1245 		return PHY_ERROR;
1246 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1247 	if (!(reg & PHY_REALTEK_INIT11)) {
1248 		reg |= PHY_REALTEK_INIT11;
1249 		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1250 			return PHY_ERROR;
1251 	}
1252 	if (mii_rw(dev, np->phyaddr,
1253 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1254 		return PHY_ERROR;
1255 
1256 	return 0;
1257 }
1258 
1259 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1260 {
1261 	u32 phy_reserved;
1262 
1263 	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1264 		phy_reserved = mii_rw(dev, np->phyaddr,
1265 				      PHY_REALTEK_INIT_REG6, MII_READ);
1266 		phy_reserved |= PHY_REALTEK_INIT7;
1267 		if (mii_rw(dev, np->phyaddr,
1268 			   PHY_REALTEK_INIT_REG6, phy_reserved))
1269 			return PHY_ERROR;
1270 	}
1271 
1272 	return 0;
1273 }
1274 
1275 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1276 {
1277 	u32 phy_reserved;
1278 
1279 	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1280 		if (mii_rw(dev, np->phyaddr,
1281 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1282 			return PHY_ERROR;
1283 		phy_reserved = mii_rw(dev, np->phyaddr,
1284 				      PHY_REALTEK_INIT_REG2, MII_READ);
1285 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1286 		phy_reserved |= PHY_REALTEK_INIT3;
1287 		if (mii_rw(dev, np->phyaddr,
1288 			   PHY_REALTEK_INIT_REG2, phy_reserved))
1289 			return PHY_ERROR;
1290 		if (mii_rw(dev, np->phyaddr,
1291 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1292 			return PHY_ERROR;
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 static int init_cicada(struct net_device *dev, struct fe_priv *np,
1299 		       u32 phyinterface)
1300 {
1301 	u32 phy_reserved;
1302 
1303 	if (phyinterface & PHY_RGMII) {
1304 		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1305 		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1306 		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1307 		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1308 			return PHY_ERROR;
1309 		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1310 		phy_reserved |= PHY_CICADA_INIT5;
1311 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1312 			return PHY_ERROR;
1313 	}
1314 	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1315 	phy_reserved |= PHY_CICADA_INIT6;
1316 	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1317 		return PHY_ERROR;
1318 
1319 	return 0;
1320 }
1321 
1322 static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1323 {
1324 	u32 phy_reserved;
1325 
1326 	if (mii_rw(dev, np->phyaddr,
1327 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1328 		return PHY_ERROR;
1329 	if (mii_rw(dev, np->phyaddr,
1330 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1331 		return PHY_ERROR;
1332 	phy_reserved = mii_rw(dev, np->phyaddr,
1333 			      PHY_VITESSE_INIT_REG4, MII_READ);
1334 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1335 		return PHY_ERROR;
1336 	phy_reserved = mii_rw(dev, np->phyaddr,
1337 			      PHY_VITESSE_INIT_REG3, MII_READ);
1338 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1339 	phy_reserved |= PHY_VITESSE_INIT3;
1340 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1341 		return PHY_ERROR;
1342 	if (mii_rw(dev, np->phyaddr,
1343 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1344 		return PHY_ERROR;
1345 	if (mii_rw(dev, np->phyaddr,
1346 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1347 		return PHY_ERROR;
1348 	phy_reserved = mii_rw(dev, np->phyaddr,
1349 			      PHY_VITESSE_INIT_REG4, MII_READ);
1350 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1351 	phy_reserved |= PHY_VITESSE_INIT3;
1352 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1353 		return PHY_ERROR;
1354 	phy_reserved = mii_rw(dev, np->phyaddr,
1355 			      PHY_VITESSE_INIT_REG3, MII_READ);
1356 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1357 		return PHY_ERROR;
1358 	if (mii_rw(dev, np->phyaddr,
1359 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1360 		return PHY_ERROR;
1361 	if (mii_rw(dev, np->phyaddr,
1362 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1363 		return PHY_ERROR;
1364 	phy_reserved = mii_rw(dev, np->phyaddr,
1365 			      PHY_VITESSE_INIT_REG4, MII_READ);
1366 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1367 		return PHY_ERROR;
1368 	phy_reserved = mii_rw(dev, np->phyaddr,
1369 			      PHY_VITESSE_INIT_REG3, MII_READ);
1370 	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1371 	phy_reserved |= PHY_VITESSE_INIT8;
1372 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1373 		return PHY_ERROR;
1374 	if (mii_rw(dev, np->phyaddr,
1375 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1376 		return PHY_ERROR;
1377 	if (mii_rw(dev, np->phyaddr,
1378 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1379 		return PHY_ERROR;
1380 
1381 	return 0;
1382 }
1383 
1384 static int phy_init(struct net_device *dev)
1385 {
1386 	struct fe_priv *np = get_nvpriv(dev);
1387 	u8 __iomem *base = get_hwbase(dev);
1388 	u32 phyinterface;
1389 	u32 mii_status, mii_control, mii_control_1000, reg;
1390 
1391 	/* phy errata for E3016 phy */
1392 	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1393 		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1394 		reg &= ~PHY_MARVELL_E3016_INITMASK;
1395 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1396 			netdev_info(dev, "%s: phy write to errata reg failed\n",
1397 				    pci_name(np->pci_dev));
1398 			return PHY_ERROR;
1399 		}
1400 	}
1401 	if (np->phy_oui == PHY_OUI_REALTEK) {
1402 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1403 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1404 			if (init_realtek_8211b(dev, np)) {
1405 				netdev_info(dev, "%s: phy init failed\n",
1406 					    pci_name(np->pci_dev));
1407 				return PHY_ERROR;
1408 			}
1409 		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1410 			   np->phy_rev == PHY_REV_REALTEK_8211C) {
1411 			if (init_realtek_8211c(dev, np)) {
1412 				netdev_info(dev, "%s: phy init failed\n",
1413 					    pci_name(np->pci_dev));
1414 				return PHY_ERROR;
1415 			}
1416 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1417 			if (init_realtek_8201(dev, np)) {
1418 				netdev_info(dev, "%s: phy init failed\n",
1419 					    pci_name(np->pci_dev));
1420 				return PHY_ERROR;
1421 			}
1422 		}
1423 	}
1424 
1425 	/* set advertise register */
1426 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1427 	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1428 		ADVERTISE_100HALF | ADVERTISE_100FULL |
1429 		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1430 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1431 		netdev_info(dev, "%s: phy write to advertise failed\n",
1432 			    pci_name(np->pci_dev));
1433 		return PHY_ERROR;
1434 	}
1435 
1436 	/* get phy interface type */
1437 	phyinterface = readl(base + NvRegPhyInterface);
1438 
1439 	/* see if gigabit phy */
1440 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1441 	if (mii_status & PHY_GIGABIT) {
1442 		np->gigabit = PHY_GIGABIT;
1443 		mii_control_1000 = mii_rw(dev, np->phyaddr,
1444 					  MII_CTRL1000, MII_READ);
1445 		mii_control_1000 &= ~ADVERTISE_1000HALF;
1446 		if (phyinterface & PHY_RGMII)
1447 			mii_control_1000 |= ADVERTISE_1000FULL;
1448 		else
1449 			mii_control_1000 &= ~ADVERTISE_1000FULL;
1450 
1451 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1452 			netdev_info(dev, "%s: phy init failed\n",
1453 				    pci_name(np->pci_dev));
1454 			return PHY_ERROR;
1455 		}
1456 	} else
1457 		np->gigabit = 0;
1458 
1459 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1460 	mii_control |= BMCR_ANENABLE;
1461 
1462 	if (np->phy_oui == PHY_OUI_REALTEK &&
1463 	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
1464 	    np->phy_rev == PHY_REV_REALTEK_8211C) {
1465 		/* start autoneg since we already performed hw reset above */
1466 		mii_control |= BMCR_ANRESTART;
1467 		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1468 			netdev_info(dev, "%s: phy init failed\n",
1469 				    pci_name(np->pci_dev));
1470 			return PHY_ERROR;
1471 		}
1472 	} else {
1473 		/* reset the phy
1474 		 * (certain phys need bmcr to be setup with reset)
1475 		 */
1476 		if (phy_reset(dev, mii_control)) {
1477 			netdev_info(dev, "%s: phy reset failed\n",
1478 				    pci_name(np->pci_dev));
1479 			return PHY_ERROR;
1480 		}
1481 	}
1482 
1483 	/* phy vendor specific configuration */
1484 	if (np->phy_oui == PHY_OUI_CICADA) {
1485 		if (init_cicada(dev, np, phyinterface)) {
1486 			netdev_info(dev, "%s: phy init failed\n",
1487 				    pci_name(np->pci_dev));
1488 			return PHY_ERROR;
1489 		}
1490 	} else if (np->phy_oui == PHY_OUI_VITESSE) {
1491 		if (init_vitesse(dev, np)) {
1492 			netdev_info(dev, "%s: phy init failed\n",
1493 				    pci_name(np->pci_dev));
1494 			return PHY_ERROR;
1495 		}
1496 	} else if (np->phy_oui == PHY_OUI_REALTEK) {
1497 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1498 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1499 			/* reset could have cleared these out, set them back */
1500 			if (init_realtek_8211b(dev, np)) {
1501 				netdev_info(dev, "%s: phy init failed\n",
1502 					    pci_name(np->pci_dev));
1503 				return PHY_ERROR;
1504 			}
1505 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1506 			if (init_realtek_8201(dev, np) ||
1507 			    init_realtek_8201_cross(dev, np)) {
1508 				netdev_info(dev, "%s: phy init failed\n",
1509 					    pci_name(np->pci_dev));
1510 				return PHY_ERROR;
1511 			}
1512 		}
1513 	}
1514 
1515 	/* some phys clear out pause advertisement on reset, set it back */
1516 	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1517 
1518 	/* restart auto negotiation, power down phy */
1519 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1520 	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1521 	if (phy_power_down)
1522 		mii_control |= BMCR_PDOWN;
1523 	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1524 		return PHY_ERROR;
1525 
1526 	return 0;
1527 }
1528 
1529 static void nv_start_rx(struct net_device *dev)
1530 {
1531 	struct fe_priv *np = netdev_priv(dev);
1532 	u8 __iomem *base = get_hwbase(dev);
1533 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1534 
1535 	/* Already running? Stop it. */
1536 	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1537 		rx_ctrl &= ~NVREG_RCVCTL_START;
1538 		writel(rx_ctrl, base + NvRegReceiverControl);
1539 		pci_push(base);
1540 	}
1541 	writel(np->linkspeed, base + NvRegLinkSpeed);
1542 	pci_push(base);
1543 	rx_ctrl |= NVREG_RCVCTL_START;
1544 	if (np->mac_in_use)
1545 		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1546 	writel(rx_ctrl, base + NvRegReceiverControl);
1547 	pci_push(base);
1548 }
1549 
1550 static void nv_stop_rx(struct net_device *dev)
1551 {
1552 	struct fe_priv *np = netdev_priv(dev);
1553 	u8 __iomem *base = get_hwbase(dev);
1554 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1555 
1556 	if (!np->mac_in_use)
1557 		rx_ctrl &= ~NVREG_RCVCTL_START;
1558 	else
1559 		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1560 	writel(rx_ctrl, base + NvRegReceiverControl);
1561 	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1562 		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1563 		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1564 			    __func__);
1565 
1566 	udelay(NV_RXSTOP_DELAY2);
1567 	if (!np->mac_in_use)
1568 		writel(0, base + NvRegLinkSpeed);
1569 }
1570 
1571 static void nv_start_tx(struct net_device *dev)
1572 {
1573 	struct fe_priv *np = netdev_priv(dev);
1574 	u8 __iomem *base = get_hwbase(dev);
1575 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1576 
1577 	tx_ctrl |= NVREG_XMITCTL_START;
1578 	if (np->mac_in_use)
1579 		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1580 	writel(tx_ctrl, base + NvRegTransmitterControl);
1581 	pci_push(base);
1582 }
1583 
1584 static void nv_stop_tx(struct net_device *dev)
1585 {
1586 	struct fe_priv *np = netdev_priv(dev);
1587 	u8 __iomem *base = get_hwbase(dev);
1588 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1589 
1590 	if (!np->mac_in_use)
1591 		tx_ctrl &= ~NVREG_XMITCTL_START;
1592 	else
1593 		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1594 	writel(tx_ctrl, base + NvRegTransmitterControl);
1595 	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1596 		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1597 		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1598 			    __func__);
1599 
1600 	udelay(NV_TXSTOP_DELAY2);
1601 	if (!np->mac_in_use)
1602 		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1603 		       base + NvRegTransmitPoll);
1604 }
1605 
1606 static void nv_start_rxtx(struct net_device *dev)
1607 {
1608 	nv_start_rx(dev);
1609 	nv_start_tx(dev);
1610 }
1611 
1612 static void nv_stop_rxtx(struct net_device *dev)
1613 {
1614 	nv_stop_rx(dev);
1615 	nv_stop_tx(dev);
1616 }
1617 
1618 static void nv_txrx_reset(struct net_device *dev)
1619 {
1620 	struct fe_priv *np = netdev_priv(dev);
1621 	u8 __iomem *base = get_hwbase(dev);
1622 
1623 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1624 	pci_push(base);
1625 	udelay(NV_TXRX_RESET_DELAY);
1626 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1627 	pci_push(base);
1628 }
1629 
1630 static void nv_mac_reset(struct net_device *dev)
1631 {
1632 	struct fe_priv *np = netdev_priv(dev);
1633 	u8 __iomem *base = get_hwbase(dev);
1634 	u32 temp1, temp2, temp3;
1635 
1636 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1637 	pci_push(base);
1638 
1639 	/* save registers since they will be cleared on reset */
1640 	temp1 = readl(base + NvRegMacAddrA);
1641 	temp2 = readl(base + NvRegMacAddrB);
1642 	temp3 = readl(base + NvRegTransmitPoll);
1643 
1644 	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1645 	pci_push(base);
1646 	udelay(NV_MAC_RESET_DELAY);
1647 	writel(0, base + NvRegMacReset);
1648 	pci_push(base);
1649 	udelay(NV_MAC_RESET_DELAY);
1650 
1651 	/* restore saved registers */
1652 	writel(temp1, base + NvRegMacAddrA);
1653 	writel(temp2, base + NvRegMacAddrB);
1654 	writel(temp3, base + NvRegTransmitPoll);
1655 
1656 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1657 	pci_push(base);
1658 }
1659 
1660 /* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1661 static void nv_update_stats(struct net_device *dev)
1662 {
1663 	struct fe_priv *np = netdev_priv(dev);
1664 	u8 __iomem *base = get_hwbase(dev);
1665 
1666 	/* If it happens that this is run in top-half context, then
1667 	 * replace the spin_lock of hwstats_lock with
1668 	 * spin_lock_irqsave() in calling functions. */
1669 	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
1670 	assert_spin_locked(&np->hwstats_lock);
1671 
1672 	/* query hardware */
1673 	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1674 	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1675 	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1676 	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1677 	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1678 	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1679 	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1680 	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1681 	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1682 	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1683 	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1684 	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1685 	np->estats.rx_runt += readl(base + NvRegRxRunt);
1686 	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1687 	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1688 	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1689 	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1690 	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1691 	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1692 	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1693 	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1694 	np->estats.rx_packets =
1695 		np->estats.rx_unicast +
1696 		np->estats.rx_multicast +
1697 		np->estats.rx_broadcast;
1698 	np->estats.rx_errors_total =
1699 		np->estats.rx_crc_errors +
1700 		np->estats.rx_over_errors +
1701 		np->estats.rx_frame_error +
1702 		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1703 		np->estats.rx_late_collision +
1704 		np->estats.rx_runt +
1705 		np->estats.rx_frame_too_long;
1706 	np->estats.tx_errors_total =
1707 		np->estats.tx_late_collision +
1708 		np->estats.tx_fifo_errors +
1709 		np->estats.tx_carrier_errors +
1710 		np->estats.tx_excess_deferral +
1711 		np->estats.tx_retry_error;
1712 
1713 	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1714 		np->estats.tx_deferral += readl(base + NvRegTxDef);
1715 		np->estats.tx_packets += readl(base + NvRegTxFrame);
1716 		np->estats.rx_bytes += readl(base + NvRegRxCnt);
1717 		np->estats.tx_pause += readl(base + NvRegTxPause);
1718 		np->estats.rx_pause += readl(base + NvRegRxPause);
1719 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1720 		np->estats.rx_errors_total += np->estats.rx_drop_frame;
1721 	}
1722 
1723 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1724 		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1725 		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1726 		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1727 	}
1728 }
1729 
1730 /*
1731  * nv_get_stats64: dev->ndo_get_stats64 function
1732  * Get latest stats value from the nic.
1733  * Called with read_lock(&dev_base_lock) held for read -
1734  * only synchronized against unregister_netdevice.
1735  */
1736 static void
1737 nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1738 	__acquires(&netdev_priv(dev)->hwstats_lock)
1739 	__releases(&netdev_priv(dev)->hwstats_lock)
1740 {
1741 	struct fe_priv *np = netdev_priv(dev);
1742 	unsigned int syncp_start;
1743 
1744 	/*
1745 	 * Note: because HW stats are not always available and for
1746 	 * consistency reasons, the following ifconfig stats are
1747 	 * managed by software: rx_bytes, tx_bytes, rx_packets and
1748 	 * tx_packets. The related hardware stats reported by ethtool
1749 	 * should be equivalent to these ifconfig stats, with 4
1750 	 * additional bytes per packet (Ethernet FCS CRC), except for
1751 	 * tx_packets when TSO kicks in.
1752 	 */
1753 
1754 	/* software stats */
1755 	do {
1756 		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1757 		storage->rx_packets       = np->stat_rx_packets;
1758 		storage->rx_bytes         = np->stat_rx_bytes;
1759 		storage->rx_dropped       = np->stat_rx_dropped;
1760 		storage->rx_missed_errors = np->stat_rx_missed_errors;
1761 	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1762 
1763 	do {
1764 		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1765 		storage->tx_packets = np->stat_tx_packets;
1766 		storage->tx_bytes   = np->stat_tx_bytes;
1767 		storage->tx_dropped = np->stat_tx_dropped;
1768 	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1769 
1770 	/* If the nic supports hw counters then retrieve latest values */
1771 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1772 		spin_lock_bh(&np->hwstats_lock);
1773 
1774 		nv_update_stats(dev);
1775 
1776 		/* generic stats */
1777 		storage->rx_errors = np->estats.rx_errors_total;
1778 		storage->tx_errors = np->estats.tx_errors_total;
1779 
1780 		/* meaningful only when NIC supports stats v3 */
1781 		storage->multicast = np->estats.rx_multicast;
1782 
1783 		/* detailed rx_errors */
1784 		storage->rx_length_errors = np->estats.rx_length_error;
1785 		storage->rx_over_errors   = np->estats.rx_over_errors;
1786 		storage->rx_crc_errors    = np->estats.rx_crc_errors;
1787 		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
1788 		storage->rx_fifo_errors   = np->estats.rx_drop_frame;
1789 
1790 		/* detailed tx_errors */
1791 		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1792 		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
1793 
1794 		spin_unlock_bh(&np->hwstats_lock);
1795 	}
1796 }
1797 
1798 /*
1799  * nv_alloc_rx: fill rx ring entries.
1800  * Return 1 if the allocations for the skbs failed and the
1801  * rx engine is without Available descriptors
1802  */
1803 static int nv_alloc_rx(struct net_device *dev)
1804 {
1805 	struct fe_priv *np = netdev_priv(dev);
1806 	struct ring_desc *less_rx;
1807 
1808 	less_rx = np->get_rx.orig;
1809 	if (less_rx-- == np->first_rx.orig)
1810 		less_rx = np->last_rx.orig;
1811 
1812 	while (np->put_rx.orig != less_rx) {
1813 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1814 		if (skb) {
1815 			np->put_rx_ctx->skb = skb;
1816 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
1817 							     skb->data,
1818 							     skb_tailroom(skb),
1819 							     DMA_FROM_DEVICE);
1820 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
1821 						       np->put_rx_ctx->dma))) {
1822 				kfree_skb(skb);
1823 				goto packet_dropped;
1824 			}
1825 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1826 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1827 			wmb();
1828 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1829 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1830 				np->put_rx.orig = np->first_rx.orig;
1831 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1832 				np->put_rx_ctx = np->first_rx_ctx;
1833 		} else {
1834 packet_dropped:
1835 			u64_stats_update_begin(&np->swstats_rx_syncp);
1836 			np->stat_rx_dropped++;
1837 			u64_stats_update_end(&np->swstats_rx_syncp);
1838 			return 1;
1839 		}
1840 	}
1841 	return 0;
1842 }
1843 
1844 static int nv_alloc_rx_optimized(struct net_device *dev)
1845 {
1846 	struct fe_priv *np = netdev_priv(dev);
1847 	struct ring_desc_ex *less_rx;
1848 
1849 	less_rx = np->get_rx.ex;
1850 	if (less_rx-- == np->first_rx.ex)
1851 		less_rx = np->last_rx.ex;
1852 
1853 	while (np->put_rx.ex != less_rx) {
1854 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1855 		if (skb) {
1856 			np->put_rx_ctx->skb = skb;
1857 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
1858 							     skb->data,
1859 							     skb_tailroom(skb),
1860 							     DMA_FROM_DEVICE);
1861 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
1862 						       np->put_rx_ctx->dma))) {
1863 				kfree_skb(skb);
1864 				goto packet_dropped;
1865 			}
1866 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1867 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1868 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1869 			wmb();
1870 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1871 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1872 				np->put_rx.ex = np->first_rx.ex;
1873 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1874 				np->put_rx_ctx = np->first_rx_ctx;
1875 		} else {
1876 packet_dropped:
1877 			u64_stats_update_begin(&np->swstats_rx_syncp);
1878 			np->stat_rx_dropped++;
1879 			u64_stats_update_end(&np->swstats_rx_syncp);
1880 			return 1;
1881 		}
1882 	}
1883 	return 0;
1884 }
1885 
/* If rx bufs are exhausted, this timer fires after 50ms to retry the refill */
1887 static void nv_do_rx_refill(struct timer_list *t)
1888 {
1889 	struct fe_priv *np = from_timer(np, t, oom_kick);
1890 
1891 	/* Just reschedule NAPI rx processing */
1892 	napi_schedule(&np->napi);
1893 }
1894 
1895 static void nv_init_rx(struct net_device *dev)
1896 {
1897 	struct fe_priv *np = netdev_priv(dev);
1898 	int i;
1899 
1900 	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1901 
1902 	if (!nv_optimized(np))
1903 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1904 	else
1905 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1906 	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1907 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1908 
1909 	for (i = 0; i < np->rx_ring_size; i++) {
1910 		if (!nv_optimized(np)) {
1911 			np->rx_ring.orig[i].flaglen = 0;
1912 			np->rx_ring.orig[i].buf = 0;
1913 		} else {
1914 			np->rx_ring.ex[i].flaglen = 0;
1915 			np->rx_ring.ex[i].txvlan = 0;
1916 			np->rx_ring.ex[i].bufhigh = 0;
1917 			np->rx_ring.ex[i].buflow = 0;
1918 		}
1919 		np->rx_skb[i].skb = NULL;
1920 		np->rx_skb[i].dma = 0;
1921 	}
1922 }
1923 
1924 static void nv_init_tx(struct net_device *dev)
1925 {
1926 	struct fe_priv *np = netdev_priv(dev);
1927 	int i;
1928 
1929 	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1930 
1931 	if (!nv_optimized(np))
1932 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1933 	else
1934 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1935 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1936 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1937 	netdev_reset_queue(np->dev);
1938 	np->tx_pkts_in_progress = 0;
1939 	np->tx_change_owner = NULL;
1940 	np->tx_end_flip = NULL;
1941 	np->tx_stop = 0;
1942 
1943 	for (i = 0; i < np->tx_ring_size; i++) {
1944 		if (!nv_optimized(np)) {
1945 			np->tx_ring.orig[i].flaglen = 0;
1946 			np->tx_ring.orig[i].buf = 0;
1947 		} else {
1948 			np->tx_ring.ex[i].flaglen = 0;
1949 			np->tx_ring.ex[i].txvlan = 0;
1950 			np->tx_ring.ex[i].bufhigh = 0;
1951 			np->tx_ring.ex[i].buflow = 0;
1952 		}
1953 		np->tx_skb[i].skb = NULL;
1954 		np->tx_skb[i].dma = 0;
1955 		np->tx_skb[i].dma_len = 0;
1956 		np->tx_skb[i].dma_single = 0;
1957 		np->tx_skb[i].first_tx_desc = NULL;
1958 		np->tx_skb[i].next_tx_ctx = NULL;
1959 	}
1960 }
1961 
1962 static int nv_init_ring(struct net_device *dev)
1963 {
1964 	struct fe_priv *np = netdev_priv(dev);
1965 
1966 	nv_init_tx(dev);
1967 	nv_init_rx(dev);
1968 
1969 	if (!nv_optimized(np))
1970 		return nv_alloc_rx(dev);
1971 	else
1972 		return nv_alloc_rx_optimized(dev);
1973 }
1974 
1975 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1976 {
1977 	if (tx_skb->dma) {
1978 		if (tx_skb->dma_single)
1979 			dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
1980 					 tx_skb->dma_len,
1981 					 DMA_TO_DEVICE);
1982 		else
			dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
				       tx_skb->dma_len,
				       DMA_TO_DEVICE);
1986 		tx_skb->dma = 0;
1987 	}
1988 }
1989 
1990 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1991 {
1992 	nv_unmap_txskb(np, tx_skb);
1993 	if (tx_skb->skb) {
1994 		dev_kfree_skb_any(tx_skb->skb);
1995 		tx_skb->skb = NULL;
1996 		return 1;
1997 	}
1998 	return 0;
1999 }
2000 
2001 static void nv_drain_tx(struct net_device *dev)
2002 {
2003 	struct fe_priv *np = netdev_priv(dev);
2004 	unsigned int i;
2005 
2006 	for (i = 0; i < np->tx_ring_size; i++) {
2007 		if (!nv_optimized(np)) {
2008 			np->tx_ring.orig[i].flaglen = 0;
2009 			np->tx_ring.orig[i].buf = 0;
2010 		} else {
2011 			np->tx_ring.ex[i].flaglen = 0;
2012 			np->tx_ring.ex[i].txvlan = 0;
2013 			np->tx_ring.ex[i].bufhigh = 0;
2014 			np->tx_ring.ex[i].buflow = 0;
2015 		}
2016 		if (nv_release_txskb(np, &np->tx_skb[i])) {
2017 			u64_stats_update_begin(&np->swstats_tx_syncp);
2018 			np->stat_tx_dropped++;
2019 			u64_stats_update_end(&np->swstats_tx_syncp);
2020 		}
2021 		np->tx_skb[i].dma = 0;
2022 		np->tx_skb[i].dma_len = 0;
2023 		np->tx_skb[i].dma_single = 0;
2024 		np->tx_skb[i].first_tx_desc = NULL;
2025 		np->tx_skb[i].next_tx_ctx = NULL;
2026 	}
2027 	np->tx_pkts_in_progress = 0;
2028 	np->tx_change_owner = NULL;
2029 	np->tx_end_flip = NULL;
2030 }
2031 
2032 static void nv_drain_rx(struct net_device *dev)
2033 {
2034 	struct fe_priv *np = netdev_priv(dev);
2035 	int i;
2036 
2037 	for (i = 0; i < np->rx_ring_size; i++) {
2038 		if (!nv_optimized(np)) {
2039 			np->rx_ring.orig[i].flaglen = 0;
2040 			np->rx_ring.orig[i].buf = 0;
2041 		} else {
2042 			np->rx_ring.ex[i].flaglen = 0;
2043 			np->rx_ring.ex[i].txvlan = 0;
2044 			np->rx_ring.ex[i].bufhigh = 0;
2045 			np->rx_ring.ex[i].buflow = 0;
2046 		}
2047 		wmb();
2048 		if (np->rx_skb[i].skb) {
2049 			dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
2050 					 (skb_end_pointer(np->rx_skb[i].skb) -
2051 					 np->rx_skb[i].skb->data),
2052 					 DMA_FROM_DEVICE);
2053 			dev_kfree_skb(np->rx_skb[i].skb);
2054 			np->rx_skb[i].skb = NULL;
2055 		}
2056 	}
2057 }
2058 
2059 static void nv_drain_rxtx(struct net_device *dev)
2060 {
2061 	nv_drain_tx(dev);
2062 	nv_drain_rx(dev);
2063 }
2064 
2065 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2066 {
2067 	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2068 }
2069 
2070 static void nv_legacybackoff_reseed(struct net_device *dev)
2071 {
2072 	u8 __iomem *base = get_hwbase(dev);
2073 	u32 reg;
2074 	u32 low;
2075 	int tx_status = 0;
2076 
2077 	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2078 	get_random_bytes(&low, sizeof(low));
2079 	reg |= low & NVREG_SLOTTIME_MASK;
2080 
	/* Tx must be stopped before the change takes effect.
	 * The caller has already acquired np->lock.
	 */
2084 	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2085 	if (tx_status)
2086 		nv_stop_tx(dev);
2087 	nv_stop_rx(dev);
2088 	writel(reg, base + NvRegSlotTime);
2089 	if (tx_status)
2090 		nv_start_tx(dev);
2091 	nv_start_rx(dev);
2092 }
2093 
2094 /* Gear Backoff Seeds */
2095 #define BACKOFF_SEEDSET_ROWS	8
2096 #define BACKOFF_SEEDSET_LFSRS	15
2097 
2098 /* Known Good seed sets */
2099 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2100 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2101 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2102 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2103 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2104 	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2105 	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2106 	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2107 	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2108 
2109 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2110 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2111 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2112 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2113 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2114 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2115 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2116 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2117 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2118 
2119 static void nv_gear_backoff_reseed(struct net_device *dev)
2120 {
2121 	u8 __iomem *base = get_hwbase(dev);
2122 	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2123 	u32 temp, seedset, combinedSeed;
2124 	int i;
2125 
	/* Setup seed for free running LFSR */
	/* Gather three random 12-bit values and swizzle their bits
	   around to increase randomness */
2129 	get_random_bytes(&miniseed1, sizeof(miniseed1));
2130 	miniseed1 &= 0x0fff;
2131 	if (miniseed1 == 0)
2132 		miniseed1 = 0xabc;
2133 
2134 	get_random_bytes(&miniseed2, sizeof(miniseed2));
2135 	miniseed2 &= 0x0fff;
2136 	if (miniseed2 == 0)
2137 		miniseed2 = 0xabc;
2138 	miniseed2_reversed =
2139 		((miniseed2 & 0xF00) >> 8) |
2140 		 (miniseed2 & 0x0F0) |
2141 		 ((miniseed2 & 0x00F) << 8);
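	/* e.g. miniseed2 = 0x123 gives miniseed2_reversed = 0x321:
	 * top and bottom nibbles swap, the middle nibble stays put */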
2142 
2143 	get_random_bytes(&miniseed3, sizeof(miniseed3));
2144 	miniseed3 &= 0x0fff;
2145 	if (miniseed3 == 0)
2146 		miniseed3 = 0xabc;
2147 	miniseed3_reversed =
2148 		((miniseed3 & 0xF00) >> 8) |
2149 		 (miniseed3 & 0x0F0) |
2150 		 ((miniseed3 & 0x00F) << 8);
2151 
2152 	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2153 		       (miniseed2 ^ miniseed3_reversed);
2154 
	/* Seeds cannot be zero */
2156 	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2157 		combinedSeed |= 0x08;
2158 	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2159 		combinedSeed |= 0x8000;
2160 
2161 	/* No need to disable tx here */
2162 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2163 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2164 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2165 	writel(temp, base + NvRegBackOffControl);
2166 
2167 	/* Setup seeds for all gear LFSRs. */
2168 	get_random_bytes(&seedset, sizeof(seedset));
2169 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
2170 	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2171 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2172 		temp |= main_seedset[seedset][i-1] & 0x3ff;
2173 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2174 		writel(temp, base + NvRegBackOffControl);
2175 	}
2176 }
2177 
2178 /*
2179  * nv_start_xmit: dev->hard_start_xmit function
2180  * Called with netif_tx_lock held.
2181  */
2182 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2183 {
2184 	struct fe_priv *np = netdev_priv(dev);
2185 	u32 tx_flags = 0;
2186 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2187 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2188 	unsigned int i;
2189 	u32 offset = 0;
2190 	u32 bcnt;
2191 	u32 size = skb_headlen(skb);
2192 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2193 	u32 empty_slots;
2194 	struct ring_desc *put_tx;
2195 	struct ring_desc *start_tx;
2196 	struct ring_desc *prev_tx;
2197 	struct nv_skb_map *prev_tx_ctx;
2198 	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2199 	unsigned long flags;
2200 
2201 	/* add fragments to entries count */
2202 	for (i = 0; i < fragments; i++) {
2203 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2204 
2205 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2206 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2207 	}
2208 
2209 	spin_lock_irqsave(&np->lock, flags);
2210 	empty_slots = nv_get_empty_tx_slots(np);
2211 	if (unlikely(empty_slots <= entries)) {
2212 		netif_stop_queue(dev);
2213 		np->tx_stop = 1;
2214 		spin_unlock_irqrestore(&np->lock, flags);
2215 		return NETDEV_TX_BUSY;
2216 	}
2217 	spin_unlock_irqrestore(&np->lock, flags);
2218 
2219 	start_tx = put_tx = np->put_tx.orig;
2220 
2221 	/* setup the header buffer */
2222 	do {
2223 		prev_tx = put_tx;
2224 		prev_tx_ctx = np->put_tx_ctx;
2225 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2226 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2227 						     skb->data + offset, bcnt,
2228 						     DMA_TO_DEVICE);
2229 		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2230 					       np->put_tx_ctx->dma))) {
2231 			/* on DMA mapping error - drop the packet */
2232 			dev_kfree_skb_any(skb);
2233 			u64_stats_update_begin(&np->swstats_tx_syncp);
2234 			np->stat_tx_dropped++;
2235 			u64_stats_update_end(&np->swstats_tx_syncp);
2236 			return NETDEV_TX_OK;
2237 		}
2238 		np->put_tx_ctx->dma_len = bcnt;
2239 		np->put_tx_ctx->dma_single = 1;
2240 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2241 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2242 
2243 		tx_flags = np->tx_flags;
2244 		offset += bcnt;
2245 		size -= bcnt;
2246 		if (unlikely(put_tx++ == np->last_tx.orig))
2247 			put_tx = np->first_tx.orig;
2248 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2249 			np->put_tx_ctx = np->first_tx_ctx;
2250 	} while (size);
2251 
2252 	/* setup the fragments */
2253 	for (i = 0; i < fragments; i++) {
2254 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2255 		u32 frag_size = skb_frag_size(frag);
2256 		offset = 0;
2257 
2258 		do {
2259 			prev_tx = put_tx;
2260 			prev_tx_ctx = np->put_tx_ctx;
2261 			if (!start_tx_ctx)
2262 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2263 
2264 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2265 			np->put_tx_ctx->dma = skb_frag_dma_map(
2266 							&np->pci_dev->dev,
2267 							frag, offset,
2268 							bcnt,
2269 							DMA_TO_DEVICE);
2270 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2271 						       np->put_tx_ctx->dma))) {
2272 
				/* Unwind the fragments mapped so far; the
				 * chunk that just failed was never mapped
				 */
				while (tmp_tx_ctx != np->put_tx_ctx) {
					nv_unmap_txskb(np, tmp_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
				}
2279 				dev_kfree_skb_any(skb);
2280 				np->put_tx_ctx = start_tx_ctx;
2281 				u64_stats_update_begin(&np->swstats_tx_syncp);
2282 				np->stat_tx_dropped++;
2283 				u64_stats_update_end(&np->swstats_tx_syncp);
2284 				return NETDEV_TX_OK;
2285 			}
2286 
2287 			np->put_tx_ctx->dma_len = bcnt;
2288 			np->put_tx_ctx->dma_single = 0;
2289 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2290 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2291 
2292 			offset += bcnt;
2293 			frag_size -= bcnt;
2294 			if (unlikely(put_tx++ == np->last_tx.orig))
2295 				put_tx = np->first_tx.orig;
2296 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2297 				np->put_tx_ctx = np->first_tx_ctx;
2298 		} while (frag_size);
2299 	}
2300 
2301 	/* set last fragment flag  */
2302 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2303 
2304 	/* save skb in this slot's context area */
2305 	prev_tx_ctx->skb = skb;
2306 
2307 	if (skb_is_gso(skb))
2308 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2309 	else
2310 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2311 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2312 
2313 	spin_lock_irqsave(&np->lock, flags);
2314 
2315 	/* set tx flags */
2316 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2317 
2318 	netdev_sent_queue(np->dev, skb->len);
2319 
2320 	skb_tx_timestamp(skb);
2321 
2322 	np->put_tx.orig = put_tx;
2323 
2324 	spin_unlock_irqrestore(&np->lock, flags);
2325 
2326 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2327 	return NETDEV_TX_OK;
2328 }
2329 
2330 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2331 					   struct net_device *dev)
2332 {
2333 	struct fe_priv *np = netdev_priv(dev);
2334 	u32 tx_flags = 0;
2335 	u32 tx_flags_extra;
2336 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2337 	unsigned int i;
2338 	u32 offset = 0;
2339 	u32 bcnt;
2340 	u32 size = skb_headlen(skb);
2341 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2342 	u32 empty_slots;
2343 	struct ring_desc_ex *put_tx;
2344 	struct ring_desc_ex *start_tx;
2345 	struct ring_desc_ex *prev_tx;
2346 	struct nv_skb_map *prev_tx_ctx;
2347 	struct nv_skb_map *start_tx_ctx = NULL;
2348 	struct nv_skb_map *tmp_tx_ctx = NULL;
2349 	unsigned long flags;
2350 
2351 	/* add fragments to entries count */
2352 	for (i = 0; i < fragments; i++) {
2353 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2354 
2355 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2356 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2357 	}
2358 
2359 	spin_lock_irqsave(&np->lock, flags);
2360 	empty_slots = nv_get_empty_tx_slots(np);
2361 	if (unlikely(empty_slots <= entries)) {
2362 		netif_stop_queue(dev);
2363 		np->tx_stop = 1;
2364 		spin_unlock_irqrestore(&np->lock, flags);
2365 		return NETDEV_TX_BUSY;
2366 	}
2367 	spin_unlock_irqrestore(&np->lock, flags);
2368 
2369 	start_tx = put_tx = np->put_tx.ex;
2370 	start_tx_ctx = np->put_tx_ctx;
2371 
2372 	/* setup the header buffer */
2373 	do {
2374 		prev_tx = put_tx;
2375 		prev_tx_ctx = np->put_tx_ctx;
2376 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2377 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2378 						     skb->data + offset, bcnt,
2379 						     DMA_TO_DEVICE);
2380 		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2381 					       np->put_tx_ctx->dma))) {
2382 			/* on DMA mapping error - drop the packet */
2383 			dev_kfree_skb_any(skb);
2384 			u64_stats_update_begin(&np->swstats_tx_syncp);
2385 			np->stat_tx_dropped++;
2386 			u64_stats_update_end(&np->swstats_tx_syncp);
2387 			return NETDEV_TX_OK;
2388 		}
2389 		np->put_tx_ctx->dma_len = bcnt;
2390 		np->put_tx_ctx->dma_single = 1;
2391 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2392 		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2393 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2394 
2395 		tx_flags = NV_TX2_VALID;
2396 		offset += bcnt;
2397 		size -= bcnt;
2398 		if (unlikely(put_tx++ == np->last_tx.ex))
2399 			put_tx = np->first_tx.ex;
2400 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2401 			np->put_tx_ctx = np->first_tx_ctx;
2402 	} while (size);
2403 
2404 	/* setup the fragments */
2405 	for (i = 0; i < fragments; i++) {
2406 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2407 		u32 frag_size = skb_frag_size(frag);
2408 		offset = 0;
2409 
2410 		do {
2411 			prev_tx = put_tx;
2412 			prev_tx_ctx = np->put_tx_ctx;
2413 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2416 			np->put_tx_ctx->dma = skb_frag_dma_map(
2417 							&np->pci_dev->dev,
2418 							frag, offset,
2419 							bcnt,
2420 							DMA_TO_DEVICE);
2421 
2422 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2423 						       np->put_tx_ctx->dma))) {
2424 
				/* Unwind everything mapped so far for this
				 * skb, starting at its first descriptor
				 */
				tmp_tx_ctx = start_tx_ctx;
				while (tmp_tx_ctx != np->put_tx_ctx) {
					nv_unmap_txskb(np, tmp_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
				}
2431 				dev_kfree_skb_any(skb);
2432 				np->put_tx_ctx = start_tx_ctx;
2433 				u64_stats_update_begin(&np->swstats_tx_syncp);
2434 				np->stat_tx_dropped++;
2435 				u64_stats_update_end(&np->swstats_tx_syncp);
2436 				return NETDEV_TX_OK;
2437 			}
2438 			np->put_tx_ctx->dma_len = bcnt;
2439 			np->put_tx_ctx->dma_single = 0;
2440 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2441 			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2442 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2443 
2444 			offset += bcnt;
2445 			frag_size -= bcnt;
2446 			if (unlikely(put_tx++ == np->last_tx.ex))
2447 				put_tx = np->first_tx.ex;
2448 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2449 				np->put_tx_ctx = np->first_tx_ctx;
2450 		} while (frag_size);
2451 	}
2452 
2453 	/* set last fragment flag  */
2454 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2455 
2456 	/* save skb in this slot's context area */
2457 	prev_tx_ctx->skb = skb;
2458 
2459 	if (skb_is_gso(skb))
2460 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2461 	else
2462 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2463 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2464 
2465 	/* vlan tag */
2466 	if (skb_vlan_tag_present(skb))
2467 		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2468 					skb_vlan_tag_get(skb));
2469 	else
2470 		start_tx->txvlan = 0;
2471 
2472 	spin_lock_irqsave(&np->lock, flags);
2473 
2474 	if (np->tx_limit) {
		/* Limit the number of outstanding tx. All fragments are set
		 * up, but the VALID bit is withheld from the first descriptor.
		 * Save a pointer to that descriptor and to the next skb_map
		 * element so ownership can be handed over later.
		 */
2479 
2480 		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2481 			if (!np->tx_change_owner)
2482 				np->tx_change_owner = start_tx_ctx;
2483 
2484 			/* remove VALID bit */
2485 			tx_flags &= ~NV_TX2_VALID;
2486 			start_tx_ctx->first_tx_desc = start_tx;
2487 			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2488 			np->tx_end_flip = np->put_tx_ctx;
2489 		} else {
2490 			np->tx_pkts_in_progress++;
2491 		}
2492 	}
2493 
2494 	/* set tx flags */
2495 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2496 
2497 	netdev_sent_queue(np->dev, skb->len);
2498 
2499 	skb_tx_timestamp(skb);
2500 
2501 	np->put_tx.ex = put_tx;
2502 
2503 	spin_unlock_irqrestore(&np->lock, flags);
2504 
2505 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2506 	return NETDEV_TX_OK;
2507 }
2508 
2509 static inline void nv_tx_flip_ownership(struct net_device *dev)
2510 {
2511 	struct fe_priv *np = netdev_priv(dev);
2512 
2513 	np->tx_pkts_in_progress--;
2514 	if (np->tx_change_owner) {
2515 		np->tx_change_owner->first_tx_desc->flaglen |=
2516 			cpu_to_le32(NV_TX2_VALID);
2517 		np->tx_pkts_in_progress++;
2518 
2519 		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2520 		if (np->tx_change_owner == np->tx_end_flip)
2521 			np->tx_change_owner = NULL;
2522 
2523 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2524 	}
2525 }
2526 
2527 /*
2528  * nv_tx_done: check for completed packets, release the skbs.
2529  *
2530  * Caller must own np->lock.
2531  */
2532 static int nv_tx_done(struct net_device *dev, int limit)
2533 {
2534 	struct fe_priv *np = netdev_priv(dev);
2535 	u32 flags;
2536 	int tx_work = 0;
2537 	struct ring_desc *orig_get_tx = np->get_tx.orig;
2538 	unsigned int bytes_compl = 0;
2539 
2540 	while ((np->get_tx.orig != np->put_tx.orig) &&
2541 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2542 	       (tx_work < limit)) {
2543 
2544 		nv_unmap_txskb(np, np->get_tx_ctx);
2545 
2546 		if (np->desc_ver == DESC_VER_1) {
2547 			if (flags & NV_TX_LASTPACKET) {
2548 				if (flags & NV_TX_ERROR) {
2549 					if ((flags & NV_TX_RETRYERROR)
2550 					    && !(flags & NV_TX_RETRYCOUNT_MASK))
2551 						nv_legacybackoff_reseed(dev);
2552 				} else {
2553 					u64_stats_update_begin(&np->swstats_tx_syncp);
2554 					np->stat_tx_packets++;
2555 					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2556 					u64_stats_update_end(&np->swstats_tx_syncp);
2557 				}
2558 				bytes_compl += np->get_tx_ctx->skb->len;
2559 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2560 				np->get_tx_ctx->skb = NULL;
2561 				tx_work++;
2562 			}
2563 		} else {
2564 			if (flags & NV_TX2_LASTPACKET) {
2565 				if (flags & NV_TX2_ERROR) {
2566 					if ((flags & NV_TX2_RETRYERROR)
2567 					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
2568 						nv_legacybackoff_reseed(dev);
2569 				} else {
2570 					u64_stats_update_begin(&np->swstats_tx_syncp);
2571 					np->stat_tx_packets++;
2572 					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2573 					u64_stats_update_end(&np->swstats_tx_syncp);
2574 				}
2575 				bytes_compl += np->get_tx_ctx->skb->len;
2576 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2577 				np->get_tx_ctx->skb = NULL;
2578 				tx_work++;
2579 			}
2580 		}
2581 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2582 			np->get_tx.orig = np->first_tx.orig;
2583 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2584 			np->get_tx_ctx = np->first_tx_ctx;
2585 	}
2586 
2587 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
2588 
2589 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2590 		np->tx_stop = 0;
2591 		netif_wake_queue(dev);
2592 	}
2593 	return tx_work;
2594 }
2595 
2596 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2597 {
2598 	struct fe_priv *np = netdev_priv(dev);
2599 	u32 flags;
2600 	int tx_work = 0;
2601 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2602 	unsigned long bytes_cleaned = 0;
2603 
2604 	while ((np->get_tx.ex != np->put_tx.ex) &&
2605 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2606 	       (tx_work < limit)) {
2607 
2608 		nv_unmap_txskb(np, np->get_tx_ctx);
2609 
2610 		if (flags & NV_TX2_LASTPACKET) {
2611 			if (flags & NV_TX2_ERROR) {
2612 				if ((flags & NV_TX2_RETRYERROR)
2613 				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2614 					if (np->driver_data & DEV_HAS_GEAR_MODE)
2615 						nv_gear_backoff_reseed(dev);
2616 					else
2617 						nv_legacybackoff_reseed(dev);
2618 				}
2619 			} else {
2620 				u64_stats_update_begin(&np->swstats_tx_syncp);
2621 				np->stat_tx_packets++;
2622 				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2623 				u64_stats_update_end(&np->swstats_tx_syncp);
2624 			}
2625 
2626 			bytes_cleaned += np->get_tx_ctx->skb->len;
2627 			dev_kfree_skb_any(np->get_tx_ctx->skb);
2628 			np->get_tx_ctx->skb = NULL;
2629 			tx_work++;
2630 
2631 			if (np->tx_limit)
2632 				nv_tx_flip_ownership(dev);
2633 		}
2634 
2635 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2636 			np->get_tx.ex = np->first_tx.ex;
2637 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2638 			np->get_tx_ctx = np->first_tx_ctx;
2639 	}
2640 
2641 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2642 
2643 	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2644 		np->tx_stop = 0;
2645 		netif_wake_queue(dev);
2646 	}
2647 	return tx_work;
2648 }
2649 
2650 /*
2651  * nv_tx_timeout: dev->tx_timeout function
2652  * Called with netif_tx_lock held.
2653  */
2654 static void nv_tx_timeout(struct net_device *dev)
2655 {
2656 	struct fe_priv *np = netdev_priv(dev);
2657 	u8 __iomem *base = get_hwbase(dev);
2658 	u32 status;
2659 	union ring_type put_tx;
2660 	int saved_tx_limit;
2661 
2662 	if (np->msi_flags & NV_MSI_X_ENABLED)
2663 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2664 	else
2665 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2666 
2667 	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2668 
2669 	if (unlikely(debug_tx_timeout)) {
2670 		int i;
2671 
2672 		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2673 		netdev_info(dev, "Dumping tx registers\n");
2674 		for (i = 0; i <= np->register_size; i += 32) {
2675 			netdev_info(dev,
2676 				    "%3x: %08x %08x %08x %08x "
2677 				    "%08x %08x %08x %08x\n",
2678 				    i,
2679 				    readl(base + i + 0), readl(base + i + 4),
2680 				    readl(base + i + 8), readl(base + i + 12),
2681 				    readl(base + i + 16), readl(base + i + 20),
2682 				    readl(base + i + 24), readl(base + i + 28));
2683 		}
2684 		netdev_info(dev, "Dumping tx ring\n");
2685 		for (i = 0; i < np->tx_ring_size; i += 4) {
2686 			if (!nv_optimized(np)) {
2687 				netdev_info(dev,
2688 					    "%03x: %08x %08x // %08x %08x "
2689 					    "// %08x %08x // %08x %08x\n",
2690 					    i,
2691 					    le32_to_cpu(np->tx_ring.orig[i].buf),
2692 					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
2693 					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
2694 					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2695 					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
2696 					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2697 					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
2698 					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2699 			} else {
2700 				netdev_info(dev,
2701 					    "%03x: %08x %08x %08x "
2702 					    "// %08x %08x %08x "
2703 					    "// %08x %08x %08x "
2704 					    "// %08x %08x %08x\n",
2705 					    i,
2706 					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2707 					    le32_to_cpu(np->tx_ring.ex[i].buflow),
2708 					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
2709 					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2710 					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2711 					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2712 					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2713 					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2714 					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2715 					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2716 					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2717 					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2718 			}
2719 		}
2720 	}
2721 
2722 	spin_lock_irq(&np->lock);
2723 
2724 	/* 1) stop tx engine */
2725 	nv_stop_tx(dev);
2726 
2727 	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2728 	saved_tx_limit = np->tx_limit;
2729 	np->tx_limit = 0; /* prevent giving HW any limited pkts */
2730 	np->tx_stop = 0;  /* prevent waking tx queue */
2731 	if (!nv_optimized(np))
2732 		nv_tx_done(dev, np->tx_ring_size);
2733 	else
2734 		nv_tx_done_optimized(dev, np->tx_ring_size);
2735 
2736 	/* save current HW position */
2737 	if (np->tx_change_owner)
2738 		put_tx.ex = np->tx_change_owner->first_tx_desc;
2739 	else
2740 		put_tx = np->put_tx;
2741 
2742 	/* 3) clear all tx state */
2743 	nv_drain_tx(dev);
2744 	nv_init_tx(dev);
2745 
2746 	/* 4) restore state to current HW position */
2747 	np->get_tx = np->put_tx = put_tx;
2748 	np->tx_limit = saved_tx_limit;
2749 
2750 	/* 5) restart tx engine */
2751 	nv_start_tx(dev);
2752 	netif_wake_queue(dev);
2753 	spin_unlock_irq(&np->lock);
2754 }
2755 
2756 /*
2757  * Called when the nic notices a mismatch between the actual data len on the
2758  * wire and the len indicated in the 802 header
2759  */
2760 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2761 {
2762 	int hdrlen;	/* length of the 802 header */
2763 	int protolen;	/* length as stored in the proto field */
2764 
2765 	/* 1) calculate len according to header */
2766 	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2767 		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2768 		hdrlen = VLAN_HLEN;
2769 	} else {
2770 		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2771 		hdrlen = ETH_HLEN;
2772 	}
2773 	if (protolen > ETH_DATA_LEN)
2774 		return datalen; /* Value in proto field not a len, no checks possible */
2775 
2776 	protolen += hdrlen;
2777 	/* consistency checks: */
2778 	if (datalen > ETH_ZLEN) {
2779 		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
2783 			return protolen;
2784 		} else {
2785 			/* less data on wire than mentioned in header.
2786 			 * Discard the packet.
2787 			 */
2788 			return -1;
2789 		}
2790 	} else {
2791 		/* short packet. Accept only if 802 values are also short */
2792 		if (protolen > ETH_ZLEN) {
2793 			return -1;
2794 		}
2795 		return datalen;
2796 	}
2797 }
2798 
2799 static int nv_rx_process(struct net_device *dev, int limit)
2800 {
2801 	struct fe_priv *np = netdev_priv(dev);
2802 	u32 flags;
2803 	int rx_work = 0;
2804 	struct sk_buff *skb;
2805 	int len;
2806 
2807 	while ((np->get_rx.orig != np->put_rx.orig) &&
2808 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2809 		(rx_work < limit)) {
2810 
2811 		/*
2812 		 * the packet is for us - immediately tear down the pci mapping.
2813 		 * TODO: check if a prefetch of the first cacheline improves
2814 		 * the performance.
2815 		 */
2816 		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
2817 				 np->get_rx_ctx->dma_len,
2818 				 DMA_FROM_DEVICE);
2819 		skb = np->get_rx_ctx->skb;
2820 		np->get_rx_ctx->skb = NULL;
2821 
2822 		/* look at what we actually got: */
2823 		if (np->desc_ver == DESC_VER_1) {
2824 			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2825 				len = flags & LEN_MASK_V1;
2826 				if (unlikely(flags & NV_RX_ERROR)) {
2827 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2828 						len = nv_getlen(dev, skb->data, len);
2829 						if (len < 0) {
2830 							dev_kfree_skb(skb);
2831 							goto next_pkt;
2832 						}
2833 					}
2834 					/* framing errors are soft errors */
2835 					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2836 						if (flags & NV_RX_SUBTRACT1)
2837 							len--;
2838 					}
2839 					/* the rest are hard errors */
2840 					else {
2841 						if (flags & NV_RX_MISSEDFRAME) {
2842 							u64_stats_update_begin(&np->swstats_rx_syncp);
2843 							np->stat_rx_missed_errors++;
2844 							u64_stats_update_end(&np->swstats_rx_syncp);
2845 						}
2846 						dev_kfree_skb(skb);
2847 						goto next_pkt;
2848 					}
2849 				}
2850 			} else {
2851 				dev_kfree_skb(skb);
2852 				goto next_pkt;
2853 			}
2854 		} else {
2855 			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2856 				len = flags & LEN_MASK_V2;
2857 				if (unlikely(flags & NV_RX2_ERROR)) {
2858 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2859 						len = nv_getlen(dev, skb->data, len);
2860 						if (len < 0) {
2861 							dev_kfree_skb(skb);
2862 							goto next_pkt;
2863 						}
2864 					}
2865 					/* framing errors are soft errors */
2866 					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2867 						if (flags & NV_RX2_SUBTRACT1)
2868 							len--;
2869 					}
2870 					/* the rest are hard errors */
2871 					else {
2872 						dev_kfree_skb(skb);
2873 						goto next_pkt;
2874 					}
2875 				}
2876 				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2877 				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2878 					skb->ip_summed = CHECKSUM_UNNECESSARY;
2879 			} else {
2880 				dev_kfree_skb(skb);
2881 				goto next_pkt;
2882 			}
2883 		}
2884 		/* got a valid packet - forward it to the network core */
2885 		skb_put(skb, len);
2886 		skb->protocol = eth_type_trans(skb, dev);
2887 		napi_gro_receive(&np->napi, skb);
2888 		u64_stats_update_begin(&np->swstats_rx_syncp);
2889 		np->stat_rx_packets++;
2890 		np->stat_rx_bytes += len;
2891 		u64_stats_update_end(&np->swstats_rx_syncp);
2892 next_pkt:
2893 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2894 			np->get_rx.orig = np->first_rx.orig;
2895 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2896 			np->get_rx_ctx = np->first_rx_ctx;
2897 
2898 		rx_work++;
2899 	}
2900 
2901 	return rx_work;
2902 }
2903 
2904 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2905 {
2906 	struct fe_priv *np = netdev_priv(dev);
2907 	u32 flags;
2908 	u32 vlanflags = 0;
2909 	int rx_work = 0;
2910 	struct sk_buff *skb;
2911 	int len;
2912 
2913 	while ((np->get_rx.ex != np->put_rx.ex) &&
2914 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2915 	      (rx_work < limit)) {
2916 
2917 		/*
2918 		 * the packet is for us - immediately tear down the pci mapping.
2919 		 * TODO: check if a prefetch of the first cacheline improves
2920 		 * the performance.
2921 		 */
2922 		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
2923 				 np->get_rx_ctx->dma_len,
2924 				 DMA_FROM_DEVICE);
2925 		skb = np->get_rx_ctx->skb;
2926 		np->get_rx_ctx->skb = NULL;
2927 
2928 		/* look at what we actually got: */
2929 		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2930 			len = flags & LEN_MASK_V2;
2931 			if (unlikely(flags & NV_RX2_ERROR)) {
2932 				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2933 					len = nv_getlen(dev, skb->data, len);
2934 					if (len < 0) {
2935 						dev_kfree_skb(skb);
2936 						goto next_pkt;
2937 					}
2938 				}
2939 				/* framing errors are soft errors */
2940 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2941 					if (flags & NV_RX2_SUBTRACT1)
2942 						len--;
2943 				}
2944 				/* the rest are hard errors */
2945 				else {
2946 					dev_kfree_skb(skb);
2947 					goto next_pkt;
2948 				}
2949 			}
2950 
2951 			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2952 			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2953 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2954 
2955 			/* got a valid packet - forward it to the network core */
2956 			skb_put(skb, len);
2957 			skb->protocol = eth_type_trans(skb, dev);
2958 			prefetch(skb->data);
2959 
2960 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2961 
2962 			/*
2963 			 * There's need to check for NETIF_F_HW_VLAN_CTAG_RX
2964 			 * here. Even if vlan rx accel is disabled,
2965 			 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
2966 			 */
2967 			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2968 			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2969 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2970 
2971 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2972 			}
2973 			napi_gro_receive(&np->napi, skb);
2974 			u64_stats_update_begin(&np->swstats_rx_syncp);
2975 			np->stat_rx_packets++;
2976 			np->stat_rx_bytes += len;
2977 			u64_stats_update_end(&np->swstats_rx_syncp);
2978 		} else {
2979 			dev_kfree_skb(skb);
2980 		}
2981 next_pkt:
2982 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2983 			np->get_rx.ex = np->first_rx.ex;
2984 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2985 			np->get_rx_ctx = np->first_rx_ctx;
2986 
2987 		rx_work++;
2988 	}
2989 
2990 	return rx_work;
2991 }
2992 
2993 static void set_bufsize(struct net_device *dev)
2994 {
2995 	struct fe_priv *np = netdev_priv(dev);
2996 
2997 	if (dev->mtu <= ETH_DATA_LEN)
2998 		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2999 	else
3000 		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3001 }
3002 
3003 /*
3004  * nv_change_mtu: dev->change_mtu function
3005  * Called with dev_base_lock held for read.
3006  */
3007 static int nv_change_mtu(struct net_device *dev, int new_mtu)
3008 {
3009 	struct fe_priv *np = netdev_priv(dev);
3010 	int old_mtu;
3011 
3012 	old_mtu = dev->mtu;
3013 	dev->mtu = new_mtu;
3014 
3015 	/* return early if the buffer sizes will not change */
3016 	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
3017 		return 0;
3018 
	/* synchronized against open: rtnl_lock() held by caller */
3020 	if (netif_running(dev)) {
3021 		u8 __iomem *base = get_hwbase(dev);
3022 		/*
3023 		 * It seems that the nic preloads valid ring entries into an
3024 		 * internal buffer. The procedure for flushing everything is
3025 		 * guessed, there is probably a simpler approach.
3026 		 * Changing the MTU is a rare event, it shouldn't matter.
3027 		 */
3028 		nv_disable_irq(dev);
3029 		nv_napi_disable(dev);
3030 		netif_tx_lock_bh(dev);
3031 		netif_addr_lock(dev);
3032 		spin_lock(&np->lock);
3033 		/* stop engines */
3034 		nv_stop_rxtx(dev);
3035 		nv_txrx_reset(dev);
		/* drain rx and tx queues */
3037 		nv_drain_rxtx(dev);
3038 		/* reinit driver view of the rx queue */
3039 		set_bufsize(dev);
3040 		if (nv_init_ring(dev)) {
3041 			if (!np->in_shutdown)
3042 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3043 		}
3044 		/* reinit nic view of the rx queue */
3045 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3046 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3047 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3048 			base + NvRegRingSizes);
3049 		pci_push(base);
3050 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3051 		pci_push(base);
3052 
		/* restart rx and tx engines */
3054 		nv_start_rxtx(dev);
3055 		spin_unlock(&np->lock);
3056 		netif_addr_unlock(dev);
3057 		netif_tx_unlock_bh(dev);
3058 		nv_napi_enable(dev);
3059 		nv_enable_irq(dev);
3060 	}
3061 	return 0;
3062 }
3063 
3064 static void nv_copy_mac_to_hw(struct net_device *dev)
3065 {
3066 	u8 __iomem *base = get_hwbase(dev);
3067 	u32 mac[2];
3068 
3069 	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3070 			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3071 	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3072 
3073 	writel(mac[0], base + NvRegMacAddrA);
3074 	writel(mac[1], base + NvRegMacAddrB);
3075 }
3076 
3077 /*
3078  * nv_set_mac_address: dev->set_mac_address function
3079  * Called with rtnl_lock() held.
3080  */
3081 static int nv_set_mac_address(struct net_device *dev, void *addr)
3082 {
3083 	struct fe_priv *np = netdev_priv(dev);
3084 	struct sockaddr *macaddr = (struct sockaddr *)addr;
3085 
3086 	if (!is_valid_ether_addr(macaddr->sa_data))
3087 		return -EADDRNOTAVAIL;
3088 
	/* synchronized against open: rtnl_lock() held by caller */
3090 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3091 
3092 	if (netif_running(dev)) {
3093 		netif_tx_lock_bh(dev);
3094 		netif_addr_lock(dev);
3095 		spin_lock_irq(&np->lock);
3096 
3097 		/* stop rx engine */
3098 		nv_stop_rx(dev);
3099 
3100 		/* set mac address */
3101 		nv_copy_mac_to_hw(dev);
3102 
3103 		/* restart rx engine */
3104 		nv_start_rx(dev);
3105 		spin_unlock_irq(&np->lock);
3106 		netif_addr_unlock(dev);
3107 		netif_tx_unlock_bh(dev);
3108 	} else {
3109 		nv_copy_mac_to_hw(dev);
3110 	}
3111 	return 0;
3112 }
3113 
3114 /*
3115  * nv_set_multicast: dev->set_multicast function
3116  * Called with netif_tx_lock held.
3117  */
3118 static void nv_set_multicast(struct net_device *dev)
3119 {
3120 	struct fe_priv *np = netdev_priv(dev);
3121 	u8 __iomem *base = get_hwbase(dev);
3122 	u32 addr[2];
3123 	u32 mask[2];
3124 	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3125 
3126 	memset(addr, 0, sizeof(addr));
3127 	memset(mask, 0, sizeof(mask));
3128 
3129 	if (dev->flags & IFF_PROMISC) {
3130 		pff |= NVREG_PFF_PROMISC;
3131 	} else {
3132 		pff |= NVREG_PFF_MYADDR;
3133 
3134 		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3135 			u32 alwaysOff[2];
3136 			u32 alwaysOn[2];
3137 
3138 			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3139 			if (dev->flags & IFF_ALLMULTI) {
3140 				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3141 			} else {
3142 				struct netdev_hw_addr *ha;
3143 
3144 				netdev_for_each_mc_addr(ha, dev) {
3145 					unsigned char *hw_addr = ha->addr;
3146 					u32 a, b;
3147 
3148 					a = le32_to_cpu(*(__le32 *) hw_addr);
3149 					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
3150 					alwaysOn[0] &= a;
3151 					alwaysOff[0] &= ~a;
3152 					alwaysOn[1] &= b;
3153 					alwaysOff[1] &= ~b;
3154 				}
3155 			}
3156 			addr[0] = alwaysOn[0];
3157 			addr[1] = alwaysOn[1];
3158 			mask[0] = alwaysOn[0] | alwaysOff[0];
3159 			mask[1] = alwaysOn[1] | alwaysOff[1];
3160 		} else {
3161 			mask[0] = NVREG_MCASTMASKA_NONE;
3162 			mask[1] = NVREG_MCASTMASKB_NONE;
3163 		}
3164 	}
3165 	addr[0] |= NVREG_MCASTADDRA_FORCE;
3166 	pff |= NVREG_PFF_ALWAYS;
3167 	spin_lock_irq(&np->lock);
3168 	nv_stop_rx(dev);
3169 	writel(addr[0], base + NvRegMulticastAddrA);
3170 	writel(addr[1], base + NvRegMulticastAddrB);
3171 	writel(mask[0], base + NvRegMulticastMaskA);
3172 	writel(mask[1], base + NvRegMulticastMaskB);
3173 	writel(pff, base + NvRegPacketFilterFlags);
3174 	nv_start_rx(dev);
3175 	spin_unlock_irq(&np->lock);
3176 }
3177 
3178 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3179 {
3180 	struct fe_priv *np = netdev_priv(dev);
3181 	u8 __iomem *base = get_hwbase(dev);
3182 
3183 	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3184 
3185 	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3186 		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3187 		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3188 			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3189 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3190 		} else {
3191 			writel(pff, base + NvRegPacketFilterFlags);
3192 		}
3193 	}
3194 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3195 		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3196 		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3197 			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3198 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3199 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3200 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3201 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3202 				/* limit the number of tx pause frames to a default of 8 */
3203 				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3204 			}
3205 			writel(pause_enable,  base + NvRegTxPauseFrame);
3206 			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3207 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3208 		} else {
3209 			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3210 			writel(regmisc, base + NvRegMisc1);
3211 		}
3212 	}
3213 }
3214 
3215 static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3216 {
3217 	struct fe_priv *np = netdev_priv(dev);
3218 	u8 __iomem *base = get_hwbase(dev);
3219 	u32 phyreg, txreg;
3220 	int mii_status;
3221 
3222 	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3223 	np->duplex = duplex;
3224 
3225 	/* see if gigabit phy */
3226 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3227 	if (mii_status & PHY_GIGABIT) {
3228 		np->gigabit = PHY_GIGABIT;
3229 		phyreg = readl(base + NvRegSlotTime);
3230 		phyreg &= ~(0x3FF00);
3231 		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3232 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3233 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3234 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3235 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3236 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3237 		writel(phyreg, base + NvRegSlotTime);
3238 	}
3239 
3240 	phyreg = readl(base + NvRegPhyInterface);
3241 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3242 	if (np->duplex == 0)
3243 		phyreg |= PHY_HALF;
3244 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3245 		phyreg |= PHY_100;
3246 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3247 							NVREG_LINKSPEED_1000)
3248 		phyreg |= PHY_1000;
3249 	writel(phyreg, base + NvRegPhyInterface);
3250 
3251 	if (phyreg & PHY_RGMII) {
3252 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3253 							NVREG_LINKSPEED_1000)
3254 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3255 		else
3256 			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3257 	} else {
3258 		txreg = NVREG_TX_DEFERRAL_DEFAULT;
3259 	}
3260 	writel(txreg, base + NvRegTxDeferral);
3261 
3262 	if (np->desc_ver == DESC_VER_1) {
3263 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3264 	} else {
3265 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3266 					 NVREG_LINKSPEED_1000)
3267 			txreg = NVREG_TX_WM_DESC2_3_1000;
3268 		else
3269 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3270 	}
3271 	writel(txreg, base + NvRegTxWatermark);
3272 
3273 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3274 			base + NvRegMisc1);
3275 	pci_push(base);
3276 	writel(np->linkspeed, base + NvRegLinkSpeed);
3277 	pci_push(base);
3278 }
3279 
3280 /**
3281  * nv_update_linkspeed - Setup the MAC according to the link partner
3282  * @dev: Network device to be configured
3283  *
3284  * The function queries the PHY and checks if there is a link partner.
3285  * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3286  * set to 10 MBit HD.
3287  *
3288  * The function returns 0 if there is no link partner and 1 if there is
3289  * a good link partner.
3290  */
3291 static int nv_update_linkspeed(struct net_device *dev)
3292 {
3293 	struct fe_priv *np = netdev_priv(dev);
3294 	u8 __iomem *base = get_hwbase(dev);
3295 	int adv = 0;
3296 	int lpa = 0;
3297 	int adv_lpa, adv_pause, lpa_pause;
3298 	int newls = np->linkspeed;
3299 	int newdup = np->duplex;
3300 	int mii_status;
3301 	u32 bmcr;
3302 	int retval = 0;
3303 	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3304 	u32 txrxFlags = 0;
3305 	u32 phy_exp;
3306 
3307 	/* If device loopback is enabled, set the carrier on and force the
3308 	 * maximum link speed.
3309 	 */
3310 	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3311 	if (bmcr & BMCR_LOOPBACK) {
3312 		if (netif_running(dev)) {
3313 			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3314 			if (!netif_carrier_ok(dev))
3315 				netif_carrier_on(dev);
3316 		}
3317 		return 1;
3318 	}
3319 
3320 	/* BMSR_LSTATUS is latched, read it twice:
3321 	 * we want the current value.
3322 	 */
3323 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3324 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3325 
3326 	if (!(mii_status & BMSR_LSTATUS)) {
3327 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3328 		newdup = 0;
3329 		retval = 0;
3330 		goto set_speed;
3331 	}
3332 
3333 	if (np->autoneg == 0) {
3334 		if (np->fixed_mode & LPA_100FULL) {
3335 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3336 			newdup = 1;
3337 		} else if (np->fixed_mode & LPA_100HALF) {
3338 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3339 			newdup = 0;
3340 		} else if (np->fixed_mode & LPA_10FULL) {
3341 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3342 			newdup = 1;
3343 		} else {
3344 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3345 			newdup = 0;
3346 		}
3347 		retval = 1;
3348 		goto set_speed;
3349 	}
3350 	/* check that autonegotiation is complete */
3351 	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3352 		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3353 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3354 		newdup = 0;
3355 		retval = 0;
3356 		goto set_speed;
3357 	}
3358 
3359 	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3360 	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3361 
3362 	retval = 1;
3363 	if (np->gigabit == PHY_GIGABIT) {
3364 		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3365 		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3366 
3367 		if ((control_1000 & ADVERTISE_1000FULL) &&
3368 			(status_1000 & LPA_1000FULL)) {
3369 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3370 			newdup = 1;
3371 			goto set_speed;
3372 		}
3373 	}
3374 
3375 	/* FIXME: handle parallel detection properly */
3376 	adv_lpa = lpa & adv;
3377 	if (adv_lpa & LPA_100FULL) {
3378 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3379 		newdup = 1;
3380 	} else if (adv_lpa & LPA_100HALF) {
3381 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3382 		newdup = 0;
3383 	} else if (adv_lpa & LPA_10FULL) {
3384 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3385 		newdup = 1;
3386 	} else if (adv_lpa & LPA_10HALF) {
3387 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3388 		newdup = 0;
3389 	} else {
3390 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3391 		newdup = 0;
3392 	}
3393 
3394 set_speed:
3395 	if (np->duplex == newdup && np->linkspeed == newls)
3396 		return retval;
3397 
3398 	np->duplex = newdup;
3399 	np->linkspeed = newls;
3400 
3401 	/* The transmitter and receiver must be restarted for a safe update */
3402 	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3403 		txrxFlags |= NV_RESTART_TX;
3404 		nv_stop_tx(dev);
3405 	}
3406 	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3407 		txrxFlags |= NV_RESTART_RX;
3408 		nv_stop_rx(dev);
3409 	}
3410 
3411 	if (np->gigabit == PHY_GIGABIT) {
3412 		phyreg = readl(base + NvRegSlotTime);
3413 		phyreg &= ~(0x3FF00);
3414 		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3415 		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3416 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3417 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3418 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3419 		writel(phyreg, base + NvRegSlotTime);
3420 	}
3421 
3422 	phyreg = readl(base + NvRegPhyInterface);
3423 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3424 	if (np->duplex == 0)
3425 		phyreg |= PHY_HALF;
3426 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3427 		phyreg |= PHY_100;
3428 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3429 		phyreg |= PHY_1000;
3430 	writel(phyreg, base + NvRegPhyInterface);
3431 
3432 	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
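	/* Pick the tx deferral timing: RGMII vs MII and gigabit vs 10/100,
	 * with stretched deferral on half-duplex links to a partner that
	 * cannot autonegotiate, on nics that need the collision fix.
	 */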
3433 	if (phyreg & PHY_RGMII) {
3434 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3435 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3436 		} else {
3437 			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3438 				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3439 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3440 				else
3441 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3442 			} else {
3443 				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3444 			}
3445 		}
3446 	} else {
3447 		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3448 			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3449 		else
3450 			txreg = NVREG_TX_DEFERRAL_DEFAULT;
3451 	}
3452 	writel(txreg, base + NvRegTxDeferral);
3453 
3454 	if (np->desc_ver == DESC_VER_1) {
3455 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3456 	} else {
3457 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3458 			txreg = NVREG_TX_WM_DESC2_3_1000;
3459 		else
3460 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3461 	}
3462 	writel(txreg, base + NvRegTxWatermark);
3463 
3464 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3465 		base + NvRegMisc1);
3466 	pci_push(base);
3467 	writel(np->linkspeed, base + NvRegLinkSpeed);
3468 	pci_push(base);
3469 
3470 	pause_flags = 0;
3471 	/* setup pause frame */
3472 	if (netif_running(dev) && (np->duplex != 0)) {
3473 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3474 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3475 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3476 
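			/* Resolve pause per IEEE 802.3 Annex 28B: our PAUSE
			 * (symmetric) and ASM_DIR (asymmetric) advertisement
			 * bits combined with the link partner's abilities
			 * determine the rx/tx pause directions below.
			 */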
3477 			switch (adv_pause) {
3478 			case ADVERTISE_PAUSE_CAP:
3479 				if (lpa_pause & LPA_PAUSE_CAP) {
3480 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3481 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3482 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3483 				}
3484 				break;
3485 			case ADVERTISE_PAUSE_ASYM:
3486 				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3487 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3488 				break;
3489 			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3490 				if (lpa_pause & LPA_PAUSE_CAP) {
3491 					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3492 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3493 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3494 				}
3495 				if (lpa_pause == LPA_PAUSE_ASYM)
3496 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3497 				break;
3498 			}
3499 		} else {
3500 			pause_flags = np->pause_flags;
3501 		}
3502 	}
3503 	nv_update_pause(dev, pause_flags);
3504 
3505 	if (txrxFlags & NV_RESTART_TX)
3506 		nv_start_tx(dev);
3507 	if (txrxFlags & NV_RESTART_RX)
3508 		nv_start_rx(dev);
3509 
3510 	return retval;
3511 }
3512 
3513 static void nv_linkchange(struct net_device *dev)
3514 {
3515 	if (nv_update_linkspeed(dev)) {
3516 		if (!netif_carrier_ok(dev)) {
3517 			netif_carrier_on(dev);
3518 			netdev_info(dev, "link up\n");
3519 			nv_txrx_gate(dev, false);
3520 			nv_start_rx(dev);
3521 		}
3522 	} else {
3523 		if (netif_carrier_ok(dev)) {
3524 			netif_carrier_off(dev);
3525 			netdev_info(dev, "link down\n");
3526 			nv_txrx_gate(dev, true);
3527 			nv_stop_rx(dev);
3528 		}
3529 	}
3530 }
3531 
3532 static void nv_link_irq(struct net_device *dev)
3533 {
3534 	u8 __iomem *base = get_hwbase(dev);
3535 	u32 miistat;
3536 
3537 	miistat = readl(base + NvRegMIIStatus);
3538 	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3539 
3540 	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3541 		nv_linkchange(dev);
3542 }
3543 
3544 static void nv_msi_workaround(struct fe_priv *np)
3545 {
3546 
3547 	/* Need to toggle the msi irq mask within the ethernet device;
3548 	 * otherwise future interrupts will not be detected.
3549 	 */
3550 	if (np->msi_flags & NV_MSI_ENABLED) {
3551 		u8 __iomem *base = np->base;
3552 
3553 		writel(0, base + NvRegMSIIrqMask);
3554 		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3555 	}
3556 }
3557 
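/* In dynamic optimization mode, switch between the two irq masks:
 * a burst of work above NV_DYNAMIC_THRESHOLD flips to the CPU (poll
 * based) mask at once, while NV_DYNAMIC_MAX_QUIET_COUNT consecutive
 * quiet polls are required before flipping back to the throughput
 * (per packet) mask. Returns 1 if the irq mask changed and needs to
 * be rewritten to the nic.
 */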
3558 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3559 {
3560 	struct fe_priv *np = netdev_priv(dev);
3561 
3562 	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3563 		if (total_work > NV_DYNAMIC_THRESHOLD) {
3564 			/* transition to poll based interrupts */
3565 			np->quiet_count = 0;
3566 			if (np->irqmask != NVREG_IRQMASK_CPU) {
3567 				np->irqmask = NVREG_IRQMASK_CPU;
3568 				return 1;
3569 			}
3570 		} else {
3571 			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3572 				np->quiet_count++;
3573 			} else {
3574 				/* reached a period of low activity, switch
3575 				   to per tx/rx packet interrupts */
3576 				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3577 					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3578 					return 1;
3579 				}
3580 			}
3581 		}
3582 	}
3583 	return 0;
3584 }
3585 
3586 static irqreturn_t nv_nic_irq(int foo, void *data)
3587 {
3588 	struct net_device *dev = (struct net_device *) data;
3589 	struct fe_priv *np = netdev_priv(dev);
3590 	u8 __iomem *base = get_hwbase(dev);
3591 
3592 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3593 		np->events = readl(base + NvRegIrqStatus);
3594 		writel(np->events, base + NvRegIrqStatus);
3595 	} else {
3596 		np->events = readl(base + NvRegMSIXIrqStatus);
3597 		writel(np->events, base + NvRegMSIXIrqStatus);
3598 	}
3599 	if (!(np->events & np->irqmask))
3600 		return IRQ_NONE;
3601 
3602 	nv_msi_workaround(np);
3603 
3604 	if (napi_schedule_prep(&np->napi)) {
3605 		/*
3606 		 * Disable further irqs (msix is not enabled with napi)
3607 		 */
3608 		writel(0, base + NvRegIrqMask);
3609 		__napi_schedule(&np->napi);
3610 	}
3611 
3612 	return IRQ_HANDLED;
3613 }
3614 
3615 /* All _optimized functions are used to help increase performance
3616  * (reduce CPU usage and increase throughput). They use descriptor
3617  * version 3, compiler directives, and reduced memory accesses.
3618  */
3619 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3620 {
3621 	struct net_device *dev = (struct net_device *) data;
3622 	struct fe_priv *np = netdev_priv(dev);
3623 	u8 __iomem *base = get_hwbase(dev);
3624 
3625 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3626 		np->events = readl(base + NvRegIrqStatus);
3627 		writel(np->events, base + NvRegIrqStatus);
3628 	} else {
3629 		np->events = readl(base + NvRegMSIXIrqStatus);
3630 		writel(np->events, base + NvRegMSIXIrqStatus);
3631 	}
3632 	if (!(np->events & np->irqmask))
3633 		return IRQ_NONE;
3634 
3635 	nv_msi_workaround(np);
3636 
3637 	if (napi_schedule_prep(&np->napi)) {
3638 		/*
3639 		 * Disable further irqs (msix is not enabled with napi)
3640 		 */
3641 		writel(0, base + NvRegIrqMask);
3642 		__napi_schedule(&np->napi);
3643 	}
3644 
3645 	return IRQ_HANDLED;
3646 }
3647 
3648 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3649 {
3650 	struct net_device *dev = (struct net_device *) data;
3651 	struct fe_priv *np = netdev_priv(dev);
3652 	u8 __iomem *base = get_hwbase(dev);
3653 	u32 events;
3654 	int i;
3655 	unsigned long flags;
3656 
3657 	for (i = 0;; i++) {
3658 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3659 		writel(events, base + NvRegMSIXIrqStatus);
3660 		netdev_dbg(dev, "tx irq events: %08x\n", events);
3661 		if (!(events & np->irqmask))
3662 			break;
3663 
3664 		spin_lock_irqsave(&np->lock, flags);
3665 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3666 		spin_unlock_irqrestore(&np->lock, flags);
3667 
3668 		if (unlikely(i > max_interrupt_work)) {
3669 			spin_lock_irqsave(&np->lock, flags);
3670 			/* disable interrupts on the nic */
3671 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3672 			pci_push(base);
3673 
3674 			if (!np->in_shutdown) {
3675 				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3676 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3677 			}
3678 			spin_unlock_irqrestore(&np->lock, flags);
3679 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3680 				   __func__, i);
3681 			break;
3682 		}
3683 
3684 	}
3685 
3686 	return IRQ_RETVAL(i);
3687 }
3688 
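/* NAPI poll handler: reap tx completions and process rx packets until
 * the budget is exhausted or the rx ring runs dry, refilling rx
 * buffers as it goes; link and recovery events flagged in np->events
 * are handled here as well.
 */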
3689 static int nv_napi_poll(struct napi_struct *napi, int budget)
3690 {
3691 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3692 	struct net_device *dev = np->dev;
3693 	u8 __iomem *base = get_hwbase(dev);
3694 	unsigned long flags;
3695 	int retcode;
3696 	int rx_count, tx_work = 0, rx_work = 0;
3697 
3698 	do {
3699 		if (!nv_optimized(np)) {
3700 			spin_lock_irqsave(&np->lock, flags);
3701 			tx_work += nv_tx_done(dev, np->tx_ring_size);
3702 			spin_unlock_irqrestore(&np->lock, flags);
3703 
3704 			rx_count = nv_rx_process(dev, budget - rx_work);
3705 			retcode = nv_alloc_rx(dev);
3706 		} else {
3707 			spin_lock_irqsave(&np->lock, flags);
3708 			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3709 			spin_unlock_irqrestore(&np->lock, flags);
3710 
3711 			rx_count = nv_rx_process_optimized(dev,
3712 			    budget - rx_work);
3713 			retcode = nv_alloc_rx_optimized(dev);
3714 		}
3715 	} while (retcode == 0 &&
3716 		 rx_count > 0 && (rx_work += rx_count) < budget);
3717 
3718 	if (retcode) {
3719 		spin_lock_irqsave(&np->lock, flags);
3720 		if (!np->in_shutdown)
3721 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3722 		spin_unlock_irqrestore(&np->lock, flags);
3723 	}
3724 
3725 	nv_change_interrupt_mode(dev, tx_work + rx_work);
3726 
3727 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
3728 		spin_lock_irqsave(&np->lock, flags);
3729 		nv_link_irq(dev);
3730 		spin_unlock_irqrestore(&np->lock, flags);
3731 	}
3732 	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3733 		spin_lock_irqsave(&np->lock, flags);
3734 		nv_linkchange(dev);
3735 		spin_unlock_irqrestore(&np->lock, flags);
3736 		np->link_timeout = jiffies + LINK_TIMEOUT;
3737 	}
3738 	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3739 		spin_lock_irqsave(&np->lock, flags);
3740 		if (!np->in_shutdown) {
3741 			np->nic_poll_irq = np->irqmask;
3742 			np->recover_error = 1;
3743 			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3744 		}
3745 		spin_unlock_irqrestore(&np->lock, flags);
3746 		napi_complete(napi);
3747 		return rx_work;
3748 	}
3749 
3750 	if (rx_work < budget) {
3751 		/* re-enable interrupts
3752 		   (msix not enabled in napi) */
3753 		napi_complete_done(napi, rx_work);
3754 
3755 		writel(np->irqmask, base + NvRegIrqMask);
3756 	}
3757 	return rx_work;
3758 }
3759 
3760 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3761 {
3762 	struct net_device *dev = (struct net_device *) data;
3763 	struct fe_priv *np = netdev_priv(dev);
3764 	u8 __iomem *base = get_hwbase(dev);
3765 	u32 events;
3766 	int i;
3767 	unsigned long flags;
3768 
3769 	for (i = 0;; i++) {
3770 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3771 		writel(events, base + NvRegMSIXIrqStatus);
3772 		netdev_dbg(dev, "rx irq events: %08x\n", events);
3773 		if (!(events & np->irqmask))
3774 			break;
3775 
3776 		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3777 			if (unlikely(nv_alloc_rx_optimized(dev))) {
3778 				spin_lock_irqsave(&np->lock, flags);
3779 				if (!np->in_shutdown)
3780 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3781 				spin_unlock_irqrestore(&np->lock, flags);
3782 			}
3783 		}
3784 
3785 		if (unlikely(i > max_interrupt_work)) {
3786 			spin_lock_irqsave(&np->lock, flags);
3787 			/* disable interrupts on the nic */
3788 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3789 			pci_push(base);
3790 
3791 			if (!np->in_shutdown) {
3792 				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3793 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3794 			}
3795 			spin_unlock_irqrestore(&np->lock, flags);
3796 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3797 				   __func__, i);
3798 			break;
3799 		}
3800 	}
3801 
3802 	return IRQ_RETVAL(i);
3803 }
3804 
3805 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3806 {
3807 	struct net_device *dev = (struct net_device *) data;
3808 	struct fe_priv *np = netdev_priv(dev);
3809 	u8 __iomem *base = get_hwbase(dev);
3810 	u32 events;
3811 	int i;
3812 	unsigned long flags;
3813 
3814 	for (i = 0;; i++) {
3815 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3816 		writel(events, base + NvRegMSIXIrqStatus);
3817 		netdev_dbg(dev, "irq events: %08x\n", events);
3818 		if (!(events & np->irqmask))
3819 			break;
3820 
3821 		/* check tx in case we reached max loop limit in tx isr */
3822 		spin_lock_irqsave(&np->lock, flags);
3823 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3824 		spin_unlock_irqrestore(&np->lock, flags);
3825 
3826 		if (events & NVREG_IRQ_LINK) {
3827 			spin_lock_irqsave(&np->lock, flags);
3828 			nv_link_irq(dev);
3829 			spin_unlock_irqrestore(&np->lock, flags);
3830 		}
3831 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3832 			spin_lock_irqsave(&np->lock, flags);
3833 			nv_linkchange(dev);
3834 			spin_unlock_irqrestore(&np->lock, flags);
3835 			np->link_timeout = jiffies + LINK_TIMEOUT;
3836 		}
3837 		if (events & NVREG_IRQ_RECOVER_ERROR) {
3838 			spin_lock_irqsave(&np->lock, flags);
3839 			/* disable interrupts on the nic */
3840 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3841 			pci_push(base);
3842 
3843 			if (!np->in_shutdown) {
3844 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3845 				np->recover_error = 1;
3846 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3847 			}
3848 			spin_unlock_irqrestore(&np->lock, flags);
3849 			break;
3850 		}
3851 		if (unlikely(i > max_interrupt_work)) {
3852 			spin_lock_irqsave(&np->lock, flags);
3853 			/* disable interrupts on the nic */
3854 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3855 			pci_push(base);
3856 
3857 			if (!np->in_shutdown) {
3858 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3859 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3860 			}
3861 			spin_unlock_irqrestore(&np->lock, flags);
3862 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3863 				   __func__, i);
3864 			break;
3865 		}
3866 
3867 	}
3868 
3869 	return IRQ_RETVAL(i);
3870 }
3871 
3872 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3873 {
3874 	struct net_device *dev = (struct net_device *) data;
3875 	struct fe_priv *np = netdev_priv(dev);
3876 	u8 __iomem *base = get_hwbase(dev);
3877 	u32 events;
3878 
3879 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3880 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3881 		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3882 	} else {
3883 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3884 		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3885 	}
3886 	pci_push(base);
3887 	if (!(events & NVREG_IRQ_TIMER))
3888 		return IRQ_RETVAL(0);
3889 
3890 	nv_msi_workaround(np);
3891 
3892 	spin_lock(&np->lock);
3893 	np->intr_test = 1;
3894 	spin_unlock(&np->lock);
3895 
3896 	return IRQ_RETVAL(1);
3897 }
3898 
3899 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3900 {
3901 	u8 __iomem *base = get_hwbase(dev);
3902 	int i;
3903 	u32 msixmap = 0;
3904 
3905 	/* Each interrupt bit can be mapped to an MSI-X vector (4 bits each).
3906 	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3907 	 * the remaining 8 interrupts.
3908 	 */
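	/* For example, mapping interrupt bit 3 to vector 2 sets nibble 3
	 * of MSIXMap0: msixmap |= 2 << (3 << 2), i.e. 0x2000.
	 */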
3909 	for (i = 0; i < 8; i++) {
3910 		if ((irqmask >> i) & 0x1)
3911 			msixmap |= vector << (i << 2);
3912 	}
3913 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3914 
3915 	msixmap = 0;
3916 	for (i = 0; i < 8; i++) {
3917 		if ((irqmask >> (i + 8)) & 0x1)
3918 			msixmap |= vector << (i << 2);
3919 	}
3920 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3921 }
3922 
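/* Request irq(s) with a fallback chain: MSI-X first (split rx/tx/other
 * vectors in throughput mode, otherwise a single shared vector), then
 * MSI, then the legacy INTx line. Returns 0 on success and 1 on error.
 */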
3923 static int nv_request_irq(struct net_device *dev, int intr_test)
3924 {
3925 	struct fe_priv *np = get_nvpriv(dev);
3926 	u8 __iomem *base = get_hwbase(dev);
3927 	int ret;
3928 	int i;
3929 	irqreturn_t (*handler)(int foo, void *data);
3930 
3931 	if (intr_test) {
3932 		handler = nv_nic_irq_test;
3933 	} else {
3934 		if (nv_optimized(np))
3935 			handler = nv_nic_irq_optimized;
3936 		else
3937 			handler = nv_nic_irq;
3938 	}
3939 
3940 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
3941 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3942 			np->msi_x_entry[i].entry = i;
3943 		ret = pci_enable_msix_range(np->pci_dev,
3944 					    np->msi_x_entry,
3945 					    np->msi_flags & NV_MSI_X_VECTORS_MASK,
3946 					    np->msi_flags & NV_MSI_X_VECTORS_MASK);
3947 		if (ret > 0) {
3948 			np->msi_flags |= NV_MSI_X_ENABLED;
3949 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3950 				/* Request irq for rx handling */
3951 				sprintf(np->name_rx, "%s-rx", dev->name);
3952 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3953 						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3954 				if (ret) {
3955 					netdev_info(dev,
3956 						    "request_irq failed for rx %d\n",
3957 						    ret);
3958 					pci_disable_msix(np->pci_dev);
3959 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3960 					goto out_err;
3961 				}
3962 				/* Request irq for tx handling */
3963 				sprintf(np->name_tx, "%s-tx", dev->name);
3964 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3965 						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3966 				if (ret) {
3967 					netdev_info(dev,
3968 						    "request_irq failed for tx %d\n",
3969 						    ret);
3970 					pci_disable_msix(np->pci_dev);
3971 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3972 					goto out_free_rx;
3973 				}
3974 				/* Request irq for link and timer handling */
3975 				sprintf(np->name_other, "%s-other", dev->name);
3976 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3977 						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3978 				if (ret) {
3979 					netdev_info(dev,
3980 						    "request_irq failed for link %d\n",
3981 						    ret);
3982 					pci_disable_msix(np->pci_dev);
3983 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3984 					goto out_free_tx;
3985 				}
3986 				/* map interrupts to their respective vector */
3987 				writel(0, base + NvRegMSIXMap0);
3988 				writel(0, base + NvRegMSIXMap1);
3989 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3990 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3991 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3992 			} else {
3993 				/* Request irq for all interrupts */
3994 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
3995 						  handler, IRQF_SHARED, dev->name, dev);
3996 				if (ret) {
3997 					netdev_info(dev,
3998 						    "request_irq failed %d\n",
3999 						    ret);
4000 					pci_disable_msix(np->pci_dev);
4001 					np->msi_flags &= ~NV_MSI_X_ENABLED;
4002 					goto out_err;
4003 				}
4004 
4005 				/* map interrupts to vector 0 */
4006 				writel(0, base + NvRegMSIXMap0);
4007 				writel(0, base + NvRegMSIXMap1);
4008 			}
4009 			netdev_info(dev, "MSI-X enabled\n");
4010 			return 0;
4011 		}
4012 	}
4013 	if (np->msi_flags & NV_MSI_CAPABLE) {
4014 		ret = pci_enable_msi(np->pci_dev);
4015 		if (ret == 0) {
4016 			np->msi_flags |= NV_MSI_ENABLED;
4017 			ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4018 			if (ret) {
4019 				netdev_info(dev, "request_irq failed %d\n",
4020 					    ret);
4021 				pci_disable_msi(np->pci_dev);
4022 				np->msi_flags &= ~NV_MSI_ENABLED;
4023 				goto out_err;
4024 			}
4025 
4026 			/* map interrupts to vector 0 */
4027 			writel(0, base + NvRegMSIMap0);
4028 			writel(0, base + NvRegMSIMap1);
4029 			/* enable msi vector 0 */
4030 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4031 			netdev_info(dev, "MSI enabled\n");
4032 			return 0;
4033 		}
4034 	}
4035 
4036 	if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4037 		goto out_err;
4038 
4039 	return 0;
4040 out_free_tx:
4041 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4042 out_free_rx:
4043 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4044 out_err:
4045 	return 1;
4046 }
4047 
4048 static void nv_free_irq(struct net_device *dev)
4049 {
4050 	struct fe_priv *np = get_nvpriv(dev);
4051 	int i;
4052 
4053 	if (np->msi_flags & NV_MSI_X_ENABLED) {
4054 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4055 			free_irq(np->msi_x_entry[i].vector, dev);
4056 		pci_disable_msix(np->pci_dev);
4057 		np->msi_flags &= ~NV_MSI_X_ENABLED;
4058 	} else {
4059 		free_irq(np->pci_dev->irq, dev);
4060 		if (np->msi_flags & NV_MSI_ENABLED) {
4061 			pci_disable_msi(np->pci_dev);
4062 			np->msi_flags &= ~NV_MSI_ENABLED;
4063 		}
4064 	}
4065 }
4066 
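/* Timer callback that stands in for the irq handler: mask and quiesce
 * the affected irq line(s), perform a full reinit if a recoverable
 * error was flagged, re-enable the hw interrupt mask and then run the
 * deferred irq handler(s) by hand.
 */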
4067 static void nv_do_nic_poll(struct timer_list *t)
4068 {
4069 	struct fe_priv *np = from_timer(np, t, nic_poll);
4070 	struct net_device *dev = np->dev;
4071 	u8 __iomem *base = get_hwbase(dev);
4072 	u32 mask = 0;
4073 	unsigned long flags;
4074 	unsigned int irq = 0;
4075 
4076 	/*
4077 	 * First disable the irq(s), then re-enable interrupts on the nic.
4078 	 * We have to do this before calling nv_nic_irq because that may
4079 	 * decide to do otherwise.
4080 	 */
4081 
4082 	if (!using_multi_irqs(dev)) {
4083 		if (np->msi_flags & NV_MSI_X_ENABLED)
4084 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
4085 		else
4086 			irq = np->pci_dev->irq;
4087 		mask = np->irqmask;
4088 	} else {
4089 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4090 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
4091 			mask |= NVREG_IRQ_RX_ALL;
4092 		}
4093 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4094 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
4095 			mask |= NVREG_IRQ_TX_ALL;
4096 		}
4097 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4098 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
4099 			mask |= NVREG_IRQ_OTHER;
4100 		}
4101 	}
4102 
4103 	disable_irq_nosync_lockdep_irqsave(irq, &flags);
4104 	synchronize_irq(irq);
4105 
4106 	if (np->recover_error) {
4107 		np->recover_error = 0;
4108 		netdev_info(dev, "MAC in recoverable error state\n");
4109 		if (netif_running(dev)) {
4110 			netif_tx_lock_bh(dev);
4111 			netif_addr_lock(dev);
4112 			spin_lock(&np->lock);
4113 			/* stop engines */
4114 			nv_stop_rxtx(dev);
4115 			if (np->driver_data & DEV_HAS_POWER_CNTRL)
4116 				nv_mac_reset(dev);
4117 			nv_txrx_reset(dev);
4118 			/* drain rx and tx queues */
4119 			nv_drain_rxtx(dev);
4120 			/* reinit driver view of the queues */
4121 			set_bufsize(dev);
4122 			if (nv_init_ring(dev)) {
4123 				if (!np->in_shutdown)
4124 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4125 			}
4126 			/* reinit nic view of the queues */
4127 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4128 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4129 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4130 				base + NvRegRingSizes);
4131 			pci_push(base);
4132 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4133 			pci_push(base);
4134 			/* clear interrupts */
4135 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
4136 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4137 			else
4138 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4139 
4140 			/* restart rx and tx engines */
4141 			nv_start_rxtx(dev);
4142 			spin_unlock(&np->lock);
4143 			netif_addr_unlock(dev);
4144 			netif_tx_unlock_bh(dev);
4145 		}
4146 	}
4147 
4148 	writel(mask, base + NvRegIrqMask);
4149 	pci_push(base);
4150 
4151 	if (!using_multi_irqs(dev)) {
4152 		np->nic_poll_irq = 0;
4153 		if (nv_optimized(np))
4154 			nv_nic_irq_optimized(0, dev);
4155 		else
4156 			nv_nic_irq(0, dev);
4157 	} else {
4158 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4159 			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4160 			nv_nic_irq_rx(0, dev);
4161 		}
4162 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4163 			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4164 			nv_nic_irq_tx(0, dev);
4165 		}
4166 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4167 			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4168 			nv_nic_irq_other(0, dev);
4169 		}
4170 	}
4171 
4172 	enable_irq_lockdep_irqrestore(irq, &flags);
4173 }
4174 
4175 #ifdef CONFIG_NET_POLL_CONTROLLER
4176 static void nv_poll_controller(struct net_device *dev)
4177 {
4178 	struct fe_priv *np = netdev_priv(dev);
4179 
4180 	nv_do_nic_poll(&np->nic_poll);
4181 }
4182 #endif
4183 
4184 static void nv_do_stats_poll(struct timer_list *t)
4185 	__acquires(&netdev_priv(dev)->hwstats_lock)
4186 	__releases(&netdev_priv(dev)->hwstats_lock)
4187 {
4188 	struct fe_priv *np = from_timer(np, t, stats_poll);
4189 	struct net_device *dev = np->dev;
4190 
4191 	/* If the lock is currently held, the stats are being refreshed
4192 	 * and are hence fresh enough */
4193 	if (spin_trylock(&np->hwstats_lock)) {
4194 		nv_update_stats(dev);
4195 		spin_unlock(&np->hwstats_lock);
4196 	}
4197 
4198 	if (!np->in_shutdown)
4199 		mod_timer(&np->stats_poll,
4200 			round_jiffies(jiffies + STATS_INTERVAL));
4201 }
4202 
4203 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4204 {
4205 	struct fe_priv *np = netdev_priv(dev);
4206 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
4207 	strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4208 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4209 }
4210 
4211 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4212 {
4213 	struct fe_priv *np = netdev_priv(dev);
4214 	wolinfo->supported = WAKE_MAGIC;
4215 
4216 	spin_lock_irq(&np->lock);
4217 	if (np->wolenabled)
4218 		wolinfo->wolopts = WAKE_MAGIC;
4219 	spin_unlock_irq(&np->lock);
4220 }
4221 
4222 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4223 {
4224 	struct fe_priv *np = netdev_priv(dev);
4225 	u8 __iomem *base = get_hwbase(dev);
4226 	u32 flags = 0;
4227 
4228 	if (wolinfo->wolopts == 0) {
4229 		np->wolenabled = 0;
4230 	} else if (wolinfo->wolopts & WAKE_MAGIC) {
4231 		np->wolenabled = 1;
4232 		flags = NVREG_WAKEUPFLAGS_ENABLE;
4233 	}
4234 	if (netif_running(dev)) {
4235 		spin_lock_irq(&np->lock);
4236 		writel(flags, base + NvRegWakeUpFlags);
4237 		spin_unlock_irq(&np->lock);
4238 	}
4239 	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4240 	return 0;
4241 }
4242 
4243 static int nv_get_link_ksettings(struct net_device *dev,
4244 				 struct ethtool_link_ksettings *cmd)
4245 {
4246 	struct fe_priv *np = netdev_priv(dev);
4247 	u32 speed, supported, advertising;
4248 	int adv;
4249 
4250 	spin_lock_irq(&np->lock);
4251 	cmd->base.port = PORT_MII;
4252 	if (!netif_running(dev)) {
4253 		/* We do not track link speed / duplex setting if the
4254 		 * interface is disabled. Force a link check */
4255 		if (nv_update_linkspeed(dev)) {
4256 			netif_carrier_on(dev);
4257 		} else {
4258 			netif_carrier_off(dev);
4259 		}
4260 	}
4261 
4262 	if (netif_carrier_ok(dev)) {
4263 		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4264 		case NVREG_LINKSPEED_10:
4265 			speed = SPEED_10;
4266 			break;
4267 		case NVREG_LINKSPEED_100:
4268 			speed = SPEED_100;
4269 			break;
4270 		case NVREG_LINKSPEED_1000:
4271 			speed = SPEED_1000;
4272 			break;
4273 		default:
4274 			speed = -1;
4275 			break;
4276 		}
4277 		cmd->base.duplex = DUPLEX_HALF;
4278 		if (np->duplex)
4279 			cmd->base.duplex = DUPLEX_FULL;
4280 	} else {
4281 		speed = SPEED_UNKNOWN;
4282 		cmd->base.duplex = DUPLEX_UNKNOWN;
4283 	}
4284 	cmd->base.speed = speed;
4285 	cmd->base.autoneg = np->autoneg;
4286 
4287 	advertising = ADVERTISED_MII;
4288 	if (np->autoneg) {
4289 		advertising |= ADVERTISED_Autoneg;
4290 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4291 		if (adv & ADVERTISE_10HALF)
4292 			advertising |= ADVERTISED_10baseT_Half;
4293 		if (adv & ADVERTISE_10FULL)
4294 			advertising |= ADVERTISED_10baseT_Full;
4295 		if (adv & ADVERTISE_100HALF)
4296 			advertising |= ADVERTISED_100baseT_Half;
4297 		if (adv & ADVERTISE_100FULL)
4298 			advertising |= ADVERTISED_100baseT_Full;
4299 		if (np->gigabit == PHY_GIGABIT) {
4300 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4301 			if (adv & ADVERTISE_1000FULL)
4302 				advertising |= ADVERTISED_1000baseT_Full;
4303 		}
4304 	}
4305 	supported = (SUPPORTED_Autoneg |
4306 		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4307 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4308 		SUPPORTED_MII);
4309 	if (np->gigabit == PHY_GIGABIT)
4310 		supported |= SUPPORTED_1000baseT_Full;
4311 
4312 	cmd->base.phy_address = np->phyaddr;
4313 
4314 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4315 						supported);
4316 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4317 						advertising);
4318 
4319 	/* ignore maxtxpkt, maxrxpkt for now */
4320 	spin_unlock_irq(&np->lock);
4321 	return 0;
4322 }
4323 
4324 static int nv_set_link_ksettings(struct net_device *dev,
4325 				 const struct ethtool_link_ksettings *cmd)
4326 {
4327 	struct fe_priv *np = netdev_priv(dev);
4328 	u32 speed = cmd->base.speed;
4329 	u32 advertising;
4330 
4331 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
4332 						cmd->link_modes.advertising);
4333 
4334 	if (cmd->base.port != PORT_MII)
4335 		return -EINVAL;
4336 	if (cmd->base.phy_address != np->phyaddr) {
4337 		/* TODO: support switching between multiple phys. Should be
4338 		 * trivial, but not enabled due to lack of test hardware. */
4339 		return -EINVAL;
4340 	}
4341 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
4342 		u32 mask;
4343 
4344 		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4345 			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4346 		if (np->gigabit == PHY_GIGABIT)
4347 			mask |= ADVERTISED_1000baseT_Full;
4348 
4349 		if ((advertising & mask) == 0)
4350 			return -EINVAL;
4351 
4352 	} else if (cmd->base.autoneg == AUTONEG_DISABLE) {
4353 		/* Note: with autonegotiation disabled, forcing speed 1000 is
4354 		 * intentionally forbidden - no one should need that. */
4355 
4356 		if (speed != SPEED_10 && speed != SPEED_100)
4357 			return -EINVAL;
4358 		if (cmd->base.duplex != DUPLEX_HALF &&
4359 		    cmd->base.duplex != DUPLEX_FULL)
4360 			return -EINVAL;
4361 	} else {
4362 		return -EINVAL;
4363 	}
4364 
4365 	netif_carrier_off(dev);
4366 	if (netif_running(dev)) {
4367 		unsigned long flags;
4368 
4369 		nv_disable_irq(dev);
4370 		netif_tx_lock_bh(dev);
4371 		netif_addr_lock(dev);
4372 		/* with plain spinlock lockdep complains */
4373 		spin_lock_irqsave(&np->lock, flags);
4374 		/* stop engines */
4375 		/* FIXME:
4376 		 * this can take some time, and interrupts are disabled
4377 		 * due to spin_lock_irqsave, but let's hope no daemon
4378 		 * is going to change the settings very often...
4379 		 * Worst case:
4380 		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4381 		 * + some minor delays, which adds up to approximately a second
4382 		 */
4383 		nv_stop_rxtx(dev);
4384 		spin_unlock_irqrestore(&np->lock, flags);
4385 		netif_addr_unlock(dev);
4386 		netif_tx_unlock_bh(dev);
4387 	}
4388 
4389 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
4390 		int adv, bmcr;
4391 
4392 		np->autoneg = 1;
4393 
4394 		/* advertise only what has been requested */
4395 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4396 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4397 		if (advertising & ADVERTISED_10baseT_Half)
4398 			adv |= ADVERTISE_10HALF;
4399 		if (advertising & ADVERTISED_10baseT_Full)
4400 			adv |= ADVERTISE_10FULL;
4401 		if (advertising & ADVERTISED_100baseT_Half)
4402 			adv |= ADVERTISE_100HALF;
4403 		if (advertising & ADVERTISED_100baseT_Full)
4404 			adv |= ADVERTISE_100FULL;
4405 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
4406 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4407 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4408 			adv |=  ADVERTISE_PAUSE_ASYM;
4409 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4410 
4411 		if (np->gigabit == PHY_GIGABIT) {
4412 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4413 			adv &= ~ADVERTISE_1000FULL;
4414 			if (advertising & ADVERTISED_1000baseT_Full)
4415 				adv |= ADVERTISE_1000FULL;
4416 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4417 		}
4418 
4419 		if (netif_running(dev))
4420 			netdev_info(dev, "link down\n");
4421 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4422 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4423 			bmcr |= BMCR_ANENABLE;
4424 			/* reset the phy in order for settings to stick,
4425 			 * and cause autoneg to start */
4426 			if (phy_reset(dev, bmcr)) {
4427 				netdev_info(dev, "phy reset failed\n");
4428 				return -EINVAL;
4429 			}
4430 		} else {
4431 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4432 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4433 		}
4434 	} else {
4435 		int adv, bmcr;
4436 
4437 		np->autoneg = 0;
4438 
4439 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4440 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4441 		if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
4442 			adv |= ADVERTISE_10HALF;
4443 		if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
4444 			adv |= ADVERTISE_10FULL;
4445 		if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
4446 			adv |= ADVERTISE_100HALF;
4447 		if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
4448 			adv |= ADVERTISE_100FULL;
4449 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4450 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4451 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4452 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4453 		}
4454 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4455 			adv |=  ADVERTISE_PAUSE_ASYM;
4456 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4457 		}
4458 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4459 		np->fixed_mode = adv;
4460 
4461 		if (np->gigabit == PHY_GIGABIT) {
4462 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4463 			adv &= ~ADVERTISE_1000FULL;
4464 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4465 		}
4466 
4467 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4468 		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4469 		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4470 			bmcr |= BMCR_FULLDPLX;
4471 		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4472 			bmcr |= BMCR_SPEED100;
4473 		if (np->phy_oui == PHY_OUI_MARVELL) {
4474 			/* reset the phy in order for forced mode settings to stick */
4475 			if (phy_reset(dev, bmcr)) {
4476 				netdev_info(dev, "phy reset failed\n");
4477 				return -EINVAL;
4478 			}
4479 		} else {
4480 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4481 			if (netif_running(dev)) {
4482 				/* Wait a bit and then reconfigure the nic. */
4483 				udelay(10);
4484 				nv_linkchange(dev);
4485 			}
4486 		}
4487 	}
4488 
4489 	if (netif_running(dev)) {
4490 		nv_start_rxtx(dev);
4491 		nv_enable_irq(dev);
4492 	}
4493 
4494 	return 0;
4495 }
4496 
4497 #define FORCEDETH_REGS_VER	1
4498 
4499 static int nv_get_regs_len(struct net_device *dev)
4500 {
4501 	struct fe_priv *np = netdev_priv(dev);
4502 	return np->register_size;
4503 }
4504 
4505 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4506 {
4507 	struct fe_priv *np = netdev_priv(dev);
4508 	u8 __iomem *base = get_hwbase(dev);
4509 	u32 *rbuf = buf;
4510 	int i;
4511 
4512 	regs->version = FORCEDETH_REGS_VER;
4513 	spin_lock_irq(&np->lock);
4514 	for (i = 0; i < np->register_size/sizeof(u32); i++)
4515 		rbuf[i] = readl(base + i*sizeof(u32));
4516 	spin_unlock_irq(&np->lock);
4517 }
4518 
4519 static int nv_nway_reset(struct net_device *dev)
4520 {
4521 	struct fe_priv *np = netdev_priv(dev);
4522 	int ret;
4523 
4524 	if (np->autoneg) {
4525 		int bmcr;
4526 
4527 		netif_carrier_off(dev);
4528 		if (netif_running(dev)) {
4529 			nv_disable_irq(dev);
4530 			netif_tx_lock_bh(dev);
4531 			netif_addr_lock(dev);
4532 			spin_lock(&np->lock);
4533 			/* stop engines */
4534 			nv_stop_rxtx(dev);
4535 			spin_unlock(&np->lock);
4536 			netif_addr_unlock(dev);
4537 			netif_tx_unlock_bh(dev);
4538 			netdev_info(dev, "link down\n");
4539 		}
4540 
4541 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4542 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4543 			bmcr |= BMCR_ANENABLE;
4544 			/* reset the phy in order for settings to stick */
4545 			if (phy_reset(dev, bmcr)) {
4546 				netdev_info(dev, "phy reset failed\n");
4547 				return -EINVAL;
4548 			}
4549 		} else {
4550 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4551 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4552 		}
4553 
4554 		if (netif_running(dev)) {
4555 			nv_start_rxtx(dev);
4556 			nv_enable_irq(dev);
4557 		}
4558 		ret = 0;
4559 	} else {
4560 		ret = -EINVAL;
4561 	}
4562 
4563 	return ret;
4564 }
4565 
4566 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4567 {
4568 	struct fe_priv *np = netdev_priv(dev);
4569 
4570 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4571 	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4572 
4573 	ring->rx_pending = np->rx_ring_size;
4574 	ring->tx_pending = np->tx_ring_size;
4575 }
4576 
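/* Resize the rx/tx rings: the new descriptor ring and skb maps are
 * allocated up front, and only when all allocations succeed are the
 * engines stopped and the old rings freed, so a failed resize leaves
 * the device running on its old rings.
 */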
4577 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4578 {
4579 	struct fe_priv *np = netdev_priv(dev);
4580 	u8 __iomem *base = get_hwbase(dev);
4581 	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4582 	dma_addr_t ring_addr;
4583 
4584 	if (ring->rx_pending < RX_RING_MIN ||
4585 	    ring->tx_pending < TX_RING_MIN ||
4586 	    ring->rx_mini_pending != 0 ||
4587 	    ring->rx_jumbo_pending != 0 ||
4588 	    (np->desc_ver == DESC_VER_1 &&
4589 	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4590 	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4591 	    (np->desc_ver != DESC_VER_1 &&
4592 	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4593 	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4594 		return -EINVAL;
4595 	}
4596 
4597 	/* allocate new rings */
4598 	if (!nv_optimized(np)) {
4599 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4600 					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4601 					    &ring_addr);
4602 	} else {
4603 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4604 					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4605 					    &ring_addr);
4606 	}
4607 	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4608 	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4609 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4610 		/* fall back to old rings */
4611 		if (!nv_optimized(np)) {
4612 			if (rxtx_ring)
4613 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4614 						    rxtx_ring, ring_addr);
4615 		} else {
4616 			if (rxtx_ring)
4617 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4618 						    rxtx_ring, ring_addr);
4619 		}
4620 
4621 		kfree(rx_skbuff);
4622 		kfree(tx_skbuff);
4623 		goto exit;
4624 	}
4625 
4626 	if (netif_running(dev)) {
4627 		nv_disable_irq(dev);
4628 		nv_napi_disable(dev);
4629 		netif_tx_lock_bh(dev);
4630 		netif_addr_lock(dev);
4631 		spin_lock(&np->lock);
4632 		/* stop engines */
4633 		nv_stop_rxtx(dev);
4634 		nv_txrx_reset(dev);
4635 		/* drain queues */
4636 		nv_drain_rxtx(dev);
4637 		/* delete queues */
4638 		free_rings(dev);
4639 	}
4640 
4641 	/* set new values */
4642 	np->rx_ring_size = ring->rx_pending;
4643 	np->tx_ring_size = ring->tx_pending;
4644 
4645 	if (!nv_optimized(np)) {
4646 		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4647 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4648 	} else {
4649 		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4650 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4651 	}
4652 	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4653 	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4654 	np->ring_addr = ring_addr;
4655 
4656 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4657 	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4658 
4659 	if (netif_running(dev)) {
4660 		/* reinit driver view of the queues */
4661 		set_bufsize(dev);
4662 		if (nv_init_ring(dev)) {
4663 			if (!np->in_shutdown)
4664 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4665 		}
4666 
4667 		/* reinit nic view of the queues */
4668 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4669 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4670 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4671 			base + NvRegRingSizes);
4672 		pci_push(base);
4673 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4674 		pci_push(base);
4675 
4676 		/* restart engines */
4677 		nv_start_rxtx(dev);
4678 		spin_unlock(&np->lock);
4679 		netif_addr_unlock(dev);
4680 		netif_tx_unlock_bh(dev);
4681 		nv_napi_enable(dev);
4682 		nv_enable_irq(dev);
4683 	}
4684 	return 0;
4685 exit:
4686 	return -ENOMEM;
4687 }
4688 
4689 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4690 {
4691 	struct fe_priv *np = netdev_priv(dev);
4692 
4693 	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4694 	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4695 	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4696 }
4697 
4698 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4699 {
4700 	struct fe_priv *np = netdev_priv(dev);
4701 	int adv, bmcr;
4702 
4703 	if ((!np->autoneg && np->duplex == 0) ||
4704 	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4705 		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
4706 		return -EINVAL;
4707 	}
4708 	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4709 		netdev_info(dev, "hardware does not support tx pause frames\n");
4710 		return -EINVAL;
4711 	}
4712 
4713 	netif_carrier_off(dev);
4714 	if (netif_running(dev)) {
4715 		nv_disable_irq(dev);
4716 		netif_tx_lock_bh(dev);
4717 		netif_addr_lock(dev);
4718 		spin_lock(&np->lock);
4719 		/* stop engines */
4720 		nv_stop_rxtx(dev);
4721 		spin_unlock(&np->lock);
4722 		netif_addr_unlock(dev);
4723 		netif_tx_unlock_bh(dev);
4724 	}
4725 
4726 	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4727 	if (pause->rx_pause)
4728 		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4729 	if (pause->tx_pause)
4730 		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4731 
4732 	if (np->autoneg && pause->autoneg) {
4733 		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4734 
4735 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4736 		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4737 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4738 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4739 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4740 			adv |=  ADVERTISE_PAUSE_ASYM;
4741 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4742 
4743 		if (netif_running(dev))
4744 			netdev_info(dev, "link down\n");
4745 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4746 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4747 		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4748 	} else {
4749 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4750 		if (pause->rx_pause)
4751 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4752 		if (pause->tx_pause)
4753 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4754 
4755 		if (!netif_running(dev))
4756 			nv_update_linkspeed(dev);
4757 		else
4758 			nv_update_pause(dev, np->pause_flags);
4759 	}
4760 
4761 	if (netif_running(dev)) {
4762 		nv_start_rxtx(dev);
4763 		nv_enable_irq(dev);
4764 	}
4765 	return 0;
4766 }
4767 
4768 static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4769 {
4770 	struct fe_priv *np = netdev_priv(dev);
4771 	unsigned long flags;
4772 	u32 miicontrol;
4773 	int err, retval = 0;
4774 
4775 	spin_lock_irqsave(&np->lock, flags);
4776 	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4777 	if (features & NETIF_F_LOOPBACK) {
4778 		if (miicontrol & BMCR_LOOPBACK) {
4779 			spin_unlock_irqrestore(&np->lock, flags);
4780 			netdev_info(dev, "Loopback already enabled\n");
4781 			return 0;
4782 		}
4783 		nv_disable_irq(dev);
4784 		/* Turn on loopback mode */
4785 		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4786 		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4787 		if (err) {
4788 			retval = PHY_ERROR;
4789 			spin_unlock_irqrestore(&np->lock, flags);
4790 			phy_init(dev);
4791 		} else {
4792 			if (netif_running(dev)) {
4793 				/* Force 1000 Mbps full-duplex */
4794 				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4795 									 1);
4796 				/* Force link up */
4797 				netif_carrier_on(dev);
4798 			}
4799 			spin_unlock_irqrestore(&np->lock, flags);
4800 			netdev_info(dev,
4801 				"Internal PHY loopback mode enabled.\n");
4802 		}
4803 	} else {
4804 		if (!(miicontrol & BMCR_LOOPBACK)) {
4805 			spin_unlock_irqrestore(&np->lock, flags);
4806 			netdev_info(dev, "Loopback already disabled\n");
4807 			return 0;
4808 		}
4809 		nv_disable_irq(dev);
4810 		/* Turn off loopback */
4811 		spin_unlock_irqrestore(&np->lock, flags);
4812 		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4813 		phy_init(dev);
4814 	}
4815 	msleep(500);
4816 	spin_lock_irqsave(&np->lock, flags);
4817 	nv_enable_irq(dev);
4818 	spin_unlock_irqrestore(&np->lock, flags);
4819 
4820 	return retval;
4821 }
4822 
4823 static netdev_features_t nv_fix_features(struct net_device *dev,
4824 	netdev_features_t features)
4825 {
4826 	/* vlan is dependent on rx checksum offload */
4827 	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4828 		features |= NETIF_F_RXCSUM;
4829 
4830 	return features;
4831 }
4832 
4833 static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4834 {
4835 	struct fe_priv *np = get_nvpriv(dev);
4836 
4837 	spin_lock_irq(&np->lock);
4838 
4839 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
4840 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4841 	else
4842 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4843 
4844 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
4845 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4846 	else
4847 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4848 
4849 	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4850 
4851 	spin_unlock_irq(&np->lock);
4852 }
4853 
4854 static int nv_set_features(struct net_device *dev, netdev_features_t features)
4855 {
4856 	struct fe_priv *np = netdev_priv(dev);
4857 	u8 __iomem *base = get_hwbase(dev);
4858 	netdev_features_t changed = dev->features ^ features;
4859 	int retval;
4860 
4861 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4862 		retval = nv_set_loopback(dev, features);
4863 		if (retval != 0)
4864 			return retval;
4865 	}
4866 
4867 	if (changed & NETIF_F_RXCSUM) {
4868 		spin_lock_irq(&np->lock);
4869 
4870 		if (features & NETIF_F_RXCSUM)
4871 			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4872 		else
4873 			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4874 
4875 		if (netif_running(dev))
4876 			writel(np->txrxctl_bits, base + NvRegTxRxControl);
4877 
4878 		spin_unlock_irq(&np->lock);
4879 	}
4880 
4881 	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4882 		nv_vlan_mode(dev, features);
4883 
4884 	return 0;
4885 }
4886 
4887 static int nv_get_sset_count(struct net_device *dev, int sset)
4888 {
4889 	struct fe_priv *np = netdev_priv(dev);
4890 
4891 	switch (sset) {
4892 	case ETH_SS_TEST:
4893 		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4894 			return NV_TEST_COUNT_EXTENDED;
4895 		else
4896 			return NV_TEST_COUNT_BASE;
4897 	case ETH_SS_STATS:
4898 		if (np->driver_data & DEV_HAS_STATISTICS_V3)
4899 			return NV_DEV_STATISTICS_V3_COUNT;
4900 		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4901 			return NV_DEV_STATISTICS_V2_COUNT;
4902 		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4903 			return NV_DEV_STATISTICS_V1_COUNT;
4904 		else
4905 			return 0;
4906 	default:
4907 		return -EOPNOTSUPP;
4908 	}
4909 }
4910 
4911 static void nv_get_ethtool_stats(struct net_device *dev,
4912 				 struct ethtool_stats *estats, u64 *buffer)
4913 	__acquires(&netdev_priv(dev)->hwstats_lock)
4914 	__releases(&netdev_priv(dev)->hwstats_lock)
4915 {
4916 	struct fe_priv *np = netdev_priv(dev);
4917 
4918 	spin_lock_bh(&np->hwstats_lock);
4919 	nv_update_stats(dev);
4920 	memcpy(buffer, &np->estats,
4921 	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4922 	spin_unlock_bh(&np->hwstats_lock);
4923 }
4924 
4925 static int nv_link_test(struct net_device *dev)
4926 {
4927 	struct fe_priv *np = netdev_priv(dev);
4928 	int mii_status;
4929 
4930 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4931 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4932 
4933 	/* check phy link status */
4934 	if (!(mii_status & BMSR_LSTATUS))
4935 		return 0;
4936 	else
4937 		return 1;
4938 }
4939 
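/* Walk nv_registers_test, xor-toggling the maskable bits of each
 * register and checking that the toggled value reads back, restoring
 * the original contents afterwards. Returns 1 on pass, 0 on failure.
 */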
4940 static int nv_register_test(struct net_device *dev)
4941 {
4942 	u8 __iomem *base = get_hwbase(dev);
4943 	int i = 0;
4944 	u32 orig_read, new_read;
4945 
4946 	do {
4947 		orig_read = readl(base + nv_registers_test[i].reg);
4948 
4949 		/* xor with mask to toggle bits */
4950 		orig_read ^= nv_registers_test[i].mask;
4951 
4952 		writel(orig_read, base + nv_registers_test[i].reg);
4953 
4954 		new_read = readl(base + nv_registers_test[i].reg);
4955 
4956 		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4957 			return 0;
4958 
4959 		/* restore original value */
4960 		orig_read ^= nv_registers_test[i].mask;
4961 		writel(orig_read, base + nv_registers_test[i].reg);
4962 
4963 	} while (nv_registers_test[++i].reg != 0);
4964 
4965 	return 1;
4966 }
4967 
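/* Arm the timer interrupt on a dedicated test vector and verify that
 * nv_nic_irq_test() ran and set np->intr_test within 100 ms. Returns
 * 1 on pass, 2 if no interrupt arrived, 0 if irq setup failed.
 */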
4968 static int nv_interrupt_test(struct net_device *dev)
4969 {
4970 	struct fe_priv *np = netdev_priv(dev);
4971 	u8 __iomem *base = get_hwbase(dev);
4972 	int ret = 1;
4973 	int testcnt;
4974 	u32 save_msi_flags, save_poll_interval = 0;
4975 
4976 	if (netif_running(dev)) {
4977 		/* free current irq */
4978 		nv_free_irq(dev);
4979 		save_poll_interval = readl(base+NvRegPollingInterval);
4980 	}
4981 
4982 	/* flag to test interrupt handler */
4983 	np->intr_test = 0;
4984 
4985 	/* setup test irq */
4986 	save_msi_flags = np->msi_flags;
4987 	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4988 	np->msi_flags |= 0x001; /* setup 1 vector */
4989 	if (nv_request_irq(dev, 1))
4990 		return 0;
4991 
4992 	/* setup timer interrupt */
4993 	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4994 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4995 
4996 	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4997 
4998 	/* wait for at least one interrupt */
4999 	msleep(100);
5000 
5001 	spin_lock_irq(&np->lock);
5002 
5003 	/* flag should be set within ISR */
5004 	testcnt = np->intr_test;
5005 	if (!testcnt)
5006 		ret = 2;
5007 
5008 	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5009 	if (!(np->msi_flags & NV_MSI_X_ENABLED))
5010 		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5011 	else
5012 		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5013 
5014 	spin_unlock_irq(&np->lock);
5015 
5016 	nv_free_irq(dev);
5017 
5018 	np->msi_flags = save_msi_flags;
5019 
5020 	if (netif_running(dev)) {
5021 		writel(save_poll_interval, base + NvRegPollingInterval);
5022 		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5023 		/* restore original irq */
5024 		if (nv_request_irq(dev, 0))
5025 			return 0;
5026 	}
5027 
5028 	return ret;
5029 }
5030 
5031 static int nv_loopback_test(struct net_device *dev)
5032 {
5033 	struct fe_priv *np = netdev_priv(dev);
5034 	u8 __iomem *base = get_hwbase(dev);
5035 	struct sk_buff *tx_skb, *rx_skb;
5036 	dma_addr_t test_dma_addr;
5037 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5038 	u32 flags;
5039 	int len, i, pkt_len;
5040 	u8 *pkt_data;
5041 	u32 filter_flags = 0;
5042 	u32 misc1_flags = 0;
5043 	int ret = 1;
5044 
5045 	if (netif_running(dev)) {
5046 		nv_disable_irq(dev);
5047 		filter_flags = readl(base + NvRegPacketFilterFlags);
5048 		misc1_flags = readl(base + NvRegMisc1);
5049 	} else {
5050 		nv_txrx_reset(dev);
5051 	}
5052 
5053 	/* reinit driver view of the rx queue */
5054 	set_bufsize(dev);
5055 	nv_init_ring(dev);
5056 
5057 	/* setup hardware for loopback */
5058 	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5059 	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5060 
5061 	/* reinit nic view of the rx queue */
5062 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5063 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5064 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5065 		base + NvRegRingSizes);
5066 	pci_push(base);
5067 
5068 	/* restart rx engine */
5069 	nv_start_rxtx(dev);
5070 
5071 	/* setup packet for tx */
5072 	pkt_len = ETH_DATA_LEN;
5073 	tx_skb = netdev_alloc_skb(dev, pkt_len);
5074 	if (!tx_skb) {
5075 		ret = 0;
5076 		goto out;
5077 	}
	/* the skb is the tx source buffer, so map it for device reads;
	 * the direction must match the dma_unmap_single() below
	 */
5078 	test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
5079 				       skb_tailroom(tx_skb),
5080 				       DMA_TO_DEVICE);
5081 	if (unlikely(dma_mapping_error(&np->pci_dev->dev,
5082 				       test_dma_addr))) {
5083 		dev_kfree_skb_any(tx_skb);
		ret = 0;
5084 		goto out;
5085 	}
5086 	pkt_data = skb_put(tx_skb, pkt_len);
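	/* fill with an incrementing byte pattern so corruption is visible */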
5087 	for (i = 0; i < pkt_len; i++)
5088 		pkt_data[i] = (u8)(i & 0xff);
5089 
5090 	if (!nv_optimized(np)) {
5091 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5092 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5093 	} else {
5094 		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5095 		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5096 		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5097 	}
5098 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, base + NvRegTxRxControl);
5099 	pci_push(base);
5100 
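	/* give the looped-back packet time to arrive */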
5101 	msleep(500);
5102 
5103 	/* check for rx of the packet */
5104 	if (!nv_optimized(np)) {
5105 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5106 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5107 
5108 	} else {
5109 		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5110 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5111 	}
5112 
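	/* NV_RX_AVAIL still set means the nic never wrote the descriptor */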
5113 	if (flags & NV_RX_AVAIL) {
5114 		ret = 0;
5115 	} else if (np->desc_ver == DESC_VER_1) {
5116 		if (flags & NV_RX_ERROR)
5117 			ret = 0;
5118 	} else {
5119 		if (flags & NV_RX2_ERROR)
5120 			ret = 0;
5121 	}
5122 
5123 	if (ret) {
5124 		if (len != pkt_len) {
5125 			ret = 0;
5126 		} else {
5127 			rx_skb = np->rx_skb[0].skb;
5128 			for (i = 0; i < pkt_len; i++) {
5129 				if (rx_skb->data[i] != (u8)(i & 0xff)) {
5130 					ret = 0;
5131 					break;
5132 				}
5133 			}
5134 		}
5135 	}
5136 
5137 	dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
5138 			 (skb_end_pointer(tx_skb) - tx_skb->data),
5139 			 DMA_TO_DEVICE);
5140 	dev_kfree_skb_any(tx_skb);
5141  out:
5142 	/* stop engines */
5143 	nv_stop_rxtx(dev);
5144 	nv_txrx_reset(dev);
5145 	/* drain rx queue */
5146 	nv_drain_rxtx(dev);
5147 
5148 	if (netif_running(dev)) {
5149 		writel(misc1_flags, base + NvRegMisc1);
5150 		writel(filter_flags, base + NvRegPacketFilterFlags);
5151 		nv_enable_irq(dev);
5152 	}
5153 
5154 	return ret;
5155 }
5156 
5157 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5158 {
5159 	struct fe_priv *np = netdev_priv(dev);
5160 	u8 __iomem *base = get_hwbase(dev);
5161 	int result, count;
5162 
5163 	count = nv_get_sset_count(dev, ETH_SS_TEST);
5164 	memset(buffer, 0, count * sizeof(u64));
5165 
5166 	if (!nv_link_test(dev)) {
5167 		test->flags |= ETH_TEST_FL_FAILED;
5168 		buffer[0] = 1;
5169 	}
5170 
5171 	if (test->flags & ETH_TEST_FL_OFFLINE) {
5172 		if (netif_running(dev)) {
5173 			netif_stop_queue(dev);
5174 			nv_napi_disable(dev);
5175 			netif_tx_lock_bh(dev);
5176 			netif_addr_lock(dev);
5177 			spin_lock_irq(&np->lock);
5178 			nv_disable_hw_interrupts(dev, np->irqmask);
5179 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
5180 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5181 			else
5182 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5183 			/* stop engines */
5184 			nv_stop_rxtx(dev);
5185 			nv_txrx_reset(dev);
5186 			/* drain rx queue */
5187 			nv_drain_rxtx(dev);
5188 			spin_unlock_irq(&np->lock);
5189 			netif_addr_unlock(dev);
5190 			netif_tx_unlock_bh(dev);
5191 		}
5192 
5193 		if (!nv_register_test(dev)) {
5194 			test->flags |= ETH_TEST_FL_FAILED;
5195 			buffer[1] = 1;
5196 		}
5197 
5198 		result = nv_interrupt_test(dev);
5199 		if (result != 1) {
5200 			test->flags |= ETH_TEST_FL_FAILED;
5201 			buffer[2] = 1;
5202 		}
5203 		if (result == 0) {
5204 			/* bail out */
5205 			return;
5206 		}
5207 
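		/* the loopback test is only advertised with the extended
		 * self-test set (count > NV_TEST_COUNT_BASE)
		 */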
5208 		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5209 			test->flags |= ETH_TEST_FL_FAILED;
5210 			buffer[3] = 1;
5211 		}
5212 
5213 		if (netif_running(dev)) {
5214 			/* reinit driver view of the rx queue */
5215 			set_bufsize(dev);
5216 			if (nv_init_ring(dev)) {
5217 				if (!np->in_shutdown)
5218 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5219 			}
5220 			/* reinit nic view of the rx queue */
5221 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5222 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5223 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5224 				base + NvRegRingSizes);
5225 			pci_push(base);
5226 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, base + NvRegTxRxControl);
5227 			pci_push(base);
5228 			/* restart rx engine */
5229 			nv_start_rxtx(dev);
5230 			netif_start_queue(dev);
5231 			nv_napi_enable(dev);
5232 			nv_enable_hw_interrupts(dev, np->irqmask);
5233 		}
5234 	}
5235 }
5236 
5237 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5238 {
5239 	switch (stringset) {
5240 	case ETH_SS_STATS:
5241 		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5242 		break;
5243 	case ETH_SS_TEST:
5244 		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5245 		break;
5246 	}
5247 }
5248 
5249 static const struct ethtool_ops ops = {
5250 	.get_drvinfo = nv_get_drvinfo,
5251 	.get_link = ethtool_op_get_link,
5252 	.get_wol = nv_get_wol,
5253 	.set_wol = nv_set_wol,
5254 	.get_regs_len = nv_get_regs_len,
5255 	.get_regs = nv_get_regs,
5256 	.nway_reset = nv_nway_reset,
5257 	.get_ringparam = nv_get_ringparam,
5258 	.set_ringparam = nv_set_ringparam,
5259 	.get_pauseparam = nv_get_pauseparam,
5260 	.set_pauseparam = nv_set_pauseparam,
5261 	.get_strings = nv_get_strings,
5262 	.get_ethtool_stats = nv_get_ethtool_stats,
5263 	.get_sset_count = nv_get_sset_count,
5264 	.self_test = nv_self_test,
5265 	.get_ts_info = ethtool_op_get_ts_info,
5266 	.get_link_ksettings = nv_get_link_ksettings,
5267 	.set_link_ksettings = nv_set_link_ksettings,
5268 };
5269 
5270 /* The mgmt unit and driver use a semaphore to access the phy during init */
5271 static int nv_mgmt_acquire_sema(struct net_device *dev)
5272 {
5273 	struct fe_priv *np = netdev_priv(dev);
5274 	u8 __iomem *base = get_hwbase(dev);
5275 	int i;
5276 	u32 tx_ctrl, mgmt_sema;
5277 
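	/* wait up to 10 * 500ms for the mgmt unit to release the semaphore */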
5278 	for (i = 0; i < 10; i++) {
5279 		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5280 		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5281 			break;
5282 		msleep(500);
5283 	}
5284 
5285 	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5286 		return 0;
5287 
5288 	for (i = 0; i < 2; i++) {
5289 		tx_ctrl = readl(base + NvRegTransmitterControl);
5290 		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5291 		writel(tx_ctrl, base + NvRegTransmitterControl);
5292 
5293 		/* verify that semaphore was acquired */
5294 		tx_ctrl = readl(base + NvRegTransmitterControl);
5295 		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5296 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5297 			np->mgmt_sema = 1;
5298 			return 1;
5299 		}
5300 		udelay(50);
5301 	}
5302 
5303 	return 0;
5304 }
5305 
5306 static void nv_mgmt_release_sema(struct net_device *dev)
5307 {
5308 	struct fe_priv *np = netdev_priv(dev);
5309 	u8 __iomem *base = get_hwbase(dev);
5310 	u32 tx_ctrl;
5311 
5312 	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5313 		if (np->mgmt_sema) {
5314 			tx_ctrl = readl(base + NvRegTransmitterControl);
5315 			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5316 			writel(tx_ctrl, base + NvRegTransmitterControl);
5317 		}
5318 	}
5319 }
5320 
5321 
5322 static int nv_mgmt_get_version(struct net_device *dev)
5323 {
5324 	struct fe_priv *np = netdev_priv(dev);
5325 	u8 __iomem *base = get_hwbase(dev);
5326 	u32 data_ready = readl(base + NvRegTransmitterControl);
5327 	u32 data_ready2 = 0;
5328 	unsigned long start;
5329 	int ready = 0;
5330 
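	/* version handshake: post the request, toggle DATA_START and wait
	 * up to 5 seconds for the DATA_READY bit to flip
	 */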
5331 	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5332 	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5333 	start = jiffies;
5334 	while (time_before(jiffies, start + 5*HZ)) {
5335 		data_ready2 = readl(base + NvRegTransmitterControl);
5336 		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5337 			ready = 1;
5338 			break;
5339 		}
5340 		schedule_timeout_uninterruptible(1);
5341 	}
5342 
5343 	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5344 		return 0;
5345 
5346 	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5347 
5348 	return 1;
5349 }
5350 
5351 static int nv_open(struct net_device *dev)
5352 {
5353 	struct fe_priv *np = netdev_priv(dev);
5354 	u8 __iomem *base = get_hwbase(dev);
5355 	int ret = 1;
5356 	int oom, i;
5357 	u32 low;
5358 
5359 	/* power up phy */
5360 	mii_rw(dev, np->phyaddr, MII_BMCR,
5361 	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5362 
5363 	nv_txrx_gate(dev, false);
5364 	/* erase previous misconfiguration */
5365 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
5366 		nv_mac_reset(dev);
5367 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5368 	writel(0, base + NvRegMulticastAddrB);
5369 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5370 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5371 	writel(0, base + NvRegPacketFilterFlags);
5372 
5373 	writel(0, base + NvRegTransmitterControl);
5374 	writel(0, base + NvRegReceiverControl);
5375 
5376 	writel(0, base + NvRegAdapterControl);
5377 
5378 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5379 		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5380 
5381 	/* initialize descriptor rings */
5382 	set_bufsize(dev);
5383 	oom = nv_init_ring(dev);
5384 
5385 	writel(0, base + NvRegLinkSpeed);
5386 	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5387 	nv_txrx_reset(dev);
5388 	writel(0, base + NvRegUnknownSetupReg6);
5389 
5390 	np->in_shutdown = 0;
5391 
5392 	/* give hw rings */
5393 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5394 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5395 		base + NvRegRingSizes);
5396 
5397 	writel(np->linkspeed, base + NvRegLinkSpeed);
5398 	if (np->desc_ver == DESC_VER_1)
5399 		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5400 	else
5401 		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5402 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
5403 	writel(np->vlanctl_bits, base + NvRegVlanControl);
5404 	pci_push(base);
5405 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5406 	if (reg_delay(dev, NvRegUnknownSetupReg5,
5407 		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5408 		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5409 		netdev_info(dev,
5410 			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
5411 
5412 	writel(0, base + NvRegMIIMask);
5413 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5414 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5415 
5416 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5417 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5418 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5419 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5420 
5421 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5422 
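	/* seed the slot time / backoff with a random value so multiple
	 * NICs do not back off in lockstep after collisions
	 */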
5423 	get_random_bytes(&low, sizeof(low));
5424 	low &= NVREG_SLOTTIME_MASK;
5425 	if (np->desc_ver == DESC_VER_1) {
5426 		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5427 	} else {
5428 		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5429 			/* setup legacy backoff */
5430 			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5431 		} else {
5432 			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5433 			nv_gear_backoff_reseed(dev);
5434 		}
5435 	}
5436 	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5437 	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5438 	if (poll_interval == -1) {
5439 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5440 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5441 		else
5442 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5443 	} else
5444 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5445 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5446 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5447 			base + NvRegAdapterControl);
5448 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5449 	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5450 	if (np->wolenabled)
5451 		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5452 
5453 	i = readl(base + NvRegPowerState);
5454 	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5455 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5456 
5457 	pci_push(base);
5458 	udelay(10);
5459 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5460 
5461 	nv_disable_hw_interrupts(dev, np->irqmask);
5462 	pci_push(base);
5463 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5464 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5465 	pci_push(base);
5466 
5467 	if (nv_request_irq(dev, 0))
5468 		goto out_drain;
5469 
5470 	/* ask for interrupts */
5471 	nv_enable_hw_interrupts(dev, np->irqmask);
5472 
5473 	spin_lock_irq(&np->lock);
5474 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5475 	writel(0, base + NvRegMulticastAddrB);
5476 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5477 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5478 	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5479 	/* One manual link speed update: Interrupts are enabled, future link
5480 	 * speed changes cause interrupts and are handled by nv_link_irq().
5481 	 */
	/* read once and acknowledge all MII status bits */
5484 	readl(base + NvRegMIIStatus);
5485 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5487 	/* set linkspeed to invalid value, thus force nv_update_linkspeed
5488 	 * to init hw */
5489 	np->linkspeed = 0;
5490 	ret = nv_update_linkspeed(dev);
5491 	nv_start_rxtx(dev);
5492 	netif_start_queue(dev);
5493 	nv_napi_enable(dev);
5494 
5495 	if (ret) {
5496 		netif_carrier_on(dev);
5497 	} else {
5498 		netdev_info(dev, "no link during initialization\n");
5499 		netif_carrier_off(dev);
5500 	}
5501 	if (oom)
5502 		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5503 
5504 	/* start statistics timer */
5505 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5506 		mod_timer(&np->stats_poll,
5507 			round_jiffies(jiffies + STATS_INTERVAL));
5508 
5509 	spin_unlock_irq(&np->lock);
5510 
5511 	/* If the loopback feature was set while the device was down, make sure
5512 	 * that it's set correctly now.
5513 	 */
5514 	if (dev->features & NETIF_F_LOOPBACK)
5515 		nv_set_loopback(dev, dev->features);
5516 
5517 	return 0;
5518 out_drain:
5519 	nv_drain_rxtx(dev);
5520 	return ret;
5521 }
5522 
5523 static int nv_close(struct net_device *dev)
5524 {
5525 	struct fe_priv *np = netdev_priv(dev);
5526 	u8 __iomem *base;
5527 
5528 	spin_lock_irq(&np->lock);
5529 	np->in_shutdown = 1;
5530 	spin_unlock_irq(&np->lock);
5531 	nv_napi_disable(dev);
5532 	synchronize_irq(np->pci_dev->irq);
5533 
5534 	del_timer_sync(&np->oom_kick);
5535 	del_timer_sync(&np->nic_poll);
5536 	del_timer_sync(&np->stats_poll);
5537 
5538 	netif_stop_queue(dev);
5539 	spin_lock_irq(&np->lock);
5540 	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5541 	nv_stop_rxtx(dev);
5542 	nv_txrx_reset(dev);
5543 
5544 	/* disable interrupts on the nic or we will lock up */
5545 	base = get_hwbase(dev);
5546 	nv_disable_hw_interrupts(dev, np->irqmask);
5547 	pci_push(base);
5548 
5549 	spin_unlock_irq(&np->lock);
5550 
5551 	nv_free_irq(dev);
5552 
5553 	nv_drain_rxtx(dev);
5554 
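	/* keep the receiver running if WoL is armed or the phy must stay
	 * powered; otherwise power the phy down and gate the clocks
	 */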
5555 	if (np->wolenabled || !phy_power_down) {
5556 		nv_txrx_gate(dev, false);
5557 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5558 		nv_start_rx(dev);
5559 	} else {
5560 		/* power down phy */
5561 		mii_rw(dev, np->phyaddr, MII_BMCR,
5562 		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5563 		nv_txrx_gate(dev, true);
5564 	}
5565 
5566 	/* FIXME: power down nic */
5567 
5568 	return 0;
5569 }
5570 
5571 static const struct net_device_ops nv_netdev_ops = {
5572 	.ndo_open		= nv_open,
5573 	.ndo_stop		= nv_close,
5574 	.ndo_get_stats64	= nv_get_stats64,
5575 	.ndo_start_xmit		= nv_start_xmit,
5576 	.ndo_tx_timeout		= nv_tx_timeout,
5577 	.ndo_change_mtu		= nv_change_mtu,
5578 	.ndo_fix_features	= nv_fix_features,
5579 	.ndo_set_features	= nv_set_features,
5580 	.ndo_validate_addr	= eth_validate_addr,
5581 	.ndo_set_mac_address	= nv_set_mac_address,
5582 	.ndo_set_rx_mode	= nv_set_multicast,
5583 #ifdef CONFIG_NET_POLL_CONTROLLER
5584 	.ndo_poll_controller	= nv_poll_controller,
5585 #endif
5586 };
5587 
5588 static const struct net_device_ops nv_netdev_ops_optimized = {
5589 	.ndo_open		= nv_open,
5590 	.ndo_stop		= nv_close,
5591 	.ndo_get_stats64	= nv_get_stats64,
5592 	.ndo_start_xmit		= nv_start_xmit_optimized,
5593 	.ndo_tx_timeout		= nv_tx_timeout,
5594 	.ndo_change_mtu		= nv_change_mtu,
5595 	.ndo_fix_features	= nv_fix_features,
5596 	.ndo_set_features	= nv_set_features,
5597 	.ndo_validate_addr	= eth_validate_addr,
5598 	.ndo_set_mac_address	= nv_set_mac_address,
5599 	.ndo_set_rx_mode	= nv_set_multicast,
5600 #ifdef CONFIG_NET_POLL_CONTROLLER
5601 	.ndo_poll_controller	= nv_poll_controller,
5602 #endif
5603 };
5604 
5605 static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5606 {
5607 	struct net_device *dev;
5608 	struct fe_priv *np;
5609 	unsigned long addr;
5610 	u8 __iomem *base;
5611 	int err, i;
5612 	u32 powerstate, txreg;
5613 	u32 phystate_orig = 0, phystate;
5614 	int phyinitialized = 0;
5615 	static int printed_version;
5616 
5617 	if (!printed_version++)
5618 		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5619 			FORCEDETH_VERSION);
5620 
5621 	dev = alloc_etherdev(sizeof(struct fe_priv));
5622 	err = -ENOMEM;
5623 	if (!dev)
5624 		goto out;
5625 
5626 	np = netdev_priv(dev);
5627 	np->dev = dev;
5628 	np->pci_dev = pci_dev;
5629 	spin_lock_init(&np->lock);
5630 	spin_lock_init(&np->hwstats_lock);
5631 	SET_NETDEV_DEV(dev, &pci_dev->dev);
5632 	u64_stats_init(&np->swstats_rx_syncp);
5633 	u64_stats_init(&np->swstats_tx_syncp);
5634 
5635 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
5636 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
5637 	timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
5638 
5639 	err = pci_enable_device(pci_dev);
5640 	if (err)
5641 		goto out_free;
5642 
5643 	pci_set_master(pci_dev);
5644 
5645 	err = pci_request_regions(pci_dev, DRV_NAME);
5646 	if (err < 0)
5647 		goto out_disable;
5648 
5649 	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5650 		np->register_size = NV_PCI_REGSZ_VER3;
5651 	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5652 		np->register_size = NV_PCI_REGSZ_VER2;
5653 	else
5654 		np->register_size = NV_PCI_REGSZ_VER1;
5655 
5656 	err = -EINVAL;
5657 	addr = 0;
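	/* find the first memory BAR large enough for the register window */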
5658 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5659 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5660 				pci_resource_len(pci_dev, i) >= np->register_size) {
5661 			addr = pci_resource_start(pci_dev, i);
5662 			break;
5663 		}
5664 	}
5665 	if (i == DEVICE_COUNT_RESOURCE) {
5666 		dev_info(&pci_dev->dev, "Couldn't find register window\n");
5667 		goto out_relreg;
5668 	}
5669 
5670 	/* copy of driver data */
5671 	np->driver_data = id->driver_data;
5672 	/* copy of device id */
5673 	np->device_id = id->device;
5674 
5675 	/* handle different descriptor versions */
5676 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
5677 		/* packet format 3: supports 40-bit addressing */
5678 		np->desc_ver = DESC_VER_3;
5679 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5680 		if (dma_64bit) {
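			/* note: only a 39-bit mask is requested here even
			 * though packet format 3 carries 40 address bits
			 */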
5681 			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5682 				dev_info(&pci_dev->dev,
5683 					 "64-bit DMA failed, using 32-bit addressing\n");
5684 			else
5685 				dev->features |= NETIF_F_HIGHDMA;
5686 			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5687 				dev_info(&pci_dev->dev,
5688 					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5689 			}
5690 		}
5691 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
5692 		/* packet format 2: supports jumbo frames */
5693 		np->desc_ver = DESC_VER_2;
5694 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5695 	} else {
5696 		/* original packet format */
5697 		np->desc_ver = DESC_VER_1;
5698 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5699 	}
5700 
5701 	np->pkt_limit = NV_PKTLIMIT_1;
5702 	if (id->driver_data & DEV_HAS_LARGEDESC)
5703 		np->pkt_limit = NV_PKTLIMIT_2;
5704 
5705 	if (id->driver_data & DEV_HAS_CHECKSUM) {
5706 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5707 		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5708 			NETIF_F_TSO | NETIF_F_RXCSUM;
5709 	}
5710 
5711 	np->vlanctl_bits = 0;
5712 	if (id->driver_data & DEV_HAS_VLAN) {
5713 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5714 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5715 				    NETIF_F_HW_VLAN_CTAG_TX;
5716 	}
5717 
5718 	dev->features |= dev->hw_features;
5719 
5720 	/* Add loopback capability to the device. */
5721 	dev->hw_features |= NETIF_F_LOOPBACK;
5722 
5723 	/* MTU range: 64 - 1500 or 9100 */
5724 	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
5725 	dev->max_mtu = np->pkt_limit;
5726 
5727 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5728 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5729 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5730 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5731 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5732 	}
5733 
5734 	err = -ENOMEM;
5735 	np->base = ioremap(addr, np->register_size);
5736 	if (!np->base)
5737 		goto out_relreg;
5738 
5739 	np->rx_ring_size = RX_RING_DEFAULT;
5740 	np->tx_ring_size = TX_RING_DEFAULT;
5741 
5742 	if (!nv_optimized(np)) {
5743 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5744 					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5745 					&np->ring_addr);
5746 		if (!np->rx_ring.orig)
5747 			goto out_unmap;
5748 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5749 	} else {
5750 		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5751 					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5752 					&np->ring_addr);
5753 		if (!np->rx_ring.ex)
5754 			goto out_unmap;
5755 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5756 	}
5757 	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5758 	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5759 	if (!np->rx_skb || !np->tx_skb)
5760 		goto out_freering;
5761 
5762 	if (!nv_optimized(np))
5763 		dev->netdev_ops = &nv_netdev_ops;
5764 	else
5765 		dev->netdev_ops = &nv_netdev_ops_optimized;
5766 
5767 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5768 	dev->ethtool_ops = &ops;
5769 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5770 
5771 	pci_set_drvdata(pci_dev, dev);
5772 
5773 	/* read the mac address */
5774 	base = get_hwbase(dev);
5775 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
5776 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
5777 
5778 	/* check the workaround bit for correct mac address order */
5779 	txreg = readl(base + NvRegTransmitPoll);
5780 	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5781 		/* mac address is already in correct order */
5782 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5783 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5784 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5785 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5786 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5787 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5788 	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5789 		/* mac address is already in correct order */
5790 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5791 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5792 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5793 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5794 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5795 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5796 		/*
5797 		 * Set orig mac address back to the reversed version.
5798 		 * This flag will be cleared during low power transition.
5799 		 * Therefore, we should always put back the reversed address.
5800 		 */
5801 		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5802 			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5803 		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5804 	} else {
5805 		/* need to reverse mac address to correct order */
5806 		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5807 		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5808 		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5809 		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5810 		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5811 		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5812 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5813 		dev_dbg(&pci_dev->dev,
5814 			"%s: set workaround bit for reversed mac addr\n",
5815 			__func__);
5816 	}
5817 
5818 	if (!is_valid_ether_addr(dev->dev_addr)) {
5819 		/*
5820 		 * Bad mac address. At least one bios sets the mac address
5821 		 * to 01:23:45:67:89:ab
5822 		 */
5823 		dev_err(&pci_dev->dev,
5824 			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5825 			dev->dev_addr);
5826 		eth_hw_addr_random(dev);
5827 		dev_err(&pci_dev->dev,
5828 			"Using random MAC address: %pM\n", dev->dev_addr);
5829 	}
5830 
5831 	/* set mac address */
5832 	nv_copy_mac_to_hw(dev);
5833 
5834 	/* disable WOL */
5835 	writel(0, base + NvRegWakeUpFlags);
5836 	np->wolenabled = 0;
5837 	device_set_wakeup_enable(&pci_dev->dev, false);
5838 
5839 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5840 
5841 		/* take phy and nic out of low power mode */
5842 		powerstate = readl(base + NvRegPowerState2);
5843 		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5844 		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5845 		    pci_dev->revision >= 0xA3)
5846 			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5847 		writel(powerstate, base + NvRegPowerState2);
5848 	}
5849 
5850 	if (np->desc_ver == DESC_VER_1)
5851 		np->tx_flags = NV_TX_VALID;
5852 	else
5853 		np->tx_flags = NV_TX2_VALID;
5854 
5855 	np->msi_flags = 0;
5856 	if ((id->driver_data & DEV_HAS_MSI) && msi)
5857 		np->msi_flags |= NV_MSI_CAPABLE;
5858 
5859 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5860 		/* msi-x has had reported issues when the irq mask is
5861 		 * modified, as in the napi case, so it stays disabled
5862 		 */
5863 #if 0
5864 		np->msi_flags |= NV_MSI_X_CAPABLE;
5865 #endif
5866 	}
5867 
5868 	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5869 		np->irqmask = NVREG_IRQMASK_CPU;
5870 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5871 			np->msi_flags |= 0x0001;
5872 	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5873 		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5874 		/* start off in throughput mode */
5875 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5876 		/* remove support for msix mode */
5877 		np->msi_flags &= ~NV_MSI_X_CAPABLE;
5878 	} else {
5879 		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5880 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5881 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5882 			np->msi_flags |= 0x0003;
5883 	}
5884 
5885 	if (id->driver_data & DEV_NEED_TIMERIRQ)
5886 		np->irqmask |= NVREG_IRQ_TIMER;
5887 	if (id->driver_data & DEV_NEED_LINKTIMER) {
5888 		np->need_linktimer = 1;
5889 		np->link_timeout = jiffies + LINK_TIMEOUT;
5890 	} else {
5891 		np->need_linktimer = 0;
5892 	}
5893 
5894 	/* Limit the number of tx's outstanding for hw bug */
5895 	if (id->driver_data & DEV_NEED_TX_LIMIT) {
5896 		np->tx_limit = 1;
5897 		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5898 		    pci_dev->revision >= 0xA2)
5899 			np->tx_limit = 0;
5900 	}
5901 
5902 	/* clear phy state and temporarily halt phy interrupts */
5903 	writel(0, base + NvRegMIIMask);
5904 	phystate = readl(base + NvRegAdapterControl);
5905 	if (phystate & NVREG_ADAPTCTL_RUNNING) {
5906 		phystate_orig = 1;
5907 		phystate &= ~NVREG_ADAPTCTL_RUNNING;
5908 		writel(phystate, base + NvRegAdapterControl);
5909 	}
5910 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5911 
5912 	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5913 		/* management unit running on the mac? */
5914 		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5915 		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5916 		    nv_mgmt_acquire_sema(dev) &&
5917 		    nv_mgmt_get_version(dev)) {
5918 			np->mac_in_use = 1;
5919 			if (np->mgmt_version > 0)
5920 				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5921 			/* management unit setup the phy already? */
5922 			if (np->mac_in_use &&
5923 			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5924 			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
5925 				/* phy is inited by mgmt unit */
5926 				phyinitialized = 1;
5927 			} else {
5928 				/* we need to init the phy */
5929 			}
5930 		}
5931 	}
5932 
5933 	/* find a suitable phy */
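	/* (scan addresses 1..31 first, then 0: i == 32 wraps to 0) */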
5934 	for (i = 1; i <= 32; i++) {
5935 		int id1, id2;
5936 		int phyaddr = i & 0x1F;
5937 
5938 		spin_lock_irq(&np->lock);
5939 		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5940 		spin_unlock_irq(&np->lock);
5941 		if (id1 < 0 || id1 == 0xffff)
5942 			continue;
5943 		spin_lock_irq(&np->lock);
5944 		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5945 		spin_unlock_irq(&np->lock);
5946 		if (id2 < 0 || id2 == 0xffff)
5947 			continue;
5948 
5949 		np->phy_model = id2 & PHYID2_MODEL_MASK;
5950 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5951 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5952 		np->phyaddr = phyaddr;
5953 		np->phy_oui = id1 | id2;
5954 
5955 		/* Realtek hardcoded phy id1 to all zero's on certain phys */
5956 		if (np->phy_oui == PHY_OUI_REALTEK2)
5957 			np->phy_oui = PHY_OUI_REALTEK;
5958 		/* Setup phy revision for Realtek */
5959 		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5960 			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5961 
5962 		break;
5963 	}
5964 	if (i == 33) {
5965 		dev_info(&pci_dev->dev, "probe: Could not find a valid PHY\n");
5966 		goto out_error;
5967 	}
5968 
5969 	if (!phyinitialized) {
5970 		/* reset it */
5971 		phy_init(dev);
5972 	} else {
5973 		/* see if it is a gigabit phy */
5974 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5975 		if (mii_status & PHY_GIGABIT)
5976 			np->gigabit = PHY_GIGABIT;
5977 	}
5978 
5979 	/* set default link speed settings */
5980 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5981 	np->duplex = 0;
5982 	np->autoneg = 1;
5983 
5984 	err = register_netdev(dev);
5985 	if (err) {
5986 		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5987 		goto out_error;
5988 	}
5989 
5990 	netif_carrier_off(dev);
5991 
5992 	/* Some NICs freeze when TX pause is enabled while NIC is
5993 	 * down, and this stays across warm reboots. The sequence
5994 	 * below should be enough to recover from that state.
5995 	 */
5996 	nv_update_pause(dev, 0);
5997 	nv_start_tx(dev);
5998 	nv_stop_tx(dev);
5999 
6000 	if (id->driver_data & DEV_HAS_VLAN)
6001 		nv_vlan_mode(dev, dev->features);
6002 
6003 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
6004 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
6005 
6006 	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6007 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
6008 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
6009 			"csum " : "",
6010 		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6011 				  NETIF_F_HW_VLAN_CTAG_TX) ?
6012 			"vlan " : "",
6013 		 dev->features & (NETIF_F_LOOPBACK) ?
6014 			"loopback " : "",
6015 		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6016 		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6017 		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6018 		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6019 		 np->need_linktimer ? "lnktim " : "",
6020 		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6021 		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6022 		 np->desc_ver);
6023 
6024 	return 0;
6025 
6026 out_error:
6027 	if (phystate_orig)
6028 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6029 out_freering:
6030 	free_rings(dev);
6031 out_unmap:
6032 	iounmap(get_hwbase(dev));
6033 out_relreg:
6034 	pci_release_regions(pci_dev);
6035 out_disable:
6036 	pci_disable_device(pci_dev);
6037 out_free:
6038 	free_netdev(dev);
6039 out:
6040 	return err;
6041 }
6042 
6043 static void nv_restore_phy(struct net_device *dev)
6044 {
6045 	struct fe_priv *np = netdev_priv(dev);
6046 	u16 phy_reserved, mii_control;
6047 
6048 	if (np->phy_oui == PHY_OUI_REALTEK &&
6049 	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
6050 	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6051 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6052 		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6053 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6054 		phy_reserved |= PHY_REALTEK_INIT8;
6055 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6056 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6057 
6058 		/* restart auto negotiation */
6059 		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6060 		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6061 		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6062 	}
6063 }
6064 
6065 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6066 {
6067 	struct net_device *dev = pci_get_drvdata(pci_dev);
6068 	struct fe_priv *np = netdev_priv(dev);
6069 	u8 __iomem *base = get_hwbase(dev);
6070 
6071 	/* special op: write back the misordered MAC address - otherwise
6072 	 * the next nv_probe would see a wrong address.
6073 	 */
6074 	writel(np->orig_mac[0], base + NvRegMacAddrA);
6075 	writel(np->orig_mac[1], base + NvRegMacAddrB);
6076 	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6077 	       base + NvRegTransmitPoll);
6078 }
6079 
6080 static void nv_remove(struct pci_dev *pci_dev)
6081 {
6082 	struct net_device *dev = pci_get_drvdata(pci_dev);
6083 
6084 	unregister_netdev(dev);
6085 
6086 	nv_restore_mac_addr(pci_dev);
6087 
6088 	/* restore any phy related changes */
6089 	nv_restore_phy(dev);
6090 
6091 	nv_mgmt_release_sema(dev);
6092 
6093 	/* free all structures */
6094 	free_rings(dev);
6095 	iounmap(get_hwbase(dev));
6096 	pci_release_regions(pci_dev);
6097 	pci_disable_device(pci_dev);
6098 	free_netdev(dev);
6099 }
6100 
6101 #ifdef CONFIG_PM_SLEEP
6102 static int nv_suspend(struct device *device)
6103 {
6104 	struct pci_dev *pdev = to_pci_dev(device);
6105 	struct net_device *dev = pci_get_drvdata(pdev);
6106 	struct fe_priv *np = netdev_priv(dev);
6107 	u8 __iomem *base = get_hwbase(dev);
6108 	int i;
6109 
6110 	if (netif_running(dev)) {
6111 		/* Gross. */
6112 		nv_close(dev);
6113 	}
6114 	netif_device_detach(dev);
6115 
6116 	/* save non-pci configuration space */
6117 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
6118 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
6119 
6120 	return 0;
6121 }
6122 
6123 static int nv_resume(struct device *device)
6124 {
6125 	struct pci_dev *pdev = to_pci_dev(device);
6126 	struct net_device *dev = pci_get_drvdata(pdev);
6127 	struct fe_priv *np = netdev_priv(dev);
6128 	u8 __iomem *base = get_hwbase(dev);
6129 	int i, rc = 0;
6130 
6131 	/* restore non-pci configuration space */
6132 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
6133 		writel(np->saved_config_space[i], base+i*sizeof(u32));
6134 
6135 	if (np->driver_data & DEV_NEED_MSI_FIX)
6136 		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6137 
6138 	/* restore phy state, including autoneg */
6139 	phy_init(dev);
6140 
6141 	netif_device_attach(dev);
6142 	if (netif_running(dev)) {
6143 		rc = nv_open(dev);
6144 		nv_set_multicast(dev);
6145 	}
6146 	return rc;
6147 }
6148 
6149 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6150 #define NV_PM_OPS (&nv_pm_ops)
6151 
6152 #else
6153 #define NV_PM_OPS NULL
6154 #endif /* CONFIG_PM_SLEEP */
6155 
6156 #ifdef CONFIG_PM
6157 static void nv_shutdown(struct pci_dev *pdev)
6158 {
6159 	struct net_device *dev = pci_get_drvdata(pdev);
6160 	struct fe_priv *np = netdev_priv(dev);
6161 
6162 	if (netif_running(dev))
6163 		nv_close(dev);
6164 
6165 	/*
6166 	 * Restore the MAC so a kernel started by kexec won't get confused.
6167 	 * If we really go for poweroff, we must not restore the MAC,
6168 	 * otherwise the MAC for WOL will be reversed at least on some boards.
6169 	 */
6170 	if (system_state != SYSTEM_POWER_OFF)
6171 		nv_restore_mac_addr(pdev);
6172 
6173 	pci_disable_device(pdev);
6174 	/*
6175 	 * Apparently it is not possible to reinitialise from D3 hot,
6176 	 * only put the device into D3 if we really go for poweroff.
6177 	 */
6178 	if (system_state == SYSTEM_POWER_OFF) {
6179 		pci_wake_from_d3(pdev, np->wolenabled);
6180 		pci_set_power_state(pdev, PCI_D3hot);
6181 	}
6182 }
6183 #else
6184 #define nv_shutdown NULL
6185 #endif /* CONFIG_PM */
6186 
6187 static const struct pci_device_id pci_tbl[] = {
6188 	{	/* nForce Ethernet Controller */
6189 		PCI_DEVICE(0x10DE, 0x01C3),
6190 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6191 	},
6192 	{	/* nForce2 Ethernet Controller */
6193 		PCI_DEVICE(0x10DE, 0x0066),
6194 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6195 	},
6196 	{	/* nForce3 Ethernet Controller */
6197 		PCI_DEVICE(0x10DE, 0x00D6),
6198 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6199 	},
6200 	{	/* nForce3 Ethernet Controller */
6201 		PCI_DEVICE(0x10DE, 0x0086),
6202 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6203 	},
6204 	{	/* nForce3 Ethernet Controller */
6205 		PCI_DEVICE(0x10DE, 0x008C),
6206 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6207 	},
6208 	{	/* nForce3 Ethernet Controller */
6209 		PCI_DEVICE(0x10DE, 0x00E6),
6210 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6211 	},
6212 	{	/* nForce3 Ethernet Controller */
6213 		PCI_DEVICE(0x10DE, 0x00DF),
6214 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6215 	},
6216 	{	/* CK804 Ethernet Controller */
6217 		PCI_DEVICE(0x10DE, 0x0056),
6218 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6219 	},
6220 	{	/* CK804 Ethernet Controller */
6221 		PCI_DEVICE(0x10DE, 0x0057),
6222 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6223 	},
6224 	{	/* MCP04 Ethernet Controller */
6225 		PCI_DEVICE(0x10DE, 0x0037),
6226 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6227 	},
6228 	{	/* MCP04 Ethernet Controller */
6229 		PCI_DEVICE(0x10DE, 0x0038),
6230 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6231 	},
6232 	{	/* MCP51 Ethernet Controller */
6233 		PCI_DEVICE(0x10DE, 0x0268),
6234 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6235 	},
6236 	{	/* MCP51 Ethernet Controller */
6237 		PCI_DEVICE(0x10DE, 0x0269),
6238 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6239 	},
6240 	{	/* MCP55 Ethernet Controller */
6241 		PCI_DEVICE(0x10DE, 0x0372),
6242 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6243 	},
6244 	{	/* MCP55 Ethernet Controller */
6245 		PCI_DEVICE(0x10DE, 0x0373),
6246 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6247 	},
6248 	{	/* MCP61 Ethernet Controller */
6249 		PCI_DEVICE(0x10DE, 0x03E5),
6250 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6251 	},
6252 	{	/* MCP61 Ethernet Controller */
6253 		PCI_DEVICE(0x10DE, 0x03E6),
6254 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6255 	},
6256 	{	/* MCP61 Ethernet Controller */
6257 		PCI_DEVICE(0x10DE, 0x03EE),
6258 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6259 	},
6260 	{	/* MCP61 Ethernet Controller */
6261 		PCI_DEVICE(0x10DE, 0x03EF),
6262 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6263 	},
6264 	{	/* MCP65 Ethernet Controller */
6265 		PCI_DEVICE(0x10DE, 0x0450),
6266 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6267 	},
6268 	{	/* MCP65 Ethernet Controller */
6269 		PCI_DEVICE(0x10DE, 0x0451),
6270 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6271 	},
6272 	{	/* MCP65 Ethernet Controller */
6273 		PCI_DEVICE(0x10DE, 0x0452),
6274 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6275 	},
6276 	{	/* MCP65 Ethernet Controller */
6277 		PCI_DEVICE(0x10DE, 0x0453),
6278 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6279 	},
6280 	{	/* MCP67 Ethernet Controller */
6281 		PCI_DEVICE(0x10DE, 0x054C),
6282 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6283 	},
6284 	{	/* MCP67 Ethernet Controller */
6285 		PCI_DEVICE(0x10DE, 0x054D),
6286 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6287 	},
6288 	{	/* MCP67 Ethernet Controller */
6289 		PCI_DEVICE(0x10DE, 0x054E),
6290 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6291 	},
6292 	{	/* MCP67 Ethernet Controller */
6293 		PCI_DEVICE(0x10DE, 0x054F),
6294 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6295 	},
6296 	{	/* MCP73 Ethernet Controller */
6297 		PCI_DEVICE(0x10DE, 0x07DC),
6298 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6299 	},
6300 	{	/* MCP73 Ethernet Controller */
6301 		PCI_DEVICE(0x10DE, 0x07DD),
6302 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6303 	},
6304 	{	/* MCP73 Ethernet Controller */
6305 		PCI_DEVICE(0x10DE, 0x07DE),
6306 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6307 	},
6308 	{	/* MCP73 Ethernet Controller */
6309 		PCI_DEVICE(0x10DE, 0x07DF),
6310 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6311 	},
6312 	{	/* MCP77 Ethernet Controller */
6313 		PCI_DEVICE(0x10DE, 0x0760),
6314 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6315 	},
6316 	{	/* MCP77 Ethernet Controller */
6317 		PCI_DEVICE(0x10DE, 0x0761),
6318 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6319 	},
6320 	{	/* MCP77 Ethernet Controller */
6321 		PCI_DEVICE(0x10DE, 0x0762),
6322 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6323 	},
6324 	{	/* MCP77 Ethernet Controller */
6325 		PCI_DEVICE(0x10DE, 0x0763),
6326 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6327 	},
6328 	{	/* MCP79 Ethernet Controller */
6329 		PCI_DEVICE(0x10DE, 0x0AB0),
6330 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6331 	},
6332 	{	/* MCP79 Ethernet Controller */
6333 		PCI_DEVICE(0x10DE, 0x0AB1),
6334 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6335 	},
6336 	{	/* MCP79 Ethernet Controller */
6337 		PCI_DEVICE(0x10DE, 0x0AB2),
6338 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6339 	},
6340 	{	/* MCP79 Ethernet Controller */
6341 		PCI_DEVICE(0x10DE, 0x0AB3),
6342 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6343 	},
6344 	{	/* MCP89 Ethernet Controller */
6345 		PCI_DEVICE(0x10DE, 0x0D7D),
6346 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6347 	},
6348 	{0,},
6349 };
6350 
6351 static struct pci_driver forcedeth_pci_driver = {
6352 	.name		= DRV_NAME,
6353 	.id_table	= pci_tbl,
6354 	.probe		= nv_probe,
6355 	.remove		= nv_remove,
6356 	.shutdown	= nv_shutdown,
6357 	.driver.pm	= NV_PM_OPS,
6358 };
6359 
6360 module_param(max_interrupt_work, int, 0);
6361 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6362 module_param(optimization_mode, int, 0);
6363 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6364 module_param(poll_interval, int, 0);
6365 MODULE_PARM_DESC(poll_interval, "The polling interval determines how frequently the timer interrupt is generated, computed as (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
6366 module_param(msi, int, 0);
6367 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6368 module_param(msix, int, 0);
6369 MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
6370 module_param(dma_64bit, int, 0);
6371 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6372 module_param(phy_cross, int, 0);
6373 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6374 module_param(phy_power_down, int, 0);
6375 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6376 module_param(debug_tx_timeout, bool, 0);
6377 MODULE_PARM_DESC(debug_tx_timeout,
6378 		 "Dump tx related registers and ring when tx_timeout happens");
6379 
6380 module_pci_driver(forcedeth_pci_driver);
6381 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6382 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6383 MODULE_LICENSE("GPL");
6384 MODULE_DEVICE_TABLE(pci, pci_tbl);
6385