/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *      engineered documentation written by Carl-Daniel Hailfinger
 *      and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

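/* Only one member is in use per device; which one depends on the
 * descriptor version (see nv_optimized() below).
 */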
union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

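/*
 * Sketch (an assumption, matching how the probe code elsewhere in this
 * file combines the MII_PHYSID registers with the masks above):
 *   phy_oui = ((id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT) |
 *             ((id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT);
 */
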
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2

#define NV_TX_LIMIT_COUNT     16

#define NV_DYNAMIC_THRESHOLD        4
#define NV_DYNAMIC_MAX_QUIET_COUNT  2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets; /* should be ifconfig->rx_packets */
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
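/* With the struct above this works out to 33 counters for v3 hardware,
 * 30 for v2 (without the three v3-only tx counters) and 24 for v1
 * (without the six v2-only counters as well).
 */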

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
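
/*
 * Illustrative sketch only (not part of the original driver): converting a
 * desired interval in microseconds into a NvRegPollingInterval value with
 * the formula above. For 1 ms: (1000 * 100) / 1024 = 97, which matches the
 * NVREG_POLL_DEFAULT comment next to the register definition.
 */
static inline u32 nv_usec_to_poll_interval(u32 usec)
{
	/* (time_in_micro_secs * 100) / (2^10), clamped to the register max */
	return min_t(u32, (usec * 100) >> 10, 65535);
}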

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

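/* DESC_VER_1 and DESC_VER_2 use the legacy ring_desc layout; only
 * DESC_VER_3 takes the "optimized" ring_desc_ex paths.
 */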
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

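/* Poll a register until (value & mask) == target, waiting delay usecs
 * between reads. Returns 0 once the target value is seen and 1 if
 * delaymax usecs elapse first.
 */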
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

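/* Split a dma_addr_t into the 32-bit halves expected by the ring address
 * registers; dma_high() shifts in two steps so the expression stays
 * defined when dma_addr_t is only 32 bits wide.
 */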
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}

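/* Program the rx/tx ring base addresses. Both rings live in a single
 * coherent allocation, rx ring first, so the tx base is offset by
 * rx_ring_size descriptors; the extended descriptor layout also needs
 * the high 32 address bits.
 */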
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}

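/* Multiple irqs are only in use with MSI-X enabled and more than one
 * vector allocated; plain MSI and legacy INTx always share one handler.
 */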
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

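/* Gate or ungate the nic clocks via NvRegPowerState2; only done when the
 * device has power control and the management unit is not using the mac.
 */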
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
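 *
 * Returns the register value for a successful read, 0 for a successful
 * write, and -1 on timeout or a flagged mii error.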
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}

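/* Assert BMCR_RESET (plus any extra bmcr_setup bits), wait 500ms, then
 * poll every 10-20ms until the phy deasserts the reset bit; gives up
 * after 100 polls.
 */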
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

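/* Pulse the reset bit in NvRegTxRxControl; np->txrxctl_bits keeps the
 * descriptor-version bits intact across the reset.
 */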
1619 static void nv_txrx_reset(struct net_device *dev)
1620 {
1621 	struct fe_priv *np = netdev_priv(dev);
1622 	u8 __iomem *base = get_hwbase(dev);
1623 
1624 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1625 	pci_push(base);
1626 	udelay(NV_TXRX_RESET_DELAY);
1627 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1628 	pci_push(base);
1629 }
1630 
1631 static void nv_mac_reset(struct net_device *dev)
1632 {
1633 	struct fe_priv *np = netdev_priv(dev);
1634 	u8 __iomem *base = get_hwbase(dev);
1635 	u32 temp1, temp2, temp3;
1636 
1637 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1638 	pci_push(base);
1639 
1640 	/* save registers since they will be cleared on reset */
1641 	temp1 = readl(base + NvRegMacAddrA);
1642 	temp2 = readl(base + NvRegMacAddrB);
1643 	temp3 = readl(base + NvRegTransmitPoll);
1644 
1645 	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1646 	pci_push(base);
1647 	udelay(NV_MAC_RESET_DELAY);
1648 	writel(0, base + NvRegMacReset);
1649 	pci_push(base);
1650 	udelay(NV_MAC_RESET_DELAY);
1651 
1652 	/* restore saved registers */
1653 	writel(temp1, base + NvRegMacAddrA);
1654 	writel(temp2, base + NvRegMacAddrB);
1655 	writel(temp3, base + NvRegTransmitPoll);
1656 
1657 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1658 	pci_push(base);
1659 }
1660 
1661 /* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1662 static void nv_update_stats(struct net_device *dev)
1663 {
1664 	struct fe_priv *np = netdev_priv(dev);
1665 	u8 __iomem *base = get_hwbase(dev);
1666 
1667 	/* If it happens that this is run in top-half context, then
1668 	 * replace the spin_lock of hwstats_lock with
1669 	 * spin_lock_irqsave() in calling functions. */
1670 	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
1671 	assert_spin_locked(&np->hwstats_lock);
1672 
1673 	/* query hardware */
1674 	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1675 	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1676 	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1677 	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1678 	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1679 	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1680 	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1681 	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1682 	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1683 	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1684 	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1685 	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1686 	np->estats.rx_runt += readl(base + NvRegRxRunt);
1687 	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1688 	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1689 	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1690 	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1691 	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1692 	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1693 	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1694 	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1695 	np->estats.rx_packets =
1696 		np->estats.rx_unicast +
1697 		np->estats.rx_multicast +
1698 		np->estats.rx_broadcast;
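	/* the hw presumably also counts each rx_extra_byte event as a
	 * frame alignment error, hence the subtraction below to avoid
	 * counting those frames twice */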
1699 	np->estats.rx_errors_total =
1700 		np->estats.rx_crc_errors +
1701 		np->estats.rx_over_errors +
1702 		np->estats.rx_frame_error +
1703 		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1704 		np->estats.rx_late_collision +
1705 		np->estats.rx_runt +
1706 		np->estats.rx_frame_too_long;
1707 	np->estats.tx_errors_total =
1708 		np->estats.tx_late_collision +
1709 		np->estats.tx_fifo_errors +
1710 		np->estats.tx_carrier_errors +
1711 		np->estats.tx_excess_deferral +
1712 		np->estats.tx_retry_error;
1713 
1714 	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1715 		np->estats.tx_deferral += readl(base + NvRegTxDef);
1716 		np->estats.tx_packets += readl(base + NvRegTxFrame);
1717 		np->estats.rx_bytes += readl(base + NvRegRxCnt);
1718 		np->estats.tx_pause += readl(base + NvRegTxPause);
1719 		np->estats.rx_pause += readl(base + NvRegRxPause);
1720 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1721 		np->estats.rx_errors_total += np->estats.rx_drop_frame;
1722 	}
1723 
1724 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1725 		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1726 		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1727 		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1728 	}
1729 }
1730 
1731 /*
1732  * nv_get_stats64: dev->ndo_get_stats64 function
1733  * Get the latest stats values from the nic.
1734  * Called with read_lock(&dev_base_lock) held for read -
1735  * only synchronized against unregister_netdevice.
1736  */
1737 static struct rtnl_link_stats64*
1738 nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1739 	__acquires(&netdev_priv(dev)->hwstats_lock)
1740 	__releases(&netdev_priv(dev)->hwstats_lock)
1741 {
1742 	struct fe_priv *np = netdev_priv(dev);
1743 	unsigned int syncp_start;
1744 
1745 	/*
1746 	 * Note: because HW stats are not always available and for
1747 	 * consistency reasons, the following ifconfig stats are
1748 	 * managed by software: rx_bytes, tx_bytes, rx_packets and
1749 	 * tx_packets. The related hardware stats reported by ethtool
1750 	 * should be equivalent to these ifconfig stats, with 4
1751 	 * additional bytes per packet (Ethernet FCS CRC), except for
1752 	 * tx_packets when TSO kicks in.
1753 	 */
1754 
1755 	/* software stats */
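	/* u64_stats_fetch_begin/retry loop: if a writer updated the
	 * counters while we copied them, re-read until we obtain a
	 * consistent snapshot, without locking the writer's hot path */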
1756 	do {
1757 		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
1758 		storage->rx_packets       = np->stat_rx_packets;
1759 		storage->rx_bytes         = np->stat_rx_bytes;
1760 		storage->rx_dropped       = np->stat_rx_dropped;
1761 		storage->rx_missed_errors = np->stat_rx_missed_errors;
1762 	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
1763 
1764 	do {
1765 		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
1766 		storage->tx_packets = np->stat_tx_packets;
1767 		storage->tx_bytes   = np->stat_tx_bytes;
1768 		storage->tx_dropped = np->stat_tx_dropped;
1769 	} while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
1770 
1771 	/* If the nic supports hw counters then retrieve latest values */
1772 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1773 		spin_lock_bh(&np->hwstats_lock);
1774 
1775 		nv_update_stats(dev);
1776 
1777 		/* generic stats */
1778 		storage->rx_errors = np->estats.rx_errors_total;
1779 		storage->tx_errors = np->estats.tx_errors_total;
1780 
1781 		/* meaningful only when NIC supports stats v3 */
1782 		storage->multicast = np->estats.rx_multicast;
1783 
1784 		/* detailed rx_errors */
1785 		storage->rx_length_errors = np->estats.rx_length_error;
1786 		storage->rx_over_errors   = np->estats.rx_over_errors;
1787 		storage->rx_crc_errors    = np->estats.rx_crc_errors;
1788 		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
1789 		storage->rx_fifo_errors   = np->estats.rx_drop_frame;
1790 
1791 		/* detailed tx_errors */
1792 		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1793 		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
1794 
1795 		spin_unlock_bh(&np->hwstats_lock);
1796 	}
1797 
1798 	return storage;
1799 }
1800 
1801 /*
1802  * nv_alloc_rx: fill rx ring entries.
1803  * Returns 1 if an skb allocation failed and the
1804  * rx engine is left without available descriptors.
1805  */
1806 static int nv_alloc_rx(struct net_device *dev)
1807 {
1808 	struct fe_priv *np = netdev_priv(dev);
1809 	struct ring_desc *less_rx;
1810 
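	/* refill stops one descriptor short of get_rx, so put_rx can
	 * never catch up with get_rx and a completely full ring is
	 * never mistaken for a completely empty one */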
1811 	less_rx = np->get_rx.orig;
1812 	if (less_rx-- == np->first_rx.orig)
1813 		less_rx = np->last_rx.orig;
1814 
1815 	while (np->put_rx.orig != less_rx) {
1816 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1817 		if (skb) {
1818 			np->put_rx_ctx->skb = skb;
1819 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1820 							     skb->data,
1821 							     skb_tailroom(skb),
1822 							     PCI_DMA_FROMDEVICE);
1823 			if (pci_dma_mapping_error(np->pci_dev,
1824 						  np->put_rx_ctx->dma)) {
1825 				kfree_skb(skb);
1826 				goto packet_dropped;
1827 			}
1828 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1829 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1830 			wmb();
1831 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1832 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1833 				np->put_rx.orig = np->first_rx.orig;
1834 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1835 				np->put_rx_ctx = np->first_rx_ctx;
1836 		} else {
1837 packet_dropped:
1838 			u64_stats_update_begin(&np->swstats_rx_syncp);
1839 			np->stat_rx_dropped++;
1840 			u64_stats_update_end(&np->swstats_rx_syncp);
1841 			return 1;
1842 		}
1843 	}
1844 	return 0;
1845 }
1846 
1847 static int nv_alloc_rx_optimized(struct net_device *dev)
1848 {
1849 	struct fe_priv *np = netdev_priv(dev);
1850 	struct ring_desc_ex *less_rx;
1851 
1852 	less_rx = np->get_rx.ex;
1853 	if (less_rx-- == np->first_rx.ex)
1854 		less_rx = np->last_rx.ex;
1855 
1856 	while (np->put_rx.ex != less_rx) {
1857 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1858 		if (skb) {
1859 			np->put_rx_ctx->skb = skb;
1860 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1861 							     skb->data,
1862 							     skb_tailroom(skb),
1863 							     PCI_DMA_FROMDEVICE);
1864 			if (pci_dma_mapping_error(np->pci_dev,
1865 						  np->put_rx_ctx->dma)) {
1866 				kfree_skb(skb);
1867 				goto packet_dropped;
1868 			}
1869 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1870 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1871 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1872 			wmb();
1873 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1874 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1875 				np->put_rx.ex = np->first_rx.ex;
1876 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1877 				np->put_rx_ctx = np->first_rx_ctx;
1878 		} else {
1879 packet_dropped:
1880 			u64_stats_update_begin(&np->swstats_rx_syncp);
1881 			np->stat_rx_dropped++;
1882 			u64_stats_update_end(&np->swstats_rx_syncp);
1883 			return 1;
1884 		}
1885 	}
1886 	return 0;
1887 }
1888 
1889 /* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
1890 static void nv_do_rx_refill(unsigned long data)
1891 {
1892 	struct net_device *dev = (struct net_device *) data;
1893 	struct fe_priv *np = netdev_priv(dev);
1894 
1895 	/* Just reschedule NAPI rx processing */
1896 	napi_schedule(&np->napi);
1897 }
1898 
1899 static void nv_init_rx(struct net_device *dev)
1900 {
1901 	struct fe_priv *np = netdev_priv(dev);
1902 	int i;
1903 
1904 	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1905 
1906 	if (!nv_optimized(np))
1907 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1908 	else
1909 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1910 	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1911 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1912 
1913 	for (i = 0; i < np->rx_ring_size; i++) {
1914 		if (!nv_optimized(np)) {
1915 			np->rx_ring.orig[i].flaglen = 0;
1916 			np->rx_ring.orig[i].buf = 0;
1917 		} else {
1918 			np->rx_ring.ex[i].flaglen = 0;
1919 			np->rx_ring.ex[i].txvlan = 0;
1920 			np->rx_ring.ex[i].bufhigh = 0;
1921 			np->rx_ring.ex[i].buflow = 0;
1922 		}
1923 		np->rx_skb[i].skb = NULL;
1924 		np->rx_skb[i].dma = 0;
1925 	}
1926 }
1927 
1928 static void nv_init_tx(struct net_device *dev)
1929 {
1930 	struct fe_priv *np = netdev_priv(dev);
1931 	int i;
1932 
1933 	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1934 
1935 	if (!nv_optimized(np))
1936 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1937 	else
1938 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1939 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1940 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1941 	netdev_reset_queue(np->dev);
1942 	np->tx_pkts_in_progress = 0;
1943 	np->tx_change_owner = NULL;
1944 	np->tx_end_flip = NULL;
1945 	np->tx_stop = 0;
1946 
1947 	for (i = 0; i < np->tx_ring_size; i++) {
1948 		if (!nv_optimized(np)) {
1949 			np->tx_ring.orig[i].flaglen = 0;
1950 			np->tx_ring.orig[i].buf = 0;
1951 		} else {
1952 			np->tx_ring.ex[i].flaglen = 0;
1953 			np->tx_ring.ex[i].txvlan = 0;
1954 			np->tx_ring.ex[i].bufhigh = 0;
1955 			np->tx_ring.ex[i].buflow = 0;
1956 		}
1957 		np->tx_skb[i].skb = NULL;
1958 		np->tx_skb[i].dma = 0;
1959 		np->tx_skb[i].dma_len = 0;
1960 		np->tx_skb[i].dma_single = 0;
1961 		np->tx_skb[i].first_tx_desc = NULL;
1962 		np->tx_skb[i].next_tx_ctx = NULL;
1963 	}
1964 }
1965 
1966 static int nv_init_ring(struct net_device *dev)
1967 {
1968 	struct fe_priv *np = netdev_priv(dev);
1969 
1970 	nv_init_tx(dev);
1971 	nv_init_rx(dev);
1972 
1973 	if (!nv_optimized(np))
1974 		return nv_alloc_rx(dev);
1975 	else
1976 		return nv_alloc_rx_optimized(dev);
1977 }
1978 
1979 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1980 {
1981 	if (tx_skb->dma) {
1982 		if (tx_skb->dma_single)
1983 			pci_unmap_single(np->pci_dev, tx_skb->dma,
1984 					 tx_skb->dma_len,
1985 					 PCI_DMA_TODEVICE);
1986 		else
1987 			pci_unmap_page(np->pci_dev, tx_skb->dma,
1988 				       tx_skb->dma_len,
1989 				       PCI_DMA_TODEVICE);
1990 		tx_skb->dma = 0;
1991 	}
1992 }
1993 
1994 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1995 {
1996 	nv_unmap_txskb(np, tx_skb);
1997 	if (tx_skb->skb) {
1998 		dev_kfree_skb_any(tx_skb->skb);
1999 		tx_skb->skb = NULL;
2000 		return 1;
2001 	}
2002 	return 0;
2003 }
2004 
2005 static void nv_drain_tx(struct net_device *dev)
2006 {
2007 	struct fe_priv *np = netdev_priv(dev);
2008 	unsigned int i;
2009 
2010 	for (i = 0; i < np->tx_ring_size; i++) {
2011 		if (!nv_optimized(np)) {
2012 			np->tx_ring.orig[i].flaglen = 0;
2013 			np->tx_ring.orig[i].buf = 0;
2014 		} else {
2015 			np->tx_ring.ex[i].flaglen = 0;
2016 			np->tx_ring.ex[i].txvlan = 0;
2017 			np->tx_ring.ex[i].bufhigh = 0;
2018 			np->tx_ring.ex[i].buflow = 0;
2019 		}
2020 		if (nv_release_txskb(np, &np->tx_skb[i])) {
2021 			u64_stats_update_begin(&np->swstats_tx_syncp);
2022 			np->stat_tx_dropped++;
2023 			u64_stats_update_end(&np->swstats_tx_syncp);
2024 		}
2025 		np->tx_skb[i].dma = 0;
2026 		np->tx_skb[i].dma_len = 0;
2027 		np->tx_skb[i].dma_single = 0;
2028 		np->tx_skb[i].first_tx_desc = NULL;
2029 		np->tx_skb[i].next_tx_ctx = NULL;
2030 	}
2031 	np->tx_pkts_in_progress = 0;
2032 	np->tx_change_owner = NULL;
2033 	np->tx_end_flip = NULL;
2034 }
2035 
2036 static void nv_drain_rx(struct net_device *dev)
2037 {
2038 	struct fe_priv *np = netdev_priv(dev);
2039 	int i;
2040 
2041 	for (i = 0; i < np->rx_ring_size; i++) {
2042 		if (!nv_optimized(np)) {
2043 			np->rx_ring.orig[i].flaglen = 0;
2044 			np->rx_ring.orig[i].buf = 0;
2045 		} else {
2046 			np->rx_ring.ex[i].flaglen = 0;
2047 			np->rx_ring.ex[i].txvlan = 0;
2048 			np->rx_ring.ex[i].bufhigh = 0;
2049 			np->rx_ring.ex[i].buflow = 0;
2050 		}
2051 		wmb();
2052 		if (np->rx_skb[i].skb) {
2053 			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
2054 					 (skb_end_pointer(np->rx_skb[i].skb) -
2055 					  np->rx_skb[i].skb->data),
2056 					 PCI_DMA_FROMDEVICE);
2057 			dev_kfree_skb(np->rx_skb[i].skb);
2058 			np->rx_skb[i].skb = NULL;
2059 		}
2060 	}
2061 }
2062 
2063 static void nv_drain_rxtx(struct net_device *dev)
2064 {
2065 	nv_drain_tx(dev);
2066 	nv_drain_rx(dev);
2067 }
2068 
2069 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2070 {
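	/* occupied slots = (put_tx_ctx - get_tx_ctx) mod tx_ring_size;
	 * adding tx_ring_size first keeps the dividend non-negative
	 * when put has wrapped behind get. E.g. with a ring of 16,
	 * put at 3 and get at 14: (16 + (3 - 14)) % 16 = 5 slots in
	 * flight, leaving 11 empty. */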
2071 	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2072 }
2073 
2074 static void nv_legacybackoff_reseed(struct net_device *dev)
2075 {
2076 	u8 __iomem *base = get_hwbase(dev);
2077 	u32 reg;
2078 	u32 low;
2079 	int tx_status = 0;
2080 
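	/* randomize the NVREG_SLOTTIME_MASK portion of the slot time
	 * register; on these nics it apparently doubles as the seed
	 * of the legacy tx backoff LFSR */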
2081 	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2082 	get_random_bytes(&low, sizeof(low));
2083 	reg |= low & NVREG_SLOTTIME_MASK;
2084 
2085 	/* Need to stop tx before change takes effect.
2086 	 * Caller already holds np->lock.
2087 	 */
2088 	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2089 	if (tx_status)
2090 		nv_stop_tx(dev);
2091 	nv_stop_rx(dev);
2092 	writel(reg, base + NvRegSlotTime);
2093 	if (tx_status)
2094 		nv_start_tx(dev);
2095 	nv_start_rx(dev);
2096 }
2097 
2098 /* Gear Backoff Seeds */
2099 #define BACKOFF_SEEDSET_ROWS	8
2100 #define BACKOFF_SEEDSET_LFSRS	15
2101 
2102 /* Known Good seed sets */
2103 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2104 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2105 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2106 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2107 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2108 	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2109 	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2110 	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2111 	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2112 
2113 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2114 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2115 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2116 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2117 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2118 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2119 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2120 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2121 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2122 
2123 static void nv_gear_backoff_reseed(struct net_device *dev)
2124 {
2125 	u8 __iomem *base = get_hwbase(dev);
2126 	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2127 	u32 temp, seedset, combinedSeed;
2128 	int i;
2129 
2130 	/* Setup seed for free running LFSR */
2131 	/* Gather three 12-bit random mini-seeds and swizzle
2132 	   their bits around to increase randomness */
2133 	get_random_bytes(&miniseed1, sizeof(miniseed1));
2134 	miniseed1 &= 0x0fff;
2135 	if (miniseed1 == 0)
2136 		miniseed1 = 0xabc;
2137 
2138 	get_random_bytes(&miniseed2, sizeof(miniseed2));
2139 	miniseed2 &= 0x0fff;
2140 	if (miniseed2 == 0)
2141 		miniseed2 = 0xabc;
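	/* reverse the 12-bit value nibble-wise: swap the high and low
	 * nibbles, keep the middle one in place */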
2142 	miniseed2_reversed =
2143 		((miniseed2 & 0xF00) >> 8) |
2144 		 (miniseed2 & 0x0F0) |
2145 		 ((miniseed2 & 0x00F) << 8);
2146 
2147 	get_random_bytes(&miniseed3, sizeof(miniseed3));
2148 	miniseed3 &= 0x0fff;
2149 	if (miniseed3 == 0)
2150 		miniseed3 = 0xabc;
2151 	miniseed3_reversed =
2152 		((miniseed3 & 0xF00) >> 8) |
2153 		 (miniseed3 & 0x0F0) |
2154 		 ((miniseed3 & 0x00F) << 8);
2155 
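	/* combinedSeed is 24 bits: the low 12 (miniseed2 ^
	 * miniseed3_reversed) apparently seed the main LFSR, the high
	 * 12 (miniseed1 ^ miniseed2_reversed) the gear LFSR */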
2156 	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2157 		       (miniseed2 ^ miniseed3_reversed);
2158 
2159 	/* Seeds cannot be zero */
2160 	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2161 		combinedSeed |= 0x08;
2162 	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2163 		combinedSeed |= 0x8000;
2164 
2165 	/* No need to disable tx here */
2166 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2167 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2168 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2169 	writel(temp, base + NvRegBackOffControl);
2170 
2171 	/* Setup seeds for all gear LFSRs. */
2172 	get_random_bytes(&seedset, sizeof(seedset));
2173 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
2174 	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2175 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2176 		temp |= main_seedset[seedset][i-1] & 0x3ff;
2177 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2178 		writel(temp, base + NvRegBackOffControl);
2179 	}
2180 }
2181 
2182 /*
2183  * nv_start_xmit: dev->hard_start_xmit function
2184  * Called with netif_tx_lock held.
2185  */
2186 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2187 {
2188 	struct fe_priv *np = netdev_priv(dev);
2189 	u32 tx_flags = 0;
2190 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2191 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2192 	unsigned int i;
2193 	u32 offset = 0;
2194 	u32 bcnt;
2195 	u32 size = skb_headlen(skb);
2196 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
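	/* i.e. entries = ceil(size / NV_TX2_TSO_MAX_SIZE), assuming
	 * NV_TX2_TSO_MAX_SIZE == 1 << NV_TX2_TSO_MAX_SHIFT: one tx
	 * descriptor per NV_TX2_TSO_MAX_SIZE bytes of linear data */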
2197 	u32 empty_slots;
2198 	struct ring_desc *put_tx;
2199 	struct ring_desc *start_tx;
2200 	struct ring_desc *prev_tx;
2201 	struct nv_skb_map *prev_tx_ctx;
2202 	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2203 	unsigned long flags;
2204 
2205 	/* add fragments to entries count */
2206 	for (i = 0; i < fragments; i++) {
2207 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2208 
2209 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2210 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2211 	}
2212 
2213 	spin_lock_irqsave(&np->lock, flags);
2214 	empty_slots = nv_get_empty_tx_slots(np);
2215 	if (unlikely(empty_slots <= entries)) {
2216 		netif_stop_queue(dev);
2217 		np->tx_stop = 1;
2218 		spin_unlock_irqrestore(&np->lock, flags);
2219 		return NETDEV_TX_BUSY;
2220 	}
2221 	spin_unlock_irqrestore(&np->lock, flags);
2222 
2223 	start_tx = put_tx = np->put_tx.orig;
2224 
2225 	/* setup the header buffer */
2226 	do {
2227 		prev_tx = put_tx;
2228 		prev_tx_ctx = np->put_tx_ctx;
2229 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2230 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2231 						PCI_DMA_TODEVICE);
2232 		if (pci_dma_mapping_error(np->pci_dev,
2233 					  np->put_tx_ctx->dma)) {
2234 			/* on DMA mapping error - drop the packet */
2235 			kfree_skb(skb);
2236 			u64_stats_update_begin(&np->swstats_tx_syncp);
2237 			np->stat_tx_dropped++;
2238 			u64_stats_update_end(&np->swstats_tx_syncp);
2239 			return NETDEV_TX_OK;
2240 		}
2241 		np->put_tx_ctx->dma_len = bcnt;
2242 		np->put_tx_ctx->dma_single = 1;
2243 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2244 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2245 
2246 		tx_flags = np->tx_flags;
2247 		offset += bcnt;
2248 		size -= bcnt;
2249 		if (unlikely(put_tx++ == np->last_tx.orig))
2250 			put_tx = np->first_tx.orig;
2251 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2252 			np->put_tx_ctx = np->first_tx_ctx;
2253 	} while (size);
2254 
2255 	/* setup the fragments */
2256 	for (i = 0; i < fragments; i++) {
2257 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2258 		u32 frag_size = skb_frag_size(frag);
2259 		offset = 0;
2260 
2261 		do {
2262 			prev_tx = put_tx;
2263 			prev_tx_ctx = np->put_tx_ctx;
2264 			if (!start_tx_ctx)
2265 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2266 
2267 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2268 			np->put_tx_ctx->dma = skb_frag_dma_map(
2269 							&np->pci_dev->dev,
2270 							frag, offset,
2271 							bcnt,
2272 							DMA_TO_DEVICE);
2273 			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2274 
2275 				/* Unwind the mapped fragments */
2276 				do {
2277 					nv_unmap_txskb(np, tmp_tx_ctx); /* not start_tx_ctx, which would leak the later mappings */
2278 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2279 						tmp_tx_ctx = np->first_tx_ctx;
2280 				} while (tmp_tx_ctx != np->put_tx_ctx);
2281 				kfree_skb(skb);
2282 				np->put_tx_ctx = start_tx_ctx;
2283 				u64_stats_update_begin(&np->swstats_tx_syncp);
2284 				np->stat_tx_dropped++;
2285 				u64_stats_update_end(&np->swstats_tx_syncp);
2286 				return NETDEV_TX_OK;
2287 			}
2288 
2289 			np->put_tx_ctx->dma_len = bcnt;
2290 			np->put_tx_ctx->dma_single = 0;
2291 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2292 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2293 
2294 			offset += bcnt;
2295 			frag_size -= bcnt;
2296 			if (unlikely(put_tx++ == np->last_tx.orig))
2297 				put_tx = np->first_tx.orig;
2298 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2299 				np->put_tx_ctx = np->first_tx_ctx;
2300 		} while (frag_size);
2301 	}
2302 
2303 	/* set last fragment flag  */
2304 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2305 
2306 	/* save skb in this slot's context area */
2307 	prev_tx_ctx->skb = skb;
2308 
2309 	if (skb_is_gso(skb))
2310 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2311 	else
2312 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2313 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2314 
2315 	spin_lock_irqsave(&np->lock, flags);
2316 
2317 	/* set tx flags */
2318 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2319 
2320 	netdev_sent_queue(np->dev, skb->len);
2321 
2322 	skb_tx_timestamp(skb);
2323 
2324 	np->put_tx.orig = put_tx;
2325 
2326 	spin_unlock_irqrestore(&np->lock, flags);
2327 
2328 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2329 	return NETDEV_TX_OK;
2330 }
2331 
2332 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2333 					   struct net_device *dev)
2334 {
2335 	struct fe_priv *np = netdev_priv(dev);
2336 	u32 tx_flags = 0;
2337 	u32 tx_flags_extra;
2338 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2339 	unsigned int i;
2340 	u32 offset = 0;
2341 	u32 bcnt;
2342 	u32 size = skb_headlen(skb);
2343 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2344 	u32 empty_slots;
2345 	struct ring_desc_ex *put_tx;
2346 	struct ring_desc_ex *start_tx;
2347 	struct ring_desc_ex *prev_tx;
2348 	struct nv_skb_map *prev_tx_ctx;
2349 	struct nv_skb_map *start_tx_ctx = NULL;
2350 	struct nv_skb_map *tmp_tx_ctx = NULL;
2351 	unsigned long flags;
2352 
2353 	/* add fragments to entries count */
2354 	for (i = 0; i < fragments; i++) {
2355 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2356 
2357 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2358 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2359 	}
2360 
2361 	spin_lock_irqsave(&np->lock, flags);
2362 	empty_slots = nv_get_empty_tx_slots(np);
2363 	if (unlikely(empty_slots <= entries)) {
2364 		netif_stop_queue(dev);
2365 		np->tx_stop = 1;
2366 		spin_unlock_irqrestore(&np->lock, flags);
2367 		return NETDEV_TX_BUSY;
2368 	}
2369 	spin_unlock_irqrestore(&np->lock, flags);
2370 
2371 	start_tx = put_tx = np->put_tx.ex;
2372 	start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; /* tmp_tx_ctx must be valid if the unwind path runs */
2373 
2374 	/* setup the header buffer */
2375 	do {
2376 		prev_tx = put_tx;
2377 		prev_tx_ctx = np->put_tx_ctx;
2378 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2379 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2380 						PCI_DMA_TODEVICE);
2381 		if (pci_dma_mapping_error(np->pci_dev,
2382 					  np->put_tx_ctx->dma)) {
2383 			/* on DMA mapping error - drop the packet */
2384 			kfree_skb(skb);
2385 			u64_stats_update_begin(&np->swstats_tx_syncp);
2386 			np->stat_tx_dropped++;
2387 			u64_stats_update_end(&np->swstats_tx_syncp);
2388 			return NETDEV_TX_OK;
2389 		}
2390 		np->put_tx_ctx->dma_len = bcnt;
2391 		np->put_tx_ctx->dma_single = 1;
2392 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2393 		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2394 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2395 
2396 		tx_flags = NV_TX2_VALID;
2397 		offset += bcnt;
2398 		size -= bcnt;
2399 		if (unlikely(put_tx++ == np->last_tx.ex))
2400 			put_tx = np->first_tx.ex;
2401 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2402 			np->put_tx_ctx = np->first_tx_ctx;
2403 	} while (size);
2404 
2405 	/* setup the fragments */
2406 	for (i = 0; i < fragments; i++) {
2407 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2408 		u32 frag_size = skb_frag_size(frag);
2409 		offset = 0;
2410 
2411 		do {
2412 			prev_tx = put_tx;
2413 			prev_tx_ctx = np->put_tx_ctx;
2414 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2415 			if (!start_tx_ctx)
2416 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2417 			np->put_tx_ctx->dma = skb_frag_dma_map(
2418 							&np->pci_dev->dev,
2419 							frag, offset,
2420 							bcnt,
2421 							DMA_TO_DEVICE);
2422 
2423 			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2424 
2425 				/* Unwind the mapped fragments */
2426 				do {
2427 					nv_unmap_txskb(np, tmp_tx_ctx); /* not start_tx_ctx, which would leak the later mappings */
2428 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2429 						tmp_tx_ctx = np->first_tx_ctx;
2430 				} while (tmp_tx_ctx != np->put_tx_ctx);
2431 				kfree_skb(skb);
2432 				np->put_tx_ctx = start_tx_ctx;
2433 				u64_stats_update_begin(&np->swstats_tx_syncp);
2434 				np->stat_tx_dropped++;
2435 				u64_stats_update_end(&np->swstats_tx_syncp);
2436 				return NETDEV_TX_OK;
2437 			}
2438 			np->put_tx_ctx->dma_len = bcnt;
2439 			np->put_tx_ctx->dma_single = 0;
2440 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2441 			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2442 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2443 
2444 			offset += bcnt;
2445 			frag_size -= bcnt;
2446 			if (unlikely(put_tx++ == np->last_tx.ex))
2447 				put_tx = np->first_tx.ex;
2448 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2449 				np->put_tx_ctx = np->first_tx_ctx;
2450 		} while (frag_size);
2451 	}
2452 
2453 	/* set last fragment flag  */
2454 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2455 
2456 	/* save skb in this slot's context area */
2457 	prev_tx_ctx->skb = skb;
2458 
2459 	if (skb_is_gso(skb))
2460 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2461 	else
2462 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2463 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2464 
2465 	/* vlan tag */
2466 	if (vlan_tx_tag_present(skb))
2467 		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2468 					vlan_tx_tag_get(skb));
2469 	else
2470 		start_tx->txvlan = 0;
2471 
2472 	spin_lock_irqsave(&np->lock, flags);
2473 
2474 	if (np->tx_limit) {
2475 		/* Limit the number of outstanding tx. Set up all fragments, but
2476 		 * do not set the VALID bit on the first descriptor. Save pointers
2477 		 * to that descriptor and to the next skb_map element.
2478 		 */
2479 
2480 		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2481 			if (!np->tx_change_owner)
2482 				np->tx_change_owner = start_tx_ctx;
2483 
2484 			/* remove VALID bit */
2485 			tx_flags &= ~NV_TX2_VALID;
2486 			start_tx_ctx->first_tx_desc = start_tx;
2487 			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2488 			np->tx_end_flip = np->put_tx_ctx;
2489 		} else {
2490 			np->tx_pkts_in_progress++;
2491 		}
2492 	}
2493 
2494 	/* set tx flags */
2495 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2496 
2497 	netdev_sent_queue(np->dev, skb->len);
2498 
2499 	skb_tx_timestamp(skb);
2500 
2501 	np->put_tx.ex = put_tx;
2502 
2503 	spin_unlock_irqrestore(&np->lock, flags);
2504 
2505 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2506 	return NETDEV_TX_OK;
2507 }
2508 
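/*
 * With np->tx_limit active, packets beyond NV_TX_LIMIT_COUNT in flight
 * are queued with the VALID bit cleared on their first descriptor.
 * Each completion hands one deferred packet back to the hardware by
 * setting VALID on that descriptor and kicking the transmitter.
 */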
2509 static inline void nv_tx_flip_ownership(struct net_device *dev)
2510 {
2511 	struct fe_priv *np = netdev_priv(dev);
2512 
2513 	np->tx_pkts_in_progress--;
2514 	if (np->tx_change_owner) {
2515 		np->tx_change_owner->first_tx_desc->flaglen |=
2516 			cpu_to_le32(NV_TX2_VALID);
2517 		np->tx_pkts_in_progress++;
2518 
2519 		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2520 		if (np->tx_change_owner == np->tx_end_flip)
2521 			np->tx_change_owner = NULL;
2522 
2523 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2524 	}
2525 }
2526 
2527 /*
2528  * nv_tx_done: check for completed packets, release the skbs.
2529  *
2530  * Caller must own np->lock.
2531  */
2532 static int nv_tx_done(struct net_device *dev, int limit)
2533 {
2534 	struct fe_priv *np = netdev_priv(dev);
2535 	u32 flags;
2536 	int tx_work = 0;
2537 	struct ring_desc *orig_get_tx = np->get_tx.orig;
2538 	unsigned int bytes_compl = 0;
2539 
2540 	while ((np->get_tx.orig != np->put_tx.orig) &&
2541 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2542 	       (tx_work < limit)) {
2543 
2544 		nv_unmap_txskb(np, np->get_tx_ctx);
2545 
2546 		if (np->desc_ver == DESC_VER_1) {
2547 			if (flags & NV_TX_LASTPACKET) {
2548 				if (flags & NV_TX_ERROR) {
2549 					if ((flags & NV_TX_RETRYERROR)
2550 					    && !(flags & NV_TX_RETRYCOUNT_MASK))
2551 						nv_legacybackoff_reseed(dev);
2552 				} else {
2553 					u64_stats_update_begin(&np->swstats_tx_syncp);
2554 					np->stat_tx_packets++;
2555 					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2556 					u64_stats_update_end(&np->swstats_tx_syncp);
2557 				}
2558 				bytes_compl += np->get_tx_ctx->skb->len;
2559 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2560 				np->get_tx_ctx->skb = NULL;
2561 				tx_work++;
2562 			}
2563 		} else {
2564 			if (flags & NV_TX2_LASTPACKET) {
2565 				if (flags & NV_TX2_ERROR) {
2566 					if ((flags & NV_TX2_RETRYERROR)
2567 					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
2568 						nv_legacybackoff_reseed(dev);
2569 				} else {
2570 					u64_stats_update_begin(&np->swstats_tx_syncp);
2571 					np->stat_tx_packets++;
2572 					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2573 					u64_stats_update_end(&np->swstats_tx_syncp);
2574 				}
2575 				bytes_compl += np->get_tx_ctx->skb->len;
2576 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2577 				np->get_tx_ctx->skb = NULL;
2578 				tx_work++;
2579 			}
2580 		}
2581 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2582 			np->get_tx.orig = np->first_tx.orig;
2583 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2584 			np->get_tx_ctx = np->first_tx_ctx;
2585 	}
2586 
2587 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
2588 
2589 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2590 		np->tx_stop = 0;
2591 		netif_wake_queue(dev);
2592 	}
2593 	return tx_work;
2594 }
2595 
2596 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2597 {
2598 	struct fe_priv *np = netdev_priv(dev);
2599 	u32 flags;
2600 	int tx_work = 0;
2601 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2602 	unsigned long bytes_cleaned = 0;
2603 
2604 	while ((np->get_tx.ex != np->put_tx.ex) &&
2605 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2606 	       (tx_work < limit)) {
2607 
2608 		nv_unmap_txskb(np, np->get_tx_ctx);
2609 
2610 		if (flags & NV_TX2_LASTPACKET) {
2611 			if (flags & NV_TX2_ERROR) {
2612 				if ((flags & NV_TX2_RETRYERROR)
2613 				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2614 					if (np->driver_data & DEV_HAS_GEAR_MODE)
2615 						nv_gear_backoff_reseed(dev);
2616 					else
2617 						nv_legacybackoff_reseed(dev);
2618 				}
2619 			} else {
2620 				u64_stats_update_begin(&np->swstats_tx_syncp);
2621 				np->stat_tx_packets++;
2622 				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2623 				u64_stats_update_end(&np->swstats_tx_syncp);
2624 			}
2625 
2626 			bytes_cleaned += np->get_tx_ctx->skb->len;
2627 			dev_kfree_skb_any(np->get_tx_ctx->skb);
2628 			np->get_tx_ctx->skb = NULL;
2629 			tx_work++;
2630 
2631 			if (np->tx_limit)
2632 				nv_tx_flip_ownership(dev);
2633 		}
2634 
2635 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2636 			np->get_tx.ex = np->first_tx.ex;
2637 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2638 			np->get_tx_ctx = np->first_tx_ctx;
2639 	}
2640 
2641 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2642 
2643 	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2644 		np->tx_stop = 0;
2645 		netif_wake_queue(dev);
2646 	}
2647 	return tx_work;
2648 }
2649 
2650 /*
2651  * nv_tx_timeout: dev->tx_timeout function
2652  * Called with netif_tx_lock held.
2653  */
2654 static void nv_tx_timeout(struct net_device *dev)
2655 {
2656 	struct fe_priv *np = netdev_priv(dev);
2657 	u8 __iomem *base = get_hwbase(dev);
2658 	u32 status;
2659 	union ring_type put_tx;
2660 	int saved_tx_limit;
2661 
2662 	if (np->msi_flags & NV_MSI_X_ENABLED)
2663 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2664 	else
2665 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2666 
2667 	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2668 
2669 	if (unlikely(debug_tx_timeout)) {
2670 		int i;
2671 
2672 		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2673 		netdev_info(dev, "Dumping tx registers\n");
2674 		for (i = 0; i <= np->register_size; i += 32) {
2675 			netdev_info(dev,
2676 				    "%3x: %08x %08x %08x %08x "
2677 				    "%08x %08x %08x %08x\n",
2678 				    i,
2679 				    readl(base + i + 0), readl(base + i + 4),
2680 				    readl(base + i + 8), readl(base + i + 12),
2681 				    readl(base + i + 16), readl(base + i + 20),
2682 				    readl(base + i + 24), readl(base + i + 28));
2683 		}
2684 		netdev_info(dev, "Dumping tx ring\n");
2685 		for (i = 0; i < np->tx_ring_size; i += 4) {
2686 			if (!nv_optimized(np)) {
2687 				netdev_info(dev,
2688 					    "%03x: %08x %08x // %08x %08x "
2689 					    "// %08x %08x // %08x %08x\n",
2690 					    i,
2691 					    le32_to_cpu(np->tx_ring.orig[i].buf),
2692 					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
2693 					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
2694 					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2695 					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
2696 					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2697 					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
2698 					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2699 			} else {
2700 				netdev_info(dev,
2701 					    "%03x: %08x %08x %08x "
2702 					    "// %08x %08x %08x "
2703 					    "// %08x %08x %08x "
2704 					    "// %08x %08x %08x\n",
2705 					    i,
2706 					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2707 					    le32_to_cpu(np->tx_ring.ex[i].buflow),
2708 					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
2709 					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2710 					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2711 					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2712 					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2713 					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2714 					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2715 					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2716 					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2717 					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2718 			}
2719 		}
2720 	}
2721 
2722 	spin_lock_irq(&np->lock);
2723 
2724 	/* 1) stop tx engine */
2725 	nv_stop_tx(dev);
2726 
2727 	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2728 	saved_tx_limit = np->tx_limit;
2729 	np->tx_limit = 0; /* prevent giving HW any limited pkts */
2730 	np->tx_stop = 0;  /* prevent waking tx queue */
2731 	if (!nv_optimized(np))
2732 		nv_tx_done(dev, np->tx_ring_size);
2733 	else
2734 		nv_tx_done_optimized(dev, np->tx_ring_size);
2735 
2736 	/* save current HW position */
2737 	if (np->tx_change_owner)
2738 		put_tx.ex = np->tx_change_owner->first_tx_desc;
2739 	else
2740 		put_tx = np->put_tx;
2741 
2742 	/* 3) clear all tx state */
2743 	nv_drain_tx(dev);
2744 	nv_init_tx(dev);
2745 
2746 	/* 4) restore state to current HW position */
2747 	np->get_tx = np->put_tx = put_tx;
2748 	np->tx_limit = saved_tx_limit;
2749 
2750 	/* 5) restart tx engine */
2751 	nv_start_tx(dev);
2752 	netif_wake_queue(dev);
2753 	spin_unlock_irq(&np->lock);
2754 }
2755 
2756 /*
2757  * Called when the nic notices a mismatch between the actual data len on the
2758  * wire and the len indicated in the 802 header
2759  */
2760 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2761 {
2762 	int hdrlen;	/* length of the 802 header */
2763 	int protolen;	/* length as stored in the proto field */
2764 
2765 	/* 1) calculate len according to header */
2766 	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2767 		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2768 		hdrlen = VLAN_HLEN;
2769 	} else {
2770 		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2771 		hdrlen = ETH_HLEN;
2772 	}
2773 	if (protolen > ETH_DATA_LEN)
2774 		return datalen; /* Value in proto field not a len, no checks possible */
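	/* e.g. an EtherType of 0x0800 (IPv4) is > ETH_DATA_LEN and names
	 * a protocol, not a length, so the wire length must be trusted
	 * as-is; a value like 46 is a genuine 802.3 length field and is
	 * checked against what actually arrived below */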
2775 
2776 	protolen += hdrlen;
2777 	/* consistency checks: */
2778 	if (datalen > ETH_ZLEN) {
2779 		if (datalen >= protolen) {
2780 			/* more data on wire than in 802 header, trim off
2781 			 * the additional data.
2782 			 */
2783 			return protolen;
2784 		} else {
2785 			/* less data on wire than mentioned in header.
2786 			 * Discard the packet.
2787 			 */
2788 			return -1;
2789 		}
2790 	} else {
2791 		/* short packet. Accept only if 802 values are also short */
2792 		if (protolen > ETH_ZLEN) {
2793 			return -1;
2794 		}
2795 		return datalen;
2796 	}
2797 }
2798 
2799 static int nv_rx_process(struct net_device *dev, int limit)
2800 {
2801 	struct fe_priv *np = netdev_priv(dev);
2802 	u32 flags;
2803 	int rx_work = 0;
2804 	struct sk_buff *skb;
2805 	int len;
2806 
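	/* a descriptor whose AVAIL bit is still set is owned by the
	 * hardware and not yet filled; processing stops there */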
2807 	while ((np->get_rx.orig != np->put_rx.orig) &&
2808 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2809 		(rx_work < limit)) {
2810 
2811 		/*
2812 		 * the packet is for us - immediately tear down the pci mapping.
2813 		 * TODO: check if a prefetch of the first cacheline improves
2814 		 * the performance.
2815 		 */
2816 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2817 				np->get_rx_ctx->dma_len,
2818 				PCI_DMA_FROMDEVICE);
2819 		skb = np->get_rx_ctx->skb;
2820 		np->get_rx_ctx->skb = NULL;
2821 
2822 		/* look at what we actually got: */
2823 		if (np->desc_ver == DESC_VER_1) {
2824 			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2825 				len = flags & LEN_MASK_V1;
2826 				if (unlikely(flags & NV_RX_ERROR)) {
2827 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2828 						len = nv_getlen(dev, skb->data, len);
2829 						if (len < 0) {
2830 							dev_kfree_skb(skb);
2831 							goto next_pkt;
2832 						}
2833 					}
2834 					/* framing errors are soft errors */
2835 					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2836 						if (flags & NV_RX_SUBSTRACT1)
2837 							len--;
2838 					}
2839 					/* the rest are hard errors */
2840 					else {
2841 						if (flags & NV_RX_MISSEDFRAME) {
2842 							u64_stats_update_begin(&np->swstats_rx_syncp);
2843 							np->stat_rx_missed_errors++;
2844 							u64_stats_update_end(&np->swstats_rx_syncp);
2845 						}
2846 						dev_kfree_skb(skb);
2847 						goto next_pkt;
2848 					}
2849 				}
2850 			} else {
2851 				dev_kfree_skb(skb);
2852 				goto next_pkt;
2853 			}
2854 		} else {
2855 			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2856 				len = flags & LEN_MASK_V2;
2857 				if (unlikely(flags & NV_RX2_ERROR)) {
2858 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2859 						len = nv_getlen(dev, skb->data, len);
2860 						if (len < 0) {
2861 							dev_kfree_skb(skb);
2862 							goto next_pkt;
2863 						}
2864 					}
2865 					/* framing errors are soft errors */
2866 					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2867 						if (flags & NV_RX2_SUBSTRACT1)
2868 							len--;
2869 					}
2870 					/* the rest are hard errors */
2871 					else {
2872 						dev_kfree_skb(skb);
2873 						goto next_pkt;
2874 					}
2875 				}
2876 				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2877 				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2878 					skb->ip_summed = CHECKSUM_UNNECESSARY;
2879 			} else {
2880 				dev_kfree_skb(skb);
2881 				goto next_pkt;
2882 			}
2883 		}
2884 		/* got a valid packet - forward it to the network core */
2885 		skb_put(skb, len);
2886 		skb->protocol = eth_type_trans(skb, dev);
2887 		napi_gro_receive(&np->napi, skb);
2888 		u64_stats_update_begin(&np->swstats_rx_syncp);
2889 		np->stat_rx_packets++;
2890 		np->stat_rx_bytes += len;
2891 		u64_stats_update_end(&np->swstats_rx_syncp);
2892 next_pkt:
2893 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2894 			np->get_rx.orig = np->first_rx.orig;
2895 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2896 			np->get_rx_ctx = np->first_rx_ctx;
2897 
2898 		rx_work++;
2899 	}
2900 
2901 	return rx_work;
2902 }
2903 
2904 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2905 {
2906 	struct fe_priv *np = netdev_priv(dev);
2907 	u32 flags;
2908 	u32 vlanflags = 0;
2909 	int rx_work = 0;
2910 	struct sk_buff *skb;
2911 	int len;
2912 
2913 	while ((np->get_rx.ex != np->put_rx.ex) &&
2914 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2915 	      (rx_work < limit)) {
2916 
2917 		/*
2918 		 * the packet is for us - immediately tear down the pci mapping.
2919 		 * TODO: check if a prefetch of the first cacheline improves
2920 		 * the performance.
2921 		 */
2922 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2923 				np->get_rx_ctx->dma_len,
2924 				PCI_DMA_FROMDEVICE);
2925 		skb = np->get_rx_ctx->skb;
2926 		np->get_rx_ctx->skb = NULL;
2927 
2928 		/* look at what we actually got: */
2929 		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2930 			len = flags & LEN_MASK_V2;
2931 			if (unlikely(flags & NV_RX2_ERROR)) {
2932 				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2933 					len = nv_getlen(dev, skb->data, len);
2934 					if (len < 0) {
2935 						dev_kfree_skb(skb);
2936 						goto next_pkt;
2937 					}
2938 				}
2939 				/* framing errors are soft errors */
2940 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2941 					if (flags & NV_RX2_SUBSTRACT1)
2942 						len--;
2943 				}
2944 				/* the rest are hard errors */
2945 				else {
2946 					dev_kfree_skb(skb);
2947 					goto next_pkt;
2948 				}
2949 			}
2950 
2951 			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2952 			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2953 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2954 
2955 			/* got a valid packet - forward it to the network core */
2956 			skb_put(skb, len);
2957 			skb->protocol = eth_type_trans(skb, dev);
2958 			prefetch(skb->data);
2959 
2960 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2961 
2962 			/*
2963 			 * There is a need to check for NETIF_F_HW_VLAN_CTAG_RX
2964 			 * here: even if vlan rx acceleration is disabled,
2965 			 * NV_RX3_VLAN_TAG_PRESENT may be set pseudo-randomly.
2966 			 */
2967 			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2968 			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2969 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2970 
2971 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2972 			}
2973 			napi_gro_receive(&np->napi, skb);
2974 			u64_stats_update_begin(&np->swstats_rx_syncp);
2975 			np->stat_rx_packets++;
2976 			np->stat_rx_bytes += len;
2977 			u64_stats_update_end(&np->swstats_rx_syncp);
2978 		} else {
2979 			dev_kfree_skb(skb);
2980 		}
2981 next_pkt:
2982 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2983 			np->get_rx.ex = np->first_rx.ex;
2984 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2985 			np->get_rx_ctx = np->first_rx_ctx;
2986 
2987 		rx_work++;
2988 	}
2989 
2990 	return rx_work;
2991 }
2992 
2993 static void set_bufsize(struct net_device *dev)
2994 {
2995 	struct fe_priv *np = netdev_priv(dev);
2996 
2997 	if (dev->mtu <= ETH_DATA_LEN)
2998 		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2999 	else
3000 		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3001 }
3002 
3003 /*
3004  * nv_change_mtu: dev->change_mtu function
3005  * Called with dev_base_lock held for read.
3006  */
3007 static int nv_change_mtu(struct net_device *dev, int new_mtu)
3008 {
3009 	struct fe_priv *np = netdev_priv(dev);
3010 	int old_mtu;
3011 
3012 	if (new_mtu < 64 || new_mtu > np->pkt_limit)
3013 		return -EINVAL;
3014 
3015 	old_mtu = dev->mtu;
3016 	dev->mtu = new_mtu;
3017 
3018 	/* return early if the buffer sizes will not change */
3019 	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
3020 		return 0;
3021 	if (old_mtu == new_mtu)
3022 		return 0;
3023 
3024 	/* synchronized against open : rtnl_lock() held by caller */
3025 	if (netif_running(dev)) {
3026 		u8 __iomem *base = get_hwbase(dev);
3027 		/*
3028 		 * It seems that the nic preloads valid ring entries into an
3029 		 * internal buffer. The procedure for flushing everything is
3030 		 * guessed; there is probably a simpler approach.
3031 		 * Changing the MTU is a rare event, so it shouldn't matter.
3032 		 */
3033 		nv_disable_irq(dev);
3034 		nv_napi_disable(dev);
3035 		netif_tx_lock_bh(dev);
3036 		netif_addr_lock(dev);
3037 		spin_lock(&np->lock);
3038 		/* stop engines */
3039 		nv_stop_rxtx(dev);
3040 		nv_txrx_reset(dev);
3041 		/* drain rx and tx queues */
3042 		nv_drain_rxtx(dev);
3043 		/* reinit driver view of the rx and tx queues */
3044 		set_bufsize(dev);
3045 		if (nv_init_ring(dev)) {
3046 			if (!np->in_shutdown)
3047 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3048 		}
3049 		/* reinit nic view of the rx and tx queues */
3050 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3051 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
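		/* both ring sizes share one register: each field holds the
		 * entry count minus one, shifted into place */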
3052 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3053 			base + NvRegRingSizes);
3054 		pci_push(base);
3055 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3056 		pci_push(base);
3057 
3058 		/* restart rx and tx engines */
3059 		nv_start_rxtx(dev);
3060 		spin_unlock(&np->lock);
3061 		netif_addr_unlock(dev);
3062 		netif_tx_unlock_bh(dev);
3063 		nv_napi_enable(dev);
3064 		nv_enable_irq(dev);
3065 	}
3066 	return 0;
3067 }
3068 
3069 static void nv_copy_mac_to_hw(struct net_device *dev)
3070 {
3071 	u8 __iomem *base = get_hwbase(dev);
3072 	u32 mac[2];
3073 
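	/* pack the 6-byte MAC little-endian into two registers: bytes
	 * 0-3 into NvRegMacAddrA, bytes 4-5 into NvRegMacAddrB */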
3074 	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3075 			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3076 	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3077 
3078 	writel(mac[0], base + NvRegMacAddrA);
3079 	writel(mac[1], base + NvRegMacAddrB);
3080 }
3081 
3082 /*
3083  * nv_set_mac_address: dev->set_mac_address function
3084  * Called with rtnl_lock() held.
3085  */
3086 static int nv_set_mac_address(struct net_device *dev, void *addr)
3087 {
3088 	struct fe_priv *np = netdev_priv(dev);
3089 	struct sockaddr *macaddr = (struct sockaddr *)addr;
3090 
3091 	if (!is_valid_ether_addr(macaddr->sa_data))
3092 		return -EADDRNOTAVAIL;
3093 
3094 	/* synchronized against open : rtnl_lock() held by caller */
3095 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3096 
3097 	if (netif_running(dev)) {
3098 		netif_tx_lock_bh(dev);
3099 		netif_addr_lock(dev);
3100 		spin_lock_irq(&np->lock);
3101 
3102 		/* stop rx engine */
3103 		nv_stop_rx(dev);
3104 
3105 		/* set mac address */
3106 		nv_copy_mac_to_hw(dev);
3107 
3108 		/* restart rx engine */
3109 		nv_start_rx(dev);
3110 		spin_unlock_irq(&np->lock);
3111 		netif_addr_unlock(dev);
3112 		netif_tx_unlock_bh(dev);
3113 	} else {
3114 		nv_copy_mac_to_hw(dev);
3115 	}
3116 	return 0;
3117 }
3118 
3119 /*
3120  * nv_set_multicast: dev->set_multicast function
3121  * Called with netif_tx_lock held.
3122  */
3123 static void nv_set_multicast(struct net_device *dev)
3124 {
3125 	struct fe_priv *np = netdev_priv(dev);
3126 	u8 __iomem *base = get_hwbase(dev);
3127 	u32 addr[2];
3128 	u32 mask[2];
3129 	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3130 
3131 	memset(addr, 0, sizeof(addr));
3132 	memset(mask, 0, sizeof(mask));
3133 
3134 	if (dev->flags & IFF_PROMISC) {
3135 		pff |= NVREG_PFF_PROMISC;
3136 	} else {
3137 		pff |= NVREG_PFF_MYADDR;
3138 
3139 		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3140 			u32 alwaysOff[2];
3141 			u32 alwaysOn[2];
3142 
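			/* derive a single address/mask pair matching every
			 * list entry: alwaysOn accumulates bits set in all
			 * addresses, alwaysOff bits clear in all of them;
			 * bits that differ drop out of the compare mask */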
3143 			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3144 			if (dev->flags & IFF_ALLMULTI) {
3145 				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3146 			} else {
3147 				struct netdev_hw_addr *ha;
3148 
3149 				netdev_for_each_mc_addr(ha, dev) {
3150 					unsigned char *hw_addr = ha->addr;
3151 					u32 a, b;
3152 
3153 					a = le32_to_cpu(*(__le32 *) hw_addr);
3154 					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
3155 					alwaysOn[0] &= a;
3156 					alwaysOff[0] &= ~a;
3157 					alwaysOn[1] &= b;
3158 					alwaysOff[1] &= ~b;
3159 				}
3160 			}
3161 			addr[0] = alwaysOn[0];
3162 			addr[1] = alwaysOn[1];
3163 			mask[0] = alwaysOn[0] | alwaysOff[0];
3164 			mask[1] = alwaysOn[1] | alwaysOff[1];
3165 		} else {
3166 			mask[0] = NVREG_MCASTMASKA_NONE;
3167 			mask[1] = NVREG_MCASTMASKB_NONE;
3168 		}
3169 	}
3170 	addr[0] |= NVREG_MCASTADDRA_FORCE;
3171 	pff |= NVREG_PFF_ALWAYS;
3172 	spin_lock_irq(&np->lock);
3173 	nv_stop_rx(dev);
3174 	writel(addr[0], base + NvRegMulticastAddrA);
3175 	writel(addr[1], base + NvRegMulticastAddrB);
3176 	writel(mask[0], base + NvRegMulticastMaskA);
3177 	writel(mask[1], base + NvRegMulticastMaskB);
3178 	writel(pff, base + NvRegPacketFilterFlags);
3179 	nv_start_rx(dev);
3180 	spin_unlock_irq(&np->lock);
3181 }
3182 
3183 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3184 {
3185 	struct fe_priv *np = netdev_priv(dev);
3186 	u8 __iomem *base = get_hwbase(dev);
3187 
3188 	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3189 
3190 	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3191 		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3192 		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3193 			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3194 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3195 		} else {
3196 			writel(pff, base + NvRegPacketFilterFlags);
3197 		}
3198 	}
3199 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3200 		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3201 		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3202 			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3203 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3204 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3205 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3206 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3207 				/* limit the number of tx pause frames to a default of 8 */
3208 				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3209 			}
3210 			writel(pause_enable,  base + NvRegTxPauseFrame);
3211 			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3212 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3213 		} else {
3214 			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3215 			writel(regmisc, base + NvRegMisc1);
3216 		}
3217 	}
3218 }
3219 
3220 static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3221 {
3222 	struct fe_priv *np = netdev_priv(dev);
3223 	u8 __iomem *base = get_hwbase(dev);
3224 	u32 phyreg, txreg;
3225 	int mii_status;
3226 
3227 	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3228 	np->duplex = duplex;
3229 
3230 	/* see if gigabit phy */
3231 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3232 	if (mii_status & PHY_GIGABIT) {
3233 		np->gigabit = PHY_GIGABIT;
3234 		phyreg = readl(base + NvRegSlotTime);
3235 		phyreg &= ~(0x3FF00);
3236 		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3237 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3238 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3239 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3240 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3241 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3242 		writel(phyreg, base + NvRegSlotTime);
3243 	}
3244 
3245 	phyreg = readl(base + NvRegPhyInterface);
3246 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3247 	if (np->duplex == 0)
3248 		phyreg |= PHY_HALF;
3249 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3250 		phyreg |= PHY_100;
3251 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3252 							NVREG_LINKSPEED_1000)
3253 		phyreg |= PHY_1000;
3254 	writel(phyreg, base + NvRegPhyInterface);
3255 
3256 	if (phyreg & PHY_RGMII) {
3257 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3258 							NVREG_LINKSPEED_1000)
3259 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3260 		else
3261 			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3262 	} else {
3263 		txreg = NVREG_TX_DEFERRAL_DEFAULT;
3264 	}
3265 	writel(txreg, base + NvRegTxDeferral);
3266 
3267 	if (np->desc_ver == DESC_VER_1) {
3268 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3269 	} else {
3270 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3271 					 NVREG_LINKSPEED_1000)
3272 			txreg = NVREG_TX_WM_DESC2_3_1000;
3273 		else
3274 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3275 	}
3276 	writel(txreg, base + NvRegTxWatermark);
3277 
3278 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3279 			base + NvRegMisc1);
3280 	pci_push(base);
3281 	writel(np->linkspeed, base + NvRegLinkSpeed);
3282 	pci_push(base);
3283 
3284 	return;
3285 }
3286 
3287 /**
3288  * nv_update_linkspeed - Setup the MAC according to the link partner
3289  * @dev: Network device to be configured
3290  *
3291  * The function queries the PHY and checks if there is a link partner.
3292  * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3293  * set to 10 MBit HD.
3294  *
3295  * The function returns 0 if there is no link partner and 1 if there is
3296  * a good link partner.
3297  */
3298 static int nv_update_linkspeed(struct net_device *dev)
3299 {
3300 	struct fe_priv *np = netdev_priv(dev);
3301 	u8 __iomem *base = get_hwbase(dev);
3302 	int adv = 0;
3303 	int lpa = 0;
3304 	int adv_lpa, adv_pause, lpa_pause;
3305 	int newls = np->linkspeed;
3306 	int newdup = np->duplex;
3307 	int mii_status;
3308 	u32 bmcr;
3309 	int retval = 0;
3310 	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3311 	u32 txrxFlags = 0;
3312 	u32 phy_exp;
3313 
3314 	/* If device loopback is enabled, set carrier on and enable max link
3315 	 * speed.
3316 	 */
3317 	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3318 	if (bmcr & BMCR_LOOPBACK) {
3319 		if (netif_running(dev)) {
3320 			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3321 			if (!netif_carrier_ok(dev))
3322 				netif_carrier_on(dev);
3323 		}
3324 		return 1;
3325 	}
3326 
3327 	/* BMSR_LSTATUS is latched; read it twice
3328 	 * to get the current value.
3329 	 */
3330 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3331 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3332 
3333 	if (!(mii_status & BMSR_LSTATUS)) {
3334 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3335 		newdup = 0;
3336 		retval = 0;
3337 		goto set_speed;
3338 	}
3339 
3340 	if (np->autoneg == 0) {
3341 		if (np->fixed_mode & LPA_100FULL) {
3342 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3343 			newdup = 1;
3344 		} else if (np->fixed_mode & LPA_100HALF) {
3345 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3346 			newdup = 0;
3347 		} else if (np->fixed_mode & LPA_10FULL) {
3348 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3349 			newdup = 1;
3350 		} else {
3351 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3352 			newdup = 0;
3353 		}
3354 		retval = 1;
3355 		goto set_speed;
3356 	}
3357 	/* check whether autonegotiation is complete */
3358 	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3359 		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3360 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3361 		newdup = 0;
3362 		retval = 0;
3363 		goto set_speed;
3364 	}
3365 
3366 	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3367 	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3368 
3369 	retval = 1;
3370 	if (np->gigabit == PHY_GIGABIT) {
3371 		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3372 		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3373 
3374 		if ((control_1000 & ADVERTISE_1000FULL) &&
3375 			(status_1000 & LPA_1000FULL)) {
3376 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3377 			newdup = 1;
3378 			goto set_speed;
3379 		}
3380 	}
3381 
3382 	/* FIXME: handle parallel detection properly */
3383 	adv_lpa = lpa & adv;
3384 	if (adv_lpa & LPA_100FULL) {
3385 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3386 		newdup = 1;
3387 	} else if (adv_lpa & LPA_100HALF) {
3388 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3389 		newdup = 0;
3390 	} else if (adv_lpa & LPA_10FULL) {
3391 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3392 		newdup = 1;
3393 	} else if (adv_lpa & LPA_10HALF) {
3394 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3395 		newdup = 0;
3396 	} else {
3397 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3398 		newdup = 0;
3399 	}
3400 
3401 set_speed:
3402 	if (np->duplex == newdup && np->linkspeed == newls)
3403 		return retval;
3404 
3405 	np->duplex = newdup;
3406 	np->linkspeed = newls;
3407 
3408 	/* The transmitter and receiver must be restarted for safe update */
3409 	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3410 		txrxFlags |= NV_RESTART_TX;
3411 		nv_stop_tx(dev);
3412 	}
3413 	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3414 		txrxFlags |= NV_RESTART_RX;
3415 		nv_stop_rx(dev);
3416 	}
3417 
3418 	if (np->gigabit == PHY_GIGABIT) {
3419 		phyreg = readl(base + NvRegSlotTime);
3420 		phyreg &= ~(0x3FF00);
3421 		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3422 		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3423 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3424 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3425 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3426 		writel(phyreg, base + NvRegSlotTime);
3427 	}
3428 
3429 	phyreg = readl(base + NvRegPhyInterface);
3430 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3431 	if (np->duplex == 0)
3432 		phyreg |= PHY_HALF;
3433 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3434 		phyreg |= PHY_100;
3435 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3436 		phyreg |= PHY_1000;
3437 	writel(phyreg, base + NvRegPhyInterface);
3438 
3439 	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3440 	if (phyreg & PHY_RGMII) {
3441 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3442 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3443 		} else {
3444 			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3445 				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3446 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3447 				else
3448 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3449 			} else {
3450 				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3451 			}
3452 		}
3453 	} else {
3454 		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3455 			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3456 		else
3457 			txreg = NVREG_TX_DEFERRAL_DEFAULT;
3458 	}
3459 	writel(txreg, base + NvRegTxDeferral);
3460 
3461 	if (np->desc_ver == DESC_VER_1) {
3462 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3463 	} else {
3464 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3465 			txreg = NVREG_TX_WM_DESC2_3_1000;
3466 		else
3467 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3468 	}
3469 	writel(txreg, base + NvRegTxWatermark);
3470 
3471 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3472 		base + NvRegMisc1);
3473 	pci_push(base);
3474 	writel(np->linkspeed, base + NvRegLinkSpeed);
3475 	pci_push(base);
3476 
3477 	pause_flags = 0;
3478 	/* setup pause frame */
3479 	if (netif_running(dev) && (np->duplex != 0)) {
3480 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3481 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3482 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3483 
3484 			switch (adv_pause) {
3485 			case ADVERTISE_PAUSE_CAP:
3486 				if (lpa_pause & LPA_PAUSE_CAP) {
3487 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3488 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3489 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3490 				}
3491 				break;
3492 			case ADVERTISE_PAUSE_ASYM:
3493 				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3494 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3495 				break;
3496 			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3497 				if (lpa_pause & LPA_PAUSE_CAP) {
3498 					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3499 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3500 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3501 				}
3502 				if (lpa_pause == LPA_PAUSE_ASYM)
3503 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3504 				break;
3505 			}
3506 		} else {
3507 			pause_flags = np->pause_flags;
3508 		}
3509 	}
3510 	nv_update_pause(dev, pause_flags);
3511 
3512 	if (txrxFlags & NV_RESTART_TX)
3513 		nv_start_tx(dev);
3514 	if (txrxFlags & NV_RESTART_RX)
3515 		nv_start_rx(dev);
3516 
3517 	return retval;
3518 }
3519 
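/*
 * nv_linkchange: re-check the link and update the carrier state, gating
 * the tx/rx datapath and starting/stopping the rx engine on a transition.
 */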
3520 static void nv_linkchange(struct net_device *dev)
3521 {
3522 	if (nv_update_linkspeed(dev)) {
3523 		if (!netif_carrier_ok(dev)) {
3524 			netif_carrier_on(dev);
3525 			netdev_info(dev, "link up\n");
3526 			nv_txrx_gate(dev, false);
3527 			nv_start_rx(dev);
3528 		}
3529 	} else {
3530 		if (netif_carrier_ok(dev)) {
3531 			netif_carrier_off(dev);
3532 			netdev_info(dev, "link down\n");
3533 			nv_txrx_gate(dev, true);
3534 			nv_stop_rx(dev);
3535 		}
3536 	}
3537 }
3538 
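/* Acknowledge the MII status irq and handle the link change, if any. */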
3539 static void nv_link_irq(struct net_device *dev)
3540 {
3541 	u8 __iomem *base = get_hwbase(dev);
3542 	u32 miistat;
3543 
3544 	miistat = readl(base + NvRegMIIStatus);
3545 	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3546 
3547 	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3548 		nv_linkchange(dev);
3549 }
3550 
3551 static void nv_msi_workaround(struct fe_priv *np)
3552 {
3554 	/* Need to toggle the MSI irq mask within the ethernet device;
3555 	 * otherwise future interrupts will not be detected.
3556 	 */
3557 	if (np->msi_flags & NV_MSI_ENABLED) {
3558 		u8 __iomem *base = np->base;
3559 
3560 		writel(0, base + NvRegMSIIrqMask);
3561 		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3562 	}
3563 }
3564 
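/*
 * In dynamic optimization mode, switch between the poll-based irq mask
 * (cheaper under heavy load) and per-packet throughput irqs (after a
 * sustained quiet period). Returns 1 if np->irqmask was changed, so the
 * caller knows the hardware mask must be rewritten.
 */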
3565 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3566 {
3567 	struct fe_priv *np = netdev_priv(dev);
3568 
3569 	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3570 		if (total_work > NV_DYNAMIC_THRESHOLD) {
3571 			/* transition to poll based interrupts */
3572 			np->quiet_count = 0;
3573 			if (np->irqmask != NVREG_IRQMASK_CPU) {
3574 				np->irqmask = NVREG_IRQMASK_CPU;
3575 				return 1;
3576 			}
3577 		} else {
3578 			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3579 				np->quiet_count++;
3580 			} else {
3581 				/* reached a period of low activity, switch
3582 				   to per tx/rx packet interrupts */
3583 				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3584 					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3585 					return 1;
3586 				}
3587 			}
3588 		}
3589 	}
3590 	return 0;
3591 }
3592 
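/* Top-half for the single-vector (legacy/MSI) case: ack the events, apply
 * the MSI workaround and hand all work to NAPI with the irq mask cleared. */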
3593 static irqreturn_t nv_nic_irq(int foo, void *data)
3594 {
3595 	struct net_device *dev = (struct net_device *) data;
3596 	struct fe_priv *np = netdev_priv(dev);
3597 	u8 __iomem *base = get_hwbase(dev);
3598 
3599 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3600 		np->events = readl(base + NvRegIrqStatus);
3601 		writel(np->events, base + NvRegIrqStatus);
3602 	} else {
3603 		np->events = readl(base + NvRegMSIXIrqStatus);
3604 		writel(np->events, base + NvRegMSIXIrqStatus);
3605 	}
3606 	if (!(np->events & np->irqmask))
3607 		return IRQ_NONE;
3608 
3609 	nv_msi_workaround(np);
3610 
3611 	if (napi_schedule_prep(&np->napi)) {
3612 		/*
3613 		 * Disable further IRQs (MSI-X is not enabled with NAPI)
3614 		 */
3615 		writel(0, base + NvRegIrqMask);
3616 		__napi_schedule(&np->napi);
3617 	}
3618 
3619 	return IRQ_HANDLED;
3620 }
3621 
3622 /* All _optimized functions are used to help increase performance
3623  * (reduce CPU load and increase throughput). They use descriptor version 3,
3624  * compiler directives, and fewer memory accesses.
3625  */
3626 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3627 {
3628 	struct net_device *dev = (struct net_device *) data;
3629 	struct fe_priv *np = netdev_priv(dev);
3630 	u8 __iomem *base = get_hwbase(dev);
3631 
3632 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3633 		np->events = readl(base + NvRegIrqStatus);
3634 		writel(np->events, base + NvRegIrqStatus);
3635 	} else {
3636 		np->events = readl(base + NvRegMSIXIrqStatus);
3637 		writel(np->events, base + NvRegMSIXIrqStatus);
3638 	}
3639 	if (!(np->events & np->irqmask))
3640 		return IRQ_NONE;
3641 
3642 	nv_msi_workaround(np);
3643 
3644 	if (napi_schedule_prep(&np->napi)) {
3645 		/*
3646 		 * Disable further IRQs (MSI-X is not enabled with NAPI)
3647 		 */
3648 		writel(0, base + NvRegIrqMask);
3649 		__napi_schedule(&np->napi);
3650 	}
3651 
3652 	return IRQ_HANDLED;
3653 }
3654 
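/* MSI-X tx vector: reap completed tx descriptors until the events stop or
 * max_interrupt_work is exceeded, in which case the tx irqs are shut off
 * on the nic and the nic_poll timer takes over. */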
3655 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3656 {
3657 	struct net_device *dev = (struct net_device *) data;
3658 	struct fe_priv *np = netdev_priv(dev);
3659 	u8 __iomem *base = get_hwbase(dev);
3660 	u32 events;
3661 	int i;
3662 	unsigned long flags;
3663 
3664 	for (i = 0;; i++) {
3665 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3666 		writel(events, base + NvRegMSIXIrqStatus);
3667 		netdev_dbg(dev, "tx irq events: %08x\n", events);
3668 		if (!(events & np->irqmask))
3669 			break;
3670 
3671 		spin_lock_irqsave(&np->lock, flags);
3672 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3673 		spin_unlock_irqrestore(&np->lock, flags);
3674 
3675 		if (unlikely(i > max_interrupt_work)) {
3676 			spin_lock_irqsave(&np->lock, flags);
3677 			/* disable interrupts on the nic */
3678 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3679 			pci_push(base);
3680 
3681 			if (!np->in_shutdown) {
3682 				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3683 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3684 			}
3685 			spin_unlock_irqrestore(&np->lock, flags);
3686 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3687 				   __func__, i);
3688 			break;
3689 		}
3690 
3691 	}
3692 
3693 	return IRQ_RETVAL(i);
3694 }
3695 
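/* NAPI poll: drain tx completions and up to @budget rx packets, refill the
 * rx ring, then handle deferred link and recovery events before completing
 * and re-enabling interrupts. */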
3696 static int nv_napi_poll(struct napi_struct *napi, int budget)
3697 {
3698 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3699 	struct net_device *dev = np->dev;
3700 	u8 __iomem *base = get_hwbase(dev);
3701 	unsigned long flags;
3702 	int retcode;
3703 	int rx_count, tx_work = 0, rx_work = 0;
3704 
3705 	do {
3706 		if (!nv_optimized(np)) {
3707 			spin_lock_irqsave(&np->lock, flags);
3708 			tx_work += nv_tx_done(dev, np->tx_ring_size);
3709 			spin_unlock_irqrestore(&np->lock, flags);
3710 
3711 			rx_count = nv_rx_process(dev, budget - rx_work);
3712 			retcode = nv_alloc_rx(dev);
3713 		} else {
3714 			spin_lock_irqsave(&np->lock, flags);
3715 			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3716 			spin_unlock_irqrestore(&np->lock, flags);
3717 
3718 			rx_count = nv_rx_process_optimized(dev,
3719 			    budget - rx_work);
3720 			retcode = nv_alloc_rx_optimized(dev);
3721 		}
3722 	} while (retcode == 0 &&
3723 		 rx_count > 0 && (rx_work += rx_count) < budget);
3724 
3725 	if (retcode) {
3726 		spin_lock_irqsave(&np->lock, flags);
3727 		if (!np->in_shutdown)
3728 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3729 		spin_unlock_irqrestore(&np->lock, flags);
3730 	}
3731 
3732 	nv_change_interrupt_mode(dev, tx_work + rx_work);
3733 
3734 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
3735 		spin_lock_irqsave(&np->lock, flags);
3736 		nv_link_irq(dev);
3737 		spin_unlock_irqrestore(&np->lock, flags);
3738 	}
3739 	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3740 		spin_lock_irqsave(&np->lock, flags);
3741 		nv_linkchange(dev);
3742 		spin_unlock_irqrestore(&np->lock, flags);
3743 		np->link_timeout = jiffies + LINK_TIMEOUT;
3744 	}
3745 	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3746 		spin_lock_irqsave(&np->lock, flags);
3747 		if (!np->in_shutdown) {
3748 			np->nic_poll_irq = np->irqmask;
3749 			np->recover_error = 1;
3750 			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3751 		}
3752 		spin_unlock_irqrestore(&np->lock, flags);
3753 		napi_complete(napi);
3754 		return rx_work;
3755 	}
3756 
3757 	if (rx_work < budget) {
3758 		/* re-enable interrupts
3759 		   (msix not enabled in napi) */
3760 		napi_complete(napi);
3761 
3762 		writel(np->irqmask, base + NvRegIrqMask);
3763 	}
3764 	return rx_work;
3765 }
3766 
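/* MSI-X rx vector: process received packets and refill the ring, deferring
 * to the oom timer when allocation fails and to the nic_poll timer when the
 * loop limit is hit. */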
3767 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3768 {
3769 	struct net_device *dev = (struct net_device *) data;
3770 	struct fe_priv *np = netdev_priv(dev);
3771 	u8 __iomem *base = get_hwbase(dev);
3772 	u32 events;
3773 	int i;
3774 	unsigned long flags;
3775 
3776 	for (i = 0;; i++) {
3777 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3778 		writel(events, base + NvRegMSIXIrqStatus);
3779 		netdev_dbg(dev, "rx irq events: %08x\n", events);
3780 		if (!(events & np->irqmask))
3781 			break;
3782 
3783 		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3784 			if (unlikely(nv_alloc_rx_optimized(dev))) {
3785 				spin_lock_irqsave(&np->lock, flags);
3786 				if (!np->in_shutdown)
3787 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3788 				spin_unlock_irqrestore(&np->lock, flags);
3789 			}
3790 		}
3791 
3792 		if (unlikely(i > max_interrupt_work)) {
3793 			spin_lock_irqsave(&np->lock, flags);
3794 			/* disable interrupts on the nic */
3795 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3796 			pci_push(base);
3797 
3798 			if (!np->in_shutdown) {
3799 				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3800 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3801 			}
3802 			spin_unlock_irqrestore(&np->lock, flags);
3803 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3804 				   __func__, i);
3805 			break;
3806 		}
3807 	}
3808 
3809 	return IRQ_RETVAL(i);
3810 }
3811 
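/* MSI-X "other" vector: link changes, the optional link timer and
 * recoverable-error handling; also reaps tx in case the tx vector hit its
 * loop limit. */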
3812 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3813 {
3814 	struct net_device *dev = (struct net_device *) data;
3815 	struct fe_priv *np = netdev_priv(dev);
3816 	u8 __iomem *base = get_hwbase(dev);
3817 	u32 events;
3818 	int i;
3819 	unsigned long flags;
3820 
3821 	for (i = 0;; i++) {
3822 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3823 		writel(events, base + NvRegMSIXIrqStatus);
3824 		netdev_dbg(dev, "irq events: %08x\n", events);
3825 		if (!(events & np->irqmask))
3826 			break;
3827 
3828 		/* check tx in case we reached max loop limit in tx isr */
3829 		spin_lock_irqsave(&np->lock, flags);
3830 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3831 		spin_unlock_irqrestore(&np->lock, flags);
3832 
3833 		if (events & NVREG_IRQ_LINK) {
3834 			spin_lock_irqsave(&np->lock, flags);
3835 			nv_link_irq(dev);
3836 			spin_unlock_irqrestore(&np->lock, flags);
3837 		}
3838 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3839 			spin_lock_irqsave(&np->lock, flags);
3840 			nv_linkchange(dev);
3841 			spin_unlock_irqrestore(&np->lock, flags);
3842 			np->link_timeout = jiffies + LINK_TIMEOUT;
3843 		}
3844 		if (events & NVREG_IRQ_RECOVER_ERROR) {
3845 			spin_lock_irqsave(&np->lock, flags);
3846 			/* disable interrupts on the nic */
3847 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3848 			pci_push(base);
3849 
3850 			if (!np->in_shutdown) {
3851 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3852 				np->recover_error = 1;
3853 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3854 			}
3855 			spin_unlock_irqrestore(&np->lock, flags);
3856 			break;
3857 		}
3858 		if (unlikely(i > max_interrupt_work)) {
3859 			spin_lock_irqsave(&np->lock, flags);
3860 			/* disable interrupts on the nic */
3861 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3862 			pci_push(base);
3863 
3864 			if (!np->in_shutdown) {
3865 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3866 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3867 			}
3868 			spin_unlock_irqrestore(&np->lock, flags);
3869 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3870 				   __func__, i);
3871 			break;
3872 		}
3873 
3874 	}
3875 
3876 	return IRQ_RETVAL(i);
3877 }
3878 
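/* Minimal handler used by the interrupt self-test: ack the timer irq and
 * set np->intr_test so nv_interrupt_test() can tell the interrupt fired. */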
3879 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3880 {
3881 	struct net_device *dev = (struct net_device *) data;
3882 	struct fe_priv *np = netdev_priv(dev);
3883 	u8 __iomem *base = get_hwbase(dev);
3884 	u32 events;
3885 
3886 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3887 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3888 		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3889 	} else {
3890 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3891 		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3892 	}
3893 	pci_push(base);
3894 	if (!(events & NVREG_IRQ_TIMER))
3895 		return IRQ_RETVAL(0);
3896 
3897 	nv_msi_workaround(np);
3898 
3899 	spin_lock(&np->lock);
3900 	np->intr_test = 1;
3901 	spin_unlock(&np->lock);
3902 
3903 	return IRQ_RETVAL(1);
3904 }
3905 
3906 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3907 {
3908 	u8 __iomem *base = get_hwbase(dev);
3909 	int i;
3910 	u32 msixmap = 0;
3911 
3912 	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3913 	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3914 	 * the remaining 8 interrupts.
3915 	 */
3916 	for (i = 0; i < 8; i++) {
3917 		if ((irqmask >> i) & 0x1)
3918 			msixmap |= vector << (i << 2);
3919 	}
3920 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3921 
3922 	msixmap = 0;
3923 	for (i = 0; i < 8; i++) {
3924 		if ((irqmask >> (i + 8)) & 0x1)
3925 			msixmap |= vector << (i << 2);
3926 	}
3927 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3928 }
3929 
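/*
 * Set up interrupts in order of preference: MSI-X (with separate rx/tx/other
 * vectors in throughput mode), then MSI, then the legacy pin-based irq.
 * Returns 0 on success and 1 on failure.
 */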
3930 static int nv_request_irq(struct net_device *dev, int intr_test)
3931 {
3932 	struct fe_priv *np = get_nvpriv(dev);
3933 	u8 __iomem *base = get_hwbase(dev);
3934 	int ret = 1;
3935 	int i;
3936 	irqreturn_t (*handler)(int foo, void *data);
3937 
3938 	if (intr_test) {
3939 		handler = nv_nic_irq_test;
3940 	} else {
3941 		if (nv_optimized(np))
3942 			handler = nv_nic_irq_optimized;
3943 		else
3944 			handler = nv_nic_irq;
3945 	}
3946 
3947 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
3948 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3949 			np->msi_x_entry[i].entry = i;
3950 		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3951 		if (ret == 0) {
3952 			np->msi_flags |= NV_MSI_X_ENABLED;
3953 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3954 				/* Request irq for rx handling */
3955 				sprintf(np->name_rx, "%s-rx", dev->name);
3956 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3957 						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3958 				if (ret) {
3959 					netdev_info(dev,
3960 						    "request_irq failed for rx %d\n", ret);
3961 					pci_disable_msix(np->pci_dev);
3962 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3963 					goto out_err;
3964 				}
3965 				/* Request irq for tx handling */
3966 				sprintf(np->name_tx, "%s-tx", dev->name);
3967 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3968 						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3969 				if (ret) {
3970 					netdev_info(dev,
3971 						    "request_irq failed for tx %d\n", ret);
3972 					pci_disable_msix(np->pci_dev);
3973 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3974 					goto out_free_rx;
3975 				}
3976 				/* Request irq for link and timer handling */
3977 				sprintf(np->name_other, "%s-other", dev->name);
3978 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3979 						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3980 				if (ret) {
3981 					netdev_info(dev,
3982 						    "request_irq failed for link %d\n", ret);
3983 					pci_disable_msix(np->pci_dev);
3984 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3985 					goto out_free_tx;
3986 				}
3987 				/* map interrupts to their respective vector */
3988 				writel(0, base + NvRegMSIXMap0);
3989 				writel(0, base + NvRegMSIXMap1);
3990 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3991 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3992 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3993 			} else {
3994 				/* Request irq for all interrupts */
3995 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev);
3996 				if (ret) {
3997 					netdev_info(dev,
3998 						    "request_irq failed %d\n", ret);
3999 					pci_disable_msix(np->pci_dev);
4000 					np->msi_flags &= ~NV_MSI_X_ENABLED;
4001 					goto out_err;
4002 				}
4003 
4004 				/* map interrupts to vector 0 */
4005 				writel(0, base + NvRegMSIXMap0);
4006 				writel(0, base + NvRegMSIXMap1);
4007 			}
4008 			netdev_info(dev, "MSI-X enabled\n");
4009 		}
4010 	}
4011 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
4012 		ret = pci_enable_msi(np->pci_dev);
4013 		if (ret == 0) {
4014 			np->msi_flags |= NV_MSI_ENABLED;
4015 			ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4016 			if (ret) {
4017 				netdev_info(dev, "request_irq failed %d\n", ret);
4018 				pci_disable_msi(np->pci_dev);
4019 				np->msi_flags &= ~NV_MSI_ENABLED;
4020 				goto out_err;
4021 			}
4022 
4023 			/* map interrupts to vector 0 */
4024 			writel(0, base + NvRegMSIMap0);
4025 			writel(0, base + NvRegMSIMap1);
4026 			/* enable msi vector 0 */
4027 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4028 			netdev_info(dev, "MSI enabled\n");
4029 		}
4030 	}
4031 	if (ret != 0) {
4032 		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4033 			goto out_err;
4034 
4035 	}
4036 
4037 	return 0;
4038 out_free_tx:
4039 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4040 out_free_rx:
4041 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4042 out_err:
4043 	return 1;
4044 }
4045 
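/* Release whichever irq setup nv_request_irq() established. */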
4046 static void nv_free_irq(struct net_device *dev)
4047 {
4048 	struct fe_priv *np = get_nvpriv(dev);
4049 	int i;
4050 
4051 	if (np->msi_flags & NV_MSI_X_ENABLED) {
4052 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4053 			free_irq(np->msi_x_entry[i].vector, dev);
4054 		pci_disable_msix(np->pci_dev);
4055 		np->msi_flags &= ~NV_MSI_X_ENABLED;
4056 	} else {
4057 		free_irq(np->pci_dev->irq, dev);
4058 		if (np->msi_flags & NV_MSI_ENABLED) {
4059 			pci_disable_msi(np->pci_dev);
4060 			np->msi_flags &= ~NV_MSI_ENABLED;
4061 		}
4062 	}
4063 }
4064 
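/*
 * nic_poll timer: deferred-work path armed by the isrs when they hit their
 * loop limit or see a recoverable error. With the affected irq(s) disabled
 * it optionally resets the mac and rings, rewrites the irq mask and runs
 * the relevant handlers by hand.
 */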
4065 static void nv_do_nic_poll(unsigned long data)
4066 {
4067 	struct net_device *dev = (struct net_device *) data;
4068 	struct fe_priv *np = netdev_priv(dev);
4069 	u8 __iomem *base = get_hwbase(dev);
4070 	u32 mask = 0;
4071 
4072 	/*
4073 	 * First disable the irq(s), then re-enable interrupts on the nic.
4074 	 * We have to do this before calling nv_nic_irq, because that
4075 	 * handler may decide to mask them again.
4076 	 */
4077 
4078 	if (!using_multi_irqs(dev)) {
4079 		if (np->msi_flags & NV_MSI_X_ENABLED)
4080 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4081 		else
4082 			disable_irq_lockdep(np->pci_dev->irq);
4083 		mask = np->irqmask;
4084 	} else {
4085 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4086 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4087 			mask |= NVREG_IRQ_RX_ALL;
4088 		}
4089 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4090 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4091 			mask |= NVREG_IRQ_TX_ALL;
4092 		}
4093 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4094 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4095 			mask |= NVREG_IRQ_OTHER;
4096 		}
4097 	}
4098 	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */
4099 
4100 	if (np->recover_error) {
4101 		np->recover_error = 0;
4102 		netdev_info(dev, "MAC in recoverable error state\n");
4103 		if (netif_running(dev)) {
4104 			netif_tx_lock_bh(dev);
4105 			netif_addr_lock(dev);
4106 			spin_lock(&np->lock);
4107 			/* stop engines */
4108 			nv_stop_rxtx(dev);
4109 			if (np->driver_data & DEV_HAS_POWER_CNTRL)
4110 				nv_mac_reset(dev);
4111 			nv_txrx_reset(dev);
4112 			/* drain rx queue */
4113 			nv_drain_rxtx(dev);
4114 			/* reinit driver view of the rx queue */
4115 			set_bufsize(dev);
4116 			if (nv_init_ring(dev)) {
4117 				if (!np->in_shutdown)
4118 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4119 			}
4120 			/* reinit nic view of the rx queue */
4121 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4122 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4123 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4124 				base + NvRegRingSizes);
4125 			pci_push(base);
4126 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4127 			pci_push(base);
4128 			/* clear interrupts */
4129 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
4130 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4131 			else
4132 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4133 
4134 			/* restart rx engine */
4135 			nv_start_rxtx(dev);
4136 			spin_unlock(&np->lock);
4137 			netif_addr_unlock(dev);
4138 			netif_tx_unlock_bh(dev);
4139 		}
4140 	}
4141 
4142 	writel(mask, base + NvRegIrqMask);
4143 	pci_push(base);
4144 
4145 	if (!using_multi_irqs(dev)) {
4146 		np->nic_poll_irq = 0;
4147 		if (nv_optimized(np))
4148 			nv_nic_irq_optimized(0, dev);
4149 		else
4150 			nv_nic_irq(0, dev);
4151 		if (np->msi_flags & NV_MSI_X_ENABLED)
4152 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4153 		else
4154 			enable_irq_lockdep(np->pci_dev->irq);
4155 	} else {
4156 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4157 			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4158 			nv_nic_irq_rx(0, dev);
4159 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4160 		}
4161 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4162 			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4163 			nv_nic_irq_tx(0, dev);
4164 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4165 		}
4166 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4167 			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4168 			nv_nic_irq_other(0, dev);
4169 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4170 		}
4171 	}
4172 
4173 }
4174 
4175 #ifdef CONFIG_NET_POLL_CONTROLLER
4176 static void nv_poll_controller(struct net_device *dev)
4177 {
4178 	nv_do_nic_poll((unsigned long) dev);
4179 }
4180 #endif
4181 
4182 static void nv_do_stats_poll(unsigned long data)
4183 	__acquires(&netdev_priv(dev)->hwstats_lock)
4184 	__releases(&netdev_priv(dev)->hwstats_lock)
4185 {
4186 	struct net_device *dev = (struct net_device *) data;
4187 	struct fe_priv *np = netdev_priv(dev);
4188 
4189 	/* If the lock is currently taken, the stats are being refreshed
4190 	 * and are hence fresh enough */
4191 	if (spin_trylock(&np->hwstats_lock)) {
4192 		nv_update_stats(dev);
4193 		spin_unlock(&np->hwstats_lock);
4194 	}
4195 
4196 	if (!np->in_shutdown)
4197 		mod_timer(&np->stats_poll,
4198 			round_jiffies(jiffies + STATS_INTERVAL));
4199 }
4200 
4201 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4202 {
4203 	struct fe_priv *np = netdev_priv(dev);
4204 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
4205 	strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4206 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4207 }
4208 
4209 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4210 {
4211 	struct fe_priv *np = netdev_priv(dev);
4212 	wolinfo->supported = WAKE_MAGIC;
4213 
4214 	spin_lock_irq(&np->lock);
4215 	if (np->wolenabled)
4216 		wolinfo->wolopts = WAKE_MAGIC;
4217 	spin_unlock_irq(&np->lock);
4218 }
4219 
4220 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4221 {
4222 	struct fe_priv *np = netdev_priv(dev);
4223 	u8 __iomem *base = get_hwbase(dev);
4224 	u32 flags = 0;
4225 
4226 	if (wolinfo->wolopts == 0) {
4227 		np->wolenabled = 0;
4228 	} else if (wolinfo->wolopts & WAKE_MAGIC) {
4229 		np->wolenabled = 1;
4230 		flags = NVREG_WAKEUPFLAGS_ENABLE;
4231 	}
4232 	if (netif_running(dev)) {
4233 		spin_lock_irq(&np->lock);
4234 		writel(flags, base + NvRegWakeUpFlags);
4235 		spin_unlock_irq(&np->lock);
4236 	}
4237 	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4238 	return 0;
4239 }
4240 
4241 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4242 {
4243 	struct fe_priv *np = netdev_priv(dev);
4244 	u32 speed;
4245 	int adv;
4246 
4247 	spin_lock_irq(&np->lock);
4248 	ecmd->port = PORT_MII;
4249 	if (!netif_running(dev)) {
4250 		/* We do not track link speed / duplex setting if the
4251 		 * interface is disabled. Force a link check */
4252 		if (nv_update_linkspeed(dev)) {
4253 			if (!netif_carrier_ok(dev))
4254 				netif_carrier_on(dev);
4255 		} else {
4256 			if (netif_carrier_ok(dev))
4257 				netif_carrier_off(dev);
4258 		}
4259 	}
4260 
4261 	if (netif_carrier_ok(dev)) {
4262 		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4263 		case NVREG_LINKSPEED_10:
4264 			speed = SPEED_10;
4265 			break;
4266 		case NVREG_LINKSPEED_100:
4267 			speed = SPEED_100;
4268 			break;
4269 		case NVREG_LINKSPEED_1000:
4270 			speed = SPEED_1000;
4271 			break;
4272 		default:
4273 			speed = -1;
4274 			break;
4275 		}
4276 		ecmd->duplex = DUPLEX_HALF;
4277 		if (np->duplex)
4278 			ecmd->duplex = DUPLEX_FULL;
4279 	} else {
4280 		speed = -1;
4281 		ecmd->duplex = -1;
4282 	}
4283 	ethtool_cmd_speed_set(ecmd, speed);
4284 	ecmd->autoneg = np->autoneg;
4285 
4286 	ecmd->advertising = ADVERTISED_MII;
4287 	if (np->autoneg) {
4288 		ecmd->advertising |= ADVERTISED_Autoneg;
4289 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4290 		if (adv & ADVERTISE_10HALF)
4291 			ecmd->advertising |= ADVERTISED_10baseT_Half;
4292 		if (adv & ADVERTISE_10FULL)
4293 			ecmd->advertising |= ADVERTISED_10baseT_Full;
4294 		if (adv & ADVERTISE_100HALF)
4295 			ecmd->advertising |= ADVERTISED_100baseT_Half;
4296 		if (adv & ADVERTISE_100FULL)
4297 			ecmd->advertising |= ADVERTISED_100baseT_Full;
4298 		if (np->gigabit == PHY_GIGABIT) {
4299 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4300 			if (adv & ADVERTISE_1000FULL)
4301 				ecmd->advertising |= ADVERTISED_1000baseT_Full;
4302 		}
4303 	}
4304 	ecmd->supported = (SUPPORTED_Autoneg |
4305 		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4306 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4307 		SUPPORTED_MII);
4308 	if (np->gigabit == PHY_GIGABIT)
4309 		ecmd->supported |= SUPPORTED_1000baseT_Full;
4310 
4311 	ecmd->phy_address = np->phyaddr;
4312 	ecmd->transceiver = XCVR_EXTERNAL;
4313 
4314 	/* ignore maxtxpkt, maxrxpkt for now */
4315 	spin_unlock_irq(&np->lock);
4316 	return 0;
4317 }
4318 
4319 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4320 {
4321 	struct fe_priv *np = netdev_priv(dev);
4322 	u32 speed = ethtool_cmd_speed(ecmd);
4323 
4324 	if (ecmd->port != PORT_MII)
4325 		return -EINVAL;
4326 	if (ecmd->transceiver != XCVR_EXTERNAL)
4327 		return -EINVAL;
4328 	if (ecmd->phy_address != np->phyaddr) {
4329 		/* TODO: support switching between multiple phys. Should be
4330 		 * trivial, but not enabled due to lack of test hardware. */
4331 		return -EINVAL;
4332 	}
4333 	if (ecmd->autoneg == AUTONEG_ENABLE) {
4334 		u32 mask;
4335 
4336 		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4337 			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4338 		if (np->gigabit == PHY_GIGABIT)
4339 			mask |= ADVERTISED_1000baseT_Full;
4340 
4341 		if ((ecmd->advertising & mask) == 0)
4342 			return -EINVAL;
4343 
4344 	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
4345 		/* Note: with autonegotiation disabled, forcing speed 1000 is
4346 		 * intentionally forbidden - no one should need that. */
4347 
4348 		if (speed != SPEED_10 && speed != SPEED_100)
4349 			return -EINVAL;
4350 		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4351 			return -EINVAL;
4352 	} else {
4353 		return -EINVAL;
4354 	}
4355 
4356 	netif_carrier_off(dev);
4357 	if (netif_running(dev)) {
4358 		unsigned long flags;
4359 
4360 		nv_disable_irq(dev);
4361 		netif_tx_lock_bh(dev);
4362 		netif_addr_lock(dev);
4363 		/* with plain spinlock lockdep complains */
4364 		spin_lock_irqsave(&np->lock, flags);
4365 		/* stop engines */
4366 		/* FIXME:
4367 		 * this can take some time, and interrupts are disabled
4368 		 * due to spin_lock_irqsave, but let's hope no daemon
4369 		 * is going to change the settings very often...
4370 		 * Worst case:
4371 		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4372 		 * + some minor delays, which adds up to approximately one second
4373 		 */
4374 		nv_stop_rxtx(dev);
4375 		spin_unlock_irqrestore(&np->lock, flags);
4376 		netif_addr_unlock(dev);
4377 		netif_tx_unlock_bh(dev);
4378 	}
4379 
4380 	if (ecmd->autoneg == AUTONEG_ENABLE) {
4381 		int adv, bmcr;
4382 
4383 		np->autoneg = 1;
4384 
4385 		/* advertise only what has been requested */
4386 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4387 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4388 		if (ecmd->advertising & ADVERTISED_10baseT_Half)
4389 			adv |= ADVERTISE_10HALF;
4390 		if (ecmd->advertising & ADVERTISED_10baseT_Full)
4391 			adv |= ADVERTISE_10FULL;
4392 		if (ecmd->advertising & ADVERTISED_100baseT_Half)
4393 			adv |= ADVERTISE_100HALF;
4394 		if (ecmd->advertising & ADVERTISED_100baseT_Full)
4395 			adv |= ADVERTISE_100FULL;
4396 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
4397 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4398 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4399 			adv |=  ADVERTISE_PAUSE_ASYM;
4400 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4401 
4402 		if (np->gigabit == PHY_GIGABIT) {
4403 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4404 			adv &= ~ADVERTISE_1000FULL;
4405 			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4406 				adv |= ADVERTISE_1000FULL;
4407 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4408 		}
4409 
4410 		if (netif_running(dev))
4411 			netdev_info(dev, "link down\n");
4412 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4413 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4414 			bmcr |= BMCR_ANENABLE;
4415 			/* reset the phy in order for settings to stick,
4416 			 * and cause autoneg to start */
4417 			if (phy_reset(dev, bmcr)) {
4418 				netdev_info(dev, "phy reset failed\n");
4419 				return -EINVAL;
4420 			}
4421 		} else {
4422 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4423 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4424 		}
4425 	} else {
4426 		int adv, bmcr;
4427 
4428 		np->autoneg = 0;
4429 
4430 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4431 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4432 		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4433 			adv |= ADVERTISE_10HALF;
4434 		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4435 			adv |= ADVERTISE_10FULL;
4436 		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4437 			adv |= ADVERTISE_100HALF;
4438 		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4439 			adv |= ADVERTISE_100FULL;
4440 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4441 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4442 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4443 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4444 		}
4445 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4446 			adv |=  ADVERTISE_PAUSE_ASYM;
4447 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4448 		}
4449 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4450 		np->fixed_mode = adv;
4451 
4452 		if (np->gigabit == PHY_GIGABIT) {
4453 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4454 			adv &= ~ADVERTISE_1000FULL;
4455 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4456 		}
4457 
4458 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4459 		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4460 		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4461 			bmcr |= BMCR_FULLDPLX;
4462 		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4463 			bmcr |= BMCR_SPEED100;
4464 		if (np->phy_oui == PHY_OUI_MARVELL) {
4465 			/* reset the phy in order for forced mode settings to stick */
4466 			if (phy_reset(dev, bmcr)) {
4467 				netdev_info(dev, "phy reset failed\n");
4468 				return -EINVAL;
4469 			}
4470 		} else {
4471 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4472 			if (netif_running(dev)) {
4473 				/* Wait a bit and then reconfigure the nic. */
4474 				udelay(10);
4475 				nv_linkchange(dev);
4476 			}
4477 		}
4478 	}
4479 
4480 	if (netif_running(dev)) {
4481 		nv_start_rxtx(dev);
4482 		nv_enable_irq(dev);
4483 	}
4484 
4485 	return 0;
4486 }
4487 
4488 #define FORCEDETH_REGS_VER	1
4489 
4490 static int nv_get_regs_len(struct net_device *dev)
4491 {
4492 	struct fe_priv *np = netdev_priv(dev);
4493 	return np->register_size;
4494 }
4495 
4496 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4497 {
4498 	struct fe_priv *np = netdev_priv(dev);
4499 	u8 __iomem *base = get_hwbase(dev);
4500 	u32 *rbuf = buf;
4501 	int i;
4502 
4503 	regs->version = FORCEDETH_REGS_VER;
4504 	spin_lock_irq(&np->lock);
4505 	for (i = 0; i < np->register_size/sizeof(u32); i++)
4506 		rbuf[i] = readl(base + i*sizeof(u32));
4507 	spin_unlock_irq(&np->lock);
4508 }
4509 
4510 static int nv_nway_reset(struct net_device *dev)
4511 {
4512 	struct fe_priv *np = netdev_priv(dev);
4513 	int ret;
4514 
4515 	if (np->autoneg) {
4516 		int bmcr;
4517 
4518 		netif_carrier_off(dev);
4519 		if (netif_running(dev)) {
4520 			nv_disable_irq(dev);
4521 			netif_tx_lock_bh(dev);
4522 			netif_addr_lock(dev);
4523 			spin_lock(&np->lock);
4524 			/* stop engines */
4525 			nv_stop_rxtx(dev);
4526 			spin_unlock(&np->lock);
4527 			netif_addr_unlock(dev);
4528 			netif_tx_unlock_bh(dev);
4529 			netdev_info(dev, "link down\n");
4530 		}
4531 
4532 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4533 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4534 			bmcr |= BMCR_ANENABLE;
4535 			/* reset the phy in order for settings to stick */
4536 			if (phy_reset(dev, bmcr)) {
4537 				netdev_info(dev, "phy reset failed\n");
4538 				return -EINVAL;
4539 			}
4540 		} else {
4541 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4542 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4543 		}
4544 
4545 		if (netif_running(dev)) {
4546 			nv_start_rxtx(dev);
4547 			nv_enable_irq(dev);
4548 		}
4549 		ret = 0;
4550 	} else {
4551 		ret = -EINVAL;
4552 	}
4553 
4554 	return ret;
4555 }
4556 
4557 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4558 {
4559 	struct fe_priv *np = netdev_priv(dev);
4560 
4561 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4562 	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4563 
4564 	ring->rx_pending = np->rx_ring_size;
4565 	ring->tx_pending = np->tx_ring_size;
4566 }
4567 
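/* Resize the rx/tx rings: the new descriptor ring and skb maps are
 * allocated first and the engines are only stopped once that succeeds,
 * so a failed allocation leaves the old rings untouched. */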
4568 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4569 {
4570 	struct fe_priv *np = netdev_priv(dev);
4571 	u8 __iomem *base = get_hwbase(dev);
4572 	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4573 	dma_addr_t ring_addr;
4574 
4575 	if (ring->rx_pending < RX_RING_MIN ||
4576 	    ring->tx_pending < TX_RING_MIN ||
4577 	    ring->rx_mini_pending != 0 ||
4578 	    ring->rx_jumbo_pending != 0 ||
4579 	    (np->desc_ver == DESC_VER_1 &&
4580 	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4581 	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4582 	    (np->desc_ver != DESC_VER_1 &&
4583 	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4584 	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4585 		return -EINVAL;
4586 	}
4587 
4588 	/* allocate new rings */
4589 	if (!nv_optimized(np)) {
4590 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4591 					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4592 					    &ring_addr);
4593 	} else {
4594 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4595 					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4596 					    &ring_addr);
4597 	}
4598 	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4599 	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4600 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4601 		/* fall back to old rings */
4602 		if (!nv_optimized(np)) {
4603 			if (rxtx_ring)
4604 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4605 						    rxtx_ring, ring_addr);
4606 		} else {
4607 			if (rxtx_ring)
4608 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4609 						    rxtx_ring, ring_addr);
4610 		}
4611 
4612 		kfree(rx_skbuff);
4613 		kfree(tx_skbuff);
4614 		goto exit;
4615 	}
4616 
4617 	if (netif_running(dev)) {
4618 		nv_disable_irq(dev);
4619 		nv_napi_disable(dev);
4620 		netif_tx_lock_bh(dev);
4621 		netif_addr_lock(dev);
4622 		spin_lock(&np->lock);
4623 		/* stop engines */
4624 		nv_stop_rxtx(dev);
4625 		nv_txrx_reset(dev);
4626 		/* drain queues */
4627 		nv_drain_rxtx(dev);
4628 		/* delete queues */
4629 		free_rings(dev);
4630 	}
4631 
4632 	/* set new values */
4633 	np->rx_ring_size = ring->rx_pending;
4634 	np->tx_ring_size = ring->tx_pending;
4635 
4636 	if (!nv_optimized(np)) {
4637 		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4638 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4639 	} else {
4640 		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4641 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4642 	}
4643 	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4644 	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4645 	np->ring_addr = ring_addr;
4646 
4647 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4648 	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4649 
4650 	if (netif_running(dev)) {
4651 		/* reinit driver view of the queues */
4652 		set_bufsize(dev);
4653 		if (nv_init_ring(dev)) {
4654 			if (!np->in_shutdown)
4655 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4656 		}
4657 
4658 		/* reinit nic view of the queues */
4659 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4660 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4661 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4662 			base + NvRegRingSizes);
4663 		pci_push(base);
4664 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4665 		pci_push(base);
4666 
4667 		/* restart engines */
4668 		nv_start_rxtx(dev);
4669 		spin_unlock(&np->lock);
4670 		netif_addr_unlock(dev);
4671 		netif_tx_unlock_bh(dev);
4672 		nv_napi_enable(dev);
4673 		nv_enable_irq(dev);
4674 	}
4675 	return 0;
4676 exit:
4677 	return -ENOMEM;
4678 }
4679 
4680 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4681 {
4682 	struct fe_priv *np = netdev_priv(dev);
4683 
4684 	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4685 	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4686 	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4687 }
4688 
4689 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4690 {
4691 	struct fe_priv *np = netdev_priv(dev);
4692 	int adv, bmcr;
4693 
4694 	if ((!np->autoneg && np->duplex == 0) ||
4695 	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4696 		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
4697 		return -EINVAL;
4698 	}
4699 	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4700 		netdev_info(dev, "hardware does not support tx pause frames\n");
4701 		return -EINVAL;
4702 	}
4703 
4704 	netif_carrier_off(dev);
4705 	if (netif_running(dev)) {
4706 		nv_disable_irq(dev);
4707 		netif_tx_lock_bh(dev);
4708 		netif_addr_lock(dev);
4709 		spin_lock(&np->lock);
4710 		/* stop engines */
4711 		nv_stop_rxtx(dev);
4712 		spin_unlock(&np->lock);
4713 		netif_addr_unlock(dev);
4714 		netif_tx_unlock_bh(dev);
4715 	}
4716 
4717 	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4718 	if (pause->rx_pause)
4719 		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4720 	if (pause->tx_pause)
4721 		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4722 
4723 	if (np->autoneg && pause->autoneg) {
4724 		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4725 
4726 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4727 		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4728 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4729 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4730 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4731 			adv |=  ADVERTISE_PAUSE_ASYM;
4732 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4733 
4734 		if (netif_running(dev))
4735 			netdev_info(dev, "link down\n");
4736 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4737 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4738 		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4739 	} else {
4740 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4741 		if (pause->rx_pause)
4742 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4743 		if (pause->tx_pause)
4744 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4745 
4746 		if (!netif_running(dev))
4747 			nv_update_linkspeed(dev);
4748 		else
4749 			nv_update_pause(dev, np->pause_flags);
4750 	}
4751 
4752 	if (netif_running(dev)) {
4753 		nv_start_rxtx(dev);
4754 		nv_enable_irq(dev);
4755 	}
4756 	return 0;
4757 }
4758 
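/* Enable or disable internal PHY loopback via BMCR_LOOPBACK, forcing
 * 1000 Mbit/s full duplex while loopback is active. */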
4759 static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4760 {
4761 	struct fe_priv *np = netdev_priv(dev);
4762 	unsigned long flags;
4763 	u32 miicontrol;
4764 	int err, retval = 0;
4765 
4766 	spin_lock_irqsave(&np->lock, flags);
4767 	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4768 	if (features & NETIF_F_LOOPBACK) {
4769 		if (miicontrol & BMCR_LOOPBACK) {
4770 			spin_unlock_irqrestore(&np->lock, flags);
4771 			netdev_info(dev, "Loopback already enabled\n");
4772 			return 0;
4773 		}
4774 		nv_disable_irq(dev);
4775 		/* Turn on loopback mode */
4776 		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4777 		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4778 		if (err) {
4779 			retval = PHY_ERROR;
4780 			spin_unlock_irqrestore(&np->lock, flags);
4781 			phy_init(dev);
4782 		} else {
4783 			if (netif_running(dev)) {
4784 				/* Force 1000 Mbps full-duplex */
4785 				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4786 									 1);
4787 				/* Force link up */
4788 				netif_carrier_on(dev);
4789 			}
4790 			spin_unlock_irqrestore(&np->lock, flags);
4791 			netdev_info(dev,
4792 				"Internal PHY loopback mode enabled.\n");
4793 		}
4794 	} else {
4795 		if (!(miicontrol & BMCR_LOOPBACK)) {
4796 			spin_unlock_irqrestore(&np->lock, flags);
4797 			netdev_info(dev, "Loopback already disabled\n");
4798 			return 0;
4799 		}
4800 		nv_disable_irq(dev);
4801 		/* Turn off loopback */
4802 		spin_unlock_irqrestore(&np->lock, flags);
4803 		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4804 		phy_init(dev);
4805 	}
4806 	msleep(500);
4807 	spin_lock_irqsave(&np->lock, flags);
4808 	nv_enable_irq(dev);
4809 	spin_unlock_irqrestore(&np->lock, flags);
4810 
4811 	return retval;
4812 }
4813 
4814 static netdev_features_t nv_fix_features(struct net_device *dev,
4815 	netdev_features_t features)
4816 {
4817 	/* vlan is dependent on rx checksum offload */
4818 	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4819 		features |= NETIF_F_RXCSUM;
4820 
4821 	return features;
4822 }
4823 
4824 static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4825 {
4826 	struct fe_priv *np = get_nvpriv(dev);
4827 
4828 	spin_lock_irq(&np->lock);
4829 
4830 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
4831 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4832 	else
4833 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4834 
4835 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
4836 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4837 	else
4838 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4839 
4840 	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4841 
4842 	spin_unlock_irq(&np->lock);
4843 }
4844 
4845 static int nv_set_features(struct net_device *dev, netdev_features_t features)
4846 {
4847 	struct fe_priv *np = netdev_priv(dev);
4848 	u8 __iomem *base = get_hwbase(dev);
4849 	netdev_features_t changed = dev->features ^ features;
4850 	int retval;
4851 
4852 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4853 		retval = nv_set_loopback(dev, features);
4854 		if (retval != 0)
4855 			return retval;
4856 	}
4857 
4858 	if (changed & NETIF_F_RXCSUM) {
4859 		spin_lock_irq(&np->lock);
4860 
4861 		if (features & NETIF_F_RXCSUM)
4862 			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4863 		else
4864 			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4865 
4866 		if (netif_running(dev))
4867 			writel(np->txrxctl_bits, base + NvRegTxRxControl);
4868 
4869 		spin_unlock_irq(&np->lock);
4870 	}
4871 
4872 	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4873 		nv_vlan_mode(dev, features);
4874 
4875 	return 0;
4876 }
4877 
4878 static int nv_get_sset_count(struct net_device *dev, int sset)
4879 {
4880 	struct fe_priv *np = netdev_priv(dev);
4881 
4882 	switch (sset) {
4883 	case ETH_SS_TEST:
4884 		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4885 			return NV_TEST_COUNT_EXTENDED;
4886 		else
4887 			return NV_TEST_COUNT_BASE;
4888 	case ETH_SS_STATS:
4889 		if (np->driver_data & DEV_HAS_STATISTICS_V3)
4890 			return NV_DEV_STATISTICS_V3_COUNT;
4891 		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4892 			return NV_DEV_STATISTICS_V2_COUNT;
4893 		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4894 			return NV_DEV_STATISTICS_V1_COUNT;
4895 		else
4896 			return 0;
4897 	default:
4898 		return -EOPNOTSUPP;
4899 	}
4900 }
4901 
4902 static void nv_get_ethtool_stats(struct net_device *dev,
4903 				 struct ethtool_stats *estats, u64 *buffer)
4904 	__acquires(&netdev_priv(dev)->hwstats_lock)
4905 	__releases(&netdev_priv(dev)->hwstats_lock)
4906 {
4907 	struct fe_priv *np = netdev_priv(dev);
4908 
4909 	spin_lock_bh(&np->hwstats_lock);
4910 	nv_update_stats(dev);
4911 	memcpy(buffer, &np->estats,
4912 	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4913 	spin_unlock_bh(&np->hwstats_lock);
4914 }
4915 
4916 static int nv_link_test(struct net_device *dev)
4917 {
4918 	struct fe_priv *np = netdev_priv(dev);
4919 	int mii_status;
4920 
4921 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4922 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4923 
4924 	/* check phy link status */
4925 	if (!(mii_status & BMSR_LSTATUS))
4926 		return 0;
4927 	else
4928 		return 1;
4929 }
4930 
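/* Walk the nv_registers_test table, toggle the maskable bits of each
 * register and verify that they read back; returns 1 on pass, 0 on fail. */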
4931 static int nv_register_test(struct net_device *dev)
4932 {
4933 	u8 __iomem *base = get_hwbase(dev);
4934 	int i = 0;
4935 	u32 orig_read, new_read;
4936 
4937 	do {
4938 		orig_read = readl(base + nv_registers_test[i].reg);
4939 
4940 		/* xor with mask to toggle bits */
4941 		orig_read ^= nv_registers_test[i].mask;
4942 
4943 		writel(orig_read, base + nv_registers_test[i].reg);
4944 
4945 		new_read = readl(base + nv_registers_test[i].reg);
4946 
4947 		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4948 			return 0;
4949 
4950 		/* restore original value */
4951 		orig_read ^= nv_registers_test[i].mask;
4952 		writel(orig_read, base + nv_registers_test[i].reg);
4953 
4954 	} while (nv_registers_test[++i].reg != 0);
4955 
4956 	return 1;
4957 }
4958 
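/* Self-test: rewire to a single test vector, arm the timer interrupt and
 * check that nv_nic_irq_test() saw it; returns 1 on pass, 2 if no
 * interrupt arrived, 0 if irq setup failed. */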
4959 static int nv_interrupt_test(struct net_device *dev)
4960 {
4961 	struct fe_priv *np = netdev_priv(dev);
4962 	u8 __iomem *base = get_hwbase(dev);
4963 	int ret = 1;
4964 	int testcnt;
4965 	u32 save_msi_flags, save_poll_interval = 0;
4966 
4967 	if (netif_running(dev)) {
4968 		/* free current irq */
4969 		nv_free_irq(dev);
4970 		save_poll_interval = readl(base + NvRegPollingInterval);
4971 	}
4972 
4973 	/* flag to test interrupt handler */
4974 	np->intr_test = 0;
4975 
4976 	/* setup test irq */
4977 	save_msi_flags = np->msi_flags;
4978 	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4979 	np->msi_flags |= 0x001; /* setup 1 vector */
4980 	if (nv_request_irq(dev, 1))
4981 		return 0;
4982 
4983 	/* setup timer interrupt */
4984 	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4985 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4986 
4987 	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4988 
4989 	/* wait for at least one interrupt */
4990 	msleep(100);
4991 
4992 	spin_lock_irq(&np->lock);
4993 
4994 	/* flag should be set within ISR */
4995 	testcnt = np->intr_test;
4996 	if (!testcnt)
4997 		ret = 2;
4998 
4999 	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5000 	if (!(np->msi_flags & NV_MSI_X_ENABLED))
5001 		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5002 	else
5003 		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5004 
5005 	spin_unlock_irq(&np->lock);
5006 
5007 	nv_free_irq(dev);
5008 
5009 	np->msi_flags = save_msi_flags;
5010 
5011 	if (netif_running(dev)) {
5012 		writel(save_poll_interval, base + NvRegPollingInterval);
5013 		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5014 		/* restore original irq */
5015 		if (nv_request_irq(dev, 0))
5016 			return 0;
5017 	}
5018 
5019 	return ret;
5020 }
5021 
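/* Put the MAC into internal loopback, transmit one ETH_DATA_LEN frame
 * filled with a counting byte pattern and verify it comes back intact
 * on the rx ring; returns 1 on pass, 0 on failure.
 */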
5022 static int nv_loopback_test(struct net_device *dev)
5023 {
5024 	struct fe_priv *np = netdev_priv(dev);
5025 	u8 __iomem *base = get_hwbase(dev);
5026 	struct sk_buff *tx_skb, *rx_skb;
5027 	dma_addr_t test_dma_addr;
5028 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5029 	u32 flags;
5030 	int len, i, pkt_len;
5031 	u8 *pkt_data;
5032 	u32 filter_flags = 0;
5033 	u32 misc1_flags = 0;
5034 	int ret = 1;
5035 
5036 	if (netif_running(dev)) {
5037 		nv_disable_irq(dev);
5038 		filter_flags = readl(base + NvRegPacketFilterFlags);
5039 		misc1_flags = readl(base + NvRegMisc1);
5040 	} else {
5041 		nv_txrx_reset(dev);
5042 	}
5043 
5044 	/* reinit driver view of the rx queue */
5045 	set_bufsize(dev);
5046 	nv_init_ring(dev);
5047 
5048 	/* setup hardware for loopback */
5049 	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5050 	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5051 
5052 	/* reinit nic view of the rx queue */
5053 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5054 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5055 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5056 		base + NvRegRingSizes);
5057 	pci_push(base);
5058 
5059 	/* restart rx engine */
5060 	nv_start_rxtx(dev);
5061 
5062 	/* setup packet for tx */
5063 	pkt_len = ETH_DATA_LEN;
5064 	tx_skb = netdev_alloc_skb(dev, pkt_len);
5065 	if (!tx_skb) {
5066 		ret = 0;
5067 		goto out;
5068 	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(np->pci_dev,
				  test_dma_addr)) {
		dev_kfree_skb_any(tx_skb);
		ret = 0;
		goto out;
	}
5077 	pkt_data = skb_put(tx_skb, pkt_len);
5078 	for (i = 0; i < pkt_len; i++)
5079 		pkt_data[i] = (u8)(i & 0xff);
5080 
5081 	if (!nv_optimized(np)) {
5082 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5083 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5084 	} else {
5085 		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5086 		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5087 		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5088 	}
5089 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5090 	pci_push(get_hwbase(dev));
5091 
5092 	msleep(500);
5093 
5094 	/* check for rx of the packet */
5095 	if (!nv_optimized(np)) {
5096 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5097 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5098 
5099 	} else {
5100 		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5101 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5102 	}
5103 
5104 	if (flags & NV_RX_AVAIL) {
5105 		ret = 0;
5106 	} else if (np->desc_ver == DESC_VER_1) {
5107 		if (flags & NV_RX_ERROR)
5108 			ret = 0;
5109 	} else {
5110 		if (flags & NV_RX2_ERROR)
5111 			ret = 0;
5112 	}
5113 
5114 	if (ret) {
5115 		if (len != pkt_len) {
5116 			ret = 0;
5117 		} else {
5118 			rx_skb = np->rx_skb[0].skb;
5119 			for (i = 0; i < pkt_len; i++) {
5120 				if (rx_skb->data[i] != (u8)(i & 0xff)) {
5121 					ret = 0;
5122 					break;
5123 				}
5124 			}
5125 		}
5126 	}
5127 
5128 	pci_unmap_single(np->pci_dev, test_dma_addr,
5129 		       (skb_end_pointer(tx_skb) - tx_skb->data),
5130 		       PCI_DMA_TODEVICE);
5131 	dev_kfree_skb_any(tx_skb);
5132  out:
5133 	/* stop engines */
5134 	nv_stop_rxtx(dev);
5135 	nv_txrx_reset(dev);
5136 	/* drain rx queue */
5137 	nv_drain_rxtx(dev);
5138 
5139 	if (netif_running(dev)) {
5140 		writel(misc1_flags, base + NvRegMisc1);
5141 		writel(filter_flags, base + NvRegPacketFilterFlags);
5142 		nv_enable_irq(dev);
5143 	}
5144 
5145 	return ret;
5146 }
5147 
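/* ethtool self-test: buffer[0..3] report the link, register, interrupt
 * and loopback results (1 = failed); the offline tests tear down and
 * reinitialize a running interface.
 */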
5148 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5149 {
5150 	struct fe_priv *np = netdev_priv(dev);
5151 	u8 __iomem *base = get_hwbase(dev);
5152 	int result, count;
5153 
5154 	count = nv_get_sset_count(dev, ETH_SS_TEST);
5155 	memset(buffer, 0, count * sizeof(u64));
5156 
5157 	if (!nv_link_test(dev)) {
5158 		test->flags |= ETH_TEST_FL_FAILED;
5159 		buffer[0] = 1;
5160 	}
5161 
5162 	if (test->flags & ETH_TEST_FL_OFFLINE) {
5163 		if (netif_running(dev)) {
5164 			netif_stop_queue(dev);
5165 			nv_napi_disable(dev);
5166 			netif_tx_lock_bh(dev);
5167 			netif_addr_lock(dev);
5168 			spin_lock_irq(&np->lock);
5169 			nv_disable_hw_interrupts(dev, np->irqmask);
5170 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
5171 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5172 			else
5173 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5174 			/* stop engines */
5175 			nv_stop_rxtx(dev);
5176 			nv_txrx_reset(dev);
5177 			/* drain rx queue */
5178 			nv_drain_rxtx(dev);
5179 			spin_unlock_irq(&np->lock);
5180 			netif_addr_unlock(dev);
5181 			netif_tx_unlock_bh(dev);
5182 		}
5183 
5184 		if (!nv_register_test(dev)) {
5185 			test->flags |= ETH_TEST_FL_FAILED;
5186 			buffer[1] = 1;
5187 		}
5188 
5189 		result = nv_interrupt_test(dev);
5190 		if (result != 1) {
5191 			test->flags |= ETH_TEST_FL_FAILED;
5192 			buffer[2] = 1;
5193 		}
5194 		if (result == 0) {
5195 			/* bail out */
5196 			return;
5197 		}
5198 
5199 		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5200 			test->flags |= ETH_TEST_FL_FAILED;
5201 			buffer[3] = 1;
5202 		}
5203 
5204 		if (netif_running(dev)) {
5205 			/* reinit driver view of the rx queue */
5206 			set_bufsize(dev);
5207 			if (nv_init_ring(dev)) {
5208 				if (!np->in_shutdown)
5209 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5210 			}
5211 			/* reinit nic view of the rx queue */
5212 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5213 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5214 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5215 				base + NvRegRingSizes);
5216 			pci_push(base);
5217 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5218 			pci_push(base);
5219 			/* restart rx engine */
5220 			nv_start_rxtx(dev);
5221 			netif_start_queue(dev);
5222 			nv_napi_enable(dev);
5223 			nv_enable_hw_interrupts(dev, np->irqmask);
5224 		}
5225 	}
5226 }
5227 
5228 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5229 {
5230 	switch (stringset) {
5231 	case ETH_SS_STATS:
5232 		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5233 		break;
5234 	case ETH_SS_TEST:
5235 		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5236 		break;
5237 	}
5238 }
5239 
5240 static const struct ethtool_ops ops = {
5241 	.get_drvinfo = nv_get_drvinfo,
5242 	.get_link = ethtool_op_get_link,
5243 	.get_wol = nv_get_wol,
5244 	.set_wol = nv_set_wol,
5245 	.get_settings = nv_get_settings,
5246 	.set_settings = nv_set_settings,
5247 	.get_regs_len = nv_get_regs_len,
5248 	.get_regs = nv_get_regs,
5249 	.nway_reset = nv_nway_reset,
5250 	.get_ringparam = nv_get_ringparam,
5251 	.set_ringparam = nv_set_ringparam,
5252 	.get_pauseparam = nv_get_pauseparam,
5253 	.set_pauseparam = nv_set_pauseparam,
5254 	.get_strings = nv_get_strings,
5255 	.get_ethtool_stats = nv_get_ethtool_stats,
5256 	.get_sset_count = nv_get_sset_count,
5257 	.self_test = nv_self_test,
5258 	.get_ts_info = ethtool_op_get_ts_info,
5259 };
5260 
5261 /* The mgmt unit and driver use a semaphore to access the phy during init */
5262 static int nv_mgmt_acquire_sema(struct net_device *dev)
5263 {
5264 	struct fe_priv *np = netdev_priv(dev);
5265 	u8 __iomem *base = get_hwbase(dev);
5266 	int i;
5267 	u32 tx_ctrl, mgmt_sema;
5268 
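	/* wait up to 5 seconds (10 * 500ms) for the mgmt unit to release
	 * the semaphore
	 */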
5269 	for (i = 0; i < 10; i++) {
5270 		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5271 		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5272 			break;
5273 		msleep(500);
5274 	}
5275 
5276 	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5277 		return 0;
5278 
5279 	for (i = 0; i < 2; i++) {
5280 		tx_ctrl = readl(base + NvRegTransmitterControl);
5281 		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5282 		writel(tx_ctrl, base + NvRegTransmitterControl);
5283 
5284 		/* verify that semaphore was acquired */
5285 		tx_ctrl = readl(base + NvRegTransmitterControl);
5286 		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5287 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
			np->mgmt_sema = 1;
			return 1;
		}
		udelay(50);
5292 	}
5293 
5294 	return 0;
5295 }
5296 
5297 static void nv_mgmt_release_sema(struct net_device *dev)
5298 {
5299 	struct fe_priv *np = netdev_priv(dev);
5300 	u8 __iomem *base = get_hwbase(dev);
5301 	u32 tx_ctrl;
5302 
5303 	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5304 		if (np->mgmt_sema) {
5305 			tx_ctrl = readl(base + NvRegTransmitterControl);
5306 			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5307 			writel(tx_ctrl, base + NvRegTransmitterControl);
5308 		}
5309 	}
5310 }
5311 
5312 
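/* Ask the mgmt unit for its version: request it via NvRegMgmtUnitGetVersion,
 * toggle the DATA_START handshake bit and wait up to 5 seconds for the
 * DATA_READY bit to flip; returns 1 on success, 0 on timeout or error.
 */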
5313 static int nv_mgmt_get_version(struct net_device *dev)
5314 {
5315 	struct fe_priv *np = netdev_priv(dev);
5316 	u8 __iomem *base = get_hwbase(dev);
5317 	u32 data_ready = readl(base + NvRegTransmitterControl);
5318 	u32 data_ready2 = 0;
5319 	unsigned long start;
5320 	int ready = 0;
5321 
5322 	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5323 	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5324 	start = jiffies;
5325 	while (time_before(jiffies, start + 5*HZ)) {
5326 		data_ready2 = readl(base + NvRegTransmitterControl);
5327 		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5328 			ready = 1;
5329 			break;
5330 		}
5331 		schedule_timeout_uninterruptible(1);
5332 	}
5333 
5334 	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5335 		return 0;
5336 
5337 	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5338 
5339 	return 1;
5340 }
5341 
5342 static int nv_open(struct net_device *dev)
5343 {
5344 	struct fe_priv *np = netdev_priv(dev);
5345 	u8 __iomem *base = get_hwbase(dev);
5346 	int ret = 1;
5347 	int oom, i;
5348 	u32 low;
5349 
5350 	/* power up phy */
5351 	mii_rw(dev, np->phyaddr, MII_BMCR,
5352 	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5353 
5354 	nv_txrx_gate(dev, false);
5355 	/* erase previous misconfiguration */
5356 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
5357 		nv_mac_reset(dev);
5358 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5359 	writel(0, base + NvRegMulticastAddrB);
5360 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5361 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5362 	writel(0, base + NvRegPacketFilterFlags);
5363 
5364 	writel(0, base + NvRegTransmitterControl);
5365 	writel(0, base + NvRegReceiverControl);
5366 
5367 	writel(0, base + NvRegAdapterControl);
5368 
5369 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5371 
5372 	/* initialize descriptor rings */
5373 	set_bufsize(dev);
5374 	oom = nv_init_ring(dev);
5375 
5376 	writel(0, base + NvRegLinkSpeed);
5377 	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5378 	nv_txrx_reset(dev);
5379 	writel(0, base + NvRegUnknownSetupReg6);
5380 
5381 	np->in_shutdown = 0;
5382 
5383 	/* give hw rings */
5384 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5385 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5386 		base + NvRegRingSizes);
5387 
5388 	writel(np->linkspeed, base + NvRegLinkSpeed);
5389 	if (np->desc_ver == DESC_VER_1)
5390 		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5391 	else
5392 		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5393 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
5394 	writel(np->vlanctl_bits, base + NvRegVlanControl);
5395 	pci_push(base);
5396 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5397 	if (reg_delay(dev, NvRegUnknownSetupReg5,
5398 		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5399 		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5400 		netdev_info(dev,
5401 			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
5402 
5403 	writel(0, base + NvRegMIIMask);
5404 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5405 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5406 
5407 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5408 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5409 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5410 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5411 
5412 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5413 
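	/* seed the collision-backoff slot time with a random value */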
5414 	get_random_bytes(&low, sizeof(low));
5415 	low &= NVREG_SLOTTIME_MASK;
5416 	if (np->desc_ver == DESC_VER_1) {
5417 		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5418 	} else {
5419 		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5420 			/* setup legacy backoff */
5421 			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5422 		} else {
5423 			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5424 			nv_gear_backoff_reseed(dev);
5425 		}
5426 	}
5427 	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5428 	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5429 	if (poll_interval == -1) {
5430 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5431 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5432 		else
5433 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5434 	} else
5435 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5436 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5437 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5438 			base + NvRegAdapterControl);
5439 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5440 	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5441 	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5443 
5444 	i = readl(base + NvRegPowerState);
5445 	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5446 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5447 
5448 	pci_push(base);
5449 	udelay(10);
5450 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5451 
5452 	nv_disable_hw_interrupts(dev, np->irqmask);
5453 	pci_push(base);
5454 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5455 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5456 	pci_push(base);
5457 
5458 	if (nv_request_irq(dev, 0))
5459 		goto out_drain;
5460 
5461 	/* ask for interrupts */
5462 	nv_enable_hw_interrupts(dev, np->irqmask);
5463 
5464 	spin_lock_irq(&np->lock);
5465 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5466 	writel(0, base + NvRegMulticastAddrB);
5467 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5468 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5469 	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5470 	/* One manual link speed update: Interrupts are enabled, future link
5471 	 * speed changes cause interrupts and are handled by nv_link_irq().
5472 	 */
5473 	{
5474 		u32 miistat;
5475 		miistat = readl(base + NvRegMIIStatus);
5476 		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5477 	}
5478 	/* set linkspeed to invalid value, thus force nv_update_linkspeed
5479 	 * to init hw */
5480 	np->linkspeed = 0;
5481 	ret = nv_update_linkspeed(dev);
5482 	nv_start_rxtx(dev);
5483 	netif_start_queue(dev);
5484 	nv_napi_enable(dev);
5485 
5486 	if (ret) {
5487 		netif_carrier_on(dev);
5488 	} else {
5489 		netdev_info(dev, "no link during initialization\n");
5490 		netif_carrier_off(dev);
5491 	}
5492 	if (oom)
5493 		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5494 
5495 	/* start statistics timer */
5496 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5497 		mod_timer(&np->stats_poll,
5498 			round_jiffies(jiffies + STATS_INTERVAL));
5499 
5500 	spin_unlock_irq(&np->lock);
5501 
5502 	/* If the loopback feature was set while the device was down, make sure
5503 	 * that it's set correctly now.
5504 	 */
5505 	if (dev->features & NETIF_F_LOOPBACK)
5506 		nv_set_loopback(dev, dev->features);
5507 
5508 	return 0;
5509 out_drain:
5510 	nv_drain_rxtx(dev);
5511 	return ret;
5512 }
5513 
5514 static int nv_close(struct net_device *dev)
5515 {
5516 	struct fe_priv *np = netdev_priv(dev);
5517 	u8 __iomem *base;
5518 
5519 	spin_lock_irq(&np->lock);
5520 	np->in_shutdown = 1;
5521 	spin_unlock_irq(&np->lock);
5522 	nv_napi_disable(dev);
5523 	synchronize_irq(np->pci_dev->irq);
5524 
5525 	del_timer_sync(&np->oom_kick);
5526 	del_timer_sync(&np->nic_poll);
5527 	del_timer_sync(&np->stats_poll);
5528 
5529 	netif_stop_queue(dev);
5530 	spin_lock_irq(&np->lock);
5531 	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5532 	nv_stop_rxtx(dev);
5533 	nv_txrx_reset(dev);
5534 
5535 	/* disable interrupts on the nic or we will lock up */
5536 	base = get_hwbase(dev);
5537 	nv_disable_hw_interrupts(dev, np->irqmask);
5538 	pci_push(base);
5539 
5540 	spin_unlock_irq(&np->lock);
5541 
5542 	nv_free_irq(dev);
5543 
5544 	nv_drain_rxtx(dev);
5545 
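	/* keep the receiver running so wake-on-lan frames can still be
	 * seen, or when the phy must stay powered up
	 */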
5546 	if (np->wolenabled || !phy_power_down) {
5547 		nv_txrx_gate(dev, false);
5548 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5549 		nv_start_rx(dev);
5550 	} else {
5551 		/* power down phy */
5552 		mii_rw(dev, np->phyaddr, MII_BMCR,
5553 		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5554 		nv_txrx_gate(dev, true);
5555 	}
5556 
5557 	/* FIXME: power down nic */
5558 
5559 	return 0;
5560 }
5561 
5562 static const struct net_device_ops nv_netdev_ops = {
5563 	.ndo_open		= nv_open,
5564 	.ndo_stop		= nv_close,
5565 	.ndo_get_stats64	= nv_get_stats64,
5566 	.ndo_start_xmit		= nv_start_xmit,
5567 	.ndo_tx_timeout		= nv_tx_timeout,
5568 	.ndo_change_mtu		= nv_change_mtu,
5569 	.ndo_fix_features	= nv_fix_features,
5570 	.ndo_set_features	= nv_set_features,
5571 	.ndo_validate_addr	= eth_validate_addr,
5572 	.ndo_set_mac_address	= nv_set_mac_address,
5573 	.ndo_set_rx_mode	= nv_set_multicast,
5574 #ifdef CONFIG_NET_POLL_CONTROLLER
5575 	.ndo_poll_controller	= nv_poll_controller,
5576 #endif
5577 };
5578 
5579 static const struct net_device_ops nv_netdev_ops_optimized = {
5580 	.ndo_open		= nv_open,
5581 	.ndo_stop		= nv_close,
5582 	.ndo_get_stats64	= nv_get_stats64,
5583 	.ndo_start_xmit		= nv_start_xmit_optimized,
5584 	.ndo_tx_timeout		= nv_tx_timeout,
5585 	.ndo_change_mtu		= nv_change_mtu,
5586 	.ndo_fix_features	= nv_fix_features,
5587 	.ndo_set_features	= nv_set_features,
5588 	.ndo_validate_addr	= eth_validate_addr,
5589 	.ndo_set_mac_address	= nv_set_mac_address,
5590 	.ndo_set_rx_mode	= nv_set_multicast,
5591 #ifdef CONFIG_NET_POLL_CONTROLLER
5592 	.ndo_poll_controller	= nv_poll_controller,
5593 #endif
5594 };
5595 
5596 static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5597 {
5598 	struct net_device *dev;
5599 	struct fe_priv *np;
5600 	unsigned long addr;
5601 	u8 __iomem *base;
5602 	int err, i;
5603 	u32 powerstate, txreg;
5604 	u32 phystate_orig = 0, phystate;
5605 	int phyinitialized = 0;
5606 	static int printed_version;
5607 
5608 	if (!printed_version++)
5609 		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5610 			FORCEDETH_VERSION);
5611 
5612 	dev = alloc_etherdev(sizeof(struct fe_priv));
5613 	err = -ENOMEM;
5614 	if (!dev)
5615 		goto out;
5616 
5617 	np = netdev_priv(dev);
5618 	np->dev = dev;
5619 	np->pci_dev = pci_dev;
5620 	spin_lock_init(&np->lock);
5621 	spin_lock_init(&np->hwstats_lock);
5622 	SET_NETDEV_DEV(dev, &pci_dev->dev);
5623 	u64_stats_init(&np->swstats_rx_syncp);
5624 	u64_stats_init(&np->swstats_tx_syncp);
5625 
5626 	init_timer(&np->oom_kick);
5627 	np->oom_kick.data = (unsigned long) dev;
5628 	np->oom_kick.function = nv_do_rx_refill;	/* timer handler */
5629 	init_timer(&np->nic_poll);
5630 	np->nic_poll.data = (unsigned long) dev;
5631 	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
5632 	init_timer_deferrable(&np->stats_poll);
5633 	np->stats_poll.data = (unsigned long) dev;
5634 	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */
5635 
5636 	err = pci_enable_device(pci_dev);
5637 	if (err)
5638 		goto out_free;
5639 
5640 	pci_set_master(pci_dev);
5641 
5642 	err = pci_request_regions(pci_dev, DRV_NAME);
5643 	if (err < 0)
5644 		goto out_disable;
5645 
5646 	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5647 		np->register_size = NV_PCI_REGSZ_VER3;
5648 	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5649 		np->register_size = NV_PCI_REGSZ_VER2;
5650 	else
5651 		np->register_size = NV_PCI_REGSZ_VER1;
5652 
5653 	err = -EINVAL;
5654 	addr = 0;
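	/* find the first memory BAR large enough to hold the register window */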
5655 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5656 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5657 				pci_resource_len(pci_dev, i) >= np->register_size) {
5658 			addr = pci_resource_start(pci_dev, i);
5659 			break;
5660 		}
5661 	}
5662 	if (i == DEVICE_COUNT_RESOURCE) {
5663 		dev_info(&pci_dev->dev, "Couldn't find register window\n");
5664 		goto out_relreg;
5665 	}
5666 
5667 	/* copy of driver data */
5668 	np->driver_data = id->driver_data;
5669 	/* copy of device id */
5670 	np->device_id = id->device;
5671 
5672 	/* handle different descriptor versions */
5673 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
5674 		/* packet format 3: supports 40-bit addressing */
5675 		np->desc_ver = DESC_VER_3;
5676 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5677 		if (dma_64bit) {
5678 			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5679 				dev_info(&pci_dev->dev,
5680 					 "64-bit DMA failed, using 32-bit addressing\n");
5681 			else
5682 				dev->features |= NETIF_F_HIGHDMA;
5683 			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5684 				dev_info(&pci_dev->dev,
5685 					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5686 			}
5687 		}
5688 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
5689 		/* packet format 2: supports jumbo frames */
5690 		np->desc_ver = DESC_VER_2;
5691 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5692 	} else {
5693 		/* original packet format */
5694 		np->desc_ver = DESC_VER_1;
5695 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5696 	}
5697 
5698 	np->pkt_limit = NV_PKTLIMIT_1;
5699 	if (id->driver_data & DEV_HAS_LARGEDESC)
5700 		np->pkt_limit = NV_PKTLIMIT_2;
5701 
5702 	if (id->driver_data & DEV_HAS_CHECKSUM) {
5703 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5704 		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5705 			NETIF_F_TSO | NETIF_F_RXCSUM;
5706 	}
5707 
5708 	np->vlanctl_bits = 0;
5709 	if (id->driver_data & DEV_HAS_VLAN) {
5710 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5711 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5712 				    NETIF_F_HW_VLAN_CTAG_TX;
5713 	}
5714 
5715 	dev->features |= dev->hw_features;
5716 
5717 	/* Add loopback capability to the device. */
5718 	dev->hw_features |= NETIF_F_LOOPBACK;
5719 
5720 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5721 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5722 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5723 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5724 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5725 	}
5726 
5727 	err = -ENOMEM;
5728 	np->base = ioremap(addr, np->register_size);
5729 	if (!np->base)
5730 		goto out_relreg;
5731 
5732 	np->rx_ring_size = RX_RING_DEFAULT;
5733 	np->tx_ring_size = TX_RING_DEFAULT;
5734 
5735 	if (!nv_optimized(np)) {
5736 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5737 					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5738 					&np->ring_addr);
5739 		if (!np->rx_ring.orig)
5740 			goto out_unmap;
5741 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5742 	} else {
5743 		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5744 					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5745 					&np->ring_addr);
5746 		if (!np->rx_ring.ex)
5747 			goto out_unmap;
5748 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5749 	}
5750 	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5751 	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5752 	if (!np->rx_skb || !np->tx_skb)
5753 		goto out_freering;
5754 
5755 	if (!nv_optimized(np))
5756 		dev->netdev_ops = &nv_netdev_ops;
5757 	else
5758 		dev->netdev_ops = &nv_netdev_ops_optimized;
5759 
5760 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5761 	SET_ETHTOOL_OPS(dev, &ops);
5762 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5763 
5764 	pci_set_drvdata(pci_dev, dev);
5765 
5766 	/* read the mac address */
5767 	base = get_hwbase(dev);
5768 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
5769 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
5770 
5771 	/* check the workaround bit for correct mac address order */
5772 	txreg = readl(base + NvRegTransmitPoll);
5773 	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5774 		/* mac address is already in correct order */
5775 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5776 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5777 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5778 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5779 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5780 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5781 	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is in correct order (workaround bit set) */
5783 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5784 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5785 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5786 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5787 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5788 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5789 		/*
5790 		 * Set orig mac address back to the reversed version.
5791 		 * This flag will be cleared during low power transition.
5792 		 * Therefore, we should always put back the reversed address.
5793 		 */
5794 		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5795 			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5796 		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5797 	} else {
5798 		/* need to reverse mac address to correct order */
5799 		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5800 		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5801 		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5802 		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5803 		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5804 		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5805 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5806 		dev_dbg(&pci_dev->dev,
5807 			"%s: set workaround bit for reversed mac addr\n",
5808 			__func__);
5809 	}
5810 
5811 	if (!is_valid_ether_addr(dev->dev_addr)) {
5812 		/*
5813 		 * Bad mac address. At least one bios sets the mac address
5814 		 * to 01:23:45:67:89:ab
5815 		 */
5816 		dev_err(&pci_dev->dev,
5817 			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5818 			dev->dev_addr);
5819 		eth_hw_addr_random(dev);
5820 		dev_err(&pci_dev->dev,
5821 			"Using random MAC address: %pM\n", dev->dev_addr);
5822 	}
5823 
5824 	/* set mac address */
5825 	nv_copy_mac_to_hw(dev);
5826 
5827 	/* disable WOL */
5828 	writel(0, base + NvRegWakeUpFlags);
5829 	np->wolenabled = 0;
5830 	device_set_wakeup_enable(&pci_dev->dev, false);
5831 
5832 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5833 
5834 		/* take phy and nic out of low power mode */
5835 		powerstate = readl(base + NvRegPowerState2);
5836 		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5837 		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5838 		    pci_dev->revision >= 0xA3)
5839 			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5840 		writel(powerstate, base + NvRegPowerState2);
5841 	}
5842 
5843 	if (np->desc_ver == DESC_VER_1)
5844 		np->tx_flags = NV_TX_VALID;
5845 	else
5846 		np->tx_flags = NV_TX2_VALID;
5847 
5848 	np->msi_flags = 0;
5849 	if ((id->driver_data & DEV_HAS_MSI) && msi)
5850 		np->msi_flags |= NV_MSI_CAPABLE;
5851 
5852 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* msi-x has had reported issues when modifying the irqmask,
		 * as in the case of napi; therefore, disable it for now.
		 */
5856 #if 0
5857 		np->msi_flags |= NV_MSI_X_CAPABLE;
5858 #endif
5859 	}
5860 
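	/* pick the irq mask and msi-x vector count for the chosen
	 * optimization mode; chips that need the timer irq cannot use
	 * dynamic mode and fall back to throughput mode
	 */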
5861 	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5862 		np->irqmask = NVREG_IRQMASK_CPU;
5863 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5864 			np->msi_flags |= 0x0001;
5865 	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5866 		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5867 		/* start off in throughput mode */
5868 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5869 		/* remove support for msix mode */
5870 		np->msi_flags &= ~NV_MSI_X_CAPABLE;
5871 	} else {
5872 		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5873 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5874 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5875 			np->msi_flags |= 0x0003;
5876 	}
5877 
5878 	if (id->driver_data & DEV_NEED_TIMERIRQ)
5879 		np->irqmask |= NVREG_IRQ_TIMER;
5880 	if (id->driver_data & DEV_NEED_LINKTIMER) {
5881 		np->need_linktimer = 1;
5882 		np->link_timeout = jiffies + LINK_TIMEOUT;
5883 	} else {
5884 		np->need_linktimer = 0;
5885 	}
5886 
5887 	/* Limit the number of tx's outstanding for hw bug */
5888 	if (id->driver_data & DEV_NEED_TX_LIMIT) {
5889 		np->tx_limit = 1;
5890 		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5891 		    pci_dev->revision >= 0xA2)
5892 			np->tx_limit = 0;
5893 	}
5894 
5895 	/* clear phy state and temporarily halt phy interrupts */
5896 	writel(0, base + NvRegMIIMask);
5897 	phystate = readl(base + NvRegAdapterControl);
5898 	if (phystate & NVREG_ADAPTCTL_RUNNING) {
5899 		phystate_orig = 1;
5900 		phystate &= ~NVREG_ADAPTCTL_RUNNING;
5901 		writel(phystate, base + NvRegAdapterControl);
5902 	}
5903 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5904 
5905 	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5906 		/* management unit running on the mac? */
5907 		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5908 		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5909 		    nv_mgmt_acquire_sema(dev) &&
5910 		    nv_mgmt_get_version(dev)) {
5911 			np->mac_in_use = 1;
5912 			if (np->mgmt_version > 0)
5913 				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5914 			/* management unit setup the phy already? */
5915 			if (np->mac_in_use &&
5916 			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5917 			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
				/* phy was already initialized by the mgmt unit */
5919 				phyinitialized = 1;
5920 			} else {
5921 				/* we need to init the phy */
5922 			}
5923 		}
5924 	}
5925 
	/* find a suitable phy: probe addresses 1-31, then 0
	 * (i == 32 wraps to phy address 0 via the & 0x1F mask)
	 */
5927 	for (i = 1; i <= 32; i++) {
5928 		int id1, id2;
5929 		int phyaddr = i & 0x1F;
5930 
5931 		spin_lock_irq(&np->lock);
5932 		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5933 		spin_unlock_irq(&np->lock);
5934 		if (id1 < 0 || id1 == 0xffff)
5935 			continue;
5936 		spin_lock_irq(&np->lock);
5937 		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5938 		spin_unlock_irq(&np->lock);
5939 		if (id2 < 0 || id2 == 0xffff)
5940 			continue;
5941 
5942 		np->phy_model = id2 & PHYID2_MODEL_MASK;
5943 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5944 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5945 		np->phyaddr = phyaddr;
5946 		np->phy_oui = id1 | id2;
5947 
		/* Realtek hardcoded phy id1 to all zeros on certain phys */
5949 		if (np->phy_oui == PHY_OUI_REALTEK2)
5950 			np->phy_oui = PHY_OUI_REALTEK;
5951 		/* Setup phy revision for Realtek */
5952 		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5953 			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5954 
5955 		break;
5956 	}
5957 	if (i == 33) {
		dev_info(&pci_dev->dev, "Could not find a valid PHY\n");
5959 		goto out_error;
5960 	}
5961 
5962 	if (!phyinitialized) {
5963 		/* reset it */
5964 		phy_init(dev);
5965 	} else {
5966 		/* see if it is a gigabit phy */
5967 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5968 		if (mii_status & PHY_GIGABIT)
5969 			np->gigabit = PHY_GIGABIT;
5970 	}
5971 
5972 	/* set default link speed settings */
5973 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5974 	np->duplex = 0;
5975 	np->autoneg = 1;
5976 
5977 	err = register_netdev(dev);
5978 	if (err) {
5979 		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5980 		goto out_error;
5981 	}
5982 
5983 	netif_carrier_off(dev);
5984 
5985 	/* Some NICs freeze when TX pause is enabled while NIC is
5986 	 * down, and this stays across warm reboots. The sequence
5987 	 * below should be enough to recover from that state.
5988 	 */
5989 	nv_update_pause(dev, 0);
5990 	nv_start_tx(dev);
5991 	nv_stop_tx(dev);
5992 
5993 	if (id->driver_data & DEV_HAS_VLAN)
5994 		nv_vlan_mode(dev, dev->features);
5995 
5996 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5997 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5998 
5999 	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6000 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
6001 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
6002 			"csum " : "",
6003 		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6004 				  NETIF_F_HW_VLAN_CTAG_TX) ?
6005 			"vlan " : "",
6006 		 dev->features & (NETIF_F_LOOPBACK) ?
6007 			"loopback " : "",
6008 		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6009 		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6010 		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6011 		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6012 		 np->need_linktimer ? "lnktim " : "",
6013 		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6014 		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6015 		 np->desc_ver);
6016 
6017 	return 0;
6018 
6019 out_error:
6020 	if (phystate_orig)
6021 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6022 out_freering:
6023 	free_rings(dev);
6024 out_unmap:
6025 	iounmap(get_hwbase(dev));
6026 out_relreg:
6027 	pci_release_regions(pci_dev);
6028 out_disable:
6029 	pci_disable_device(pci_dev);
6030 out_free:
6031 	free_netdev(dev);
6032 out:
6033 	return err;
6034 }
6035 
6036 static void nv_restore_phy(struct net_device *dev)
6037 {
6038 	struct fe_priv *np = netdev_priv(dev);
6039 	u16 phy_reserved, mii_control;
6040 
6041 	if (np->phy_oui == PHY_OUI_REALTEK &&
6042 	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
6043 	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6044 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6045 		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6046 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6047 		phy_reserved |= PHY_REALTEK_INIT8;
6048 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6049 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6050 
6051 		/* restart auto negotiation */
6052 		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6053 		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6054 		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6055 	}
6056 }
6057 
6058 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6059 {
6060 	struct net_device *dev = pci_get_drvdata(pci_dev);
6061 	struct fe_priv *np = netdev_priv(dev);
6062 	u8 __iomem *base = get_hwbase(dev);
6063 
6064 	/* special op: write back the misordered MAC address - otherwise
6065 	 * the next nv_probe would see a wrong address.
6066 	 */
6067 	writel(np->orig_mac[0], base + NvRegMacAddrA);
6068 	writel(np->orig_mac[1], base + NvRegMacAddrB);
6069 	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6070 	       base + NvRegTransmitPoll);
6071 }
6072 
6073 static void nv_remove(struct pci_dev *pci_dev)
6074 {
6075 	struct net_device *dev = pci_get_drvdata(pci_dev);
6076 
6077 	unregister_netdev(dev);
6078 
6079 	nv_restore_mac_addr(pci_dev);
6080 
6081 	/* restore any phy related changes */
6082 	nv_restore_phy(dev);
6083 
6084 	nv_mgmt_release_sema(dev);
6085 
6086 	/* free all structures */
6087 	free_rings(dev);
6088 	iounmap(get_hwbase(dev));
6089 	pci_release_regions(pci_dev);
6090 	pci_disable_device(pci_dev);
6091 	free_netdev(dev);
6092 }
6093 
6094 #ifdef CONFIG_PM_SLEEP
6095 static int nv_suspend(struct device *device)
6096 {
6097 	struct pci_dev *pdev = to_pci_dev(device);
6098 	struct net_device *dev = pci_get_drvdata(pdev);
6099 	struct fe_priv *np = netdev_priv(dev);
6100 	u8 __iomem *base = get_hwbase(dev);
6101 	int i;
6102 
6103 	if (netif_running(dev)) {
6104 		/* Gross. */
6105 		nv_close(dev);
6106 	}
6107 	netif_device_detach(dev);
6108 
6109 	/* save non-pci configuration space */
6110 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
6111 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
6112 
6113 	return 0;
6114 }
6115 
6116 static int nv_resume(struct device *device)
6117 {
6118 	struct pci_dev *pdev = to_pci_dev(device);
6119 	struct net_device *dev = pci_get_drvdata(pdev);
6120 	struct fe_priv *np = netdev_priv(dev);
6121 	u8 __iomem *base = get_hwbase(dev);
6122 	int i, rc = 0;
6123 
6124 	/* restore non-pci configuration space */
6125 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
6126 		writel(np->saved_config_space[i], base+i*sizeof(u32));
6127 
6128 	if (np->driver_data & DEV_NEED_MSI_FIX)
6129 		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6130 
6131 	/* restore phy state, including autoneg */
6132 	phy_init(dev);
6133 
6134 	netif_device_attach(dev);
6135 	if (netif_running(dev)) {
6136 		rc = nv_open(dev);
6137 		nv_set_multicast(dev);
6138 	}
6139 	return rc;
6140 }
6141 
6142 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6143 #define NV_PM_OPS (&nv_pm_ops)
6144 
6145 #else
6146 #define NV_PM_OPS NULL
6147 #endif /* CONFIG_PM_SLEEP */
6148 
6149 #ifdef CONFIG_PM
6150 static void nv_shutdown(struct pci_dev *pdev)
6151 {
6152 	struct net_device *dev = pci_get_drvdata(pdev);
6153 	struct fe_priv *np = netdev_priv(dev);
6154 
6155 	if (netif_running(dev))
6156 		nv_close(dev);
6157 
6158 	/*
6159 	 * Restore the MAC so a kernel started by kexec won't get confused.
6160 	 * If we really go for poweroff, we must not restore the MAC,
6161 	 * otherwise the MAC for WOL will be reversed at least on some boards.
6162 	 */
6163 	if (system_state != SYSTEM_POWER_OFF)
6164 		nv_restore_mac_addr(pdev);
6165 
6166 	pci_disable_device(pdev);
6167 	/*
	 * Apparently it is not possible to reinitialise from D3 hot, so
	 * only put the device into D3 if we really go for poweroff.
6170 	 */
6171 	if (system_state == SYSTEM_POWER_OFF) {
6172 		pci_wake_from_d3(pdev, np->wolenabled);
6173 		pci_set_power_state(pdev, PCI_D3hot);
6174 	}
6175 }
6176 #else
6177 #define nv_shutdown NULL
6178 #endif /* CONFIG_PM */
6179 
6180 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6181 	{	/* nForce Ethernet Controller */
6182 		PCI_DEVICE(0x10DE, 0x01C3),
6183 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6184 	},
6185 	{	/* nForce2 Ethernet Controller */
6186 		PCI_DEVICE(0x10DE, 0x0066),
6187 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6188 	},
6189 	{	/* nForce3 Ethernet Controller */
6190 		PCI_DEVICE(0x10DE, 0x00D6),
6191 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6192 	},
6193 	{	/* nForce3 Ethernet Controller */
6194 		PCI_DEVICE(0x10DE, 0x0086),
6195 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6196 	},
6197 	{	/* nForce3 Ethernet Controller */
6198 		PCI_DEVICE(0x10DE, 0x008C),
6199 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6200 	},
6201 	{	/* nForce3 Ethernet Controller */
6202 		PCI_DEVICE(0x10DE, 0x00E6),
6203 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6204 	},
6205 	{	/* nForce3 Ethernet Controller */
6206 		PCI_DEVICE(0x10DE, 0x00DF),
6207 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6208 	},
6209 	{	/* CK804 Ethernet Controller */
6210 		PCI_DEVICE(0x10DE, 0x0056),
6211 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6212 	},
6213 	{	/* CK804 Ethernet Controller */
6214 		PCI_DEVICE(0x10DE, 0x0057),
6215 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6216 	},
6217 	{	/* MCP04 Ethernet Controller */
6218 		PCI_DEVICE(0x10DE, 0x0037),
6219 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6220 	},
6221 	{	/* MCP04 Ethernet Controller */
6222 		PCI_DEVICE(0x10DE, 0x0038),
6223 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6224 	},
6225 	{	/* MCP51 Ethernet Controller */
6226 		PCI_DEVICE(0x10DE, 0x0268),
6227 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6228 	},
6229 	{	/* MCP51 Ethernet Controller */
6230 		PCI_DEVICE(0x10DE, 0x0269),
6231 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6232 	},
6233 	{	/* MCP55 Ethernet Controller */
6234 		PCI_DEVICE(0x10DE, 0x0372),
6235 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6236 	},
6237 	{	/* MCP55 Ethernet Controller */
6238 		PCI_DEVICE(0x10DE, 0x0373),
6239 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6240 	},
6241 	{	/* MCP61 Ethernet Controller */
6242 		PCI_DEVICE(0x10DE, 0x03E5),
6243 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6244 	},
6245 	{	/* MCP61 Ethernet Controller */
6246 		PCI_DEVICE(0x10DE, 0x03E6),
6247 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6248 	},
6249 	{	/* MCP61 Ethernet Controller */
6250 		PCI_DEVICE(0x10DE, 0x03EE),
6251 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6252 	},
6253 	{	/* MCP61 Ethernet Controller */
6254 		PCI_DEVICE(0x10DE, 0x03EF),
6255 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6256 	},
6257 	{	/* MCP65 Ethernet Controller */
6258 		PCI_DEVICE(0x10DE, 0x0450),
6259 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6260 	},
6261 	{	/* MCP65 Ethernet Controller */
6262 		PCI_DEVICE(0x10DE, 0x0451),
6263 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6264 	},
6265 	{	/* MCP65 Ethernet Controller */
6266 		PCI_DEVICE(0x10DE, 0x0452),
6267 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6268 	},
6269 	{	/* MCP65 Ethernet Controller */
6270 		PCI_DEVICE(0x10DE, 0x0453),
6271 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6272 	},
6273 	{	/* MCP67 Ethernet Controller */
6274 		PCI_DEVICE(0x10DE, 0x054C),
6275 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6276 	},
6277 	{	/* MCP67 Ethernet Controller */
6278 		PCI_DEVICE(0x10DE, 0x054D),
6279 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6280 	},
6281 	{	/* MCP67 Ethernet Controller */
6282 		PCI_DEVICE(0x10DE, 0x054E),
6283 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6284 	},
6285 	{	/* MCP67 Ethernet Controller */
6286 		PCI_DEVICE(0x10DE, 0x054F),
6287 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6288 	},
6289 	{	/* MCP73 Ethernet Controller */
6290 		PCI_DEVICE(0x10DE, 0x07DC),
6291 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6292 	},
6293 	{	/* MCP73 Ethernet Controller */
6294 		PCI_DEVICE(0x10DE, 0x07DD),
6295 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6296 	},
6297 	{	/* MCP73 Ethernet Controller */
6298 		PCI_DEVICE(0x10DE, 0x07DE),
6299 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6300 	},
6301 	{	/* MCP73 Ethernet Controller */
6302 		PCI_DEVICE(0x10DE, 0x07DF),
6303 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6304 	},
6305 	{	/* MCP77 Ethernet Controller */
6306 		PCI_DEVICE(0x10DE, 0x0760),
6307 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6308 	},
6309 	{	/* MCP77 Ethernet Controller */
6310 		PCI_DEVICE(0x10DE, 0x0761),
6311 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6312 	},
6313 	{	/* MCP77 Ethernet Controller */
6314 		PCI_DEVICE(0x10DE, 0x0762),
6315 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6316 	},
6317 	{	/* MCP77 Ethernet Controller */
6318 		PCI_DEVICE(0x10DE, 0x0763),
6319 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6320 	},
6321 	{	/* MCP79 Ethernet Controller */
6322 		PCI_DEVICE(0x10DE, 0x0AB0),
6323 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6324 	},
6325 	{	/* MCP79 Ethernet Controller */
6326 		PCI_DEVICE(0x10DE, 0x0AB1),
6327 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6328 	},
6329 	{	/* MCP79 Ethernet Controller */
6330 		PCI_DEVICE(0x10DE, 0x0AB2),
6331 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6332 	},
6333 	{	/* MCP79 Ethernet Controller */
6334 		PCI_DEVICE(0x10DE, 0x0AB3),
6335 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6336 	},
6337 	{	/* MCP89 Ethernet Controller */
6338 		PCI_DEVICE(0x10DE, 0x0D7D),
6339 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6340 	},
6341 	{0,},
6342 };
6343 
6344 static struct pci_driver forcedeth_pci_driver = {
6345 	.name		= DRV_NAME,
6346 	.id_table	= pci_tbl,
6347 	.probe		= nv_probe,
6348 	.remove		= nv_remove,
6349 	.shutdown	= nv_shutdown,
6350 	.driver.pm	= NV_PM_OPS,
6351 };
6352 
6353 module_param(max_interrupt_work, int, 0);
6354 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6355 module_param(optimization_mode, int, 0);
6356 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6357 module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, computed as (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
6359 module_param(msi, int, 0);
6360 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6361 module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
6363 module_param(dma_64bit, int, 0);
6364 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6365 module_param(phy_cross, int, 0);
6366 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6367 module_param(phy_power_down, int, 0);
6368 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6369 module_param(debug_tx_timeout, bool, 0);
6370 MODULE_PARM_DESC(debug_tx_timeout,
6371 		 "Dump tx related registers and ring when tx_timeout happens");
6372 
6373 module_pci_driver(forcedeth_pci_driver);
6374 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6375 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6376 MODULE_LICENSE("GPL");
6377 MODULE_DEVICE_TABLE(pci, pci_tbl);
6378