xref: /linux/drivers/net/ethernet/nvidia/forcedeth.c (revision 44b111b519160e33fdc41eadb39af86a24707edf)
1 /*
2  * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
3  *
4  * Note: This driver is a cleanroom reimplementation based on reverse
5  *      engineered documentation written by Carl-Daniel Hailfinger
6  *      and Andrew de Quincey.
7  *
8  * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
9  * trademarks of NVIDIA Corporation in the United States and other
10  * countries.
11  *
12  * Copyright (C) 2003,4,5 Manfred Spraul
13  * Copyright (C) 2004 Andrew de Quincey (wol support)
14  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
15  *		IRQ rate fixes, bigendian fixes, cleanups, verification)
16  * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2 of the License, or
21  * (at your option) any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
31  *
32  * Known bugs:
33  * We suspect that on some hardware no TX done interrupts are generated.
34  * This means recovery from netif_stop_queue only happens if the hw timer
35  * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
36  * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
37  * If your hardware reliably generates tx done interrupts, then you can remove
38  * DEV_NEED_TIMERIRQ from the driver_data flags.
39  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40  * superfluous timer interrupts from the nic.
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #define FORCEDETH_VERSION		"0.64"
46 #define DRV_NAME			"forcedeth"
47 
48 #include <linux/module.h>
49 #include <linux/types.h>
50 #include <linux/pci.h>
51 #include <linux/interrupt.h>
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/delay.h>
55 #include <linux/sched.h>
56 #include <linux/spinlock.h>
57 #include <linux/ethtool.h>
58 #include <linux/timer.h>
59 #include <linux/skbuff.h>
60 #include <linux/mii.h>
61 #include <linux/random.h>
62 #include <linux/init.h>
63 #include <linux/if_vlan.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/slab.h>
66 #include <linux/uaccess.h>
67 #include <linux/prefetch.h>
68 #include <linux/io.h>
69 
70 #include <asm/irq.h>
71 #include <asm/system.h>
72 
73 #define TX_WORK_PER_LOOP  64
74 #define RX_WORK_PER_LOOP  64
75 
76 /*
77  * Hardware access:
78  */
79 
80 #define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
81 #define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
82 #define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
83 #define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
84 #define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
85 #define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
86 #define DEV_HAS_MSI                0x0000040  /* device supports MSI */
87 #define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
88 #define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
89 #define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
90 #define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
91 #define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
92 #define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
93 #define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
94 #define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
95 #define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
96 #define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
97 #define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
98 #define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
99 #define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
100 #define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
101 #define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
102 #define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
103 #define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
104 #define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
105 #define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
106 #define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
107 
108 enum {
109 	NvRegIrqStatus = 0x000,
110 #define NVREG_IRQSTAT_MIIEVENT	0x040
111 #define NVREG_IRQSTAT_MASK		0x83ff
112 	NvRegIrqMask = 0x004,
113 #define NVREG_IRQ_RX_ERROR		0x0001
114 #define NVREG_IRQ_RX			0x0002
115 #define NVREG_IRQ_RX_NOBUF		0x0004
116 #define NVREG_IRQ_TX_ERR		0x0008
117 #define NVREG_IRQ_TX_OK			0x0010
118 #define NVREG_IRQ_TIMER			0x0020
119 #define NVREG_IRQ_LINK			0x0040
120 #define NVREG_IRQ_RX_FORCED		0x0080
121 #define NVREG_IRQ_TX_FORCED		0x0100
122 #define NVREG_IRQ_RECOVER_ERROR		0x8200
123 #define NVREG_IRQMASK_THROUGHPUT	0x00df
124 #define NVREG_IRQMASK_CPU		0x0060
125 #define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
126 #define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
127 #define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
128 
129 	NvRegUnknownSetupReg6 = 0x008,
130 #define NVREG_UNKSETUP6_VAL		3
131 
132 /*
133  * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
134  * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
135  */
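/*
 * Worked example (sketch, following the formula in the poll_interval
 * module-parameter comment below): the interval is roughly
 * (value * 2^10) / 100 usec, so the value 97 gives
 * 97 * 1024 / 100 ~= 993 usec, i.e. about 1 ms per timer tick.
 */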
136 	NvRegPollingInterval = 0x00c,
137 #define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
138 #define NVREG_POLL_DEFAULT_CPU	13
139 	NvRegMSIMap0 = 0x020,
140 	NvRegMSIMap1 = 0x024,
141 	NvRegMSIIrqMask = 0x030,
142 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
143 	NvRegMisc1 = 0x080,
144 #define NVREG_MISC1_PAUSE_TX	0x01
145 #define NVREG_MISC1_HD		0x02
146 #define NVREG_MISC1_FORCE	0x3b0f3c
147 
148 	NvRegMacReset = 0x34,
149 #define NVREG_MAC_RESET_ASSERT	0x0F3
150 	NvRegTransmitterControl = 0x084,
151 #define NVREG_XMITCTL_START	0x01
152 #define NVREG_XMITCTL_MGMT_ST	0x40000000
153 #define NVREG_XMITCTL_SYNC_MASK		0x000f0000
154 #define NVREG_XMITCTL_SYNC_NOT_READY	0x0
155 #define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
156 #define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
157 #define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
158 #define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
159 #define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
160 #define NVREG_XMITCTL_HOST_LOADED	0x00004000
161 #define NVREG_XMITCTL_TX_PATH_EN	0x01000000
162 #define NVREG_XMITCTL_DATA_START	0x00100000
163 #define NVREG_XMITCTL_DATA_READY	0x00010000
164 #define NVREG_XMITCTL_DATA_ERROR	0x00020000
165 	NvRegTransmitterStatus = 0x088,
166 #define NVREG_XMITSTAT_BUSY	0x01
167 
168 	NvRegPacketFilterFlags = 0x8c,
169 #define NVREG_PFF_PAUSE_RX	0x08
170 #define NVREG_PFF_ALWAYS	0x7F0000
171 #define NVREG_PFF_PROMISC	0x80
172 #define NVREG_PFF_MYADDR	0x20
173 #define NVREG_PFF_LOOPBACK	0x10
174 
175 	NvRegOffloadConfig = 0x90,
176 #define NVREG_OFFLOAD_HOMEPHY	0x601
177 #define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
178 	NvRegReceiverControl = 0x094,
179 #define NVREG_RCVCTL_START	0x01
180 #define NVREG_RCVCTL_RX_PATH_EN	0x01000000
181 	NvRegReceiverStatus = 0x98,
182 #define NVREG_RCVSTAT_BUSY	0x01
183 
184 	NvRegSlotTime = 0x9c,
185 #define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
186 #define NVREG_SLOTTIME_10_100_FULL	0x00007f00
187 #define NVREG_SLOTTIME_1000_FULL	0x0003ff00
188 #define NVREG_SLOTTIME_HALF		0x0000ff00
189 #define NVREG_SLOTTIME_DEFAULT		0x00007f00
190 #define NVREG_SLOTTIME_MASK		0x000000ff
191 
192 	NvRegTxDeferral = 0xA0,
193 #define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
194 #define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
195 #define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
196 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
197 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
198 #define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
199 	NvRegRxDeferral = 0xA4,
200 #define NVREG_RX_DEFERRAL_DEFAULT	0x16
201 	NvRegMacAddrA = 0xA8,
202 	NvRegMacAddrB = 0xAC,
203 	NvRegMulticastAddrA = 0xB0,
204 #define NVREG_MCASTADDRA_FORCE	0x01
205 	NvRegMulticastAddrB = 0xB4,
206 	NvRegMulticastMaskA = 0xB8,
207 #define NVREG_MCASTMASKA_NONE		0xffffffff
208 	NvRegMulticastMaskB = 0xBC,
209 #define NVREG_MCASTMASKB_NONE		0xffff
210 
211 	NvRegPhyInterface = 0xC0,
212 #define PHY_RGMII		0x10000000
213 	NvRegBackOffControl = 0xC4,
214 #define NVREG_BKOFFCTRL_DEFAULT			0x70000000
215 #define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
216 #define NVREG_BKOFFCTRL_SELECT			24
217 #define NVREG_BKOFFCTRL_GEAR			12
218 
219 	NvRegTxRingPhysAddr = 0x100,
220 	NvRegRxRingPhysAddr = 0x104,
221 	NvRegRingSizes = 0x108,
222 #define NVREG_RINGSZ_TXSHIFT 0
223 #define NVREG_RINGSZ_RXSHIFT 16
224 	NvRegTransmitPoll = 0x10c,
225 #define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
226 	NvRegLinkSpeed = 0x110,
227 #define NVREG_LINKSPEED_FORCE 0x10000
228 #define NVREG_LINKSPEED_10	1000
229 #define NVREG_LINKSPEED_100	100
230 #define NVREG_LINKSPEED_1000	50
231 #define NVREG_LINKSPEED_MASK	(0xFFF)
232 	NvRegUnknownSetupReg5 = 0x130,
233 #define NVREG_UNKSETUP5_BIT31	(1<<31)
234 	NvRegTxWatermark = 0x13c,
235 #define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
236 #define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
237 #define NVREG_TX_WM_DESC2_3_1000	0xfe08000
238 	NvRegTxRxControl = 0x144,
239 #define NVREG_TXRXCTL_KICK	0x0001
240 #define NVREG_TXRXCTL_BIT1	0x0002
241 #define NVREG_TXRXCTL_BIT2	0x0004
242 #define NVREG_TXRXCTL_IDLE	0x0008
243 #define NVREG_TXRXCTL_RESET	0x0010
244 #define NVREG_TXRXCTL_RXCHECK	0x0400
245 #define NVREG_TXRXCTL_DESC_1	0
246 #define NVREG_TXRXCTL_DESC_2	0x002100
247 #define NVREG_TXRXCTL_DESC_3	0xc02200
248 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
249 #define NVREG_TXRXCTL_VLANINS	0x00080
250 	NvRegTxRingPhysAddrHigh = 0x148,
251 	NvRegRxRingPhysAddrHigh = 0x14C,
252 	NvRegTxPauseFrame = 0x170,
253 #define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
254 #define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
255 #define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
256 #define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
257 	NvRegTxPauseFrameLimit = 0x174,
258 #define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
259 	NvRegMIIStatus = 0x180,
260 #define NVREG_MIISTAT_ERROR		0x0001
261 #define NVREG_MIISTAT_LINKCHANGE	0x0008
262 #define NVREG_MIISTAT_MASK_RW		0x0007
263 #define NVREG_MIISTAT_MASK_ALL		0x000f
264 	NvRegMIIMask = 0x184,
265 #define NVREG_MII_LINKCHANGE		0x0008
266 
267 	NvRegAdapterControl = 0x188,
268 #define NVREG_ADAPTCTL_START	0x02
269 #define NVREG_ADAPTCTL_LINKUP	0x04
270 #define NVREG_ADAPTCTL_PHYVALID	0x40000
271 #define NVREG_ADAPTCTL_RUNNING	0x100000
272 #define NVREG_ADAPTCTL_PHYSHIFT	24
273 	NvRegMIISpeed = 0x18c,
274 #define NVREG_MIISPEED_BIT8	(1<<8)
275 #define NVREG_MIIDELAY	5
276 	NvRegMIIControl = 0x190,
277 #define NVREG_MIICTL_INUSE	0x08000
278 #define NVREG_MIICTL_WRITE	0x00400
279 #define NVREG_MIICTL_ADDRSHIFT	5
280 	NvRegMIIData = 0x194,
281 	NvRegTxUnicast = 0x1a0,
282 	NvRegTxMulticast = 0x1a4,
283 	NvRegTxBroadcast = 0x1a8,
284 	NvRegWakeUpFlags = 0x200,
285 #define NVREG_WAKEUPFLAGS_VAL		0x7770
286 #define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
287 #define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
288 #define NVREG_WAKEUPFLAGS_D3SHIFT	12
289 #define NVREG_WAKEUPFLAGS_D2SHIFT	8
290 #define NVREG_WAKEUPFLAGS_D1SHIFT	4
291 #define NVREG_WAKEUPFLAGS_D0SHIFT	0
292 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
293 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
294 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
295 #define NVREG_WAKEUPFLAGS_ENABLE	0x1111
296 
297 	NvRegMgmtUnitGetVersion = 0x204,
298 #define NVREG_MGMTUNITGETVERSION	0x01
299 	NvRegMgmtUnitVersion = 0x208,
300 #define NVREG_MGMTUNITVERSION		0x08
301 	NvRegPowerCap = 0x268,
302 #define NVREG_POWERCAP_D3SUPP	(1<<30)
303 #define NVREG_POWERCAP_D2SUPP	(1<<26)
304 #define NVREG_POWERCAP_D1SUPP	(1<<25)
305 	NvRegPowerState = 0x26c,
306 #define NVREG_POWERSTATE_POWEREDUP	0x8000
307 #define NVREG_POWERSTATE_VALID		0x0100
308 #define NVREG_POWERSTATE_MASK		0x0003
309 #define NVREG_POWERSTATE_D0		0x0000
310 #define NVREG_POWERSTATE_D1		0x0001
311 #define NVREG_POWERSTATE_D2		0x0002
312 #define NVREG_POWERSTATE_D3		0x0003
313 	NvRegMgmtUnitControl = 0x278,
314 #define NVREG_MGMTUNITCONTROL_INUSE	0x20000
315 	NvRegTxCnt = 0x280,
316 	NvRegTxZeroReXmt = 0x284,
317 	NvRegTxOneReXmt = 0x288,
318 	NvRegTxManyReXmt = 0x28c,
319 	NvRegTxLateCol = 0x290,
320 	NvRegTxUnderflow = 0x294,
321 	NvRegTxLossCarrier = 0x298,
322 	NvRegTxExcessDef = 0x29c,
323 	NvRegTxRetryErr = 0x2a0,
324 	NvRegRxFrameErr = 0x2a4,
325 	NvRegRxExtraByte = 0x2a8,
326 	NvRegRxLateCol = 0x2ac,
327 	NvRegRxRunt = 0x2b0,
328 	NvRegRxFrameTooLong = 0x2b4,
329 	NvRegRxOverflow = 0x2b8,
330 	NvRegRxFCSErr = 0x2bc,
331 	NvRegRxFrameAlignErr = 0x2c0,
332 	NvRegRxLenErr = 0x2c4,
333 	NvRegRxUnicast = 0x2c8,
334 	NvRegRxMulticast = 0x2cc,
335 	NvRegRxBroadcast = 0x2d0,
336 	NvRegTxDef = 0x2d4,
337 	NvRegTxFrame = 0x2d8,
338 	NvRegRxCnt = 0x2dc,
339 	NvRegTxPause = 0x2e0,
340 	NvRegRxPause = 0x2e4,
341 	NvRegRxDropFrame = 0x2e8,
342 	NvRegVlanControl = 0x300,
343 #define NVREG_VLANCONTROL_ENABLE	0x2000
344 	NvRegMSIXMap0 = 0x3e0,
345 	NvRegMSIXMap1 = 0x3e4,
346 	NvRegMSIXIrqStatus = 0x3f0,
347 
348 	NvRegPowerState2 = 0x600,
349 #define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
350 #define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
351 #define NVREG_POWERSTATE2_PHY_RESET		0x0004
352 #define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
353 };
354 
355 /* Big endian: should work, but is untested */
356 struct ring_desc {
357 	__le32 buf;
358 	__le32 flaglen;
359 };
360 
361 struct ring_desc_ex {
362 	__le32 bufhigh;
363 	__le32 buflow;
364 	__le32 txvlan;
365 	__le32 flaglen;
366 };
367 
368 union ring_type {
369 	struct ring_desc *orig;
370 	struct ring_desc_ex *ex;
371 };
372 
373 #define FLAG_MASK_V1 0xffff0000
374 #define FLAG_MASK_V2 0xffffc000
375 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
376 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
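/*
 * Flags and buffer length share a single 32-bit flaglen word in each
 * descriptor: the length occupies the low 16 bits (v1) or the low
 * 14 bits (v2/v3) and the flags the remaining high bits. For example,
 *	len = le32_to_cpu(desc->flaglen) & LEN_MASK_V2;
 * as done by nv_descr_getlength*() below.
 */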
377 
378 #define NV_TX_LASTPACKET	(1<<16)
379 #define NV_TX_RETRYERROR	(1<<19)
380 #define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
381 #define NV_TX_FORCED_INTERRUPT	(1<<24)
382 #define NV_TX_DEFERRED		(1<<26)
383 #define NV_TX_CARRIERLOST	(1<<27)
384 #define NV_TX_LATECOLLISION	(1<<28)
385 #define NV_TX_UNDERFLOW		(1<<29)
386 #define NV_TX_ERROR		(1<<30)
387 #define NV_TX_VALID		(1<<31)
388 
389 #define NV_TX2_LASTPACKET	(1<<29)
390 #define NV_TX2_RETRYERROR	(1<<18)
391 #define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
392 #define NV_TX2_FORCED_INTERRUPT	(1<<30)
393 #define NV_TX2_DEFERRED		(1<<25)
394 #define NV_TX2_CARRIERLOST	(1<<26)
395 #define NV_TX2_LATECOLLISION	(1<<27)
396 #define NV_TX2_UNDERFLOW	(1<<28)
397 /* error and valid are the same for both */
398 #define NV_TX2_ERROR		(1<<30)
399 #define NV_TX2_VALID		(1<<31)
400 #define NV_TX2_TSO		(1<<28)
401 #define NV_TX2_TSO_SHIFT	14
402 #define NV_TX2_TSO_MAX_SHIFT	14
403 #define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
404 #define NV_TX2_CHECKSUM_L3	(1<<27)
405 #define NV_TX2_CHECKSUM_L4	(1<<26)
406 
407 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
408 
409 #define NV_RX_DESCRIPTORVALID	(1<<16)
410 #define NV_RX_MISSEDFRAME	(1<<17)
411 #define NV_RX_SUBSTRACT1	(1<<18)
412 #define NV_RX_ERROR1		(1<<23)
413 #define NV_RX_ERROR2		(1<<24)
414 #define NV_RX_ERROR3		(1<<25)
415 #define NV_RX_ERROR4		(1<<26)
416 #define NV_RX_CRCERR		(1<<27)
417 #define NV_RX_OVERFLOW		(1<<28)
418 #define NV_RX_FRAMINGERR	(1<<29)
419 #define NV_RX_ERROR		(1<<30)
420 #define NV_RX_AVAIL		(1<<31)
421 #define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
422 
423 #define NV_RX2_CHECKSUMMASK	(0x1C000000)
424 #define NV_RX2_CHECKSUM_IP	(0x10000000)
425 #define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
426 #define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
427 #define NV_RX2_DESCRIPTORVALID	(1<<29)
428 #define NV_RX2_SUBSTRACT1	(1<<25)
429 #define NV_RX2_ERROR1		(1<<18)
430 #define NV_RX2_ERROR2		(1<<19)
431 #define NV_RX2_ERROR3		(1<<20)
432 #define NV_RX2_ERROR4		(1<<21)
433 #define NV_RX2_CRCERR		(1<<22)
434 #define NV_RX2_OVERFLOW		(1<<23)
435 #define NV_RX2_FRAMINGERR	(1<<24)
436 /* error and avail are the same for both */
437 #define NV_RX2_ERROR		(1<<30)
438 #define NV_RX2_AVAIL		(1<<31)
439 #define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
440 
441 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
442 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
443 
444 /* Miscellaneous hardware related defines: */
445 #define NV_PCI_REGSZ_VER1	0x270
446 #define NV_PCI_REGSZ_VER2	0x2d4
447 #define NV_PCI_REGSZ_VER3	0x604
448 #define NV_PCI_REGSZ_MAX	0x604
449 
450 /* various timeout delays: all in usec */
451 #define NV_TXRX_RESET_DELAY	4
452 #define NV_TXSTOP_DELAY1	10
453 #define NV_TXSTOP_DELAY1MAX	500000
454 #define NV_TXSTOP_DELAY2	100
455 #define NV_RXSTOP_DELAY1	10
456 #define NV_RXSTOP_DELAY1MAX	500000
457 #define NV_RXSTOP_DELAY2	100
458 #define NV_SETUP5_DELAY		5
459 #define NV_SETUP5_DELAYMAX	50000
460 #define NV_POWERUP_DELAY	5
461 #define NV_POWERUP_DELAYMAX	5000
462 #define NV_MIIBUSY_DELAY	50
463 #define NV_MIIPHY_DELAY	10
464 #define NV_MIIPHY_DELAYMAX	10000
465 #define NV_MAC_RESET_DELAY	64
466 
467 #define NV_WAKEUPPATTERNS	5
468 #define NV_WAKEUPMASKENTRIES	4
469 
470 /* General driver defaults */
471 #define NV_WATCHDOG_TIMEO	(5*HZ)
472 
473 #define RX_RING_DEFAULT		512
474 #define TX_RING_DEFAULT		256
475 #define RX_RING_MIN		128
476 #define TX_RING_MIN		64
477 #define RING_MAX_DESC_VER_1	1024
478 #define RING_MAX_DESC_VER_2_3	16384
479 
480 /* rx/tx mac addr + type + vlan + align + slack */
481 #define NV_RX_HEADERS		(64)
482 /* even more slack. */
483 #define NV_RX_ALLOC_PAD		(64)
484 
485 /* maximum mtu size */
486 #define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
487 #define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
488 
489 #define OOM_REFILL	(1+HZ/20)
490 #define POLL_WAIT	(1+HZ/100)
491 #define LINK_TIMEOUT	(3*HZ)
492 #define STATS_INTERVAL	(10*HZ)
493 
494 /*
495  * desc_ver values:
496  * The nic supports three different descriptor types:
497  * - DESC_VER_1: Original
498  * - DESC_VER_2: support for jumbo frames.
499  * - DESC_VER_3: 64-bit format.
500  */
501 #define DESC_VER_1	1
502 #define DESC_VER_2	2
503 #define DESC_VER_3	3
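/*
 * DESC_VER_1 and DESC_VER_2 use the two-word struct ring_desc layout;
 * DESC_VER_3 uses the four-word struct ring_desc_ex layout (see
 * nv_optimized() below).
 */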
504 
505 /* PHY defines */
506 #define PHY_OUI_MARVELL		0x5043
507 #define PHY_OUI_CICADA		0x03f1
508 #define PHY_OUI_VITESSE		0x01c1
509 #define PHY_OUI_REALTEK		0x0732
510 #define PHY_OUI_REALTEK2	0x0020
511 #define PHYID1_OUI_MASK	0x03ff
512 #define PHYID1_OUI_SHFT	6
513 #define PHYID2_OUI_MASK	0xfc00
514 #define PHYID2_OUI_SHFT	10
515 #define PHYID2_MODEL_MASK		0x03f0
516 #define PHY_MODEL_REALTEK_8211		0x0110
517 #define PHY_REV_MASK			0x0001
518 #define PHY_REV_REALTEK_8211B		0x0000
519 #define PHY_REV_REALTEK_8211C		0x0001
520 #define PHY_MODEL_REALTEK_8201		0x0200
521 #define PHY_MODEL_MARVELL_E3016		0x0220
522 #define PHY_MARVELL_E3016_INITMASK	0x0300
523 #define PHY_CICADA_INIT1	0x0f000
524 #define PHY_CICADA_INIT2	0x0e00
525 #define PHY_CICADA_INIT3	0x01000
526 #define PHY_CICADA_INIT4	0x0200
527 #define PHY_CICADA_INIT5	0x0004
528 #define PHY_CICADA_INIT6	0x02000
529 #define PHY_VITESSE_INIT_REG1	0x1f
530 #define PHY_VITESSE_INIT_REG2	0x10
531 #define PHY_VITESSE_INIT_REG3	0x11
532 #define PHY_VITESSE_INIT_REG4	0x12
533 #define PHY_VITESSE_INIT_MSK1	0xc
534 #define PHY_VITESSE_INIT_MSK2	0x0180
535 #define PHY_VITESSE_INIT1	0x52b5
536 #define PHY_VITESSE_INIT2	0xaf8a
537 #define PHY_VITESSE_INIT3	0x8
538 #define PHY_VITESSE_INIT4	0x8f8a
539 #define PHY_VITESSE_INIT5	0xaf86
540 #define PHY_VITESSE_INIT6	0x8f86
541 #define PHY_VITESSE_INIT7	0xaf82
542 #define PHY_VITESSE_INIT8	0x0100
543 #define PHY_VITESSE_INIT9	0x8f82
544 #define PHY_VITESSE_INIT10	0x0
545 #define PHY_REALTEK_INIT_REG1	0x1f
546 #define PHY_REALTEK_INIT_REG2	0x19
547 #define PHY_REALTEK_INIT_REG3	0x13
548 #define PHY_REALTEK_INIT_REG4	0x14
549 #define PHY_REALTEK_INIT_REG5	0x18
550 #define PHY_REALTEK_INIT_REG6	0x11
551 #define PHY_REALTEK_INIT_REG7	0x01
552 #define PHY_REALTEK_INIT1	0x0000
553 #define PHY_REALTEK_INIT2	0x8e00
554 #define PHY_REALTEK_INIT3	0x0001
555 #define PHY_REALTEK_INIT4	0xad17
556 #define PHY_REALTEK_INIT5	0xfb54
557 #define PHY_REALTEK_INIT6	0xf5c7
558 #define PHY_REALTEK_INIT7	0x1000
559 #define PHY_REALTEK_INIT8	0x0003
560 #define PHY_REALTEK_INIT9	0x0008
561 #define PHY_REALTEK_INIT10	0x0005
562 #define PHY_REALTEK_INIT11	0x0200
563 #define PHY_REALTEK_INIT_MSK1	0x0003
564 
565 #define PHY_GIGABIT	0x0100
566 
567 #define PHY_TIMEOUT	0x1
568 #define PHY_ERROR	0x2
569 
570 #define PHY_100	0x1
571 #define PHY_1000	0x2
572 #define PHY_HALF	0x100
573 
574 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
575 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
576 #define NV_PAUSEFRAME_RX_ENABLE  0x0004
577 #define NV_PAUSEFRAME_TX_ENABLE  0x0008
578 #define NV_PAUSEFRAME_RX_REQ     0x0010
579 #define NV_PAUSEFRAME_TX_REQ     0x0020
580 #define NV_PAUSEFRAME_AUTONEG    0x0040
581 
582 /* MSI/MSI-X defines */
583 #define NV_MSI_X_MAX_VECTORS  8
584 #define NV_MSI_X_VECTORS_MASK 0x000f
585 #define NV_MSI_CAPABLE        0x0010
586 #define NV_MSI_X_CAPABLE      0x0020
587 #define NV_MSI_ENABLED        0x0040
588 #define NV_MSI_X_ENABLED      0x0080
589 
590 #define NV_MSI_X_VECTOR_ALL   0x0
591 #define NV_MSI_X_VECTOR_RX    0x0
592 #define NV_MSI_X_VECTOR_TX    0x1
593 #define NV_MSI_X_VECTOR_OTHER 0x2
594 
595 #define NV_MSI_PRIV_OFFSET 0x68
596 #define NV_MSI_PRIV_VALUE  0xffffffff
597 
598 #define NV_RESTART_TX         0x1
599 #define NV_RESTART_RX         0x2
600 
601 #define NV_TX_LIMIT_COUNT     16
602 
603 #define NV_DYNAMIC_THRESHOLD        4
604 #define NV_DYNAMIC_MAX_QUIET_COUNT  2048
605 
606 /* statistics */
607 struct nv_ethtool_str {
608 	char name[ETH_GSTRING_LEN];
609 };
610 
611 static const struct nv_ethtool_str nv_estats_str[] = {
612 	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
613 	{ "tx_zero_rexmt" },
614 	{ "tx_one_rexmt" },
615 	{ "tx_many_rexmt" },
616 	{ "tx_late_collision" },
617 	{ "tx_fifo_errors" },
618 	{ "tx_carrier_errors" },
619 	{ "tx_excess_deferral" },
620 	{ "tx_retry_error" },
621 	{ "rx_frame_error" },
622 	{ "rx_extra_byte" },
623 	{ "rx_late_collision" },
624 	{ "rx_runt" },
625 	{ "rx_frame_too_long" },
626 	{ "rx_over_errors" },
627 	{ "rx_crc_errors" },
628 	{ "rx_frame_align_error" },
629 	{ "rx_length_error" },
630 	{ "rx_unicast" },
631 	{ "rx_multicast" },
632 	{ "rx_broadcast" },
633 	{ "rx_packets" },
634 	{ "rx_errors_total" },
635 	{ "tx_errors_total" },
636 
637 	/* version 2 stats */
638 	{ "tx_deferral" },
639 	{ "tx_packets" },
640 	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
641 	{ "tx_pause" },
642 	{ "rx_pause" },
643 	{ "rx_drop_frame" },
644 
645 	/* version 3 stats */
646 	{ "tx_unicast" },
647 	{ "tx_multicast" },
648 	{ "tx_broadcast" }
649 };
650 
651 struct nv_ethtool_stats {
652 	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
653 	u64 tx_zero_rexmt;
654 	u64 tx_one_rexmt;
655 	u64 tx_many_rexmt;
656 	u64 tx_late_collision;
657 	u64 tx_fifo_errors;
658 	u64 tx_carrier_errors;
659 	u64 tx_excess_deferral;
660 	u64 tx_retry_error;
661 	u64 rx_frame_error;
662 	u64 rx_extra_byte;
663 	u64 rx_late_collision;
664 	u64 rx_runt;
665 	u64 rx_frame_too_long;
666 	u64 rx_over_errors;
667 	u64 rx_crc_errors;
668 	u64 rx_frame_align_error;
669 	u64 rx_length_error;
670 	u64 rx_unicast;
671 	u64 rx_multicast;
672 	u64 rx_broadcast;
673 	u64 rx_packets; /* should be ifconfig->rx_packets */
674 	u64 rx_errors_total;
675 	u64 tx_errors_total;
676 
677 	/* version 2 stats */
678 	u64 tx_deferral;
679 	u64 tx_packets; /* should be ifconfig->tx_packets */
680 	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
681 	u64 tx_pause;
682 	u64 rx_pause;
683 	u64 rx_drop_frame;
684 
685 	/* version 3 stats */
686 	u64 tx_unicast;
687 	u64 tx_multicast;
688 	u64 tx_broadcast;
689 };
690 
691 #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
692 #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
693 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
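/*
 * These counts are derived from the struct layout, so nv_ethtool_stats
 * and nv_estats_str must stay in the same order: V3 covers all 33 u64
 * counters, V2 the first 30 (without the 3 version-3 counters), and V1
 * the first 24 (additionally without the 6 version-2 counters).
 */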
694 
695 /* diagnostics */
696 #define NV_TEST_COUNT_BASE 3
697 #define NV_TEST_COUNT_EXTENDED 4
698 
699 static const struct nv_ethtool_str nv_etests_str[] = {
700 	{ "link      (online/offline)" },
701 	{ "register  (offline)       " },
702 	{ "interrupt (offline)       " },
703 	{ "loopback  (offline)       " }
704 };
705 
706 struct register_test {
707 	__u32 reg;
708 	__u32 mask;
709 };
710 
711 static const struct register_test nv_registers_test[] = {
712 	{ NvRegUnknownSetupReg6, 0x01 },
713 	{ NvRegMisc1, 0x03c },
714 	{ NvRegOffloadConfig, 0x03ff },
715 	{ NvRegMulticastAddrA, 0xffffffff },
716 	{ NvRegTxWatermark, 0x0ff },
717 	{ NvRegWakeUpFlags, 0x07777 },
718 	{ 0, 0 }
719 };
720 
721 struct nv_skb_map {
722 	struct sk_buff *skb;
723 	dma_addr_t dma;
724 	unsigned int dma_len:31;
725 	unsigned int dma_single:1;
726 	struct ring_desc_ex *first_tx_desc;
727 	struct nv_skb_map *next_tx_ctx;
728 };
729 
730 /*
731  * SMP locking:
732  * All hardware access under netdev_priv(dev)->lock, except the performance
733  * critical parts:
734  * - rx is (pseudo-) lockless: it relies on the single-threading provided
735  *	by the arch code for interrupts.
736  * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
737  *	needs netdev_priv(dev)->lock :-(
738  * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
739  */
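/* Typical slow-path pattern (sketch only):
 *	spin_lock_irq(&np->lock);
 *	... reprogram registers ...
 *	spin_unlock_irq(&np->lock);
 * while the rx/tx hot paths rely on the lighter rules above.
 */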
740 
741 /* in dev: base, irq */
742 struct fe_priv {
743 	spinlock_t lock;
744 
745 	struct net_device *dev;
746 	struct napi_struct napi;
747 
748 	/* General data:
749 	 * Locking: spin_lock(&np->lock); */
750 	struct nv_ethtool_stats estats;
751 	int in_shutdown;
752 	u32 linkspeed;
753 	int duplex;
754 	int autoneg;
755 	int fixed_mode;
756 	int phyaddr;
757 	int wolenabled;
758 	unsigned int phy_oui;
759 	unsigned int phy_model;
760 	unsigned int phy_rev;
761 	u16 gigabit;
762 	int intr_test;
763 	int recover_error;
764 	int quiet_count;
765 
766 	/* General data: RO fields */
767 	dma_addr_t ring_addr;
768 	struct pci_dev *pci_dev;
769 	u32 orig_mac[2];
770 	u32 events;
771 	u32 irqmask;
772 	u32 desc_ver;
773 	u32 txrxctl_bits;
774 	u32 vlanctl_bits;
775 	u32 driver_data;
776 	u32 device_id;
777 	u32 register_size;
778 	u32 mac_in_use;
779 	int mgmt_version;
780 	int mgmt_sema;
781 
782 	void __iomem *base;
783 
784 	/* rx specific fields.
785 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
786 	 */
787 	union ring_type get_rx, put_rx, first_rx, last_rx;
788 	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
789 	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
790 	struct nv_skb_map *rx_skb;
791 
792 	union ring_type rx_ring;
793 	unsigned int rx_buf_sz;
794 	unsigned int pkt_limit;
795 	struct timer_list oom_kick;
796 	struct timer_list nic_poll;
797 	struct timer_list stats_poll;
798 	u32 nic_poll_irq;
799 	int rx_ring_size;
800 
801 	/* media detection workaround.
802 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
803 	 */
804 	int need_linktimer;
805 	unsigned long link_timeout;
806 	/*
807 	 * tx specific fields.
808 	 */
809 	union ring_type get_tx, put_tx, first_tx, last_tx;
810 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
811 	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
812 	struct nv_skb_map *tx_skb;
813 
814 	union ring_type tx_ring;
815 	u32 tx_flags;
816 	int tx_ring_size;
817 	int tx_limit;
818 	u32 tx_pkts_in_progress;
819 	struct nv_skb_map *tx_change_owner;
820 	struct nv_skb_map *tx_end_flip;
821 	int tx_stop;
822 
823 	/* msi/msi-x fields */
824 	u32 msi_flags;
825 	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
826 
827 	/* flow control */
828 	u32 pause_flags;
829 
830 	/* power saved state */
831 	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
832 
833 	/* for different msi-x irq type */
834 	char name_rx[IFNAMSIZ + 3];       /* -rx    */
835 	char name_tx[IFNAMSIZ + 3];       /* -tx    */
836 	char name_other[IFNAMSIZ + 6];    /* -other */
837 };
838 
839 /*
840  * Maximum number of loops until we assume that a bit in the irq mask
841  * is stuck. Overridable with module param.
842  */
843 static int max_interrupt_work = 4;
844 
845 /*
846  * Optimization can be either throughput mode or CPU mode
847  *
848  * Throughput Mode: Every tx and rx packet will generate an interrupt.
849  * CPU Mode: Interrupts are controlled by a timer.
850  */
851 enum {
852 	NV_OPTIMIZATION_MODE_THROUGHPUT,
853 	NV_OPTIMIZATION_MODE_CPU,
854 	NV_OPTIMIZATION_MODE_DYNAMIC
855 };
856 static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
857 
858 /*
859  * Poll interval for timer irq
860  *
861  * This interval determines how frequently an interrupt is generated.
862  * Its value is computed as [(time_in_micro_secs * 100) / (2^10)].
863  * Min = 0, Max = 65535
864  */
865 static int poll_interval = -1;
866 
867 /*
868  * MSI interrupts
869  */
870 enum {
871 	NV_MSI_INT_DISABLED,
872 	NV_MSI_INT_ENABLED
873 };
874 static int msi = NV_MSI_INT_ENABLED;
875 
876 /*
877  * MSIX interrupts
878  */
879 enum {
880 	NV_MSIX_INT_DISABLED,
881 	NV_MSIX_INT_ENABLED
882 };
883 static int msix = NV_MSIX_INT_ENABLED;
884 
885 /*
886  * DMA 64bit
887  */
888 enum {
889 	NV_DMA_64BIT_DISABLED,
890 	NV_DMA_64BIT_ENABLED
891 };
892 static int dma_64bit = NV_DMA_64BIT_ENABLED;
893 
894 /*
895  * Crossover Detection
896  * Realtek 8201 phys on some OEM boards do not work properly with it.
897  */
898 enum {
899 	NV_CROSSOVER_DETECTION_DISABLED,
900 	NV_CROSSOVER_DETECTION_ENABLED
901 };
902 static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
903 
904 /*
905  * Power down phy when interface is down (persists through reboot;
906  * older Linux and other OSes may not power it up again)
907  */
908 static int phy_power_down;
909 
910 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
911 {
912 	return netdev_priv(dev);
913 }
914 
915 static inline u8 __iomem *get_hwbase(struct net_device *dev)
916 {
917 	return ((struct fe_priv *)netdev_priv(dev))->base;
918 }
919 
920 static inline void pci_push(u8 __iomem *base)
921 {
922 	/* force out pending posted writes */
923 	readl(base);
924 }
925 
926 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
927 {
928 	return le32_to_cpu(prd->flaglen)
929 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
930 }
931 
932 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
933 {
934 	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
935 }
936 
937 static bool nv_optimized(struct fe_priv *np)
938 {
939 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
940 		return false;
941 	return true;
942 }
943 
944 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
945 		     int delay, int delaymax)
946 {
947 	u8 __iomem *base = get_hwbase(dev);
948 
949 	pci_push(base);
950 	do {
951 		udelay(delay);
952 		delaymax -= delay;
953 		if (delaymax < 0)
954 			return 1;
955 	} while ((readl(base + offset) & mask) != target);
956 	return 0;
957 }
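/* Example: mii_rw() below waits for the MII unit to go idle with
 *	reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
 *		  NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX);
 * i.e. it polls every 10 usec and gives up (returns 1) after 10000 usec.
 */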
958 
959 #define NV_SETUP_RX_RING 0x01
960 #define NV_SETUP_TX_RING 0x02
961 
962 static inline u32 dma_low(dma_addr_t addr)
963 {
964 	return addr;
965 }
966 
967 static inline u32 dma_high(dma_addr_t addr)
968 {
969 	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
970 }
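/*
 * The double shift in dma_high() avoids undefined behaviour: a plain
 * "addr >> 32" is invalid C when dma_addr_t is only 32 bits wide.
 * Shifting by 31 and then by 1 yields 0 on 32-bit configurations and
 * the upper 32 bits on 64-bit ones.
 */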
971 
972 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
973 {
974 	struct fe_priv *np = get_nvpriv(dev);
975 	u8 __iomem *base = get_hwbase(dev);
976 
977 	if (!nv_optimized(np)) {
978 		if (rxtx_flags & NV_SETUP_RX_RING)
979 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
980 		if (rxtx_flags & NV_SETUP_TX_RING)
981 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
982 	} else {
983 		if (rxtx_flags & NV_SETUP_RX_RING) {
984 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
985 			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
986 		}
987 		if (rxtx_flags & NV_SETUP_TX_RING) {
988 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
989 			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
990 		}
991 	}
992 }
993 
994 static void free_rings(struct net_device *dev)
995 {
996 	struct fe_priv *np = get_nvpriv(dev);
997 
998 	if (!nv_optimized(np)) {
999 		if (np->rx_ring.orig)
1000 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
1001 					    np->rx_ring.orig, np->ring_addr);
1002 	} else {
1003 		if (np->rx_ring.ex)
1004 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1005 					    np->rx_ring.ex, np->ring_addr);
1006 	}
1007 	kfree(np->rx_skb);
1008 	kfree(np->tx_skb);
1009 }
1010 
1011 static int using_multi_irqs(struct net_device *dev)
1012 {
1013 	struct fe_priv *np = get_nvpriv(dev);
1014 
1015 	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1016 	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
1017 	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
1018 		return 0;
1019 	else
1020 		return 1;
1021 }
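/*
 * using_multi_irqs() returns 1 only when MSI-X is enabled with more
 * than one vector, i.e. the full rx/tx/other split; a single shared
 * vector (legacy irq, MSI, or one-vector MSI-X) is serviced through
 * the combined path, as nv_enable_irq()/nv_disable_irq() below show.
 */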
1022 
1023 static void nv_txrx_gate(struct net_device *dev, bool gate)
1024 {
1025 	struct fe_priv *np = get_nvpriv(dev);
1026 	u8 __iomem *base = get_hwbase(dev);
1027 	u32 powerstate;
1028 
1029 	if (!np->mac_in_use &&
1030 	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
1031 		powerstate = readl(base + NvRegPowerState2);
1032 		if (gate)
1033 			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
1034 		else
1035 			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
1036 		writel(powerstate, base + NvRegPowerState2);
1037 	}
1038 }
1039 
1040 static void nv_enable_irq(struct net_device *dev)
1041 {
1042 	struct fe_priv *np = get_nvpriv(dev);
1043 
1044 	if (!using_multi_irqs(dev)) {
1045 		if (np->msi_flags & NV_MSI_X_ENABLED)
1046 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1047 		else
1048 			enable_irq(np->pci_dev->irq);
1049 	} else {
1050 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1051 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1052 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1053 	}
1054 }
1055 
1056 static void nv_disable_irq(struct net_device *dev)
1057 {
1058 	struct fe_priv *np = get_nvpriv(dev);
1059 
1060 	if (!using_multi_irqs(dev)) {
1061 		if (np->msi_flags & NV_MSI_X_ENABLED)
1062 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1063 		else
1064 			disable_irq(np->pci_dev->irq);
1065 	} else {
1066 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1067 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1068 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1069 	}
1070 }
1071 
1072 /* In MSIX mode, a write to irqmask behaves as XOR */
1073 static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1074 {
1075 	u8 __iomem *base = get_hwbase(dev);
1076 
1077 	writel(mask, base + NvRegIrqMask);
1078 }
1079 
1080 static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1081 {
1082 	struct fe_priv *np = get_nvpriv(dev);
1083 	u8 __iomem *base = get_hwbase(dev);
1084 
1085 	if (np->msi_flags & NV_MSI_X_ENABLED) {
1086 		writel(mask, base + NvRegIrqMask);
1087 	} else {
1088 		if (np->msi_flags & NV_MSI_ENABLED)
1089 			writel(0, base + NvRegMSIIrqMask);
1090 		writel(0, base + NvRegIrqMask);
1091 	}
1092 }
1093 
1094 static void nv_napi_enable(struct net_device *dev)
1095 {
1096 	struct fe_priv *np = get_nvpriv(dev);
1097 
1098 	napi_enable(&np->napi);
1099 }
1100 
1101 static void nv_napi_disable(struct net_device *dev)
1102 {
1103 	struct fe_priv *np = get_nvpriv(dev);
1104 
1105 	napi_disable(&np->napi);
1106 }
1107 
1108 #define MII_READ	(-1)
1109 /* mii_rw: read/write a register on the PHY.
1110  *
1111  * Caller must guarantee serialization
1112  */
1113 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1114 {
1115 	u8 __iomem *base = get_hwbase(dev);
1116 	u32 reg;
1117 	int retval;
1118 
1119 	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1120 
1121 	reg = readl(base + NvRegMIIControl);
1122 	if (reg & NVREG_MIICTL_INUSE) {
1123 		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1124 		udelay(NV_MIIBUSY_DELAY);
1125 	}
1126 
1127 	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1128 	if (value != MII_READ) {
1129 		writel(value, base + NvRegMIIData);
1130 		reg |= NVREG_MIICTL_WRITE;
1131 	}
1132 	writel(reg, base + NvRegMIIControl);
1133 
1134 	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1135 			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1136 		retval = -1;
1137 	} else if (value != MII_READ) {
1138 		/* it was a write operation - fewer failures are detectable */
1139 		retval = 0;
1140 	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1141 		retval = -1;
1142 	} else {
1143 		retval = readl(base + NvRegMIIData);
1144 	}
1145 
1146 	return retval;
1147 }
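/* Usage sketch: a read passes MII_READ as the value, e.g.
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 * and returns the register contents, or -1 on error; a write returns
 * 0 on success and -1 on timeout.
 */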
1148 
1149 static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1150 {
1151 	struct fe_priv *np = netdev_priv(dev);
1152 	u32 miicontrol;
1153 	unsigned int tries = 0;
1154 
1155 	miicontrol = BMCR_RESET | bmcr_setup;
1156 	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1157 		return -1;
1158 
1159 	/* wait for 500ms */
1160 	msleep(500);
1161 
1162 	/* must wait till reset is deasserted */
1163 	while (miicontrol & BMCR_RESET) {
1164 		usleep_range(10000, 20000);
1165 		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1166 		/* FIXME: 100 tries seem excessive */
1167 		if (tries++ > 100)
1168 			return -1;
1169 	}
1170 	return 0;
1171 }
1172 
1173 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1174 {
1175 	static const struct {
1176 		int reg;
1177 		int init;
1178 	} ri[] = {
1179 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1180 		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1181 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1182 		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1183 		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1184 		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1185 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1186 	};
1187 	int i;
1188 
1189 	for (i = 0; i < ARRAY_SIZE(ri); i++) {
1190 		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1191 			return PHY_ERROR;
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1198 {
1199 	u32 reg;
1200 	u8 __iomem *base = get_hwbase(dev);
1201 	u32 powerstate = readl(base + NvRegPowerState2);
1202 
1203 	/* need to perform hw phy reset */
1204 	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1205 	writel(powerstate, base + NvRegPowerState2);
1206 	msleep(25);
1207 
1208 	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1209 	writel(powerstate, base + NvRegPowerState2);
1210 	msleep(25);
1211 
1212 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1213 	reg |= PHY_REALTEK_INIT9;
1214 	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1215 		return PHY_ERROR;
1216 	if (mii_rw(dev, np->phyaddr,
1217 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1218 		return PHY_ERROR;
1219 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1220 	if (!(reg & PHY_REALTEK_INIT11)) {
1221 		reg |= PHY_REALTEK_INIT11;
1222 		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1223 			return PHY_ERROR;
1224 	}
1225 	if (mii_rw(dev, np->phyaddr,
1226 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1227 		return PHY_ERROR;
1228 
1229 	return 0;
1230 }
1231 
1232 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1233 {
1234 	u32 phy_reserved;
1235 
1236 	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1237 		phy_reserved = mii_rw(dev, np->phyaddr,
1238 				      PHY_REALTEK_INIT_REG6, MII_READ);
1239 		phy_reserved |= PHY_REALTEK_INIT7;
1240 		if (mii_rw(dev, np->phyaddr,
1241 			   PHY_REALTEK_INIT_REG6, phy_reserved))
1242 			return PHY_ERROR;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1249 {
1250 	u32 phy_reserved;
1251 
1252 	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1253 		if (mii_rw(dev, np->phyaddr,
1254 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1255 			return PHY_ERROR;
1256 		phy_reserved = mii_rw(dev, np->phyaddr,
1257 				      PHY_REALTEK_INIT_REG2, MII_READ);
1258 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1259 		phy_reserved |= PHY_REALTEK_INIT3;
1260 		if (mii_rw(dev, np->phyaddr,
1261 			   PHY_REALTEK_INIT_REG2, phy_reserved))
1262 			return PHY_ERROR;
1263 		if (mii_rw(dev, np->phyaddr,
1264 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1265 			return PHY_ERROR;
1266 	}
1267 
1268 	return 0;
1269 }
1270 
1271 static int init_cicada(struct net_device *dev, struct fe_priv *np,
1272 		       u32 phyinterface)
1273 {
1274 	u32 phy_reserved;
1275 
1276 	if (phyinterface & PHY_RGMII) {
1277 		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1278 		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1279 		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1280 		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1281 			return PHY_ERROR;
1282 		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1283 		phy_reserved |= PHY_CICADA_INIT5;
1284 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1285 			return PHY_ERROR;
1286 	}
1287 	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1288 	phy_reserved |= PHY_CICADA_INIT6;
1289 	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1290 		return PHY_ERROR;
1291 
1292 	return 0;
1293 }
1294 
1295 static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1296 {
1297 	u32 phy_reserved;
1298 
1299 	if (mii_rw(dev, np->phyaddr,
1300 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1301 		return PHY_ERROR;
1302 	if (mii_rw(dev, np->phyaddr,
1303 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1304 		return PHY_ERROR;
1305 	phy_reserved = mii_rw(dev, np->phyaddr,
1306 			      PHY_VITESSE_INIT_REG4, MII_READ);
1307 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1308 		return PHY_ERROR;
1309 	phy_reserved = mii_rw(dev, np->phyaddr,
1310 			      PHY_VITESSE_INIT_REG3, MII_READ);
1311 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1312 	phy_reserved |= PHY_VITESSE_INIT3;
1313 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1314 		return PHY_ERROR;
1315 	if (mii_rw(dev, np->phyaddr,
1316 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1317 		return PHY_ERROR;
1318 	if (mii_rw(dev, np->phyaddr,
1319 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1320 		return PHY_ERROR;
1321 	phy_reserved = mii_rw(dev, np->phyaddr,
1322 			      PHY_VITESSE_INIT_REG4, MII_READ);
1323 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1324 	phy_reserved |= PHY_VITESSE_INIT3;
1325 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1326 		return PHY_ERROR;
1327 	phy_reserved = mii_rw(dev, np->phyaddr,
1328 			      PHY_VITESSE_INIT_REG3, MII_READ);
1329 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1330 		return PHY_ERROR;
1331 	if (mii_rw(dev, np->phyaddr,
1332 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1333 		return PHY_ERROR;
1334 	if (mii_rw(dev, np->phyaddr,
1335 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1336 		return PHY_ERROR;
1337 	phy_reserved = mii_rw(dev, np->phyaddr,
1338 			      PHY_VITESSE_INIT_REG4, MII_READ);
1339 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1340 		return PHY_ERROR;
1341 	phy_reserved = mii_rw(dev, np->phyaddr,
1342 			      PHY_VITESSE_INIT_REG3, MII_READ);
1343 	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1344 	phy_reserved |= PHY_VITESSE_INIT8;
1345 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1346 		return PHY_ERROR;
1347 	if (mii_rw(dev, np->phyaddr,
1348 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1349 		return PHY_ERROR;
1350 	if (mii_rw(dev, np->phyaddr,
1351 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1352 		return PHY_ERROR;
1353 
1354 	return 0;
1355 }
1356 
1357 static int phy_init(struct net_device *dev)
1358 {
1359 	struct fe_priv *np = get_nvpriv(dev);
1360 	u8 __iomem *base = get_hwbase(dev);
1361 	u32 phyinterface;
1362 	u32 mii_status, mii_control, mii_control_1000, reg;
1363 
1364 	/* phy errata for E3016 phy */
1365 	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1366 		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1367 		reg &= ~PHY_MARVELL_E3016_INITMASK;
1368 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1369 			netdev_info(dev, "%s: phy write to errata reg failed\n",
1370 				    pci_name(np->pci_dev));
1371 			return PHY_ERROR;
1372 		}
1373 	}
1374 	if (np->phy_oui == PHY_OUI_REALTEK) {
1375 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1376 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1377 			if (init_realtek_8211b(dev, np)) {
1378 				netdev_info(dev, "%s: phy init failed\n",
1379 					    pci_name(np->pci_dev));
1380 				return PHY_ERROR;
1381 			}
1382 		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1383 			   np->phy_rev == PHY_REV_REALTEK_8211C) {
1384 			if (init_realtek_8211c(dev, np)) {
1385 				netdev_info(dev, "%s: phy init failed\n",
1386 					    pci_name(np->pci_dev));
1387 				return PHY_ERROR;
1388 			}
1389 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1390 			if (init_realtek_8201(dev, np)) {
1391 				netdev_info(dev, "%s: phy init failed\n",
1392 					    pci_name(np->pci_dev));
1393 				return PHY_ERROR;
1394 			}
1395 		}
1396 	}
1397 
1398 	/* set advertise register */
1399 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1400 	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1401 		ADVERTISE_100HALF | ADVERTISE_100FULL |
1402 		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1403 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1404 		netdev_info(dev, "%s: phy write to advertise failed\n",
1405 			    pci_name(np->pci_dev));
1406 		return PHY_ERROR;
1407 	}
1408 
1409 	/* get phy interface type */
1410 	phyinterface = readl(base + NvRegPhyInterface);
1411 
1412 	/* see if gigabit phy */
1413 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1414 	if (mii_status & PHY_GIGABIT) {
1415 		np->gigabit = PHY_GIGABIT;
1416 		mii_control_1000 = mii_rw(dev, np->phyaddr,
1417 					  MII_CTRL1000, MII_READ);
1418 		mii_control_1000 &= ~ADVERTISE_1000HALF;
1419 		if (phyinterface & PHY_RGMII)
1420 			mii_control_1000 |= ADVERTISE_1000FULL;
1421 		else
1422 			mii_control_1000 &= ~ADVERTISE_1000FULL;
1423 
1424 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1425 			netdev_info(dev, "%s: phy init failed\n",
1426 				    pci_name(np->pci_dev));
1427 			return PHY_ERROR;
1428 		}
1429 	} else
1430 		np->gigabit = 0;
1431 
1432 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1433 	mii_control |= BMCR_ANENABLE;
1434 
1435 	if (np->phy_oui == PHY_OUI_REALTEK &&
1436 	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
1437 	    np->phy_rev == PHY_REV_REALTEK_8211C) {
1438 		/* start autoneg since we already performed hw reset above */
1439 		mii_control |= BMCR_ANRESTART;
1440 		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1441 			netdev_info(dev, "%s: phy init failed\n",
1442 				    pci_name(np->pci_dev));
1443 			return PHY_ERROR;
1444 		}
1445 	} else {
1446 		/* reset the phy
1447 		 * (certain phys need bmcr to be setup with reset)
1448 		 */
1449 		if (phy_reset(dev, mii_control)) {
1450 			netdev_info(dev, "%s: phy reset failed\n",
1451 				    pci_name(np->pci_dev));
1452 			return PHY_ERROR;
1453 		}
1454 	}
1455 
1456 	/* phy vendor specific configuration */
1457 	if ((np->phy_oui == PHY_OUI_CICADA)) {
1458 		if (init_cicada(dev, np, phyinterface)) {
1459 			netdev_info(dev, "%s: phy init failed\n",
1460 				    pci_name(np->pci_dev));
1461 			return PHY_ERROR;
1462 		}
1463 	} else if (np->phy_oui == PHY_OUI_VITESSE) {
1464 		if (init_vitesse(dev, np)) {
1465 			netdev_info(dev, "%s: phy init failed\n",
1466 				    pci_name(np->pci_dev));
1467 			return PHY_ERROR;
1468 		}
1469 	} else if (np->phy_oui == PHY_OUI_REALTEK) {
1470 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1471 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1472 			/* reset could have cleared these out, set them back */
1473 			if (init_realtek_8211b(dev, np)) {
1474 				netdev_info(dev, "%s: phy init failed\n",
1475 					    pci_name(np->pci_dev));
1476 				return PHY_ERROR;
1477 			}
1478 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1479 			if (init_realtek_8201(dev, np) ||
1480 			    init_realtek_8201_cross(dev, np)) {
1481 				netdev_info(dev, "%s: phy init failed\n",
1482 					    pci_name(np->pci_dev));
1483 				return PHY_ERROR;
1484 			}
1485 		}
1486 	}
1487 
1488 	/* some phys clear out pause advertisement on reset, set it back */
1489 	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1490 
1491 	/* restart auto negotiation, power down phy */
1492 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1493 	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1494 	if (phy_power_down)
1495 		mii_control |= BMCR_PDOWN;
1496 	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1497 		return PHY_ERROR;
1498 
1499 	return 0;
1500 }
1501 
1502 static void nv_start_rx(struct net_device *dev)
1503 {
1504 	struct fe_priv *np = netdev_priv(dev);
1505 	u8 __iomem *base = get_hwbase(dev);
1506 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1507 
1508 	/* Already running? Stop it. */
1509 	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1510 		rx_ctrl &= ~NVREG_RCVCTL_START;
1511 		writel(rx_ctrl, base + NvRegReceiverControl);
1512 		pci_push(base);
1513 	}
1514 	writel(np->linkspeed, base + NvRegLinkSpeed);
1515 	pci_push(base);
1516 	rx_ctrl |= NVREG_RCVCTL_START;
1517 	if (np->mac_in_use)
1518 		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1519 	writel(rx_ctrl, base + NvRegReceiverControl);
1520 	pci_push(base);
1521 }
1522 
1523 static void nv_stop_rx(struct net_device *dev)
1524 {
1525 	struct fe_priv *np = netdev_priv(dev);
1526 	u8 __iomem *base = get_hwbase(dev);
1527 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1528 
1529 	if (!np->mac_in_use)
1530 		rx_ctrl &= ~NVREG_RCVCTL_START;
1531 	else
1532 		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1533 	writel(rx_ctrl, base + NvRegReceiverControl);
1534 	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1535 		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1536 		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1537 			    __func__);
1538 
1539 	udelay(NV_RXSTOP_DELAY2);
1540 	if (!np->mac_in_use)
1541 		writel(0, base + NvRegLinkSpeed);
1542 }
1543 
1544 static void nv_start_tx(struct net_device *dev)
1545 {
1546 	struct fe_priv *np = netdev_priv(dev);
1547 	u8 __iomem *base = get_hwbase(dev);
1548 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1549 
1550 	tx_ctrl |= NVREG_XMITCTL_START;
1551 	if (np->mac_in_use)
1552 		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1553 	writel(tx_ctrl, base + NvRegTransmitterControl);
1554 	pci_push(base);
1555 }
1556 
1557 static void nv_stop_tx(struct net_device *dev)
1558 {
1559 	struct fe_priv *np = netdev_priv(dev);
1560 	u8 __iomem *base = get_hwbase(dev);
1561 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1562 
1563 	if (!np->mac_in_use)
1564 		tx_ctrl &= ~NVREG_XMITCTL_START;
1565 	else
1566 		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1567 	writel(tx_ctrl, base + NvRegTransmitterControl);
1568 	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1569 		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1570 		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1571 			    __func__);
1572 
1573 	udelay(NV_TXSTOP_DELAY2);
1574 	if (!np->mac_in_use)
1575 		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1576 		       base + NvRegTransmitPoll);
1577 }
1578 
1579 static void nv_start_rxtx(struct net_device *dev)
1580 {
1581 	nv_start_rx(dev);
1582 	nv_start_tx(dev);
1583 }
1584 
1585 static void nv_stop_rxtx(struct net_device *dev)
1586 {
1587 	nv_stop_rx(dev);
1588 	nv_stop_tx(dev);
1589 }
1590 
1591 static void nv_txrx_reset(struct net_device *dev)
1592 {
1593 	struct fe_priv *np = netdev_priv(dev);
1594 	u8 __iomem *base = get_hwbase(dev);
1595 
1596 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1597 	pci_push(base);
1598 	udelay(NV_TXRX_RESET_DELAY);
1599 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1600 	pci_push(base);
1601 }
1602 
1603 static void nv_mac_reset(struct net_device *dev)
1604 {
1605 	struct fe_priv *np = netdev_priv(dev);
1606 	u8 __iomem *base = get_hwbase(dev);
1607 	u32 temp1, temp2, temp3;
1608 
1609 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1610 	pci_push(base);
1611 
1612 	/* save registers since they will be cleared on reset */
1613 	temp1 = readl(base + NvRegMacAddrA);
1614 	temp2 = readl(base + NvRegMacAddrB);
1615 	temp3 = readl(base + NvRegTransmitPoll);
1616 
1617 	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1618 	pci_push(base);
1619 	udelay(NV_MAC_RESET_DELAY);
1620 	writel(0, base + NvRegMacReset);
1621 	pci_push(base);
1622 	udelay(NV_MAC_RESET_DELAY);
1623 
1624 	/* restore saved registers */
1625 	writel(temp1, base + NvRegMacAddrA);
1626 	writel(temp2, base + NvRegMacAddrB);
1627 	writel(temp3, base + NvRegTransmitPoll);
1628 
1629 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1630 	pci_push(base);
1631 }
1632 
1633 static void nv_get_hw_stats(struct net_device *dev)
1634 {
1635 	struct fe_priv *np = netdev_priv(dev);
1636 	u8 __iomem *base = get_hwbase(dev);
1637 
1638 	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1639 	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1640 	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1641 	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1642 	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1643 	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1644 	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1645 	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1646 	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1647 	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1648 	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1649 	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1650 	np->estats.rx_runt += readl(base + NvRegRxRunt);
1651 	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1652 	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1653 	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1654 	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1655 	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1656 	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1657 	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1658 	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1659 	np->estats.rx_packets =
1660 		np->estats.rx_unicast +
1661 		np->estats.rx_multicast +
1662 		np->estats.rx_broadcast;
1663 	np->estats.rx_errors_total =
1664 		np->estats.rx_crc_errors +
1665 		np->estats.rx_over_errors +
1666 		np->estats.rx_frame_error +
1667 		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1668 		np->estats.rx_late_collision +
1669 		np->estats.rx_runt +
1670 		np->estats.rx_frame_too_long;
1671 	np->estats.tx_errors_total =
1672 		np->estats.tx_late_collision +
1673 		np->estats.tx_fifo_errors +
1674 		np->estats.tx_carrier_errors +
1675 		np->estats.tx_excess_deferral +
1676 		np->estats.tx_retry_error;
1677 
1678 	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1679 		np->estats.tx_deferral += readl(base + NvRegTxDef);
1680 		np->estats.tx_packets += readl(base + NvRegTxFrame);
1681 		np->estats.rx_bytes += readl(base + NvRegRxCnt);
1682 		np->estats.tx_pause += readl(base + NvRegTxPause);
1683 		np->estats.rx_pause += readl(base + NvRegRxPause);
1684 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1685 		np->estats.rx_errors_total += np->estats.rx_drop_frame;
1686 	}
1687 
1688 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1689 		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1690 		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1691 		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1692 	}
1693 }
1694 
1695 /*
1696  * nv_get_stats: dev->get_stats function
1697  * Get latest stats value from the nic.
1698  * Called with read_lock(&dev_base_lock) held for read -
1699  * only synchronized against unregister_netdevice.
1700  */
1701 static struct net_device_stats *nv_get_stats(struct net_device *dev)
1702 {
1703 	struct fe_priv *np = netdev_priv(dev);
1704 
1705 	/* If the nic supports hw counters then retrieve latest values */
1706 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1707 		nv_get_hw_stats(dev);
1708 
1709 		/*
1710 		 * Note: because HW stats are not always available and
1711 		 * for consistency reasons, the following ifconfig
1712 		 * stats are managed by software: rx_bytes, tx_bytes,
1713 		 * rx_packets and tx_packets. The related hardware
1714 		 * stats reported by ethtool should be equivalent to
1715 		 * these ifconfig stats, with 4 additional bytes per
1716 		 * packet (Ethernet FCS CRC).
1717 		 */
1718 
1719 		/* copy to net_device stats */
1720 		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1721 		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1722 		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
1723 		dev->stats.rx_over_errors = np->estats.rx_over_errors;
1724 		dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
1725 		dev->stats.rx_errors = np->estats.rx_errors_total;
1726 		dev->stats.tx_errors = np->estats.tx_errors_total;
1727 	}
1728 
1729 	return &dev->stats;
1730 }
1731 
1732 /*
1733  * nv_alloc_rx: fill rx ring entries.
1734  * Return 1 if an skb allocation failed and the rx engine is
1735  * left without available descriptors.
1736  */
1737 static int nv_alloc_rx(struct net_device *dev)
1738 {
1739 	struct fe_priv *np = netdev_priv(dev);
1740 	struct ring_desc *less_rx;
1741 
1742 	less_rx = np->get_rx.orig;
1743 	if (less_rx-- == np->first_rx.orig)
1744 		less_rx = np->last_rx.orig;
1745 
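	/*
	 * Fill only up to one descriptor short of the entry the receiver
	 * will process next (less_rx): keeping at least one slot unfilled
	 * lets put_rx == get_rx unambiguously mean "ring empty" rather
	 * than "ring full".
	 */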
1746 	while (np->put_rx.orig != less_rx) {
1747 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1748 		if (skb) {
1749 			np->put_rx_ctx->skb = skb;
1750 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1751 							     skb->data,
1752 							     skb_tailroom(skb),
1753 							     PCI_DMA_FROMDEVICE);
1754 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1755 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1756 			wmb();
1757 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1758 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1759 				np->put_rx.orig = np->first_rx.orig;
1760 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1761 				np->put_rx_ctx = np->first_rx_ctx;
1762 		} else
1763 			return 1;
1764 	}
1765 	return 0;
1766 }
1767 
1768 static int nv_alloc_rx_optimized(struct net_device *dev)
1769 {
1770 	struct fe_priv *np = netdev_priv(dev);
1771 	struct ring_desc_ex *less_rx;
1772 
1773 	less_rx = np->get_rx.ex;
1774 	if (less_rx-- == np->first_rx.ex)
1775 		less_rx = np->last_rx.ex;
1776 
1777 	while (np->put_rx.ex != less_rx) {
1778 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1779 		if (skb) {
1780 			np->put_rx_ctx->skb = skb;
1781 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1782 							     skb->data,
1783 							     skb_tailroom(skb),
1784 							     PCI_DMA_FROMDEVICE);
1785 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1786 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1787 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1788 			wmb();
1789 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1790 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1791 				np->put_rx.ex = np->first_rx.ex;
1792 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1793 				np->put_rx_ctx = np->first_rx_ctx;
1794 		} else
1795 			return 1;
1796 	}
1797 	return 0;
1798 }
1799 
1800 /* If rx bufs are exhausted, this is called after 50 ms to attempt a refill */
1801 static void nv_do_rx_refill(unsigned long data)
1802 {
1803 	struct net_device *dev = (struct net_device *) data;
1804 	struct fe_priv *np = netdev_priv(dev);
1805 
1806 	/* Just reschedule NAPI rx processing */
1807 	napi_schedule(&np->napi);
1808 }
1809 
1810 static void nv_init_rx(struct net_device *dev)
1811 {
1812 	struct fe_priv *np = netdev_priv(dev);
1813 	int i;
1814 
1815 	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1816 
1817 	if (!nv_optimized(np))
1818 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1819 	else
1820 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1821 	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1822 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1823 
1824 	for (i = 0; i < np->rx_ring_size; i++) {
1825 		if (!nv_optimized(np)) {
1826 			np->rx_ring.orig[i].flaglen = 0;
1827 			np->rx_ring.orig[i].buf = 0;
1828 		} else {
1829 			np->rx_ring.ex[i].flaglen = 0;
1830 			np->rx_ring.ex[i].txvlan = 0;
1831 			np->rx_ring.ex[i].bufhigh = 0;
1832 			np->rx_ring.ex[i].buflow = 0;
1833 		}
1834 		np->rx_skb[i].skb = NULL;
1835 		np->rx_skb[i].dma = 0;
1836 	}
1837 }
1838 
1839 static void nv_init_tx(struct net_device *dev)
1840 {
1841 	struct fe_priv *np = netdev_priv(dev);
1842 	int i;
1843 
1844 	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1845 
1846 	if (!nv_optimized(np))
1847 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1848 	else
1849 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1850 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1851 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1852 	np->tx_pkts_in_progress = 0;
1853 	np->tx_change_owner = NULL;
1854 	np->tx_end_flip = NULL;
1855 	np->tx_stop = 0;
1856 
1857 	for (i = 0; i < np->tx_ring_size; i++) {
1858 		if (!nv_optimized(np)) {
1859 			np->tx_ring.orig[i].flaglen = 0;
1860 			np->tx_ring.orig[i].buf = 0;
1861 		} else {
1862 			np->tx_ring.ex[i].flaglen = 0;
1863 			np->tx_ring.ex[i].txvlan = 0;
1864 			np->tx_ring.ex[i].bufhigh = 0;
1865 			np->tx_ring.ex[i].buflow = 0;
1866 		}
1867 		np->tx_skb[i].skb = NULL;
1868 		np->tx_skb[i].dma = 0;
1869 		np->tx_skb[i].dma_len = 0;
1870 		np->tx_skb[i].dma_single = 0;
1871 		np->tx_skb[i].first_tx_desc = NULL;
1872 		np->tx_skb[i].next_tx_ctx = NULL;
1873 	}
1874 }
1875 
1876 static int nv_init_ring(struct net_device *dev)
1877 {
1878 	struct fe_priv *np = netdev_priv(dev);
1879 
1880 	nv_init_tx(dev);
1881 	nv_init_rx(dev);
1882 
1883 	if (!nv_optimized(np))
1884 		return nv_alloc_rx(dev);
1885 	else
1886 		return nv_alloc_rx_optimized(dev);
1887 }
1888 
1889 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1890 {
1891 	if (tx_skb->dma) {
1892 		if (tx_skb->dma_single)
1893 			pci_unmap_single(np->pci_dev, tx_skb->dma,
1894 					 tx_skb->dma_len,
1895 					 PCI_DMA_TODEVICE);
1896 		else
1897 			pci_unmap_page(np->pci_dev, tx_skb->dma,
1898 				       tx_skb->dma_len,
1899 				       PCI_DMA_TODEVICE);
1900 		tx_skb->dma = 0;
1901 	}
1902 }
1903 
1904 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1905 {
1906 	nv_unmap_txskb(np, tx_skb);
1907 	if (tx_skb->skb) {
1908 		dev_kfree_skb_any(tx_skb->skb);
1909 		tx_skb->skb = NULL;
1910 		return 1;
1911 	}
1912 	return 0;
1913 }
1914 
1915 static void nv_drain_tx(struct net_device *dev)
1916 {
1917 	struct fe_priv *np = netdev_priv(dev);
1918 	unsigned int i;
1919 
1920 	for (i = 0; i < np->tx_ring_size; i++) {
1921 		if (!nv_optimized(np)) {
1922 			np->tx_ring.orig[i].flaglen = 0;
1923 			np->tx_ring.orig[i].buf = 0;
1924 		} else {
1925 			np->tx_ring.ex[i].flaglen = 0;
1926 			np->tx_ring.ex[i].txvlan = 0;
1927 			np->tx_ring.ex[i].bufhigh = 0;
1928 			np->tx_ring.ex[i].buflow = 0;
1929 		}
1930 		if (nv_release_txskb(np, &np->tx_skb[i]))
1931 			dev->stats.tx_dropped++;
1932 		np->tx_skb[i].dma = 0;
1933 		np->tx_skb[i].dma_len = 0;
1934 		np->tx_skb[i].dma_single = 0;
1935 		np->tx_skb[i].first_tx_desc = NULL;
1936 		np->tx_skb[i].next_tx_ctx = NULL;
1937 	}
1938 	np->tx_pkts_in_progress = 0;
1939 	np->tx_change_owner = NULL;
1940 	np->tx_end_flip = NULL;
1941 }
1942 
1943 static void nv_drain_rx(struct net_device *dev)
1944 {
1945 	struct fe_priv *np = netdev_priv(dev);
1946 	int i;
1947 
1948 	for (i = 0; i < np->rx_ring_size; i++) {
1949 		if (!nv_optimized(np)) {
1950 			np->rx_ring.orig[i].flaglen = 0;
1951 			np->rx_ring.orig[i].buf = 0;
1952 		} else {
1953 			np->rx_ring.ex[i].flaglen = 0;
1954 			np->rx_ring.ex[i].txvlan = 0;
1955 			np->rx_ring.ex[i].bufhigh = 0;
1956 			np->rx_ring.ex[i].buflow = 0;
1957 		}
1958 		wmb();
1959 		if (np->rx_skb[i].skb) {
1960 			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1961 					 (skb_end_pointer(np->rx_skb[i].skb) -
1962 					  np->rx_skb[i].skb->data),
1963 					 PCI_DMA_FROMDEVICE);
1964 			dev_kfree_skb(np->rx_skb[i].skb);
1965 			np->rx_skb[i].skb = NULL;
1966 		}
1967 	}
1968 }
1969 
1970 static void nv_drain_rxtx(struct net_device *dev)
1971 {
1972 	nv_drain_tx(dev);
1973 	nv_drain_rx(dev);
1974 }
1975 
1976 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1977 {
1978 	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1979 }
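
/*
 * Worked example for nv_get_empty_tx_slots(), assuming tx_ring_size = 8:
 * with put_tx_ctx four entries ahead of get_tx_ctx the pointer difference
 * is 4, so used = (8 + 4) % 8 = 4 and 8 - 4 = 4 slots are still empty.
 * Once put wraps past the end of np->tx_skb the difference goes negative,
 * e.g. -6 with two packets in flight: used = (8 - 6) % 8 = 2, leaving
 * 8 - 2 = 6 empty slots, as expected.
 */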
1980 
1981 static void nv_legacybackoff_reseed(struct net_device *dev)
1982 {
1983 	u8 __iomem *base = get_hwbase(dev);
1984 	u32 reg;
1985 	u32 low;
1986 	int tx_status = 0;
1987 
1988 	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1989 	get_random_bytes(&low, sizeof(low));
1990 	reg |= low & NVREG_SLOTTIME_MASK;
1991 
1992 	/* Need to stop tx before change takes effect.
1993 	 * Caller has already gained np->lock.
1994 	 */
1995 	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1996 	if (tx_status)
1997 		nv_stop_tx(dev);
1998 	nv_stop_rx(dev);
1999 	writel(reg, base + NvRegSlotTime);
2000 	if (tx_status)
2001 		nv_start_tx(dev);
2002 	nv_start_rx(dev);
2003 }
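
/*
 * Note on the reseed above: only the NVREG_SLOTTIME_MASK bits of
 * NvRegSlotTime are replaced with fresh random data; the remaining bits
 * (the slot time itself, as programmed by nv_update_linkspeed()) are
 * preserved, and the transmitter is stopped around the write because
 * the new seed only takes effect while tx is idle.
 */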
2004 
2005 /* Gear Backoff Seeds */
2006 #define BACKOFF_SEEDSET_ROWS	8
2007 #define BACKOFF_SEEDSET_LFSRS	15
2008 
2009 /* Known Good seed sets */
2010 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2011 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2012 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2013 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2014 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2015 	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2016 	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2017 	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2018 	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2019 
2020 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2021 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2022 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2023 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2024 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2025 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2026 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2027 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2028 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2029 
2030 static void nv_gear_backoff_reseed(struct net_device *dev)
2031 {
2032 	u8 __iomem *base = get_hwbase(dev);
2033 	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2034 	u32 temp, seedset, combinedSeed;
2035 	int i;
2036 
2037 	/* Set up the seed for the free-running LFSR */
2038 	/* We gather three random 12-bit values and swizzle
2039 	   their bits around to increase randomness */
2040 	get_random_bytes(&miniseed1, sizeof(miniseed1));
2041 	miniseed1 &= 0x0fff;
2042 	if (miniseed1 == 0)
2043 		miniseed1 = 0xabc;
2044 
2045 	get_random_bytes(&miniseed2, sizeof(miniseed2));
2046 	miniseed2 &= 0x0fff;
2047 	if (miniseed2 == 0)
2048 		miniseed2 = 0xabc;
2049 	miniseed2_reversed =
2050 		((miniseed2 & 0xF00) >> 8) |
2051 		 (miniseed2 & 0x0F0) |
2052 		 ((miniseed2 & 0x00F) << 8);
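
	/*
	 * Example: the masks above swap the top and bottom nibbles of the
	 * 12-bit value and keep the middle one, so miniseed2 = 0x123
	 * yields miniseed2_reversed = 0x321.
	 */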
2053 
2054 	get_random_bytes(&miniseed3, sizeof(miniseed3));
2055 	miniseed3 &= 0x0fff;
2056 	if (miniseed3 == 0)
2057 		miniseed3 = 0xabc;
2058 	miniseed3_reversed =
2059 		((miniseed3 & 0xF00) >> 8) |
2060 		 (miniseed3 & 0x0F0) |
2061 		 ((miniseed3 & 0x00F) << 8);
2062 
2063 	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2064 		       (miniseed2 ^ miniseed3_reversed);
2065 
2066 	/* Seeds cannot be zero */
2067 	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2068 		combinedSeed |= 0x08;
2069 	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2070 		combinedSeed |= 0x8000;
2071 
2072 	/* No need to disable tx here */
2073 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2074 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2075 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2076 	writel(temp, base + NvRegBackOffControl);
2077 
2078 	/* Setup seeds for all gear LFSRs. */
2079 	get_random_bytes(&seedset, sizeof(seedset));
2080 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
2081 	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2082 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2083 		temp |= main_seedset[seedset][i-1] & 0x3ff;
2084 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2085 		writel(temp, base + NvRegBackOffControl);
2086 	}
2087 }
2088 
2089 /*
2090  * nv_start_xmit: dev->hard_start_xmit function
2091  * Called with netif_tx_lock held.
2092  */
2093 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2094 {
2095 	struct fe_priv *np = netdev_priv(dev);
2096 	u32 tx_flags = 0;
2097 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2098 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2099 	unsigned int i;
2100 	u32 offset = 0;
2101 	u32 bcnt;
2102 	u32 size = skb_headlen(skb);
2103 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2104 	u32 empty_slots;
2105 	struct ring_desc *put_tx;
2106 	struct ring_desc *start_tx;
2107 	struct ring_desc *prev_tx;
2108 	struct nv_skb_map *prev_tx_ctx;
2109 	unsigned long flags;
2110 
2111 	/* add fragments to entries count */
2112 	for (i = 0; i < fragments; i++) {
2113 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2114 
2115 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2116 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2117 	}
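
	/*
	 * Each buffer is split into NV_TX2_TSO_MAX_SIZE chunks, so
	 * (assuming NV_TX2_TSO_MAX_SIZE == 1 << NV_TX2_TSO_MAX_SHIFT)
	 * the shift-and-remainder above is simply
	 * entries = DIV_ROUND_UP(size, NV_TX2_TSO_MAX_SIZE), summed over
	 * the linear area and every fragment.
	 */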
2118 
2119 	spin_lock_irqsave(&np->lock, flags);
2120 	empty_slots = nv_get_empty_tx_slots(np);
2121 	if (unlikely(empty_slots <= entries)) {
2122 		netif_stop_queue(dev);
2123 		np->tx_stop = 1;
2124 		spin_unlock_irqrestore(&np->lock, flags);
2125 		return NETDEV_TX_BUSY;
2126 	}
2127 	spin_unlock_irqrestore(&np->lock, flags);
2128 
2129 	start_tx = put_tx = np->put_tx.orig;
2130 
2131 	/* setup the header buffer */
2132 	do {
2133 		prev_tx = put_tx;
2134 		prev_tx_ctx = np->put_tx_ctx;
2135 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2136 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2137 						PCI_DMA_TODEVICE);
2138 		np->put_tx_ctx->dma_len = bcnt;
2139 		np->put_tx_ctx->dma_single = 1;
2140 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2141 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2142 
2143 		tx_flags = np->tx_flags;
2144 		offset += bcnt;
2145 		size -= bcnt;
2146 		if (unlikely(put_tx++ == np->last_tx.orig))
2147 			put_tx = np->first_tx.orig;
2148 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2149 			np->put_tx_ctx = np->first_tx_ctx;
2150 	} while (size);
2151 
2152 	/* setup the fragments */
2153 	for (i = 0; i < fragments; i++) {
2154 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2155 		u32 frag_size = skb_frag_size(frag);
2156 		offset = 0;
2157 
2158 		do {
2159 			prev_tx = put_tx;
2160 			prev_tx_ctx = np->put_tx_ctx;
2161 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2162 			np->put_tx_ctx->dma = skb_frag_dma_map(
2163 							&np->pci_dev->dev,
2164 							frag, offset,
2165 							bcnt,
2166 							DMA_TO_DEVICE);
2167 			np->put_tx_ctx->dma_len = bcnt;
2168 			np->put_tx_ctx->dma_single = 0;
2169 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2170 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2171 
2172 			offset += bcnt;
2173 			frag_size -= bcnt;
2174 			if (unlikely(put_tx++ == np->last_tx.orig))
2175 				put_tx = np->first_tx.orig;
2176 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2177 				np->put_tx_ctx = np->first_tx_ctx;
2178 		} while (frag_size);
2179 	}
2180 
2181 	/* set last fragment flag */
2182 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2183 
2184 	/* save skb in this slot's context area */
2185 	prev_tx_ctx->skb = skb;
2186 
2187 	if (skb_is_gso(skb))
2188 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2189 	else
2190 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2191 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2192 
2193 	spin_lock_irqsave(&np->lock, flags);
2194 
2195 	/* set tx flags */
2196 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2197 	np->put_tx.orig = put_tx;
2198 
2199 	spin_unlock_irqrestore(&np->lock, flags);
2200 
2201 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2202 	return NETDEV_TX_OK;
2203 }
2204 
2205 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2206 					   struct net_device *dev)
2207 {
2208 	struct fe_priv *np = netdev_priv(dev);
2209 	u32 tx_flags = 0;
2210 	u32 tx_flags_extra;
2211 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2212 	unsigned int i;
2213 	u32 offset = 0;
2214 	u32 bcnt;
2215 	u32 size = skb_headlen(skb);
2216 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2217 	u32 empty_slots;
2218 	struct ring_desc_ex *put_tx;
2219 	struct ring_desc_ex *start_tx;
2220 	struct ring_desc_ex *prev_tx;
2221 	struct nv_skb_map *prev_tx_ctx;
2222 	struct nv_skb_map *start_tx_ctx;
2223 	unsigned long flags;
2224 
2225 	/* add fragments to entries count */
2226 	for (i = 0; i < fragments; i++) {
2227 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2228 
2229 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2230 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2231 	}
2232 
2233 	spin_lock_irqsave(&np->lock, flags);
2234 	empty_slots = nv_get_empty_tx_slots(np);
2235 	if (unlikely(empty_slots <= entries)) {
2236 		netif_stop_queue(dev);
2237 		np->tx_stop = 1;
2238 		spin_unlock_irqrestore(&np->lock, flags);
2239 		return NETDEV_TX_BUSY;
2240 	}
2241 	spin_unlock_irqrestore(&np->lock, flags);
2242 
2243 	start_tx = put_tx = np->put_tx.ex;
2244 	start_tx_ctx = np->put_tx_ctx;
2245 
2246 	/* setup the header buffer */
2247 	do {
2248 		prev_tx = put_tx;
2249 		prev_tx_ctx = np->put_tx_ctx;
2250 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2251 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2252 						PCI_DMA_TODEVICE);
2253 		np->put_tx_ctx->dma_len = bcnt;
2254 		np->put_tx_ctx->dma_single = 1;
2255 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2256 		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2257 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2258 
2259 		tx_flags = NV_TX2_VALID;
2260 		offset += bcnt;
2261 		size -= bcnt;
2262 		if (unlikely(put_tx++ == np->last_tx.ex))
2263 			put_tx = np->first_tx.ex;
2264 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2265 			np->put_tx_ctx = np->first_tx_ctx;
2266 	} while (size);
2267 
2268 	/* setup the fragments */
2269 	for (i = 0; i < fragments; i++) {
2270 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2271 		u32 frag_size = skb_frag_size(frag);
2272 		offset = 0;
2273 
2274 		do {
2275 			prev_tx = put_tx;
2276 			prev_tx_ctx = np->put_tx_ctx;
2277 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2278 			np->put_tx_ctx->dma = skb_frag_dma_map(
2279 							&np->pci_dev->dev,
2280 							frag, offset,
2281 							bcnt,
2282 							DMA_TO_DEVICE);
2283 			np->put_tx_ctx->dma_len = bcnt;
2284 			np->put_tx_ctx->dma_single = 0;
2285 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2286 			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2287 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2288 
2289 			offset += bcnt;
2290 			frag_size -= bcnt;
2291 			if (unlikely(put_tx++ == np->last_tx.ex))
2292 				put_tx = np->first_tx.ex;
2293 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2294 				np->put_tx_ctx = np->first_tx_ctx;
2295 		} while (frag_size);
2296 	}
2297 
2298 	/* set last fragment flag  */
2299 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2300 
2301 	/* save skb in this slot's context area */
2302 	prev_tx_ctx->skb = skb;
2303 
2304 	if (skb_is_gso(skb))
2305 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2306 	else
2307 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2308 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2309 
2310 	/* vlan tag */
2311 	if (vlan_tx_tag_present(skb))
2312 		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2313 					vlan_tx_tag_get(skb));
2314 	else
2315 		start_tx->txvlan = 0;
2316 
2317 	spin_lock_irqsave(&np->lock, flags);
2318 
2319 	if (np->tx_limit) {
2320 		/* Limit the number of outstanding tx packets. Set up all fragments,
2321 		 * but do not set the VALID bit on the first descriptor. Save a pointer
2322 		 * to that descriptor and also to the next skb_map element.
2323 		 */
2324 
2325 		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2326 			if (!np->tx_change_owner)
2327 				np->tx_change_owner = start_tx_ctx;
2328 
2329 			/* remove VALID bit */
2330 			tx_flags &= ~NV_TX2_VALID;
2331 			start_tx_ctx->first_tx_desc = start_tx;
2332 			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2333 			np->tx_end_flip = np->put_tx_ctx;
2334 		} else {
2335 			np->tx_pkts_in_progress++;
2336 		}
2337 	}
2338 
2339 	/* set tx flags */
2340 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2341 	np->put_tx.ex = put_tx;
2342 
2343 	spin_unlock_irqrestore(&np->lock, flags);
2344 
2345 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2346 	return NETDEV_TX_OK;
2347 }
2348 
2349 static inline void nv_tx_flip_ownership(struct net_device *dev)
2350 {
2351 	struct fe_priv *np = netdev_priv(dev);
2352 
2353 	np->tx_pkts_in_progress--;
2354 	if (np->tx_change_owner) {
2355 		np->tx_change_owner->first_tx_desc->flaglen |=
2356 			cpu_to_le32(NV_TX2_VALID);
2357 		np->tx_pkts_in_progress++;
2358 
2359 		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2360 		if (np->tx_change_owner == np->tx_end_flip)
2361 			np->tx_change_owner = NULL;
2362 
2363 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2364 	}
2365 }
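
/*
 * Sketch of the tx_limit handshake between nv_start_xmit_optimized() and
 * nv_tx_flip_ownership(): once NV_TX_LIMIT_COUNT packets are in flight,
 * further packets are queued with the VALID bit cleared on their first
 * descriptor, so the hardware ignores them.  Each time a packet
 * completes, the function above sets VALID on the oldest deferred packet
 * (np->tx_change_owner), advances the list via next_tx_ctx, and kicks
 * the transmitter, keeping at most NV_TX_LIMIT_COUNT packets outstanding.
 */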
2366 
2367 /*
2368  * nv_tx_done: check for completed packets, release the skbs.
2369  *
2370  * Caller must own np->lock.
2371  */
2372 static int nv_tx_done(struct net_device *dev, int limit)
2373 {
2374 	struct fe_priv *np = netdev_priv(dev);
2375 	u32 flags;
2376 	int tx_work = 0;
2377 	struct ring_desc *orig_get_tx = np->get_tx.orig;
2378 
2379 	while ((np->get_tx.orig != np->put_tx.orig) &&
2380 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2381 	       (tx_work < limit)) {
2382 
2383 		nv_unmap_txskb(np, np->get_tx_ctx);
2384 
2385 		if (np->desc_ver == DESC_VER_1) {
2386 			if (flags & NV_TX_LASTPACKET) {
2387 				if (flags & NV_TX_ERROR) {
2388 					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2389 						nv_legacybackoff_reseed(dev);
2390 				} else {
2391 					dev->stats.tx_packets++;
2392 					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2393 				}
2394 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2395 				np->get_tx_ctx->skb = NULL;
2396 				tx_work++;
2397 			}
2398 		} else {
2399 			if (flags & NV_TX2_LASTPACKET) {
2400 				if (flags & NV_TX2_ERROR) {
2401 					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2402 						nv_legacybackoff_reseed(dev);
2403 				} else {
2404 					dev->stats.tx_packets++;
2405 					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2406 				}
2407 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2408 				np->get_tx_ctx->skb = NULL;
2409 				tx_work++;
2410 			}
2411 		}
2412 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2413 			np->get_tx.orig = np->first_tx.orig;
2414 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2415 			np->get_tx_ctx = np->first_tx_ctx;
2416 	}
2417 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2418 		np->tx_stop = 0;
2419 		netif_wake_queue(dev);
2420 	}
2421 	return tx_work;
2422 }
2423 
2424 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2425 {
2426 	struct fe_priv *np = netdev_priv(dev);
2427 	u32 flags;
2428 	int tx_work = 0;
2429 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2430 
2431 	while ((np->get_tx.ex != np->put_tx.ex) &&
2432 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2433 	       (tx_work < limit)) {
2434 
2435 		nv_unmap_txskb(np, np->get_tx_ctx);
2436 
2437 		if (flags & NV_TX2_LASTPACKET) {
2438 			if (flags & NV_TX2_ERROR) {
2439 				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2440 					if (np->driver_data & DEV_HAS_GEAR_MODE)
2441 						nv_gear_backoff_reseed(dev);
2442 					else
2443 						nv_legacybackoff_reseed(dev);
2444 				}
2445 			} else {
2446 				dev->stats.tx_packets++;
2447 				dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2448 			}
2449 
2450 			dev_kfree_skb_any(np->get_tx_ctx->skb);
2451 			np->get_tx_ctx->skb = NULL;
2452 			tx_work++;
2453 
2454 			if (np->tx_limit)
2455 				nv_tx_flip_ownership(dev);
2456 		}
2457 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2458 			np->get_tx.ex = np->first_tx.ex;
2459 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2460 			np->get_tx_ctx = np->first_tx_ctx;
2461 	}
2462 	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2463 		np->tx_stop = 0;
2464 		netif_wake_queue(dev);
2465 	}
2466 	return tx_work;
2467 }
2468 
2469 /*
2470  * nv_tx_timeout: dev->tx_timeout function
2471  * Called with netif_tx_lock held.
2472  */
2473 static void nv_tx_timeout(struct net_device *dev)
2474 {
2475 	struct fe_priv *np = netdev_priv(dev);
2476 	u8 __iomem *base = get_hwbase(dev);
2477 	u32 status;
2478 	union ring_type put_tx;
2479 	int saved_tx_limit;
2480 	int i;
2481 
2482 	if (np->msi_flags & NV_MSI_X_ENABLED)
2483 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2484 	else
2485 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2486 
2487 	netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
2488 
2489 	netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2490 	netdev_info(dev, "Dumping tx registers\n");
2491 	for (i = 0; i <= np->register_size; i += 32) {
2492 		netdev_info(dev,
2493 			    "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2494 			    i,
2495 			    readl(base + i + 0), readl(base + i + 4),
2496 			    readl(base + i + 8), readl(base + i + 12),
2497 			    readl(base + i + 16), readl(base + i + 20),
2498 			    readl(base + i + 24), readl(base + i + 28));
2499 	}
2500 	netdev_info(dev, "Dumping tx ring\n");
2501 	for (i = 0; i < np->tx_ring_size; i += 4) {
2502 		if (!nv_optimized(np)) {
2503 			netdev_info(dev,
2504 				    "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2505 				    i,
2506 				    le32_to_cpu(np->tx_ring.orig[i].buf),
2507 				    le32_to_cpu(np->tx_ring.orig[i].flaglen),
2508 				    le32_to_cpu(np->tx_ring.orig[i+1].buf),
2509 				    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2510 				    le32_to_cpu(np->tx_ring.orig[i+2].buf),
2511 				    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2512 				    le32_to_cpu(np->tx_ring.orig[i+3].buf),
2513 				    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2514 		} else {
2515 			netdev_info(dev,
2516 				    "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2517 				    i,
2518 				    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2519 				    le32_to_cpu(np->tx_ring.ex[i].buflow),
2520 				    le32_to_cpu(np->tx_ring.ex[i].flaglen),
2521 				    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2522 				    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2523 				    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2524 				    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2525 				    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2526 				    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2527 				    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2528 				    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2529 				    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2530 		}
2531 	}
2532 
2533 	spin_lock_irq(&np->lock);
2534 
2535 	/* 1) stop tx engine */
2536 	nv_stop_tx(dev);
2537 
2538 	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2539 	saved_tx_limit = np->tx_limit;
2540 	np->tx_limit = 0; /* prevent giving HW any limited pkts */
2541 	np->tx_stop = 0;  /* prevent waking tx queue */
2542 	if (!nv_optimized(np))
2543 		nv_tx_done(dev, np->tx_ring_size);
2544 	else
2545 		nv_tx_done_optimized(dev, np->tx_ring_size);
2546 
2547 	/* save current HW position */
2548 	if (np->tx_change_owner)
2549 		put_tx.ex = np->tx_change_owner->first_tx_desc;
2550 	else
2551 		put_tx = np->put_tx;
2552 
2553 	/* 3) clear all tx state */
2554 	nv_drain_tx(dev);
2555 	nv_init_tx(dev);
2556 
2557 	/* 4) restore state to current HW position */
2558 	np->get_tx = np->put_tx = put_tx;
2559 	np->tx_limit = saved_tx_limit;
2560 
2561 	/* 5) restart tx engine */
2562 	nv_start_tx(dev);
2563 	netif_wake_queue(dev);
2564 	spin_unlock_irq(&np->lock);
2565 }
2566 
2567 /*
2568  * Called when the nic notices a mismatch between the actual data len on the
2569  * wire and the len indicated in the 802 header
2570  */
2571 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2572 {
2573 	int hdrlen;	/* length of the 802 header */
2574 	int protolen;	/* length as stored in the proto field */
2575 
2576 	/* 1) calculate len according to header */
2577 	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2578 		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2579 		hdrlen = VLAN_HLEN;
2580 	} else {
2581 		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2582 		hdrlen = ETH_HLEN;
2583 	}
2584 	if (protolen > ETH_DATA_LEN)
2585 		return datalen; /* Value in proto field not a len, no checks possible */
2586 
2587 	protolen += hdrlen;
2588 	/* consistency checks: */
2589 	if (datalen > ETH_ZLEN) {
2590 		if (datalen >= protolen) {
2591 			/* more data on the wire than in the 802 header; trim off
2592 			 * the additional data.
2593 			 */
2594 			return protolen;
2595 		} else {
2596 			/* less data on wire than mentioned in header.
2597 			 * Discard the packet.
2598 			 */
2599 			return -1;
2600 		}
2601 	} else {
2602 		/* short packet. Accept only if 802 values are also short */
2603 		if (protolen > ETH_ZLEN) {
2604 			return -1;
2605 		}
2606 		return datalen;
2607 	}
2608 }
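
/*
 * Worked example for nv_getlen(), assuming a plain (non-VLAN) frame:
 * a packet arrives with datalen = 70 and an 802.3 length field of 50.
 * protolen becomes 50 + ETH_HLEN = 64; since datalen >= protolen, the
 * 6 trailing padding bytes are trimmed and 64 is returned.  Had the
 * length field claimed 60 (protolen 74 > datalen 70), the frame would
 * be inconsistent and -1 (discard) returned instead.
 */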
2609 
2610 static int nv_rx_process(struct net_device *dev, int limit)
2611 {
2612 	struct fe_priv *np = netdev_priv(dev);
2613 	u32 flags;
2614 	int rx_work = 0;
2615 	struct sk_buff *skb;
2616 	int len;
2617 
2618 	while ((np->get_rx.orig != np->put_rx.orig) &&
2619 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2620 		(rx_work < limit)) {
2621 
2622 		/*
2623 		 * the packet is for us - immediately tear down the pci mapping.
2624 		 * TODO: check if a prefetch of the first cacheline improves
2625 		 * the performance.
2626 		 */
2627 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2628 				np->get_rx_ctx->dma_len,
2629 				PCI_DMA_FROMDEVICE);
2630 		skb = np->get_rx_ctx->skb;
2631 		np->get_rx_ctx->skb = NULL;
2632 
2633 		/* look at what we actually got: */
2634 		if (np->desc_ver == DESC_VER_1) {
2635 			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2636 				len = flags & LEN_MASK_V1;
2637 				if (unlikely(flags & NV_RX_ERROR)) {
2638 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2639 						len = nv_getlen(dev, skb->data, len);
2640 						if (len < 0) {
2641 							dev_kfree_skb(skb);
2642 							goto next_pkt;
2643 						}
2644 					}
2645 					/* framing errors are soft errors */
2646 					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2647 						if (flags & NV_RX_SUBSTRACT1)
2648 							len--;
2649 					}
2650 					/* the rest are hard errors */
2651 					else {
2652 						if (flags & NV_RX_MISSEDFRAME)
2653 							dev->stats.rx_missed_errors++;
2654 						dev_kfree_skb(skb);
2655 						goto next_pkt;
2656 					}
2657 				}
2658 			} else {
2659 				dev_kfree_skb(skb);
2660 				goto next_pkt;
2661 			}
2662 		} else {
2663 			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2664 				len = flags & LEN_MASK_V2;
2665 				if (unlikely(flags & NV_RX2_ERROR)) {
2666 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2667 						len = nv_getlen(dev, skb->data, len);
2668 						if (len < 0) {
2669 							dev_kfree_skb(skb);
2670 							goto next_pkt;
2671 						}
2672 					}
2673 					/* framing errors are soft errors */
2674 					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2675 						if (flags & NV_RX2_SUBSTRACT1)
2676 							len--;
2677 					}
2678 					/* the rest are hard errors */
2679 					else {
2680 						dev_kfree_skb(skb);
2681 						goto next_pkt;
2682 					}
2683 				}
2684 				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2685 				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2686 					skb->ip_summed = CHECKSUM_UNNECESSARY;
2687 			} else {
2688 				dev_kfree_skb(skb);
2689 				goto next_pkt;
2690 			}
2691 		}
2692 		/* got a valid packet - forward it to the network core */
2693 		skb_put(skb, len);
2694 		skb->protocol = eth_type_trans(skb, dev);
2695 		napi_gro_receive(&np->napi, skb);
2696 		dev->stats.rx_packets++;
2697 		dev->stats.rx_bytes += len;
2698 next_pkt:
2699 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2700 			np->get_rx.orig = np->first_rx.orig;
2701 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2702 			np->get_rx_ctx = np->first_rx_ctx;
2703 
2704 		rx_work++;
2705 	}
2706 
2707 	return rx_work;
2708 }
2709 
2710 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2711 {
2712 	struct fe_priv *np = netdev_priv(dev);
2713 	u32 flags;
2714 	u32 vlanflags = 0;
2715 	int rx_work = 0;
2716 	struct sk_buff *skb;
2717 	int len;
2718 
2719 	while ((np->get_rx.ex != np->put_rx.ex) &&
2720 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2721 	      (rx_work < limit)) {
2722 
2723 		/*
2724 		 * the packet is for us - immediately tear down the pci mapping.
2725 		 * TODO: check if a prefetch of the first cacheline improves
2726 		 * the performance.
2727 		 */
2728 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2729 				np->get_rx_ctx->dma_len,
2730 				PCI_DMA_FROMDEVICE);
2731 		skb = np->get_rx_ctx->skb;
2732 		np->get_rx_ctx->skb = NULL;
2733 
2734 		/* look at what we actually got: */
2735 		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2736 			len = flags & LEN_MASK_V2;
2737 			if (unlikely(flags & NV_RX2_ERROR)) {
2738 				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2739 					len = nv_getlen(dev, skb->data, len);
2740 					if (len < 0) {
2741 						dev_kfree_skb(skb);
2742 						goto next_pkt;
2743 					}
2744 				}
2745 				/* framing errors are soft errors */
2746 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2747 					if (flags & NV_RX2_SUBSTRACT1)
2748 						len--;
2749 				}
2750 				/* the rest are hard errors */
2751 				else {
2752 					dev_kfree_skb(skb);
2753 					goto next_pkt;
2754 				}
2755 			}
2756 
2757 			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2758 			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2759 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2760 
2761 			/* got a valid packet - forward it to the network core */
2762 			skb_put(skb, len);
2763 			skb->protocol = eth_type_trans(skb, dev);
2764 			prefetch(skb->data);
2765 
2766 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2767 
2768 			/*
2769 			 * There is a need to check for NETIF_F_HW_VLAN_RX here.
2770 			 * Even if vlan rx accel is disabled,
2771 			 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
2772 			 */
2773 			if (dev->features & NETIF_F_HW_VLAN_RX &&
2774 			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2775 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2776 
2777 				__vlan_hwaccel_put_tag(skb, vid);
2778 			}
2779 			napi_gro_receive(&np->napi, skb);
2780 			dev->stats.rx_packets++;
2781 			dev->stats.rx_bytes += len;
2782 		} else {
2783 			dev_kfree_skb(skb);
2784 		}
2785 next_pkt:
2786 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2787 			np->get_rx.ex = np->first_rx.ex;
2788 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2789 			np->get_rx_ctx = np->first_rx_ctx;
2790 
2791 		rx_work++;
2792 	}
2793 
2794 	return rx_work;
2795 }
2796 
2797 static void set_bufsize(struct net_device *dev)
2798 {
2799 	struct fe_priv *np = netdev_priv(dev);
2800 
2801 	if (dev->mtu <= ETH_DATA_LEN)
2802 		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2803 	else
2804 		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2805 }
2806 
2807 /*
2808  * nv_change_mtu: dev->change_mtu function
2809  * Called with dev_base_lock held for read.
2810  */
2811 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2812 {
2813 	struct fe_priv *np = netdev_priv(dev);
2814 	int old_mtu;
2815 
2816 	if (new_mtu < 64 || new_mtu > np->pkt_limit)
2817 		return -EINVAL;
2818 
2819 	old_mtu = dev->mtu;
2820 	dev->mtu = new_mtu;
2821 
2822 	/* return early if the buffer sizes will not change */
2823 	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2824 		return 0;
2825 	if (old_mtu == new_mtu)
2826 		return 0;
2827 
2828 	/* synchronized against open : rtnl_lock() held by caller */
2829 	if (netif_running(dev)) {
2830 		u8 __iomem *base = get_hwbase(dev);
2831 		/*
2832 		 * It seems that the nic preloads valid ring entries into an
2833 		 * internal buffer. The procedure for flushing everything is
2834 		 * guessed; there is probably a simpler approach.
2835 		 * Changing the MTU is a rare event, so it shouldn't matter.
2836 		 */
2837 		nv_disable_irq(dev);
2838 		nv_napi_disable(dev);
2839 		netif_tx_lock_bh(dev);
2840 		netif_addr_lock(dev);
2841 		spin_lock(&np->lock);
2842 		/* stop engines */
2843 		nv_stop_rxtx(dev);
2844 		nv_txrx_reset(dev);
2845 		/* drain rx queue */
2846 		nv_drain_rxtx(dev);
2847 		/* reinit driver view of the rx queue */
2848 		set_bufsize(dev);
2849 		if (nv_init_ring(dev)) {
2850 			if (!np->in_shutdown)
2851 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2852 		}
2853 		/* reinit nic view of the rx queue */
2854 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2855 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2856 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2857 			base + NvRegRingSizes);
2858 		pci_push(base);
2859 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, base + NvRegTxRxControl);
2860 		pci_push(base);
2861 
2862 		/* restart rx engine */
2863 		nv_start_rxtx(dev);
2864 		spin_unlock(&np->lock);
2865 		netif_addr_unlock(dev);
2866 		netif_tx_unlock_bh(dev);
2867 		nv_napi_enable(dev);
2868 		nv_enable_irq(dev);
2869 	}
2870 	return 0;
2871 }
2872 
2873 static void nv_copy_mac_to_hw(struct net_device *dev)
2874 {
2875 	u8 __iomem *base = get_hwbase(dev);
2876 	u32 mac[2];
2877 
2878 	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2879 			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2880 	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2881 
2882 	writel(mac[0], base + NvRegMacAddrA);
2883 	writel(mac[1], base + NvRegMacAddrB);
2884 }
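
/*
 * Example: for dev_addr 00:11:22:33:44:55 the packing above yields
 * mac[0] = 0x33221100 and mac[1] = 0x00005544, i.e. the address is
 * written to NvRegMacAddrA/B in little-endian byte order.
 */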
2885 
2886 /*
2887  * nv_set_mac_address: dev->set_mac_address function
2888  * Called with rtnl_lock() held.
2889  */
2890 static int nv_set_mac_address(struct net_device *dev, void *addr)
2891 {
2892 	struct fe_priv *np = netdev_priv(dev);
2893 	struct sockaddr *macaddr = (struct sockaddr *)addr;
2894 
2895 	if (!is_valid_ether_addr(macaddr->sa_data))
2896 		return -EADDRNOTAVAIL;
2897 
2898 	/* synchronized against open : rtnl_lock() held by caller */
2899 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2900 
2901 	if (netif_running(dev)) {
2902 		netif_tx_lock_bh(dev);
2903 		netif_addr_lock(dev);
2904 		spin_lock_irq(&np->lock);
2905 
2906 		/* stop rx engine */
2907 		nv_stop_rx(dev);
2908 
2909 		/* set mac address */
2910 		nv_copy_mac_to_hw(dev);
2911 
2912 		/* restart rx engine */
2913 		nv_start_rx(dev);
2914 		spin_unlock_irq(&np->lock);
2915 		netif_addr_unlock(dev);
2916 		netif_tx_unlock_bh(dev);
2917 	} else {
2918 		nv_copy_mac_to_hw(dev);
2919 	}
2920 	return 0;
2921 }
2922 
2923 /*
2924  * nv_set_multicast: dev->set_multicast function
2925  * Called with netif_tx_lock held.
2926  */
2927 static void nv_set_multicast(struct net_device *dev)
2928 {
2929 	struct fe_priv *np = netdev_priv(dev);
2930 	u8 __iomem *base = get_hwbase(dev);
2931 	u32 addr[2];
2932 	u32 mask[2];
2933 	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2934 
2935 	memset(addr, 0, sizeof(addr));
2936 	memset(mask, 0, sizeof(mask));
2937 
2938 	if (dev->flags & IFF_PROMISC) {
2939 		pff |= NVREG_PFF_PROMISC;
2940 	} else {
2941 		pff |= NVREG_PFF_MYADDR;
2942 
2943 		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
2944 			u32 alwaysOff[2];
2945 			u32 alwaysOn[2];
2946 
2947 			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2948 			if (dev->flags & IFF_ALLMULTI) {
2949 				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2950 			} else {
2951 				struct netdev_hw_addr *ha;
2952 
2953 				netdev_for_each_mc_addr(ha, dev) {
2954 					unsigned char *hw_addr = ha->addr;
2955 					u32 a, b;
2956 
2957 					a = le32_to_cpu(*(__le32 *) hw_addr);
2958 					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
2959 					alwaysOn[0] &= a;
2960 					alwaysOff[0] &= ~a;
2961 					alwaysOn[1] &= b;
2962 					alwaysOff[1] &= ~b;
2963 				}
2964 			}
2965 			addr[0] = alwaysOn[0];
2966 			addr[1] = alwaysOn[1];
2967 			mask[0] = alwaysOn[0] | alwaysOff[0];
2968 			mask[1] = alwaysOn[1] | alwaysOff[1];
2969 		} else {
2970 			mask[0] = NVREG_MCASTMASKA_NONE;
2971 			mask[1] = NVREG_MCASTMASKB_NONE;
2972 		}
2973 	}
2974 	addr[0] |= NVREG_MCASTADDRA_FORCE;
2975 	pff |= NVREG_PFF_ALWAYS;
2976 	spin_lock_irq(&np->lock);
2977 	nv_stop_rx(dev);
2978 	writel(addr[0], base + NvRegMulticastAddrA);
2979 	writel(addr[1], base + NvRegMulticastAddrB);
2980 	writel(mask[0], base + NvRegMulticastMaskA);
2981 	writel(mask[1], base + NvRegMulticastMaskB);
2982 	writel(pff, base + NvRegPacketFilterFlags);
2983 	nv_start_rx(dev);
2984 	spin_unlock_irq(&np->lock);
2985 }
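
/*
 * Worked example for the multicast filter above: with two list entries
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:03, alwaysOn accumulates the AND
 * of the addresses and alwaysOff the AND of their complements, so a bit
 * survives in mask = alwaysOn | alwaysOff only where every address
 * agrees.  The two addresses differ only in bit 1 of the last octet, so
 * that bit is cleared in the mask and the hardware matches either
 * address against addr = alwaysOn.
 */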
2986 
2987 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2988 {
2989 	struct fe_priv *np = netdev_priv(dev);
2990 	u8 __iomem *base = get_hwbase(dev);
2991 
2992 	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2993 
2994 	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2995 		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2996 		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2997 			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2998 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2999 		} else {
3000 			writel(pff, base + NvRegPacketFilterFlags);
3001 		}
3002 	}
3003 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3004 		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3005 		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3006 			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3007 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3008 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3009 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3010 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3011 				/* limit the number of tx pause frames to a default of 8 */
3012 				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3013 			}
3014 			writel(pause_enable,  base + NvRegTxPauseFrame);
3015 			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3016 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3017 		} else {
3018 			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3019 			writel(regmisc, base + NvRegMisc1);
3020 		}
3021 	}
3022 }
3023 
3024 /**
3025  * nv_update_linkspeed - set up the MAC according to the link partner
3026  * @dev: Network device to be configured
3027  *
3028  * The function queries the PHY and checks if there is a link partner.
3029  * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3030  * set to 10 MBit HD.
3031  *
3032  * The function returns 0 if there is no link partner and 1 if there is
3033  * a good link partner.
3034  */
3035 static int nv_update_linkspeed(struct net_device *dev)
3036 {
3037 	struct fe_priv *np = netdev_priv(dev);
3038 	u8 __iomem *base = get_hwbase(dev);
3039 	int adv = 0;
3040 	int lpa = 0;
3041 	int adv_lpa, adv_pause, lpa_pause;
3042 	int newls = np->linkspeed;
3043 	int newdup = np->duplex;
3044 	int mii_status;
3045 	int retval = 0;
3046 	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3047 	u32 txrxFlags = 0;
3048 	u32 phy_exp;
3049 
3050 	/* BMSR_LSTATUS is latched, so read it twice:
3051 	 * we want the current value.
3052 	 */
3053 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3054 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3055 
3056 	if (!(mii_status & BMSR_LSTATUS)) {
3057 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3058 		newdup = 0;
3059 		retval = 0;
3060 		goto set_speed;
3061 	}
3062 
3063 	if (np->autoneg == 0) {
3064 		if (np->fixed_mode & LPA_100FULL) {
3065 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3066 			newdup = 1;
3067 		} else if (np->fixed_mode & LPA_100HALF) {
3068 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3069 			newdup = 0;
3070 		} else if (np->fixed_mode & LPA_10FULL) {
3071 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3072 			newdup = 1;
3073 		} else {
3074 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3075 			newdup = 0;
3076 		}
3077 		retval = 1;
3078 		goto set_speed;
3079 	}
3080 	/* check auto negotiation is complete */
3081 	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3082 		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3083 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3084 		newdup = 0;
3085 		retval = 0;
3086 		goto set_speed;
3087 	}
3088 
3089 	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3090 	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3091 
3092 	retval = 1;
3093 	if (np->gigabit == PHY_GIGABIT) {
3094 		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3095 		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3096 
3097 		if ((control_1000 & ADVERTISE_1000FULL) &&
3098 			(status_1000 & LPA_1000FULL)) {
3099 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3100 			newdup = 1;
3101 			goto set_speed;
3102 		}
3103 	}
3104 
3105 	/* FIXME: handle parallel detection properly */
3106 	adv_lpa = lpa & adv;
3107 	if (adv_lpa & LPA_100FULL) {
3108 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3109 		newdup = 1;
3110 	} else if (adv_lpa & LPA_100HALF) {
3111 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3112 		newdup = 0;
3113 	} else if (adv_lpa & LPA_10FULL) {
3114 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3115 		newdup = 1;
3116 	} else if (adv_lpa & LPA_10HALF) {
3117 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3118 		newdup = 0;
3119 	} else {
3120 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3121 		newdup = 0;
3122 	}
3123 
3124 set_speed:
3125 	if (np->duplex == newdup && np->linkspeed == newls)
3126 		return retval;
3127 
3128 	np->duplex = newdup;
3129 	np->linkspeed = newls;
3130 
3131 	/* The transmitter and receiver must be restarted for a safe update */
3132 	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3133 		txrxFlags |= NV_RESTART_TX;
3134 		nv_stop_tx(dev);
3135 	}
3136 	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3137 		txrxFlags |= NV_RESTART_RX;
3138 		nv_stop_rx(dev);
3139 	}
3140 
3141 	if (np->gigabit == PHY_GIGABIT) {
3142 		phyreg = readl(base + NvRegSlotTime);
3143 		phyreg &= ~(0x3FF00);
3144 		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3145 		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3146 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3147 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3148 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3149 		writel(phyreg, base + NvRegSlotTime);
3150 	}
3151 
3152 	phyreg = readl(base + NvRegPhyInterface);
3153 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3154 	if (np->duplex == 0)
3155 		phyreg |= PHY_HALF;
3156 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3157 		phyreg |= PHY_100;
3158 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3159 		phyreg |= PHY_1000;
3160 	writel(phyreg, base + NvRegPhyInterface);
3161 
3162 	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3163 	if (phyreg & PHY_RGMII) {
3164 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3165 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3166 		} else {
3167 			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3168 				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3169 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3170 				else
3171 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3172 			} else {
3173 				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3174 			}
3175 		}
3176 	} else {
3177 		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3178 			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3179 		else
3180 			txreg = NVREG_TX_DEFERRAL_DEFAULT;
3181 	}
3182 	writel(txreg, base + NvRegTxDeferral);
3183 
3184 	if (np->desc_ver == DESC_VER_1) {
3185 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3186 	} else {
3187 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3188 			txreg = NVREG_TX_WM_DESC2_3_1000;
3189 		else
3190 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3191 	}
3192 	writel(txreg, base + NvRegTxWatermark);
3193 
3194 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3195 		base + NvRegMisc1);
3196 	pci_push(base);
3197 	writel(np->linkspeed, base + NvRegLinkSpeed);
3198 	pci_push(base);
3199 
3200 	pause_flags = 0;
3201 	/* setup pause frame */
3202 	if (np->duplex != 0) {
3203 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3204 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3205 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3206 
3207 			switch (adv_pause) {
3208 			case ADVERTISE_PAUSE_CAP:
3209 				if (lpa_pause & LPA_PAUSE_CAP) {
3210 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3211 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3212 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3213 				}
3214 				break;
3215 			case ADVERTISE_PAUSE_ASYM:
3216 				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3217 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3218 				break;
3219 			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3220 				if (lpa_pause & LPA_PAUSE_CAP) {
3221 					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3222 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3223 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3224 				}
3225 				if (lpa_pause == LPA_PAUSE_ASYM)
3226 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3227 				break;
3228 			}
3229 		} else {
3230 			pause_flags = np->pause_flags;
3231 		}
3232 	}
3233 	nv_update_pause(dev, pause_flags);
3234 
3235 	if (txrxFlags & NV_RESTART_TX)
3236 		nv_start_tx(dev);
3237 	if (txrxFlags & NV_RESTART_RX)
3238 		nv_start_rx(dev);
3239 
3240 	return retval;
3241 }
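
/*
 * The pause switch above implements the usual 802.3 flow-control
 * resolution: if we advertise symmetric pause only, rx (and, if
 * requested, tx) pause is enabled when the partner also advertises
 * symmetric pause; if we advertise asymmetric pause only, tx pause is
 * enabled when the partner advertises both; and if we advertise both,
 * rx pause follows the partner's symmetric bit, while a partner
 * advertising asymmetric pause only results in rx pause alone.
 */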
3242 
3243 static void nv_linkchange(struct net_device *dev)
3244 {
3245 	if (nv_update_linkspeed(dev)) {
3246 		if (!netif_carrier_ok(dev)) {
3247 			netif_carrier_on(dev);
3248 			netdev_info(dev, "link up\n");
3249 			nv_txrx_gate(dev, false);
3250 			nv_start_rx(dev);
3251 		}
3252 	} else {
3253 		if (netif_carrier_ok(dev)) {
3254 			netif_carrier_off(dev);
3255 			netdev_info(dev, "link down\n");
3256 			nv_txrx_gate(dev, true);
3257 			nv_stop_rx(dev);
3258 		}
3259 	}
3260 }
3261 
3262 static void nv_link_irq(struct net_device *dev)
3263 {
3264 	u8 __iomem *base = get_hwbase(dev);
3265 	u32 miistat;
3266 
3267 	miistat = readl(base + NvRegMIIStatus);
3268 	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3269 
3270 	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3271 		nv_linkchange(dev);
3272 }
3273 
3274 static void nv_msi_workaround(struct fe_priv *np)
3275 {
3276 
3277 	/* Need to toggle the msi irq mask within the ethernet device;
3278 	 * otherwise, future interrupts will not be detected.
3279 	 */
3280 	if (np->msi_flags & NV_MSI_ENABLED) {
3281 		u8 __iomem *base = np->base;
3282 
3283 		writel(0, base + NvRegMSIIrqMask);
3284 		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3285 	}
3286 }
3287 
3288 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3289 {
3290 	struct fe_priv *np = netdev_priv(dev);
3291 
3292 	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3293 		if (total_work > NV_DYNAMIC_THRESHOLD) {
3294 			/* transition to poll based interrupts */
3295 			np->quiet_count = 0;
3296 			if (np->irqmask != NVREG_IRQMASK_CPU) {
3297 				np->irqmask = NVREG_IRQMASK_CPU;
3298 				return 1;
3299 			}
3300 		} else {
3301 			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3302 				np->quiet_count++;
3303 			} else {
3304 				/* reached a period of low activity, switch
3305 				   to per tx/rx packet interrupts */
3306 				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3307 					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3308 					return 1;
3309 				}
3310 			}
3311 		}
3312 	}
3313 	return 0;
3314 }
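
/*
 * In NV_OPTIMIZATION_MODE_DYNAMIC the function above provides simple
 * hysteresis: one busy poll (total_work > NV_DYNAMIC_THRESHOLD)
 * switches to the moderated NVREG_IRQMASK_CPU mask immediately, while
 * switching back to the per-packet NVREG_IRQMASK_THROUGHPUT mask
 * requires NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet polls.
 */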
3315 
3316 static irqreturn_t nv_nic_irq(int foo, void *data)
3317 {
3318 	struct net_device *dev = (struct net_device *) data;
3319 	struct fe_priv *np = netdev_priv(dev);
3320 	u8 __iomem *base = get_hwbase(dev);
3321 
3322 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3323 		np->events = readl(base + NvRegIrqStatus);
3324 		writel(np->events, base + NvRegIrqStatus);
3325 	} else {
3326 		np->events = readl(base + NvRegMSIXIrqStatus);
3327 		writel(np->events, base + NvRegMSIXIrqStatus);
3328 	}
3329 	if (!(np->events & np->irqmask))
3330 		return IRQ_NONE;
3331 
3332 	nv_msi_workaround(np);
3333 
3334 	if (napi_schedule_prep(&np->napi)) {
3335 		/*
3336 		 * Disable further irqs (MSI-X is not enabled with NAPI)
3337 		 */
3338 		writel(0, base + NvRegIrqMask);
3339 		__napi_schedule(&np->napi);
3340 	}
3341 
3342 	return IRQ_HANDLED;
3343 }
3344 
3345 /*
3346  * All _optimized functions are used to help increase performance
3347  * (reduce CPU and increase throughput). They use descriptor version 3,
3348  * compiler directives, and reduce memory accesses.
3349  */
3350 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3351 {
3352 	struct net_device *dev = (struct net_device *) data;
3353 	struct fe_priv *np = netdev_priv(dev);
3354 	u8 __iomem *base = get_hwbase(dev);
3355 
3356 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3357 		np->events = readl(base + NvRegIrqStatus);
3358 		writel(np->events, base + NvRegIrqStatus);
3359 	} else {
3360 		np->events = readl(base + NvRegMSIXIrqStatus);
3361 		writel(np->events, base + NvRegMSIXIrqStatus);
3362 	}
3363 	if (!(np->events & np->irqmask))
3364 		return IRQ_NONE;
3365 
3366 	nv_msi_workaround(np);
3367 
3368 	if (napi_schedule_prep(&np->napi)) {
3369 		/*
3370 		 * Disable further irqs (MSI-X is not enabled with NAPI)
3371 		 */
3372 		writel(0, base + NvRegIrqMask);
3373 		__napi_schedule(&np->napi);
3374 	}
3375 
3376 	return IRQ_HANDLED;
3377 }
3378 
3379 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3380 {
3381 	struct net_device *dev = (struct net_device *) data;
3382 	struct fe_priv *np = netdev_priv(dev);
3383 	u8 __iomem *base = get_hwbase(dev);
3384 	u32 events;
3385 	int i;
3386 	unsigned long flags;
3387 
3388 	for (i = 0;; i++) {
3389 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3390 		writel(events, base + NvRegMSIXIrqStatus);
3391 		netdev_dbg(dev, "tx irq events: %08x\n", events);
3392 		if (!(events & np->irqmask))
3393 			break;
3394 
3395 		spin_lock_irqsave(&np->lock, flags);
3396 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3397 		spin_unlock_irqrestore(&np->lock, flags);
3398 
3399 		if (unlikely(i > max_interrupt_work)) {
3400 			spin_lock_irqsave(&np->lock, flags);
3401 			/* disable interrupts on the nic */
3402 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3403 			pci_push(base);
3404 
3405 			if (!np->in_shutdown) {
3406 				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3407 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3408 			}
3409 			spin_unlock_irqrestore(&np->lock, flags);
3410 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3411 				   __func__, i);
3412 			break;
3413 		}
3414 
3415 	}
3416 
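	/* IRQ_RETVAL(i) evaluates to IRQ_HANDLED when at least one loop
	 * iteration serviced events (i > 0) and to IRQ_NONE otherwise.
	 */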
3417 	return IRQ_RETVAL(i);
3418 }
3419 
3420 static int nv_napi_poll(struct napi_struct *napi, int budget)
3421 {
3422 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3423 	struct net_device *dev = np->dev;
3424 	u8 __iomem *base = get_hwbase(dev);
3425 	unsigned long flags;
3426 	int retcode;
3427 	int rx_count, tx_work = 0, rx_work = 0;
3428 
3429 	do {
3430 		if (!nv_optimized(np)) {
3431 			spin_lock_irqsave(&np->lock, flags);
3432 			tx_work += nv_tx_done(dev, np->tx_ring_size);
3433 			spin_unlock_irqrestore(&np->lock, flags);
3434 
3435 			rx_count = nv_rx_process(dev, budget - rx_work);
3436 			retcode = nv_alloc_rx(dev);
3437 		} else {
3438 			spin_lock_irqsave(&np->lock, flags);
3439 			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3440 			spin_unlock_irqrestore(&np->lock, flags);
3441 
3442 			rx_count = nv_rx_process_optimized(dev,
3443 			    budget - rx_work);
3444 			retcode = nv_alloc_rx_optimized(dev);
3445 		}
3446 	} while (retcode == 0 &&
3447 		 rx_count > 0 && (rx_work += rx_count) < budget);
3448 
3449 	if (retcode) {
3450 		spin_lock_irqsave(&np->lock, flags);
3451 		if (!np->in_shutdown)
3452 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3453 		spin_unlock_irqrestore(&np->lock, flags);
3454 	}
3455 
3456 	nv_change_interrupt_mode(dev, tx_work + rx_work);
3457 
3458 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
3459 		spin_lock_irqsave(&np->lock, flags);
3460 		nv_link_irq(dev);
3461 		spin_unlock_irqrestore(&np->lock, flags);
3462 	}
3463 	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3464 		spin_lock_irqsave(&np->lock, flags);
3465 		nv_linkchange(dev);
3466 		spin_unlock_irqrestore(&np->lock, flags);
3467 		np->link_timeout = jiffies + LINK_TIMEOUT;
3468 	}
3469 	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3470 		spin_lock_irqsave(&np->lock, flags);
3471 		if (!np->in_shutdown) {
3472 			np->nic_poll_irq = np->irqmask;
3473 			np->recover_error = 1;
3474 			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3475 		}
3476 		spin_unlock_irqrestore(&np->lock, flags);
3477 		napi_complete(napi);
3478 		return rx_work;
3479 	}
3480 
3481 	if (rx_work < budget) {
3482 		/* re-enable interrupts
3483 		   (msix not enabled in napi) */
3484 		napi_complete(napi);
3485 
3486 		writel(np->irqmask, base + NvRegIrqMask);
3487 	}
3488 	return rx_work;
3489 }
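
/*
 * Note on the NAPI contract honored above: returning less than budget
 * signals that polling is finished, so napi_complete() runs and the
 * device irq mask is restored; returning the full budget keeps the
 * device on the poll list with its interrupts still masked.
 */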
3490 
3491 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3492 {
3493 	struct net_device *dev = (struct net_device *) data;
3494 	struct fe_priv *np = netdev_priv(dev);
3495 	u8 __iomem *base = get_hwbase(dev);
3496 	u32 events;
3497 	int i;
3498 	unsigned long flags;
3499 
3500 	for (i = 0;; i++) {
3501 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3502 		writel(events, base + NvRegMSIXIrqStatus);
3503 		netdev_dbg(dev, "rx irq events: %08x\n", events);
3504 		if (!(events & np->irqmask))
3505 			break;
3506 
3507 		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3508 			if (unlikely(nv_alloc_rx_optimized(dev))) {
3509 				spin_lock_irqsave(&np->lock, flags);
3510 				if (!np->in_shutdown)
3511 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3512 				spin_unlock_irqrestore(&np->lock, flags);
3513 			}
3514 		}
3515 
3516 		if (unlikely(i > max_interrupt_work)) {
3517 			spin_lock_irqsave(&np->lock, flags);
3518 			/* disable interrupts on the nic */
3519 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3520 			pci_push(base);
3521 
3522 			if (!np->in_shutdown) {
3523 				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3524 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3525 			}
3526 			spin_unlock_irqrestore(&np->lock, flags);
3527 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3528 				   __func__, i);
3529 			break;
3530 		}
3531 	}
3532 
3533 	return IRQ_RETVAL(i);
3534 }
3535 
3536 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3537 {
3538 	struct net_device *dev = (struct net_device *) data;
3539 	struct fe_priv *np = netdev_priv(dev);
3540 	u8 __iomem *base = get_hwbase(dev);
3541 	u32 events;
3542 	int i;
3543 	unsigned long flags;
3544 
3545 	for (i = 0;; i++) {
3546 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3547 		writel(events, base + NvRegMSIXIrqStatus);
3548 		netdev_dbg(dev, "irq events: %08x\n", events);
3549 		if (!(events & np->irqmask))
3550 			break;
3551 
3552 		/* check tx in case we reached max loop limit in tx isr */
3553 		spin_lock_irqsave(&np->lock, flags);
3554 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3555 		spin_unlock_irqrestore(&np->lock, flags);
3556 
3557 		if (events & NVREG_IRQ_LINK) {
3558 			spin_lock_irqsave(&np->lock, flags);
3559 			nv_link_irq(dev);
3560 			spin_unlock_irqrestore(&np->lock, flags);
3561 		}
3562 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3563 			spin_lock_irqsave(&np->lock, flags);
3564 			nv_linkchange(dev);
3565 			spin_unlock_irqrestore(&np->lock, flags);
3566 			np->link_timeout = jiffies + LINK_TIMEOUT;
3567 		}
3568 		if (events & NVREG_IRQ_RECOVER_ERROR) {
3569 			spin_lock_irq(&np->lock);
3570 			/* disable interrupts on the nic */
3571 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3572 			pci_push(base);
3573 
3574 			if (!np->in_shutdown) {
3575 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3576 				np->recover_error = 1;
3577 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3578 			}
3579 			spin_unlock_irq(&np->lock);
3580 			break;
3581 		}
3582 		if (unlikely(i > max_interrupt_work)) {
3583 			spin_lock_irqsave(&np->lock, flags);
3584 			/* disable interrupts on the nic */
3585 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3586 			pci_push(base);
3587 
3588 			if (!np->in_shutdown) {
3589 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3590 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3591 			}
3592 			spin_unlock_irqrestore(&np->lock, flags);
3593 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3594 				   __func__, i);
3595 			break;
3596 		}
3597 
3598 	}
3599 
3600 	return IRQ_RETVAL(i);
3601 }
3602 
3603 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3604 {
3605 	struct net_device *dev = (struct net_device *) data;
3606 	struct fe_priv *np = netdev_priv(dev);
3607 	u8 __iomem *base = get_hwbase(dev);
3608 	u32 events;
3609 
3610 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3611 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3612 		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3613 	} else {
3614 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3615 		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3616 	}
3617 	pci_push(base);
3618 	if (!(events & NVREG_IRQ_TIMER))
3619 		return IRQ_RETVAL(0);
3620 
3621 	nv_msi_workaround(np);
3622 
3623 	spin_lock(&np->lock);
3624 	np->intr_test = 1;
3625 	spin_unlock(&np->lock);
3626 
3627 	return IRQ_RETVAL(1);
3628 }
3629 
3630 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3631 {
3632 	u8 __iomem *base = get_hwbase(dev);
3633 	int i;
3634 	u32 msixmap = 0;
3635 
3636 	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3637 	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3638 	 * the remaining 8 interrupts.
3639 	 */
3640 	for (i = 0; i < 8; i++) {
3641 		if ((irqmask >> i) & 0x1)
3642 			msixmap |= vector << (i << 2);
3643 	}
3644 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3645 
3646 	msixmap = 0;
3647 	for (i = 0; i < 8; i++) {
3648 		if ((irqmask >> (i + 8)) & 0x1)
3649 			msixmap |= vector << (i << 2);
3650 	}
3651 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3652 }
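
/*
 * Worked example with hypothetical values: steering vector 1 to an
 * irqmask of 0x0007 sets one 4-bit nibble per interrupt bit in
 * MSIXMap0:
 *
 *	msixmap = (1 << 0) | (1 << 4) | (1 << 8) = 0x111
 *
 * i.e. interrupt bits 0..2 are all routed to MSI-X vector 1.
 */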
3653 
3654 static int nv_request_irq(struct net_device *dev, int intr_test)
3655 {
3656 	struct fe_priv *np = get_nvpriv(dev);
3657 	u8 __iomem *base = get_hwbase(dev);
3658 	int ret = 1;
3659 	int i;
3660 	irqreturn_t (*handler)(int foo, void *data);
3661 
3662 	if (intr_test) {
3663 		handler = nv_nic_irq_test;
3664 	} else {
3665 		if (nv_optimized(np))
3666 			handler = nv_nic_irq_optimized;
3667 		else
3668 			handler = nv_nic_irq;
3669 	}
3670 
3671 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
3672 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3673 			np->msi_x_entry[i].entry = i;
3674 		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3675 		if (ret == 0) {
3676 			np->msi_flags |= NV_MSI_X_ENABLED;
3677 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3678 				/* Request irq for rx handling */
3679 				sprintf(np->name_rx, "%s-rx", dev->name);
3680 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3681 						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3682 					netdev_info(dev,
3683 						    "request_irq failed for rx %d\n",
3684 						    ret);
3685 					pci_disable_msix(np->pci_dev);
3686 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3687 					goto out_err;
3688 				}
3689 				/* Request irq for tx handling */
3690 				sprintf(np->name_tx, "%s-tx", dev->name);
3691 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3692 						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3693 					netdev_info(dev,
3694 						    "request_irq failed for tx %d\n",
3695 						    ret);
3696 					pci_disable_msix(np->pci_dev);
3697 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3698 					goto out_free_rx;
3699 				}
3700 				/* Request irq for link and timer handling */
3701 				sprintf(np->name_other, "%s-other", dev->name);
3702 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3703 						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3704 					netdev_info(dev,
3705 						    "request_irq failed for link %d\n",
3706 						    ret);
3707 					pci_disable_msix(np->pci_dev);
3708 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3709 					goto out_free_tx;
3710 				}
3711 				/* map interrupts to their respective vector */
3712 				writel(0, base + NvRegMSIXMap0);
3713 				writel(0, base + NvRegMSIXMap1);
3714 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3715 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3716 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3717 			} else {
3718 				/* Request irq for all interrupts */
3719 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3720 					netdev_info(dev,
3721 						    "request_irq failed %d\n",
3722 						    ret);
3723 					pci_disable_msix(np->pci_dev);
3724 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3725 					goto out_err;
3726 				}
3727 
3728 				/* map interrupts to vector 0 */
3729 				writel(0, base + NvRegMSIXMap0);
3730 				writel(0, base + NvRegMSIXMap1);
3731 			}
3732 		}
3733 	}
3734 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3735 		ret = pci_enable_msi(np->pci_dev);
3736 		if (ret == 0) {
3737 			np->msi_flags |= NV_MSI_ENABLED;
3738 			dev->irq = np->pci_dev->irq;
3739 			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3740 				netdev_info(dev, "request_irq failed %d\n",
3741 					    ret);
3742 				pci_disable_msi(np->pci_dev);
3743 				np->msi_flags &= ~NV_MSI_ENABLED;
3744 				dev->irq = np->pci_dev->irq;
3745 				goto out_err;
3746 			}
3747 
3748 			/* map interrupts to vector 0 */
3749 			writel(0, base + NvRegMSIMap0);
3750 			writel(0, base + NvRegMSIMap1);
3751 			/* enable msi vector 0 */
3752 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3753 		}
3754 	}
3755 	if (ret != 0) {
3756 		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3757 			goto out_err;
3758 
3759 	}
3760 
3761 	return 0;
3762 out_free_tx:
3763 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3764 out_free_rx:
3765 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3766 out_err:
3767 	return 1;
3768 }
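
/*
 * Summary of the fallback ladder above: MSI-X with separate rx/tx/other
 * vectors (throughput mode only), then MSI-X with one shared vector,
 * then MSI, and finally the legacy INTx line. A lower mechanism is only
 * tried when enabling the higher one fails; if request_irq() fails on a
 * mechanism that was already enabled, the function disables it and
 * gives up instead of falling further.
 */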
3769 
3770 static void nv_free_irq(struct net_device *dev)
3771 {
3772 	struct fe_priv *np = get_nvpriv(dev);
3773 	int i;
3774 
3775 	if (np->msi_flags & NV_MSI_X_ENABLED) {
3776 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3777 			free_irq(np->msi_x_entry[i].vector, dev);
3778 		pci_disable_msix(np->pci_dev);
3779 		np->msi_flags &= ~NV_MSI_X_ENABLED;
3780 	} else {
3781 		free_irq(np->pci_dev->irq, dev);
3782 		if (np->msi_flags & NV_MSI_ENABLED) {
3783 			pci_disable_msi(np->pci_dev);
3784 			np->msi_flags &= ~NV_MSI_ENABLED;
3785 		}
3786 	}
3787 }
3788 
3789 static void nv_do_nic_poll(unsigned long data)
3790 {
3791 	struct net_device *dev = (struct net_device *) data;
3792 	struct fe_priv *np = netdev_priv(dev);
3793 	u8 __iomem *base = get_hwbase(dev);
3794 	u32 mask = 0;
3795 
3796 	/*
3797 	 * First disable the irq(s), then reenable interrupts on the nic.
3798 	 * We have to do this before calling nv_nic_irq because that
3799 	 * handler may decide to mask them again.
3800 	 */
3801 
3802 	if (!using_multi_irqs(dev)) {
3803 		if (np->msi_flags & NV_MSI_X_ENABLED)
3804 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3805 		else
3806 			disable_irq_lockdep(np->pci_dev->irq);
3807 		mask = np->irqmask;
3808 	} else {
3809 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3810 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3811 			mask |= NVREG_IRQ_RX_ALL;
3812 		}
3813 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3814 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3815 			mask |= NVREG_IRQ_TX_ALL;
3816 		}
3817 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3818 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3819 			mask |= NVREG_IRQ_OTHER;
3820 		}
3821 	}
3822 	/* disable_irq() includes a synchronize_irq(), thus no irq handler can run now */
3823 
3824 	if (np->recover_error) {
3825 		np->recover_error = 0;
3826 		netdev_info(dev, "MAC in recoverable error state\n");
3827 		if (netif_running(dev)) {
3828 			netif_tx_lock_bh(dev);
3829 			netif_addr_lock(dev);
3830 			spin_lock(&np->lock);
3831 			/* stop engines */
3832 			nv_stop_rxtx(dev);
3833 			if (np->driver_data & DEV_HAS_POWER_CNTRL)
3834 				nv_mac_reset(dev);
3835 			nv_txrx_reset(dev);
3836 			/* drain rx queue */
3837 			nv_drain_rxtx(dev);
3838 			/* reinit driver view of the rx queue */
3839 			set_bufsize(dev);
3840 			if (nv_init_ring(dev)) {
3841 				if (!np->in_shutdown)
3842 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3843 			}
3844 			/* reinit nic view of the rx queue */
3845 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3846 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3847 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3848 				base + NvRegRingSizes);
3849 			pci_push(base);
3850 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3851 			pci_push(base);
3852 			/* clear interrupts */
3853 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
3854 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3855 			else
3856 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3857 
3858 			/* restart rx engine */
3859 			nv_start_rxtx(dev);
3860 			spin_unlock(&np->lock);
3861 			netif_addr_unlock(dev);
3862 			netif_tx_unlock_bh(dev);
3863 		}
3864 	}
3865 
3866 	writel(mask, base + NvRegIrqMask);
3867 	pci_push(base);
3868 
3869 	if (!using_multi_irqs(dev)) {
3870 		np->nic_poll_irq = 0;
3871 		if (nv_optimized(np))
3872 			nv_nic_irq_optimized(0, dev);
3873 		else
3874 			nv_nic_irq(0, dev);
3875 		if (np->msi_flags & NV_MSI_X_ENABLED)
3876 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3877 		else
3878 			enable_irq_lockdep(np->pci_dev->irq);
3879 	} else {
3880 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3881 			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
3882 			nv_nic_irq_rx(0, dev);
3883 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3884 		}
3885 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3886 			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
3887 			nv_nic_irq_tx(0, dev);
3888 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3889 		}
3890 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3891 			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
3892 			nv_nic_irq_other(0, dev);
3893 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3894 		}
3895 	}
3896 
3897 }
3898 
3899 #ifdef CONFIG_NET_POLL_CONTROLLER
3900 static void nv_poll_controller(struct net_device *dev)
3901 {
3902 	nv_do_nic_poll((unsigned long) dev);
3903 }
3904 #endif
3905 
3906 static void nv_do_stats_poll(unsigned long data)
3907 {
3908 	struct net_device *dev = (struct net_device *) data;
3909 	struct fe_priv *np = netdev_priv(dev);
3910 
3911 	nv_get_hw_stats(dev);
3912 
3913 	if (!np->in_shutdown)
3914 		mod_timer(&np->stats_poll,
3915 			round_jiffies(jiffies + STATS_INTERVAL));
3916 }
3917 
3918 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3919 {
3920 	struct fe_priv *np = netdev_priv(dev);
3921 	strcpy(info->driver, DRV_NAME);
3922 	strcpy(info->version, FORCEDETH_VERSION);
3923 	strcpy(info->bus_info, pci_name(np->pci_dev));
3924 }
3925 
3926 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3927 {
3928 	struct fe_priv *np = netdev_priv(dev);
3929 	wolinfo->supported = WAKE_MAGIC;
3930 
3931 	spin_lock_irq(&np->lock);
3932 	if (np->wolenabled)
3933 		wolinfo->wolopts = WAKE_MAGIC;
3934 	spin_unlock_irq(&np->lock);
3935 }
3936 
3937 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3938 {
3939 	struct fe_priv *np = netdev_priv(dev);
3940 	u8 __iomem *base = get_hwbase(dev);
3941 	u32 flags = 0;
3942 
3943 	if (wolinfo->wolopts == 0) {
3944 		np->wolenabled = 0;
3945 	} else if (wolinfo->wolopts & WAKE_MAGIC) {
3946 		np->wolenabled = 1;
3947 		flags = NVREG_WAKEUPFLAGS_ENABLE;
3948 	}
3949 	if (netif_running(dev)) {
3950 		spin_lock_irq(&np->lock);
3951 		writel(flags, base + NvRegWakeUpFlags);
3952 		spin_unlock_irq(&np->lock);
3953 	}
3954 	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
3955 	return 0;
3956 }
3957 
3958 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3959 {
3960 	struct fe_priv *np = netdev_priv(dev);
3961 	u32 speed;
3962 	int adv;
3963 
3964 	spin_lock_irq(&np->lock);
3965 	ecmd->port = PORT_MII;
3966 	if (!netif_running(dev)) {
3967 		/* We do not track link speed / duplex setting if the
3968 		 * interface is disabled. Force a link check */
3969 		if (nv_update_linkspeed(dev)) {
3970 			if (!netif_carrier_ok(dev))
3971 				netif_carrier_on(dev);
3972 		} else {
3973 			if (netif_carrier_ok(dev))
3974 				netif_carrier_off(dev);
3975 		}
3976 	}
3977 
3978 	if (netif_carrier_ok(dev)) {
3979 		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3980 		case NVREG_LINKSPEED_10:
3981 			speed = SPEED_10;
3982 			break;
3983 		case NVREG_LINKSPEED_100:
3984 			speed = SPEED_100;
3985 			break;
3986 		case NVREG_LINKSPEED_1000:
3987 			speed = SPEED_1000;
3988 			break;
3989 		default:
3990 			speed = -1;
3991 			break;
3992 		}
3993 		ecmd->duplex = DUPLEX_HALF;
3994 		if (np->duplex)
3995 			ecmd->duplex = DUPLEX_FULL;
3996 	} else {
3997 		speed = -1;
3998 		ecmd->duplex = -1;
3999 	}
4000 	ethtool_cmd_speed_set(ecmd, speed);
4001 	ecmd->autoneg = np->autoneg;
4002 
4003 	ecmd->advertising = ADVERTISED_MII;
4004 	if (np->autoneg) {
4005 		ecmd->advertising |= ADVERTISED_Autoneg;
4006 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4007 		if (adv & ADVERTISE_10HALF)
4008 			ecmd->advertising |= ADVERTISED_10baseT_Half;
4009 		if (adv & ADVERTISE_10FULL)
4010 			ecmd->advertising |= ADVERTISED_10baseT_Full;
4011 		if (adv & ADVERTISE_100HALF)
4012 			ecmd->advertising |= ADVERTISED_100baseT_Half;
4013 		if (adv & ADVERTISE_100FULL)
4014 			ecmd->advertising |= ADVERTISED_100baseT_Full;
4015 		if (np->gigabit == PHY_GIGABIT) {
4016 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4017 			if (adv & ADVERTISE_1000FULL)
4018 				ecmd->advertising |= ADVERTISED_1000baseT_Full;
4019 		}
4020 	}
4021 	ecmd->supported = (SUPPORTED_Autoneg |
4022 		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4023 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4024 		SUPPORTED_MII);
4025 	if (np->gigabit == PHY_GIGABIT)
4026 		ecmd->supported |= SUPPORTED_1000baseT_Full;
4027 
4028 	ecmd->phy_address = np->phyaddr;
4029 	ecmd->transceiver = XCVR_EXTERNAL;
4030 
4031 	/* ignore maxtxpkt, maxrxpkt for now */
4032 	spin_unlock_irq(&np->lock);
4033 	return 0;
4034 }
4035 
4036 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4037 {
4038 	struct fe_priv *np = netdev_priv(dev);
4039 	u32 speed = ethtool_cmd_speed(ecmd);
4040 
4041 	if (ecmd->port != PORT_MII)
4042 		return -EINVAL;
4043 	if (ecmd->transceiver != XCVR_EXTERNAL)
4044 		return -EINVAL;
4045 	if (ecmd->phy_address != np->phyaddr) {
4046 		/* TODO: support switching between multiple phys. Should be
4047 		 * trivial, but not enabled due to lack of test hardware. */
4048 		return -EINVAL;
4049 	}
4050 	if (ecmd->autoneg == AUTONEG_ENABLE) {
4051 		u32 mask;
4052 
4053 		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4054 			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4055 		if (np->gigabit == PHY_GIGABIT)
4056 			mask |= ADVERTISED_1000baseT_Full;
4057 
4058 		if ((ecmd->advertising & mask) == 0)
4059 			return -EINVAL;
4060 
4061 	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
4062 		/* Note: forcing speed 1000 with autonegotiation disabled is
4063 		 * intentionally forbidden - no one should need that. */
4064 
4065 		if (speed != SPEED_10 && speed != SPEED_100)
4066 			return -EINVAL;
4067 		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4068 			return -EINVAL;
4069 	} else {
4070 		return -EINVAL;
4071 	}
4072 
4073 	netif_carrier_off(dev);
4074 	if (netif_running(dev)) {
4075 		unsigned long flags;
4076 
4077 		nv_disable_irq(dev);
4078 		netif_tx_lock_bh(dev);
4079 		netif_addr_lock(dev);
4080 		/* with plain spinlock lockdep complains */
4081 		spin_lock_irqsave(&np->lock, flags);
4082 		/* stop engines */
4083 		/* FIXME:
4084 		 * this can take some time, and interrupts are disabled
4085 		 * due to spin_lock_irqsave, but let's hope no daemon
4086 		 * is going to change the settings very often...
4087 		 * Worst case:
4088 		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4089 		 * + some minor delays, which is up to a second approximately
4090 		 */
4091 		nv_stop_rxtx(dev);
4092 		spin_unlock_irqrestore(&np->lock, flags);
4093 		netif_addr_unlock(dev);
4094 		netif_tx_unlock_bh(dev);
4095 	}
4096 
4097 	if (ecmd->autoneg == AUTONEG_ENABLE) {
4098 		int adv, bmcr;
4099 
4100 		np->autoneg = 1;
4101 
4102 		/* advertise only what has been requested */
4103 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4104 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4105 		if (ecmd->advertising & ADVERTISED_10baseT_Half)
4106 			adv |= ADVERTISE_10HALF;
4107 		if (ecmd->advertising & ADVERTISED_10baseT_Full)
4108 			adv |= ADVERTISE_10FULL;
4109 		if (ecmd->advertising & ADVERTISED_100baseT_Half)
4110 			adv |= ADVERTISE_100HALF;
4111 		if (ecmd->advertising & ADVERTISED_100baseT_Full)
4112 			adv |= ADVERTISE_100FULL;
4113 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
4114 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4115 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4116 			adv |=  ADVERTISE_PAUSE_ASYM;
4117 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4118 
4119 		if (np->gigabit == PHY_GIGABIT) {
4120 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4121 			adv &= ~ADVERTISE_1000FULL;
4122 			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4123 				adv |= ADVERTISE_1000FULL;
4124 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4125 		}
4126 
4127 		if (netif_running(dev))
4128 			netdev_info(dev, "link down\n");
4129 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4130 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4131 			bmcr |= BMCR_ANENABLE;
4132 			/* reset the phy in order for settings to stick,
4133 			 * and cause autoneg to start */
4134 			if (phy_reset(dev, bmcr)) {
4135 				netdev_info(dev, "phy reset failed\n");
4136 				return -EINVAL;
4137 			}
4138 		} else {
4139 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4140 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4141 		}
4142 	} else {
4143 		int adv, bmcr;
4144 
4145 		np->autoneg = 0;
4146 
4147 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4148 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4149 		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4150 			adv |= ADVERTISE_10HALF;
4151 		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4152 			adv |= ADVERTISE_10FULL;
4153 		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4154 			adv |= ADVERTISE_100HALF;
4155 		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4156 			adv |= ADVERTISE_100FULL;
4157 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4158 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4159 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4160 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4161 		}
4162 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4163 			adv |=  ADVERTISE_PAUSE_ASYM;
4164 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4165 		}
4166 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4167 		np->fixed_mode = adv;
4168 
4169 		if (np->gigabit == PHY_GIGABIT) {
4170 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4171 			adv &= ~ADVERTISE_1000FULL;
4172 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4173 		}
4174 
4175 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4176 		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4177 		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4178 			bmcr |= BMCR_FULLDPLX;
4179 		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4180 			bmcr |= BMCR_SPEED100;
4181 		if (np->phy_oui == PHY_OUI_MARVELL) {
4182 			/* reset the phy in order for forced mode settings to stick */
4183 			if (phy_reset(dev, bmcr)) {
4184 				netdev_info(dev, "phy reset failed\n");
4185 				return -EINVAL;
4186 			}
4187 		} else {
4188 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4189 			if (netif_running(dev)) {
4190 				/* Wait a bit and then reconfigure the nic. */
4191 				udelay(10);
4192 				nv_linkchange(dev);
4193 			}
4194 		}
4195 	}
4196 
4197 	if (netif_running(dev)) {
4198 		nv_start_rxtx(dev);
4199 		nv_enable_irq(dev);
4200 	}
4201 
4202 	return 0;
4203 }
4204 
4205 #define FORCEDETH_REGS_VER	1
4206 
4207 static int nv_get_regs_len(struct net_device *dev)
4208 {
4209 	struct fe_priv *np = netdev_priv(dev);
4210 	return np->register_size;
4211 }
4212 
4213 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4214 {
4215 	struct fe_priv *np = netdev_priv(dev);
4216 	u8 __iomem *base = get_hwbase(dev);
4217 	u32 *rbuf = buf;
4218 	int i;
4219 
4220 	regs->version = FORCEDETH_REGS_VER;
4221 	spin_lock_irq(&np->lock);
4222 	for (i = 0; i < np->register_size/sizeof(u32); i++)
4223 		rbuf[i] = readl(base + i*sizeof(u32));
4224 	spin_unlock_irq(&np->lock);
4225 }
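
/*
 * The dump produced above is what userspace receives from
 * "ethtool -d <iface>"; the buffer it allocates is sized by
 * nv_get_regs_len().
 */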
4226 
4227 static int nv_nway_reset(struct net_device *dev)
4228 {
4229 	struct fe_priv *np = netdev_priv(dev);
4230 	int ret;
4231 
4232 	if (np->autoneg) {
4233 		int bmcr;
4234 
4235 		netif_carrier_off(dev);
4236 		if (netif_running(dev)) {
4237 			nv_disable_irq(dev);
4238 			netif_tx_lock_bh(dev);
4239 			netif_addr_lock(dev);
4240 			spin_lock(&np->lock);
4241 			/* stop engines */
4242 			nv_stop_rxtx(dev);
4243 			spin_unlock(&np->lock);
4244 			netif_addr_unlock(dev);
4245 			netif_tx_unlock_bh(dev);
4246 			netdev_info(dev, "link down\n");
4247 		}
4248 
4249 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4250 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4251 			bmcr |= BMCR_ANENABLE;
4252 			/* reset the phy in order for settings to stick*/
4253 			if (phy_reset(dev, bmcr)) {
4254 				netdev_info(dev, "phy reset failed\n");
4255 				return -EINVAL;
4256 			}
4257 		} else {
4258 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4259 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4260 		}
4261 
4262 		if (netif_running(dev)) {
4263 			nv_start_rxtx(dev);
4264 			nv_enable_irq(dev);
4265 		}
4266 		ret = 0;
4267 	} else {
4268 		ret = -EINVAL;
4269 	}
4270 
4271 	return ret;
4272 }
4273 
4274 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4275 {
4276 	struct fe_priv *np = netdev_priv(dev);
4277 
4278 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4279 	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4280 
4281 	ring->rx_pending = np->rx_ring_size;
4282 	ring->tx_pending = np->tx_ring_size;
4283 }
4284 
4285 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4286 {
4287 	struct fe_priv *np = netdev_priv(dev);
4288 	u8 __iomem *base = get_hwbase(dev);
4289 	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4290 	dma_addr_t ring_addr;
4291 
4292 	if (ring->rx_pending < RX_RING_MIN ||
4293 	    ring->tx_pending < TX_RING_MIN ||
4294 	    ring->rx_mini_pending != 0 ||
4295 	    ring->rx_jumbo_pending != 0 ||
4296 	    (np->desc_ver == DESC_VER_1 &&
4297 	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4298 	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4299 	    (np->desc_ver != DESC_VER_1 &&
4300 	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4301 	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4302 		return -EINVAL;
4303 	}
4304 
4305 	/* allocate new rings */
4306 	if (!nv_optimized(np)) {
4307 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4308 					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4309 					    &ring_addr);
4310 	} else {
4311 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4312 					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4313 					    &ring_addr);
4314 	}
4315 	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4316 	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4317 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4318 		/* fall back to old rings */
4319 		if (!nv_optimized(np)) {
4320 			if (rxtx_ring)
4321 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4322 						    rxtx_ring, ring_addr);
4323 		} else {
4324 			if (rxtx_ring)
4325 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4326 						    rxtx_ring, ring_addr);
4327 		}
4328 
4329 		kfree(rx_skbuff);
4330 		kfree(tx_skbuff);
4331 		goto exit;
4332 	}
4333 
4334 	if (netif_running(dev)) {
4335 		nv_disable_irq(dev);
4336 		nv_napi_disable(dev);
4337 		netif_tx_lock_bh(dev);
4338 		netif_addr_lock(dev);
4339 		spin_lock(&np->lock);
4340 		/* stop engines */
4341 		nv_stop_rxtx(dev);
4342 		nv_txrx_reset(dev);
4343 		/* drain queues */
4344 		nv_drain_rxtx(dev);
4345 		/* delete queues */
4346 		free_rings(dev);
4347 	}
4348 
4349 	/* set new values */
4350 	np->rx_ring_size = ring->rx_pending;
4351 	np->tx_ring_size = ring->tx_pending;
4352 
4353 	if (!nv_optimized(np)) {
4354 		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4355 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4356 	} else {
4357 		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4358 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4359 	}
4360 	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4361 	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4362 	np->ring_addr = ring_addr;
4363 
4364 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4365 	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4366 
4367 	if (netif_running(dev)) {
4368 		/* reinit driver view of the queues */
4369 		set_bufsize(dev);
4370 		if (nv_init_ring(dev)) {
4371 			if (!np->in_shutdown)
4372 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4373 		}
4374 
4375 		/* reinit nic view of the queues */
4376 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4377 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4378 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4379 			base + NvRegRingSizes);
4380 		pci_push(base);
4381 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4382 		pci_push(base);
4383 
4384 		/* restart engines */
4385 		nv_start_rxtx(dev);
4386 		spin_unlock(&np->lock);
4387 		netif_addr_unlock(dev);
4388 		netif_tx_unlock_bh(dev);
4389 		nv_napi_enable(dev);
4390 		nv_enable_irq(dev);
4391 	}
4392 	return 0;
4393 exit:
4394 	return -ENOMEM;
4395 }
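
/*
 * Reached via "ethtool -G <iface> rx N tx N". The new rings are
 * allocated before the device is stopped, so an allocation failure
 * leaves the old rings, and the running device, untouched.
 */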
4396 
4397 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4398 {
4399 	struct fe_priv *np = netdev_priv(dev);
4400 
4401 	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4402 	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4403 	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4404 }
4405 
4406 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4407 {
4408 	struct fe_priv *np = netdev_priv(dev);
4409 	int adv, bmcr;
4410 
4411 	if ((!np->autoneg && np->duplex == 0) ||
4412 	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4413 		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
4414 		return -EINVAL;
4415 	}
4416 	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4417 		netdev_info(dev, "hardware does not support tx pause frames\n");
4418 		return -EINVAL;
4419 	}
4420 
4421 	netif_carrier_off(dev);
4422 	if (netif_running(dev)) {
4423 		nv_disable_irq(dev);
4424 		netif_tx_lock_bh(dev);
4425 		netif_addr_lock(dev);
4426 		spin_lock(&np->lock);
4427 		/* stop engines */
4428 		nv_stop_rxtx(dev);
4429 		spin_unlock(&np->lock);
4430 		netif_addr_unlock(dev);
4431 		netif_tx_unlock_bh(dev);
4432 	}
4433 
4434 	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4435 	if (pause->rx_pause)
4436 		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4437 	if (pause->tx_pause)
4438 		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4439 
4440 	if (np->autoneg && pause->autoneg) {
4441 		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4442 
4443 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4444 		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4445 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4446 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4447 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4448 			adv |=  ADVERTISE_PAUSE_ASYM;
4449 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4450 
4451 		if (netif_running(dev))
4452 			netdev_info(dev, "link down\n");
4453 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4454 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4455 		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4456 	} else {
4457 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4458 		if (pause->rx_pause)
4459 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4460 		if (pause->tx_pause)
4461 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4462 
4463 		if (!netif_running(dev))
4464 			nv_update_linkspeed(dev);
4465 		else
4466 			nv_update_pause(dev, np->pause_flags);
4467 	}
4468 
4469 	if (netif_running(dev)) {
4470 		nv_start_rxtx(dev);
4471 		nv_enable_irq(dev);
4472 	}
4473 	return 0;
4474 }
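
/*
 * Reached via "ethtool -A <iface> autoneg on|off rx on|off tx on|off".
 * With autoneg, the requested pause modes are only advertised here;
 * the operative setting is resolved against the link partner's
 * abilities during the next link update. Without autoneg, the flags
 * take effect immediately.
 */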
4475 
4476 static u32 nv_fix_features(struct net_device *dev, u32 features)
4477 {
4478 	/* vlan is dependent on rx checksum offload */
4479 	if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
4480 		features |= NETIF_F_RXCSUM;
4481 
4482 	return features;
4483 }
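
/*
 * Example of the dependency enforced above: if userspace turns on VLAN
 * rx/tx acceleration (e.g. via ethtool -K) while rx checksum offload is
 * off, the core calls this hook and rx checksumming is re-enabled as a
 * side effect.
 */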
4484 
4485 static void nv_vlan_mode(struct net_device *dev, u32 features)
4486 {
4487 	struct fe_priv *np = get_nvpriv(dev);
4488 
4489 	spin_lock_irq(&np->lock);
4490 
4491 	if (features & NETIF_F_HW_VLAN_RX)
4492 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4493 	else
4494 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4495 
4496 	if (features & NETIF_F_HW_VLAN_TX)
4497 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4498 	else
4499 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4500 
4501 	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4502 
4503 	spin_unlock_irq(&np->lock);
4504 }
4505 
4506 static int nv_set_features(struct net_device *dev, u32 features)
4507 {
4508 	struct fe_priv *np = netdev_priv(dev);
4509 	u8 __iomem *base = get_hwbase(dev);
4510 	u32 changed = dev->features ^ features;
4511 
4512 	if (changed & NETIF_F_RXCSUM) {
4513 		spin_lock_irq(&np->lock);
4514 
4515 		if (features & NETIF_F_RXCSUM)
4516 			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4517 		else
4518 			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4519 
4520 		if (netif_running(dev))
4521 			writel(np->txrxctl_bits, base + NvRegTxRxControl);
4522 
4523 		spin_unlock_irq(&np->lock);
4524 	}
4525 
4526 	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
4527 		nv_vlan_mode(dev, features);
4528 
4529 	return 0;
4530 }
4531 
4532 static int nv_get_sset_count(struct net_device *dev, int sset)
4533 {
4534 	struct fe_priv *np = netdev_priv(dev);
4535 
4536 	switch (sset) {
4537 	case ETH_SS_TEST:
4538 		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4539 			return NV_TEST_COUNT_EXTENDED;
4540 		else
4541 			return NV_TEST_COUNT_BASE;
4542 	case ETH_SS_STATS:
4543 		if (np->driver_data & DEV_HAS_STATISTICS_V3)
4544 			return NV_DEV_STATISTICS_V3_COUNT;
4545 		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4546 			return NV_DEV_STATISTICS_V2_COUNT;
4547 		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4548 			return NV_DEV_STATISTICS_V1_COUNT;
4549 		else
4550 			return 0;
4551 	default:
4552 		return -EOPNOTSUPP;
4553 	}
4554 }
4555 
4556 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4557 {
4558 	struct fe_priv *np = netdev_priv(dev);
4559 
4560 	/* update stats */
4561 	nv_get_hw_stats(dev);
4562 
4563 	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4564 }
4565 
4566 static int nv_link_test(struct net_device *dev)
4567 {
4568 	struct fe_priv *np = netdev_priv(dev);
4569 	int mii_status;
4570 
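	/* BMSR latches link-down events; read it twice so that the second
	 * read reports the current link state rather than a stale latched
	 * bit.
	 */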
4571 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4572 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4573 
4574 	/* check phy link status */
4575 	if (!(mii_status & BMSR_LSTATUS))
4576 		return 0;
4577 	else
4578 		return 1;
4579 }
4580 
4581 static int nv_register_test(struct net_device *dev)
4582 {
4583 	u8 __iomem *base = get_hwbase(dev);
4584 	int i = 0;
4585 	u32 orig_read, new_read;
4586 
4587 	do {
4588 		orig_read = readl(base + nv_registers_test[i].reg);
4589 
4590 		/* xor with mask to toggle bits */
4591 		orig_read ^= nv_registers_test[i].mask;
4592 
4593 		writel(orig_read, base + nv_registers_test[i].reg);
4594 
4595 		new_read = readl(base + nv_registers_test[i].reg);
4596 
4597 		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4598 			return 0;
4599 
4600 		/* restore original value */
4601 		orig_read ^= nv_registers_test[i].mask;
4602 		writel(orig_read, base + nv_registers_test[i].reg);
4603 
4604 	} while (nv_registers_test[++i].reg != 0);
4605 
4606 	return 1;
4607 }
4608 
4609 static int nv_interrupt_test(struct net_device *dev)
4610 {
4611 	struct fe_priv *np = netdev_priv(dev);
4612 	u8 __iomem *base = get_hwbase(dev);
4613 	int ret = 1;
4614 	int testcnt;
4615 	u32 save_msi_flags, save_poll_interval = 0;
4616 
4617 	if (netif_running(dev)) {
4618 		/* free current irq */
4619 		nv_free_irq(dev);
4620 		save_poll_interval = readl(base+NvRegPollingInterval);
4621 	}
4622 
4623 	/* flag to test interrupt handler */
4624 	np->intr_test = 0;
4625 
4626 	/* setup test irq */
4627 	save_msi_flags = np->msi_flags;
4628 	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4629 	np->msi_flags |= 0x001; /* setup 1 vector */
4630 	if (nv_request_irq(dev, 1))
4631 		return 0;
4632 
4633 	/* setup timer interrupt */
4634 	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4635 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4636 
4637 	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4638 
4639 	/* wait for at least one interrupt */
4640 	msleep(100);
4641 
4642 	spin_lock_irq(&np->lock);
4643 
4644 	/* flag should be set within ISR */
4645 	testcnt = np->intr_test;
4646 	if (!testcnt)
4647 		ret = 2;
4648 
4649 	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4650 	if (!(np->msi_flags & NV_MSI_X_ENABLED))
4651 		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4652 	else
4653 		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4654 
4655 	spin_unlock_irq(&np->lock);
4656 
4657 	nv_free_irq(dev);
4658 
4659 	np->msi_flags = save_msi_flags;
4660 
4661 	if (netif_running(dev)) {
4662 		writel(save_poll_interval, base + NvRegPollingInterval);
4663 		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4664 		/* restore original irq */
4665 		if (nv_request_irq(dev, 0))
4666 			return 0;
4667 	}
4668 
4669 	return ret;
4670 }
4671 
4672 static int nv_loopback_test(struct net_device *dev)
4673 {
4674 	struct fe_priv *np = netdev_priv(dev);
4675 	u8 __iomem *base = get_hwbase(dev);
4676 	struct sk_buff *tx_skb, *rx_skb;
4677 	dma_addr_t test_dma_addr;
4678 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4679 	u32 flags;
4680 	int len, i, pkt_len;
4681 	u8 *pkt_data;
4682 	u32 filter_flags = 0;
4683 	u32 misc1_flags = 0;
4684 	int ret = 1;
4685 
4686 	if (netif_running(dev)) {
4687 		nv_disable_irq(dev);
4688 		filter_flags = readl(base + NvRegPacketFilterFlags);
4689 		misc1_flags = readl(base + NvRegMisc1);
4690 	} else {
4691 		nv_txrx_reset(dev);
4692 	}
4693 
4694 	/* reinit driver view of the rx queue */
4695 	set_bufsize(dev);
4696 	nv_init_ring(dev);
4697 
4698 	/* setup hardware for loopback */
4699 	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4700 	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4701 
4702 	/* reinit nic view of the rx queue */
4703 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4704 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4705 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4706 		base + NvRegRingSizes);
4707 	pci_push(base);
4708 
4709 	/* restart rx engine */
4710 	nv_start_rxtx(dev);
4711 
4712 	/* setup packet for tx */
4713 	pkt_len = ETH_DATA_LEN;
4714 	tx_skb = dev_alloc_skb(pkt_len);
4715 	if (!tx_skb) {
4716 		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
4717 		ret = 0;
4718 		goto out;
4719 	}
4720 	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4721 				       skb_tailroom(tx_skb),
4722 				       PCI_DMA_TODEVICE);
4723 	pkt_data = skb_put(tx_skb, pkt_len);
4724 	for (i = 0; i < pkt_len; i++)
4725 		pkt_data[i] = (u8)(i & 0xff);
4726 
4727 	if (!nv_optimized(np)) {
4728 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4729 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4730 	} else {
4731 		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4732 		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4733 		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4734 	}
4735 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4736 	pci_push(get_hwbase(dev));
4737 
4738 	msleep(500);
4739 
4740 	/* check for rx of the packet */
4741 	if (!nv_optimized(np)) {
4742 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4743 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4744 
4745 	} else {
4746 		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4747 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4748 	}
4749 
4750 	if (flags & NV_RX_AVAIL) {
4751 		ret = 0;
4752 	} else if (np->desc_ver == DESC_VER_1) {
4753 		if (flags & NV_RX_ERROR)
4754 			ret = 0;
4755 	} else {
4756 		if (flags & NV_RX2_ERROR)
4757 			ret = 0;
4758 	}
4759 
4760 	if (ret) {
4761 		if (len != pkt_len) {
4762 			ret = 0;
4763 		} else {
4764 			rx_skb = np->rx_skb[0].skb;
4765 			for (i = 0; i < pkt_len; i++) {
4766 				if (rx_skb->data[i] != (u8)(i & 0xff)) {
4767 					ret = 0;
4768 					break;
4769 				}
4770 			}
4771 		}
4772 	}
4773 
4774 	pci_unmap_single(np->pci_dev, test_dma_addr,
4775 		       (skb_end_pointer(tx_skb) - tx_skb->data),
4776 		       PCI_DMA_TODEVICE);
4777 	dev_kfree_skb_any(tx_skb);
4778  out:
4779 	/* stop engines */
4780 	nv_stop_rxtx(dev);
4781 	nv_txrx_reset(dev);
4782 	/* drain rx queue */
4783 	nv_drain_rxtx(dev);
4784 
4785 	if (netif_running(dev)) {
4786 		writel(misc1_flags, base + NvRegMisc1);
4787 		writel(filter_flags, base + NvRegPacketFilterFlags);
4788 		nv_enable_irq(dev);
4789 	}
4790 
4791 	return ret;
4792 }
4793 
4794 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4795 {
4796 	struct fe_priv *np = netdev_priv(dev);
4797 	u8 __iomem *base = get_hwbase(dev);
4798 	int result;
4799 	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
4800 
4801 	if (!nv_link_test(dev)) {
4802 		test->flags |= ETH_TEST_FL_FAILED;
4803 		buffer[0] = 1;
4804 	}
4805 
4806 	if (test->flags & ETH_TEST_FL_OFFLINE) {
4807 		if (netif_running(dev)) {
4808 			netif_stop_queue(dev);
4809 			nv_napi_disable(dev);
4810 			netif_tx_lock_bh(dev);
4811 			netif_addr_lock(dev);
4812 			spin_lock_irq(&np->lock);
4813 			nv_disable_hw_interrupts(dev, np->irqmask);
4814 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
4815 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4816 			else
4817 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4818 			/* stop engines */
4819 			nv_stop_rxtx(dev);
4820 			nv_txrx_reset(dev);
4821 			/* drain rx queue */
4822 			nv_drain_rxtx(dev);
4823 			spin_unlock_irq(&np->lock);
4824 			netif_addr_unlock(dev);
4825 			netif_tx_unlock_bh(dev);
4826 		}
4827 
4828 		if (!nv_register_test(dev)) {
4829 			test->flags |= ETH_TEST_FL_FAILED;
4830 			buffer[1] = 1;
4831 		}
4832 
4833 		result = nv_interrupt_test(dev);
4834 		if (result != 1) {
4835 			test->flags |= ETH_TEST_FL_FAILED;
4836 			buffer[2] = 1;
4837 		}
4838 		if (result == 0) {
4839 			/* bail out */
4840 			return;
4841 		}
4842 
4843 		if (!nv_loopback_test(dev)) {
4844 			test->flags |= ETH_TEST_FL_FAILED;
4845 			buffer[3] = 1;
4846 		}
4847 
4848 		if (netif_running(dev)) {
4849 			/* reinit driver view of the rx queue */
4850 			set_bufsize(dev);
4851 			if (nv_init_ring(dev)) {
4852 				if (!np->in_shutdown)
4853 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4854 			}
4855 			/* reinit nic view of the rx queue */
4856 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4857 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4858 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4859 				base + NvRegRingSizes);
4860 			pci_push(base);
4861 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4862 			pci_push(base);
4863 			/* restart rx engine */
4864 			nv_start_rxtx(dev);
4865 			netif_start_queue(dev);
4866 			nv_napi_enable(dev);
4867 			nv_enable_hw_interrupts(dev, np->irqmask);
4868 		}
4869 	}
4870 }
4871 
4872 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4873 {
4874 	switch (stringset) {
4875 	case ETH_SS_STATS:
4876 		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
4877 		break;
4878 	case ETH_SS_TEST:
4879 		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
4880 		break;
4881 	}
4882 }
4883 
4884 static const struct ethtool_ops ops = {
4885 	.get_drvinfo = nv_get_drvinfo,
4886 	.get_link = ethtool_op_get_link,
4887 	.get_wol = nv_get_wol,
4888 	.set_wol = nv_set_wol,
4889 	.get_settings = nv_get_settings,
4890 	.set_settings = nv_set_settings,
4891 	.get_regs_len = nv_get_regs_len,
4892 	.get_regs = nv_get_regs,
4893 	.nway_reset = nv_nway_reset,
4894 	.get_ringparam = nv_get_ringparam,
4895 	.set_ringparam = nv_set_ringparam,
4896 	.get_pauseparam = nv_get_pauseparam,
4897 	.set_pauseparam = nv_set_pauseparam,
4898 	.get_strings = nv_get_strings,
4899 	.get_ethtool_stats = nv_get_ethtool_stats,
4900 	.get_sset_count = nv_get_sset_count,
4901 	.self_test = nv_self_test,
4902 };
4903 
4904 /* The mgmt unit and driver use a semaphore to access the phy during init */
4905 static int nv_mgmt_acquire_sema(struct net_device *dev)
4906 {
4907 	struct fe_priv *np = netdev_priv(dev);
4908 	u8 __iomem *base = get_hwbase(dev);
4909 	int i;
4910 	u32 tx_ctrl, mgmt_sema;
4911 
4912 	for (i = 0; i < 10; i++) {
4913 		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
4914 		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
4915 			break;
4916 		msleep(500);
4917 	}
4918 
4919 	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
4920 		return 0;
4921 
4922 	for (i = 0; i < 2; i++) {
4923 		tx_ctrl = readl(base + NvRegTransmitterControl);
4924 		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
4925 		writel(tx_ctrl, base + NvRegTransmitterControl);
4926 
4927 		/* verify that semaphore was acquired */
4928 		tx_ctrl = readl(base + NvRegTransmitterControl);
4929 		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
4930 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
4931 			np->mgmt_sema = 1;
4932 			return 1;
4933 		} else
4934 			udelay(50);
4935 	}
4936 
4937 	return 0;
4938 }
4939 
4940 static void nv_mgmt_release_sema(struct net_device *dev)
4941 {
4942 	struct fe_priv *np = netdev_priv(dev);
4943 	u8 __iomem *base = get_hwbase(dev);
4944 	u32 tx_ctrl;
4945 
4946 	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
4947 		if (np->mgmt_sema) {
4948 			tx_ctrl = readl(base + NvRegTransmitterControl);
4949 			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
4950 			writel(tx_ctrl, base + NvRegTransmitterControl);
4951 		}
4952 	}
4953 }
4954 
4955 
4956 static int nv_mgmt_get_version(struct net_device *dev)
4957 {
4958 	struct fe_priv *np = netdev_priv(dev);
4959 	u8 __iomem *base = get_hwbase(dev);
4960 	u32 data_ready = readl(base + NvRegTransmitterControl);
4961 	u32 data_ready2 = 0;
4962 	unsigned long start;
4963 	int ready = 0;
4964 
4965 	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
4966 	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
4967 	start = jiffies;
4968 	while (time_before(jiffies, start + 5*HZ)) {
4969 		data_ready2 = readl(base + NvRegTransmitterControl);
4970 		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
4971 			ready = 1;
4972 			break;
4973 		}
4974 		schedule_timeout_uninterruptible(1);
4975 	}
4976 
4977 	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
4978 		return 0;
4979 
4980 	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
4981 
4982 	return 1;
4983 }
4984 
4985 static int nv_open(struct net_device *dev)
4986 {
4987 	struct fe_priv *np = netdev_priv(dev);
4988 	u8 __iomem *base = get_hwbase(dev);
4989 	int ret = 1;
4990 	int oom, i;
4991 	u32 low;
4992 
4993 	/* power up phy */
4994 	mii_rw(dev, np->phyaddr, MII_BMCR,
4995 	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
4996 
4997 	nv_txrx_gate(dev, false);
4998 	/* erase previous misconfiguration */
4999 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
5000 		nv_mac_reset(dev);
5001 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5002 	writel(0, base + NvRegMulticastAddrB);
5003 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5004 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5005 	writel(0, base + NvRegPacketFilterFlags);
5006 
5007 	writel(0, base + NvRegTransmitterControl);
5008 	writel(0, base + NvRegReceiverControl);
5009 
5010 	writel(0, base + NvRegAdapterControl);
5011 
5012 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5013 		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5014 
5015 	/* initialize descriptor rings */
5016 	set_bufsize(dev);
5017 	oom = nv_init_ring(dev);
5018 
5019 	writel(0, base + NvRegLinkSpeed);
5020 	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5021 	nv_txrx_reset(dev);
5022 	writel(0, base + NvRegUnknownSetupReg6);
5023 
5024 	np->in_shutdown = 0;
5025 
5026 	/* give hw rings */
5027 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5028 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5029 		base + NvRegRingSizes);
5030 
5031 	writel(np->linkspeed, base + NvRegLinkSpeed);
5032 	if (np->desc_ver == DESC_VER_1)
5033 		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5034 	else
5035 		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5036 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
5037 	writel(np->vlanctl_bits, base + NvRegVlanControl);
5038 	pci_push(base);
5039 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5040 	if (reg_delay(dev, NvRegUnknownSetupReg5,
5041 		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5042 		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5043 		netdev_info(dev,
5044 			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
5045 
5046 	writel(0, base + NvRegMIIMask);
5047 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5048 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5049 
5050 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5051 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5052 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5053 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5054 
5055 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5056 
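	/* Seed the collision backoff: legacy parts mix random low bits into
	 * the slot-time register, while DEV_HAS_GEAR_MODE parts use the
	 * dedicated gear-backoff reseed helper instead.
	 */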
5057 	get_random_bytes(&low, sizeof(low));
5058 	low &= NVREG_SLOTTIME_MASK;
5059 	if (np->desc_ver == DESC_VER_1) {
5060 		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5061 	} else {
5062 		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5063 			/* setup legacy backoff */
5064 			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5065 		} else {
5066 			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5067 			nv_gear_backoff_reseed(dev);
5068 		}
5069 	}
5070 	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5071 	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5072 	if (poll_interval == -1) {
5073 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5074 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5075 		else
5076 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5077 	} else
5078 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5079 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5080 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5081 			base + NvRegAdapterControl);
5082 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5083 	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5084 	if (np->wolenabled)
5085 		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5086 
5087 	i = readl(base + NvRegPowerState);
5088 	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5089 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5090 
5091 	pci_push(base);
5092 	udelay(10);
5093 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5094 
5095 	nv_disable_hw_interrupts(dev, np->irqmask);
5096 	pci_push(base);
5097 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5098 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5099 	pci_push(base);
5100 
5101 	if (nv_request_irq(dev, 0))
5102 		goto out_drain;
5103 
5104 	/* ask for interrupts */
5105 	nv_enable_hw_interrupts(dev, np->irqmask);
5106 
5107 	spin_lock_irq(&np->lock);
5108 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5109 	writel(0, base + NvRegMulticastAddrB);
5110 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5111 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5112 	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5113 	/* One manual link speed update: interrupts are enabled; future link
5114 	 * speed changes cause interrupts and are handled by nv_link_irq().
5115 	 */
5116 	{
5117 		u32 miistat;
5118 		miistat = readl(base + NvRegMIIStatus);
5119 		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5120 	}
5121 	/* set linkspeed to an invalid value, forcing nv_update_linkspeed
5122 	 * to initialize the hw */
5123 	np->linkspeed = 0;
5124 	ret = nv_update_linkspeed(dev);
5125 	nv_start_rxtx(dev);
5126 	netif_start_queue(dev);
5127 	nv_napi_enable(dev);
5128 
5129 	if (ret) {
5130 		netif_carrier_on(dev);
5131 	} else {
5132 		netdev_info(dev, "no link during initialization\n");
5133 		netif_carrier_off(dev);
5134 	}
5135 	if (oom)
5136 		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5137 
5138 	/* start statistics timer */
5139 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5140 		mod_timer(&np->stats_poll,
5141 			round_jiffies(jiffies + STATS_INTERVAL));
5142 
5143 	spin_unlock_irq(&np->lock);
5144 
5145 	return 0;
5146 out_drain:
5147 	nv_drain_rxtx(dev);
5148 	return ret;
5149 }
5150 
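/*
 * nv_close: tear down in the reverse order of nv_open.  Mark the device
 * as shutting down, quiesce NAPI and the irq, kill the timers, stop and
 * reset rx/tx, then drain the rings.  If WoL is enabled (or the phy must
 * stay powered), rx is restarted so wake packets can still be received.
 */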
5151 static int nv_close(struct net_device *dev)
5152 {
5153 	struct fe_priv *np = netdev_priv(dev);
5154 	u8 __iomem *base;
5155 
5156 	spin_lock_irq(&np->lock);
5157 	np->in_shutdown = 1;
5158 	spin_unlock_irq(&np->lock);
5159 	nv_napi_disable(dev);
5160 	synchronize_irq(np->pci_dev->irq);
5161 
5162 	del_timer_sync(&np->oom_kick);
5163 	del_timer_sync(&np->nic_poll);
5164 	del_timer_sync(&np->stats_poll);
5165 
5166 	netif_stop_queue(dev);
5167 	spin_lock_irq(&np->lock);
5168 	nv_stop_rxtx(dev);
5169 	nv_txrx_reset(dev);
5170 
5171 	/* disable interrupts on the nic or we will lock up */
5172 	base = get_hwbase(dev);
5173 	nv_disable_hw_interrupts(dev, np->irqmask);
5174 	pci_push(base);
5175 
5176 	spin_unlock_irq(&np->lock);
5177 
5178 	nv_free_irq(dev);
5179 
5180 	nv_drain_rxtx(dev);
5181 
5182 	if (np->wolenabled || !phy_power_down) {
5183 		nv_txrx_gate(dev, false);
5184 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5185 		nv_start_rx(dev);
5186 	} else {
5187 		/* power down phy */
5188 		mii_rw(dev, np->phyaddr, MII_BMCR,
5189 		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5190 		nv_txrx_gate(dev, true);
5191 	}
5192 
5193 	/* FIXME: power down nic */
5194 
5195 	return 0;
5196 }
5197 
5198 static const struct net_device_ops nv_netdev_ops = {
5199 	.ndo_open		= nv_open,
5200 	.ndo_stop		= nv_close,
5201 	.ndo_get_stats		= nv_get_stats,
5202 	.ndo_start_xmit		= nv_start_xmit,
5203 	.ndo_tx_timeout		= nv_tx_timeout,
5204 	.ndo_change_mtu		= nv_change_mtu,
5205 	.ndo_fix_features	= nv_fix_features,
5206 	.ndo_set_features	= nv_set_features,
5207 	.ndo_validate_addr	= eth_validate_addr,
5208 	.ndo_set_mac_address	= nv_set_mac_address,
5209 	.ndo_set_rx_mode	= nv_set_multicast,
5210 #ifdef CONFIG_NET_POLL_CONTROLLER
5211 	.ndo_poll_controller	= nv_poll_controller,
5212 #endif
5213 };
5214 
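/* The "optimized" variant differs only in ndo_start_xmit: devices for
 * which nv_optimized() is true use the extended-descriptor transmit path.
 */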
5215 static const struct net_device_ops nv_netdev_ops_optimized = {
5216 	.ndo_open		= nv_open,
5217 	.ndo_stop		= nv_close,
5218 	.ndo_get_stats		= nv_get_stats,
5219 	.ndo_start_xmit		= nv_start_xmit_optimized,
5220 	.ndo_tx_timeout		= nv_tx_timeout,
5221 	.ndo_change_mtu		= nv_change_mtu,
5222 	.ndo_fix_features	= nv_fix_features,
5223 	.ndo_set_features	= nv_set_features,
5224 	.ndo_validate_addr	= eth_validate_addr,
5225 	.ndo_set_mac_address	= nv_set_mac_address,
5226 	.ndo_set_rx_mode	= nv_set_multicast,
5227 #ifdef CONFIG_NET_POLL_CONTROLLER
5228 	.ndo_poll_controller	= nv_poll_controller,
5229 #endif
5230 };
5231 
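/*
 * nv_probe: one-time PCI device setup.  Maps the register window, picks
 * a descriptor format and DMA mask from the driver_data feature flags,
 * allocates the rings, recovers the MAC address (fixing up reversed
 * storage on older parts), probes for a phy, and registers the netdev.
 */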
5232 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5233 {
5234 	struct net_device *dev;
5235 	struct fe_priv *np;
5236 	unsigned long addr;
5237 	u8 __iomem *base;
5238 	int err, i;
5239 	u32 powerstate, txreg;
5240 	u32 phystate_orig = 0, phystate;
5241 	int phyinitialized = 0;
5242 	static int printed_version;
5243 
5244 	if (!printed_version++)
5245 		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5246 			FORCEDETH_VERSION);
5247 
5248 	dev = alloc_etherdev(sizeof(struct fe_priv));
5249 	err = -ENOMEM;
5250 	if (!dev)
5251 		goto out;
5252 
5253 	np = netdev_priv(dev);
5254 	np->dev = dev;
5255 	np->pci_dev = pci_dev;
5256 	spin_lock_init(&np->lock);
5257 	SET_NETDEV_DEV(dev, &pci_dev->dev);
5258 
5259 	init_timer(&np->oom_kick);
5260 	np->oom_kick.data = (unsigned long) dev;
5261 	np->oom_kick.function = nv_do_rx_refill;	/* timer handler */
5262 	init_timer(&np->nic_poll);
5263 	np->nic_poll.data = (unsigned long) dev;
5264 	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
5265 	init_timer(&np->stats_poll);
5266 	np->stats_poll.data = (unsigned long) dev;
5267 	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */
5268 
5269 	err = pci_enable_device(pci_dev);
5270 	if (err)
5271 		goto out_free;
5272 
5273 	pci_set_master(pci_dev);
5274 
5275 	err = pci_request_regions(pci_dev, DRV_NAME);
5276 	if (err < 0)
5277 		goto out_disable;
5278 
5279 	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5280 		np->register_size = NV_PCI_REGSZ_VER3;
5281 	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5282 		np->register_size = NV_PCI_REGSZ_VER2;
5283 	else
5284 		np->register_size = NV_PCI_REGSZ_VER1;
5285 
5286 	err = -EINVAL;
5287 	addr = 0;
5288 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5289 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5290 				pci_resource_len(pci_dev, i) >= np->register_size) {
5291 			addr = pci_resource_start(pci_dev, i);
5292 			break;
5293 		}
5294 	}
5295 	if (i == DEVICE_COUNT_RESOURCE) {
5296 		dev_info(&pci_dev->dev, "Couldn't find register window\n");
5297 		goto out_relreg;
5298 	}
5299 
5300 	/* copy of driver data */
5301 	np->driver_data = id->driver_data;
5302 	/* copy of device id */
5303 	np->device_id = id->device;
5304 
5305 	/* handle different descriptor versions */
5306 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
5307 		/* packet format 3: supports 40-bit addressing */
5308 		np->desc_ver = DESC_VER_3;
5309 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5310 		if (dma_64bit) {
5311 			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5312 				dev_info(&pci_dev->dev,
5313 					 "64-bit DMA failed, using 32-bit addressing\n");
5314 			else
5315 				dev->features |= NETIF_F_HIGHDMA;
5316 			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5317 				dev_info(&pci_dev->dev,
5318 					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5319 			}
5320 		}
5321 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
5322 		/* packet format 2: supports jumbo frames */
5323 		np->desc_ver = DESC_VER_2;
5324 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5325 	} else {
5326 		/* original packet format */
5327 		np->desc_ver = DESC_VER_1;
5328 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5329 	}
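	/* Note: the log messages above say "64-bit DMA", but the mask actually
	 * requested is 39 bits; DMA_BIT_MASK(39) equals the historical
	 * DMA_39BIT_MASK (0x7fffffffffULL) used for this hardware generation.
	 */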
5330 
5331 	np->pkt_limit = NV_PKTLIMIT_1;
5332 	if (id->driver_data & DEV_HAS_LARGEDESC)
5333 		np->pkt_limit = NV_PKTLIMIT_2;
5334 
5335 	if (id->driver_data & DEV_HAS_CHECKSUM) {
5336 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5337 		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5338 			NETIF_F_TSO | NETIF_F_RXCSUM;
5339 	}
5340 
5341 	np->vlanctl_bits = 0;
5342 	if (id->driver_data & DEV_HAS_VLAN) {
5343 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5344 		dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5345 	}
5346 
5347 	dev->features |= dev->hw_features;
5348 
5349 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5350 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5351 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5352 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5353 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5354 	}
5355 
5356 	err = -ENOMEM;
5357 	np->base = ioremap(addr, np->register_size);
5358 	if (!np->base)
5359 		goto out_relreg;
5360 	dev->base_addr = (unsigned long)np->base;
5361 
5362 	dev->irq = pci_dev->irq;
5363 
5364 	np->rx_ring_size = RX_RING_DEFAULT;
5365 	np->tx_ring_size = TX_RING_DEFAULT;
5366 
5367 	if (!nv_optimized(np)) {
5368 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5369 					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5370 					&np->ring_addr);
5371 		if (!np->rx_ring.orig)
5372 			goto out_unmap;
5373 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5374 	} else {
5375 		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5376 					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5377 					&np->ring_addr);
5378 		if (!np->rx_ring.ex)
5379 			goto out_unmap;
5380 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5381 	}
5382 	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5383 	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5384 	if (!np->rx_skb || !np->tx_skb)
5385 		goto out_freering;
5386 
5387 	if (!nv_optimized(np))
5388 		dev->netdev_ops = &nv_netdev_ops;
5389 	else
5390 		dev->netdev_ops = &nv_netdev_ops_optimized;
5391 
5392 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5393 	SET_ETHTOOL_OPS(dev, &ops);
5394 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5395 
5396 	pci_set_drvdata(pci_dev, dev);
5397 
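	/* Older parts store the MAC address in reversed byte order; the
	 * NVREG_TRANSMITPOLL_MAC_ADDR_REV bit records whether it has already
	 * been corrected.  The three cases below cover new parts, parts
	 * already fixed up, and parts still holding the reversed address.
	 */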
5398 	/* read the mac address */
5399 	base = get_hwbase(dev);
5400 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
5401 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
5402 
5403 	/* check the workaround bit for correct mac address order */
5404 	txreg = readl(base + NvRegTransmitPoll);
5405 	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5406 		/* mac address is already in correct order */
5407 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5408 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5409 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5410 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5411 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5412 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5413 	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5414 		/* mac address is already in correct order */
5415 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5416 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5417 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5418 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5419 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5420 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5421 		/*
5422 		 * Set orig mac address back to the reversed version.
5423 		 * This flag will be cleared during low power transition.
5424 		 * Therefore, we should always put back the reversed address.
5425 		 */
5426 		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5427 			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5428 		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5429 	} else {
5430 		/* need to reverse mac address to correct order */
5431 		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5432 		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5433 		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5434 		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5435 		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5436 		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5437 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5438 		dev_dbg(&pci_dev->dev,
5439 			"%s: set workaround bit for reversed mac addr\n",
5440 			__func__);
5441 	}
5442 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5443 
5444 	if (!is_valid_ether_addr(dev->perm_addr)) {
5445 		/*
5446 		 * Bad mac address. At least one bios sets the mac address
5447 		 * to 01:23:45:67:89:ab
5448 		 */
5449 		dev_err(&pci_dev->dev,
5450 			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5451 			dev->dev_addr);
5452 		random_ether_addr(dev->dev_addr);
5453 		dev_err(&pci_dev->dev,
5454 			"Using random MAC address: %pM\n", dev->dev_addr);
5455 	}
5456 
5457 	/* set mac address */
5458 	nv_copy_mac_to_hw(dev);
5459 
5460 	/* disable WOL */
5461 	writel(0, base + NvRegWakeUpFlags);
5462 	np->wolenabled = 0;
5463 	device_set_wakeup_enable(&pci_dev->dev, false);
5464 
5465 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5466 
5467 		/* take phy and nic out of low power mode */
5468 		powerstate = readl(base + NvRegPowerState2);
5469 		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5470 		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5471 		    pci_dev->revision >= 0xA3)
5472 			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5473 		writel(powerstate, base + NvRegPowerState2);
5474 	}
5475 
5476 	if (np->desc_ver == DESC_VER_1)
5477 		np->tx_flags = NV_TX_VALID;
5478 	else
5479 		np->tx_flags = NV_TX2_VALID;
5480 
5481 	np->msi_flags = 0;
5482 	if ((id->driver_data & DEV_HAS_MSI) && msi)
5483 		np->msi_flags |= NV_MSI_CAPABLE;
5484 
5485 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5486 		/* MSI-X has had reported issues when modifying the irqmask,
5487 		 * as in the case of NAPI; therefore, disable it for now.
5488 		 */
5489 #if 0
5490 		np->msi_flags |= NV_MSI_X_CAPABLE;
5491 #endif
5492 	}
5493 
5494 	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5495 		np->irqmask = NVREG_IRQMASK_CPU;
5496 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5497 			np->msi_flags |= 0x0001;
5498 	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5499 		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5500 		/* start off in throughput mode */
5501 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5502 		/* remove support for msix mode */
5503 		np->msi_flags &= ~NV_MSI_X_CAPABLE;
5504 	} else {
5505 		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5506 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5507 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5508 			np->msi_flags |= 0x0003;
5509 	}
5510 
5511 	if (id->driver_data & DEV_NEED_TIMERIRQ)
5512 		np->irqmask |= NVREG_IRQ_TIMER;
5513 	if (id->driver_data & DEV_NEED_LINKTIMER) {
5514 		np->need_linktimer = 1;
5515 		np->link_timeout = jiffies + LINK_TIMEOUT;
5516 	} else {
5517 		np->need_linktimer = 0;
5518 	}
5519 
5520 	/* Limit the number of outstanding tx descriptors to work around a hw bug */
5521 	if (id->driver_data & DEV_NEED_TX_LIMIT) {
5522 		np->tx_limit = 1;
5523 		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5524 		    pci_dev->revision >= 0xA2)
5525 			np->tx_limit = 0;
5526 	}
5527 
5528 	/* clear phy state and temporarily halt phy interrupts */
5529 	writel(0, base + NvRegMIIMask);
5530 	phystate = readl(base + NvRegAdapterControl);
5531 	if (phystate & NVREG_ADAPTCTL_RUNNING) {
5532 		phystate_orig = 1;
5533 		phystate &= ~NVREG_ADAPTCTL_RUNNING;
5534 		writel(phystate, base + NvRegAdapterControl);
5535 	}
5536 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5537 
5538 	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5539 		/* management unit running on the mac? */
5540 		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5541 		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5542 		    nv_mgmt_acquire_sema(dev) &&
5543 		    nv_mgmt_get_version(dev)) {
5544 			np->mac_in_use = 1;
5545 			if (np->mgmt_version > 0)
5546 				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5547 			/* did the management unit already set up the phy? */
5548 			if (np->mac_in_use &&
5549 			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5550 			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
5551 				/* phy is inited by mgmt unit */
5552 				phyinitialized = 1;
5553 			} else {
5554 				/* we need to init the phy */
5555 			}
5556 		}
5557 	}
5558 
5559 	/* find a suitable phy */
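	/* phy addresses 1..31 are probed first; i == 32 wraps (i & 0x1F) to
	 * address 0, which is probed last.
	 */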
5560 	for (i = 1; i <= 32; i++) {
5561 		int id1, id2;
5562 		int phyaddr = i & 0x1F;
5563 
5564 		spin_lock_irq(&np->lock);
5565 		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5566 		spin_unlock_irq(&np->lock);
5567 		if (id1 < 0 || id1 == 0xffff)
5568 			continue;
5569 		spin_lock_irq(&np->lock);
5570 		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5571 		spin_unlock_irq(&np->lock);
5572 		if (id2 < 0 || id2 == 0xffff)
5573 			continue;
5574 
5575 		np->phy_model = id2 & PHYID2_MODEL_MASK;
5576 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5577 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5578 		np->phyaddr = phyaddr;
5579 		np->phy_oui = id1 | id2;
5580 
5581 		/* Realtek hardcoded phy id1 to all zeros on certain phys */
5582 		if (np->phy_oui == PHY_OUI_REALTEK2)
5583 			np->phy_oui = PHY_OUI_REALTEK;
5584 		/* Setup phy revision for Realtek */
5585 		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5586 			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5587 
5588 		break;
5589 	}
5590 	if (i == 33) {
5591 		dev_info(&pci_dev->dev, "probe: Could not find a valid PHY\n");
5592 		goto out_error;
5593 	}
5594 
5595 	if (!phyinitialized) {
5596 		/* reset it */
5597 		phy_init(dev);
5598 	} else {
5599 		/* see if it is a gigabit phy */
5600 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5601 		if (mii_status & PHY_GIGABIT)
5602 			np->gigabit = PHY_GIGABIT;
5603 	}
5604 
5605 	/* set default link speed settings */
5606 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5607 	np->duplex = 0;
5608 	np->autoneg = 1;
5609 
5610 	err = register_netdev(dev);
5611 	if (err) {
5612 		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5613 		goto out_error;
5614 	}
5615 
5616 	if (id->driver_data & DEV_HAS_VLAN)
5617 		nv_vlan_mode(dev, dev->features);
5618 
5619 	netif_carrier_off(dev);
5620 
5621 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5622 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5623 
5624 	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5625 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5626 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5627 			"csum " : "",
5628 		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5629 			"vlan " : "",
5630 		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5631 		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5632 		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5633 		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5634 		 np->need_linktimer ? "lnktim " : "",
5635 		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5636 		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5637 		 np->desc_ver);
5638 
5639 	return 0;
5640 
5641 out_error:
5642 	if (phystate_orig)
5643 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5644 	pci_set_drvdata(pci_dev, NULL);
5645 out_freering:
5646 	free_rings(dev);
5647 out_unmap:
5648 	iounmap(get_hwbase(dev));
5649 out_relreg:
5650 	pci_release_regions(pci_dev);
5651 out_disable:
5652 	pci_disable_device(pci_dev);
5653 out_free:
5654 	free_netdev(dev);
5655 out:
5656 	return err;
5657 }
5658 
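/*
 * Undo the Realtek 8201 crossover-detection tweak (presumably applied in
 * phy_init() when phy_cross is disabled) and restart autonegotiation so
 * the phy comes back up with its default behavior.
 */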
5659 static void nv_restore_phy(struct net_device *dev)
5660 {
5661 	struct fe_priv *np = netdev_priv(dev);
5662 	u16 phy_reserved, mii_control;
5663 
5664 	if (np->phy_oui == PHY_OUI_REALTEK &&
5665 	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
5666 	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5667 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5668 		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5669 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5670 		phy_reserved |= PHY_REALTEK_INIT8;
5671 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5672 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5673 
5674 		/* restart auto negotiation */
5675 		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5676 		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5677 		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5678 	}
5679 }
5680 
5681 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5682 {
5683 	struct net_device *dev = pci_get_drvdata(pci_dev);
5684 	struct fe_priv *np = netdev_priv(dev);
5685 	u8 __iomem *base = get_hwbase(dev);
5686 
5687 	/* special op: write back the misordered MAC address - otherwise
5688 	 * the next nv_probe would see a wrong address.
5689 	 */
5690 	writel(np->orig_mac[0], base + NvRegMacAddrA);
5691 	writel(np->orig_mac[1], base + NvRegMacAddrB);
5692 	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5693 	       base + NvRegTransmitPoll);
5694 }
5695 
5696 static void __devexit nv_remove(struct pci_dev *pci_dev)
5697 {
5698 	struct net_device *dev = pci_get_drvdata(pci_dev);
5699 
5700 	unregister_netdev(dev);
5701 
5702 	nv_restore_mac_addr(pci_dev);
5703 
5704 	/* restore any phy related changes */
5705 	nv_restore_phy(dev);
5706 
5707 	nv_mgmt_release_sema(dev);
5708 
5709 	/* free all structures */
5710 	free_rings(dev);
5711 	iounmap(get_hwbase(dev));
5712 	pci_release_regions(pci_dev);
5713 	pci_disable_device(pci_dev);
5714 	free_netdev(dev);
5715 	pci_set_drvdata(pci_dev, NULL);
5716 }
5717 
5718 #ifdef CONFIG_PM_SLEEP
5719 static int nv_suspend(struct device *device)
5720 {
5721 	struct pci_dev *pdev = to_pci_dev(device);
5722 	struct net_device *dev = pci_get_drvdata(pdev);
5723 	struct fe_priv *np = netdev_priv(dev);
5724 	u8 __iomem *base = get_hwbase(dev);
5725 	int i;
5726 
5727 	if (netif_running(dev)) {
5728 		/* Gross. */
5729 		nv_close(dev);
5730 	}
5731 	netif_device_detach(dev);
5732 
5733 	/* save non-pci configuration space */
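	/* the register window spans register_size bytes, i.e. register_size/4 u32 words */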
5734 	for (i = 0; i < np->register_size/sizeof(u32); i++)
5735 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
5736 
5737 	return 0;
5738 }
5739 
5740 static int nv_resume(struct device *device)
5741 {
5742 	struct pci_dev *pdev = to_pci_dev(device);
5743 	struct net_device *dev = pci_get_drvdata(pdev);
5744 	struct fe_priv *np = netdev_priv(dev);
5745 	u8 __iomem *base = get_hwbase(dev);
5746 	int i, rc = 0;
5747 
5748 	/* restore non-pci configuration space */
5749 	for (i = 0; i < np->register_size/sizeof(u32); i++)
5750 		writel(np->saved_config_space[i], base+i*sizeof(u32));
5751 
5752 	if (np->driver_data & DEV_NEED_MSI_FIX)
5753 		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
5754 
5755 	/* restore phy state, including autoneg */
5756 	phy_init(dev);
5757 
5758 	netif_device_attach(dev);
5759 	if (netif_running(dev)) {
5760 		rc = nv_open(dev);
5761 		nv_set_multicast(dev);
5762 	}
5763 	return rc;
5764 }
5765 
5766 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5767 #define NV_PM_OPS (&nv_pm_ops)
5768 
5769 #else
5770 #define NV_PM_OPS NULL
5771 #endif /* CONFIG_PM_SLEEP */
5772 
5773 #ifdef CONFIG_PM
5774 static void nv_shutdown(struct pci_dev *pdev)
5775 {
5776 	struct net_device *dev = pci_get_drvdata(pdev);
5777 	struct fe_priv *np = netdev_priv(dev);
5778 
5779 	if (netif_running(dev))
5780 		nv_close(dev);
5781 
5782 	/*
5783 	 * Restore the MAC so a kernel started by kexec won't get confused.
5784 	 * If we really go for poweroff, we must not restore the MAC,
5785 	 * otherwise the MAC for WOL will be reversed at least on some boards.
5786 	 */
5787 	if (system_state != SYSTEM_POWER_OFF)
5788 		nv_restore_mac_addr(pdev);
5789 
5790 	pci_disable_device(pdev);
5791 	/*
5792 	 * Apparently it is not possible to reinitialise from D3 hot,
5793 	 * so only put the device into D3 if we really go for poweroff.
5794 	 */
5795 	if (system_state == SYSTEM_POWER_OFF) {
5796 		pci_wake_from_d3(pdev, np->wolenabled);
5797 		pci_set_power_state(pdev, PCI_D3hot);
5798 	}
5799 }
5800 #else
5801 #define nv_shutdown NULL
5802 #endif /* CONFIG_PM */
5803 
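/* Each entry's driver_data is a bitmask of DEV_* feature flags that
 * nv_probe() uses to select descriptor format, offloads, and workarounds.
 */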
5804 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
5805 	{	/* nForce Ethernet Controller */
5806 		PCI_DEVICE(0x10DE, 0x01C3),
5807 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5808 	},
5809 	{	/* nForce2 Ethernet Controller */
5810 		PCI_DEVICE(0x10DE, 0x0066),
5811 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5812 	},
5813 	{	/* nForce3 Ethernet Controller */
5814 		PCI_DEVICE(0x10DE, 0x00D6),
5815 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5816 	},
5817 	{	/* nForce3 Ethernet Controller */
5818 		PCI_DEVICE(0x10DE, 0x0086),
5819 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5820 	},
5821 	{	/* nForce3 Ethernet Controller */
5822 		PCI_DEVICE(0x10DE, 0x008C),
5823 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5824 	},
5825 	{	/* nForce3 Ethernet Controller */
5826 		PCI_DEVICE(0x10DE, 0x00E6),
5827 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5828 	},
5829 	{	/* nForce3 Ethernet Controller */
5830 		PCI_DEVICE(0x10DE, 0x00DF),
5831 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5832 	},
5833 	{	/* CK804 Ethernet Controller */
5834 		PCI_DEVICE(0x10DE, 0x0056),
5835 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5836 	},
5837 	{	/* CK804 Ethernet Controller */
5838 		PCI_DEVICE(0x10DE, 0x0057),
5839 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5840 	},
5841 	{	/* MCP04 Ethernet Controller */
5842 		PCI_DEVICE(0x10DE, 0x0037),
5843 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5844 	},
5845 	{	/* MCP04 Ethernet Controller */
5846 		PCI_DEVICE(0x10DE, 0x0038),
5847 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5848 	},
5849 	{	/* MCP51 Ethernet Controller */
5850 		PCI_DEVICE(0x10DE, 0x0268),
5851 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
5852 	},
5853 	{	/* MCP51 Ethernet Controller */
5854 		PCI_DEVICE(0x10DE, 0x0269),
5855 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
5856 	},
5857 	{	/* MCP55 Ethernet Controller */
5858 		PCI_DEVICE(0x10DE, 0x0372),
5859 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
5860 	},
5861 	{	/* MCP55 Ethernet Controller */
5862 		PCI_DEVICE(0x10DE, 0x0373),
5863 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
5864 	},
5865 	{	/* MCP61 Ethernet Controller */
5866 		PCI_DEVICE(0x10DE, 0x03E5),
5867 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5868 	},
5869 	{	/* MCP61 Ethernet Controller */
5870 		PCI_DEVICE(0x10DE, 0x03E6),
5871 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5872 	},
5873 	{	/* MCP61 Ethernet Controller */
5874 		PCI_DEVICE(0x10DE, 0x03EE),
5875 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5876 	},
5877 	{	/* MCP61 Ethernet Controller */
5878 		PCI_DEVICE(0x10DE, 0x03EF),
5879 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5880 	},
5881 	{	/* MCP65 Ethernet Controller */
5882 		PCI_DEVICE(0x10DE, 0x0450),
5883 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5884 	},
5885 	{	/* MCP65 Ethernet Controller */
5886 		PCI_DEVICE(0x10DE, 0x0451),
5887 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5888 	},
5889 	{	/* MCP65 Ethernet Controller */
5890 		PCI_DEVICE(0x10DE, 0x0452),
5891 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5892 	},
5893 	{	/* MCP65 Ethernet Controller */
5894 		PCI_DEVICE(0x10DE, 0x0453),
5895 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5896 	},
5897 	{	/* MCP67 Ethernet Controller */
5898 		PCI_DEVICE(0x10DE, 0x054C),
5899 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5900 	},
5901 	{	/* MCP67 Ethernet Controller */
5902 		PCI_DEVICE(0x10DE, 0x054D),
5903 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5904 	},
5905 	{	/* MCP67 Ethernet Controller */
5906 		PCI_DEVICE(0x10DE, 0x054E),
5907 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5908 	},
5909 	{	/* MCP67 Ethernet Controller */
5910 		PCI_DEVICE(0x10DE, 0x054F),
5911 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5912 	},
5913 	{	/* MCP73 Ethernet Controller */
5914 		PCI_DEVICE(0x10DE, 0x07DC),
5915 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5916 	},
5917 	{	/* MCP73 Ethernet Controller */
5918 		PCI_DEVICE(0x10DE, 0x07DD),
5919 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5920 	},
5921 	{	/* MCP73 Ethernet Controller */
5922 		PCI_DEVICE(0x10DE, 0x07DE),
5923 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5924 	},
5925 	{	/* MCP73 Ethernet Controller */
5926 		PCI_DEVICE(0x10DE, 0x07DF),
5927 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5928 	},
5929 	{	/* MCP77 Ethernet Controller */
5930 		PCI_DEVICE(0x10DE, 0x0760),
5931 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5932 	},
5933 	{	/* MCP77 Ethernet Controller */
5934 		PCI_DEVICE(0x10DE, 0x0761),
5935 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5936 	},
5937 	{	/* MCP77 Ethernet Controller */
5938 		PCI_DEVICE(0x10DE, 0x0762),
5939 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5940 	},
5941 	{	/* MCP77 Ethernet Controller */
5942 		PCI_DEVICE(0x10DE, 0x0763),
5943 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5944 	},
5945 	{	/* MCP79 Ethernet Controller */
5946 		PCI_DEVICE(0x10DE, 0x0AB0),
5947 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5948 	},
5949 	{	/* MCP79 Ethernet Controller */
5950 		PCI_DEVICE(0x10DE, 0x0AB1),
5951 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5952 	},
5953 	{	/* MCP79 Ethernet Controller */
5954 		PCI_DEVICE(0x10DE, 0x0AB2),
5955 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5956 	},
5957 	{	/* MCP79 Ethernet Controller */
5958 		PCI_DEVICE(0x10DE, 0x0AB3),
5959 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5960 	},
5961 	{	/* MCP89 Ethernet Controller */
5962 		PCI_DEVICE(0x10DE, 0x0D7D),
5963 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
5964 	},
5965 	{0,},
5966 };
5967 
5968 static struct pci_driver driver = {
5969 	.name		= DRV_NAME,
5970 	.id_table	= pci_tbl,
5971 	.probe		= nv_probe,
5972 	.remove		= __devexit_p(nv_remove),
5973 	.shutdown	= nv_shutdown,
5974 	.driver.pm	= NV_PM_OPS,
5975 };
5976 
5977 static int __init init_nic(void)
5978 {
5979 	return pci_register_driver(&driver);
5980 }
5981 
5982 static void __exit exit_nic(void)
5983 {
5984 	pci_unregister_driver(&driver);
5985 }
5986 
5987 module_param(max_interrupt_work, int, 0);
5988 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
5989 module_param(optimization_mode, int, 0);
5990 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
5991 module_param(poll_interval, int, 0);
5992 MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
5993 module_param(msi, int, 0);
5994 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
5995 module_param(msix, int, 0);
5996 MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
5997 module_param(dma_64bit, int, 0);
5998 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
5999 module_param(phy_cross, int, 0);
6000 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6001 module_param(phy_power_down, int, 0);
6002 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6003 
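/* Usage sketch (illustrative values only, not tuned recommendations):
 *   modprobe forcedeth optimization_mode=2 msi=1 phy_power_down=1
 */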
6004 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6005 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6006 MODULE_LICENSE("GPL");
6007 
6008 MODULE_DEVICE_TABLE(pci, pci_tbl);
6009 
6010 module_init(init_nic);
6011 module_exit(exit_nic);
6012