xref: /linux/drivers/net/ethernet/nvidia/forcedeth.c (revision d229807f669ba3dea9f64467ee965051c4366aed)
1 /*
2  * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
3  *
4  * Note: This driver is a cleanroom reimplementation based on reverse
5  *      engineered documentation written by Carl-Daniel Hailfinger
6  *      and Andrew de Quincey.
7  *
8  * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
9  * trademarks of NVIDIA Corporation in the United States and other
10  * countries.
11  *
12  * Copyright (C) 2003,4,5 Manfred Spraul
13  * Copyright (C) 2004 Andrew de Quincey (wol support)
14  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
15  *		IRQ rate fixes, bigendian fixes, cleanups, verification)
16  * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2 of the License, or
21  * (at your option) any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
31  *
32  * Known bugs:
33  * We suspect that on some hardware no TX done interrupts are generated.
34  * This means recovery from netif_stop_queue only happens if the hw timer
35  * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
36  * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
37  * If your hardware reliably generates tx done interrupts, then you can remove
38  * DEV_NEED_TIMERIRQ from the driver_data flags.
39  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40  * superfluous timer interrupts from the nic.
41  */
42 
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44 
45 #define FORCEDETH_VERSION		"0.64"
46 #define DRV_NAME			"forcedeth"
47 
48 #include <linux/module.h>
49 #include <linux/types.h>
50 #include <linux/pci.h>
51 #include <linux/interrupt.h>
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/delay.h>
55 #include <linux/sched.h>
56 #include <linux/spinlock.h>
57 #include <linux/ethtool.h>
58 #include <linux/timer.h>
59 #include <linux/skbuff.h>
60 #include <linux/mii.h>
61 #include <linux/random.h>
62 #include <linux/init.h>
63 #include <linux/if_vlan.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/slab.h>
66 #include <linux/uaccess.h>
67 #include <linux/prefetch.h>
68 #include <linux/io.h>
69 
70 #include <asm/irq.h>
71 #include <asm/system.h>
72 
73 #define TX_WORK_PER_LOOP  64
74 #define RX_WORK_PER_LOOP  64
75 
76 /*
77  * Hardware access:
78  */
79 
80 #define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
81 #define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
82 #define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
83 #define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
84 #define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
85 #define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and striping */
86 #define DEV_HAS_MSI                0x0000040  /* device supports MSI */
87 #define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
88 #define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
89 #define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
90 #define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
91 #define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
92 #define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
93 #define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
94 #define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
95 #define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
96 #define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
97 #define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
98 #define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
99 #define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
100 #define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
101 #define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
102 #define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
103 #define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
104 #define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
105 #define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
106 #define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
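/*
 * Illustrative sketch, not driver code: the DEV_* values above are bit
 * flags OR-ed into a device's driver_data and tested with a bitwise
 * AND; the combined masks (e.g. DEV_HAS_STATISTICS_V12 = V1 | V2)
 * cover several capabilities in one test:
 *
 *	if (np->driver_data & DEV_HAS_STATISTICS_V123)
 *		;	// some hw statistics version is supported
 */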
107 
108 enum {
109 	NvRegIrqStatus = 0x000,
110 #define NVREG_IRQSTAT_MIIEVENT	0x040
111 #define NVREG_IRQSTAT_MASK		0x83ff
112 	NvRegIrqMask = 0x004,
113 #define NVREG_IRQ_RX_ERROR		0x0001
114 #define NVREG_IRQ_RX			0x0002
115 #define NVREG_IRQ_RX_NOBUF		0x0004
116 #define NVREG_IRQ_TX_ERR		0x0008
117 #define NVREG_IRQ_TX_OK			0x0010
118 #define NVREG_IRQ_TIMER			0x0020
119 #define NVREG_IRQ_LINK			0x0040
120 #define NVREG_IRQ_RX_FORCED		0x0080
121 #define NVREG_IRQ_TX_FORCED		0x0100
122 #define NVREG_IRQ_RECOVER_ERROR		0x8200
123 #define NVREG_IRQMASK_THROUGHPUT	0x00df
124 #define NVREG_IRQMASK_CPU		0x0060
125 #define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
126 #define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
127 #define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
128 
129 	NvRegUnknownSetupReg6 = 0x008,
130 #define NVREG_UNKSETUP6_VAL		3
131 
132 /*
133  * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
134  * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
135  */
136 	NvRegPollingInterval = 0x00c,
137 #define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
138 #define NVREG_POLL_DEFAULT_CPU	13
139 	NvRegMSIMap0 = 0x020,
140 	NvRegMSIMap1 = 0x024,
141 	NvRegMSIIrqMask = 0x030,
142 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
143 	NvRegMisc1 = 0x080,
144 #define NVREG_MISC1_PAUSE_TX	0x01
145 #define NVREG_MISC1_HD		0x02
146 #define NVREG_MISC1_FORCE	0x3b0f3c
147 
148 	NvRegMacReset = 0x34,
149 #define NVREG_MAC_RESET_ASSERT	0x0F3
150 	NvRegTransmitterControl = 0x084,
151 #define NVREG_XMITCTL_START	0x01
152 #define NVREG_XMITCTL_MGMT_ST	0x40000000
153 #define NVREG_XMITCTL_SYNC_MASK		0x000f0000
154 #define NVREG_XMITCTL_SYNC_NOT_READY	0x0
155 #define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
156 #define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
157 #define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
158 #define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
159 #define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
160 #define NVREG_XMITCTL_HOST_LOADED	0x00004000
161 #define NVREG_XMITCTL_TX_PATH_EN	0x01000000
162 #define NVREG_XMITCTL_DATA_START	0x00100000
163 #define NVREG_XMITCTL_DATA_READY	0x00010000
164 #define NVREG_XMITCTL_DATA_ERROR	0x00020000
165 	NvRegTransmitterStatus = 0x088,
166 #define NVREG_XMITSTAT_BUSY	0x01
167 
168 	NvRegPacketFilterFlags = 0x8c,
169 #define NVREG_PFF_PAUSE_RX	0x08
170 #define NVREG_PFF_ALWAYS	0x7F0000
171 #define NVREG_PFF_PROMISC	0x80
172 #define NVREG_PFF_MYADDR	0x20
173 #define NVREG_PFF_LOOPBACK	0x10
174 
175 	NvRegOffloadConfig = 0x90,
176 #define NVREG_OFFLOAD_HOMEPHY	0x601
177 #define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
178 	NvRegReceiverControl = 0x094,
179 #define NVREG_RCVCTL_START	0x01
180 #define NVREG_RCVCTL_RX_PATH_EN	0x01000000
181 	NvRegReceiverStatus = 0x98,
182 #define NVREG_RCVSTAT_BUSY	0x01
183 
184 	NvRegSlotTime = 0x9c,
185 #define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
186 #define NVREG_SLOTTIME_10_100_FULL	0x00007f00
187 #define NVREG_SLOTTIME_1000_FULL	0x0003ff00
188 #define NVREG_SLOTTIME_HALF		0x0000ff00
189 #define NVREG_SLOTTIME_DEFAULT		0x00007f00
190 #define NVREG_SLOTTIME_MASK		0x000000ff
191 
192 	NvRegTxDeferral = 0xA0,
193 #define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
194 #define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
195 #define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
196 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
197 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
198 #define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
199 	NvRegRxDeferral = 0xA4,
200 #define NVREG_RX_DEFERRAL_DEFAULT	0x16
201 	NvRegMacAddrA = 0xA8,
202 	NvRegMacAddrB = 0xAC,
203 	NvRegMulticastAddrA = 0xB0,
204 #define NVREG_MCASTADDRA_FORCE	0x01
205 	NvRegMulticastAddrB = 0xB4,
206 	NvRegMulticastMaskA = 0xB8,
207 #define NVREG_MCASTMASKA_NONE		0xffffffff
208 	NvRegMulticastMaskB = 0xBC,
209 #define NVREG_MCASTMASKB_NONE		0xffff
210 
211 	NvRegPhyInterface = 0xC0,
212 #define PHY_RGMII		0x10000000
213 	NvRegBackOffControl = 0xC4,
214 #define NVREG_BKOFFCTRL_DEFAULT			0x70000000
215 #define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
216 #define NVREG_BKOFFCTRL_SELECT			24
217 #define NVREG_BKOFFCTRL_GEAR			12
218 
219 	NvRegTxRingPhysAddr = 0x100,
220 	NvRegRxRingPhysAddr = 0x104,
221 	NvRegRingSizes = 0x108,
222 #define NVREG_RINGSZ_TXSHIFT 0
223 #define NVREG_RINGSZ_RXSHIFT 16
224 	NvRegTransmitPoll = 0x10c,
225 #define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
226 	NvRegLinkSpeed = 0x110,
227 #define NVREG_LINKSPEED_FORCE 0x10000
228 #define NVREG_LINKSPEED_10	1000
229 #define NVREG_LINKSPEED_100	100
230 #define NVREG_LINKSPEED_1000	50
231 #define NVREG_LINKSPEED_MASK	(0xFFF)
232 	NvRegUnknownSetupReg5 = 0x130,
233 #define NVREG_UNKSETUP5_BIT31	(1<<31)
234 	NvRegTxWatermark = 0x13c,
235 #define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
236 #define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
237 #define NVREG_TX_WM_DESC2_3_1000	0xfe08000
238 	NvRegTxRxControl = 0x144,
239 #define NVREG_TXRXCTL_KICK	0x0001
240 #define NVREG_TXRXCTL_BIT1	0x0002
241 #define NVREG_TXRXCTL_BIT2	0x0004
242 #define NVREG_TXRXCTL_IDLE	0x0008
243 #define NVREG_TXRXCTL_RESET	0x0010
244 #define NVREG_TXRXCTL_RXCHECK	0x0400
245 #define NVREG_TXRXCTL_DESC_1	0
246 #define NVREG_TXRXCTL_DESC_2	0x002100
247 #define NVREG_TXRXCTL_DESC_3	0xc02200
248 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
249 #define NVREG_TXRXCTL_VLANINS	0x00080
250 	NvRegTxRingPhysAddrHigh = 0x148,
251 	NvRegRxRingPhysAddrHigh = 0x14C,
252 	NvRegTxPauseFrame = 0x170,
253 #define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
254 #define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
255 #define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
256 #define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
257 	NvRegTxPauseFrameLimit = 0x174,
258 #define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
259 	NvRegMIIStatus = 0x180,
260 #define NVREG_MIISTAT_ERROR		0x0001
261 #define NVREG_MIISTAT_LINKCHANGE	0x0008
262 #define NVREG_MIISTAT_MASK_RW		0x0007
263 #define NVREG_MIISTAT_MASK_ALL		0x000f
264 	NvRegMIIMask = 0x184,
265 #define NVREG_MII_LINKCHANGE		0x0008
266 
267 	NvRegAdapterControl = 0x188,
268 #define NVREG_ADAPTCTL_START	0x02
269 #define NVREG_ADAPTCTL_LINKUP	0x04
270 #define NVREG_ADAPTCTL_PHYVALID	0x40000
271 #define NVREG_ADAPTCTL_RUNNING	0x100000
272 #define NVREG_ADAPTCTL_PHYSHIFT	24
273 	NvRegMIISpeed = 0x18c,
274 #define NVREG_MIISPEED_BIT8	(1<<8)
275 #define NVREG_MIIDELAY	5
276 	NvRegMIIControl = 0x190,
277 #define NVREG_MIICTL_INUSE	0x08000
278 #define NVREG_MIICTL_WRITE	0x00400
279 #define NVREG_MIICTL_ADDRSHIFT	5
280 	NvRegMIIData = 0x194,
281 	NvRegTxUnicast = 0x1a0,
282 	NvRegTxMulticast = 0x1a4,
283 	NvRegTxBroadcast = 0x1a8,
284 	NvRegWakeUpFlags = 0x200,
285 #define NVREG_WAKEUPFLAGS_VAL		0x7770
286 #define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
287 #define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
288 #define NVREG_WAKEUPFLAGS_D3SHIFT	12
289 #define NVREG_WAKEUPFLAGS_D2SHIFT	8
290 #define NVREG_WAKEUPFLAGS_D1SHIFT	4
291 #define NVREG_WAKEUPFLAGS_D0SHIFT	0
292 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
293 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
294 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
295 #define NVREG_WAKEUPFLAGS_ENABLE	0x1111
296 
297 	NvRegMgmtUnitGetVersion = 0x204,
298 #define NVREG_MGMTUNITGETVERSION	0x01
299 	NvRegMgmtUnitVersion = 0x208,
300 #define NVREG_MGMTUNITVERSION		0x08
301 	NvRegPowerCap = 0x268,
302 #define NVREG_POWERCAP_D3SUPP	(1<<30)
303 #define NVREG_POWERCAP_D2SUPP	(1<<26)
304 #define NVREG_POWERCAP_D1SUPP	(1<<25)
305 	NvRegPowerState = 0x26c,
306 #define NVREG_POWERSTATE_POWEREDUP	0x8000
307 #define NVREG_POWERSTATE_VALID		0x0100
308 #define NVREG_POWERSTATE_MASK		0x0003
309 #define NVREG_POWERSTATE_D0		0x0000
310 #define NVREG_POWERSTATE_D1		0x0001
311 #define NVREG_POWERSTATE_D2		0x0002
312 #define NVREG_POWERSTATE_D3		0x0003
313 	NvRegMgmtUnitControl = 0x278,
314 #define NVREG_MGMTUNITCONTROL_INUSE	0x20000
315 	NvRegTxCnt = 0x280,
316 	NvRegTxZeroReXmt = 0x284,
317 	NvRegTxOneReXmt = 0x288,
318 	NvRegTxManyReXmt = 0x28c,
319 	NvRegTxLateCol = 0x290,
320 	NvRegTxUnderflow = 0x294,
321 	NvRegTxLossCarrier = 0x298,
322 	NvRegTxExcessDef = 0x29c,
323 	NvRegTxRetryErr = 0x2a0,
324 	NvRegRxFrameErr = 0x2a4,
325 	NvRegRxExtraByte = 0x2a8,
326 	NvRegRxLateCol = 0x2ac,
327 	NvRegRxRunt = 0x2b0,
328 	NvRegRxFrameTooLong = 0x2b4,
329 	NvRegRxOverflow = 0x2b8,
330 	NvRegRxFCSErr = 0x2bc,
331 	NvRegRxFrameAlignErr = 0x2c0,
332 	NvRegRxLenErr = 0x2c4,
333 	NvRegRxUnicast = 0x2c8,
334 	NvRegRxMulticast = 0x2cc,
335 	NvRegRxBroadcast = 0x2d0,
336 	NvRegTxDef = 0x2d4,
337 	NvRegTxFrame = 0x2d8,
338 	NvRegRxCnt = 0x2dc,
339 	NvRegTxPause = 0x2e0,
340 	NvRegRxPause = 0x2e4,
341 	NvRegRxDropFrame = 0x2e8,
342 	NvRegVlanControl = 0x300,
343 #define NVREG_VLANCONTROL_ENABLE	0x2000
344 	NvRegMSIXMap0 = 0x3e0,
345 	NvRegMSIXMap1 = 0x3e4,
346 	NvRegMSIXIrqStatus = 0x3f0,
347 
348 	NvRegPowerState2 = 0x600,
349 #define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
350 #define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
351 #define NVREG_POWERSTATE2_PHY_RESET		0x0004
352 #define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
353 };
354 
355 /* Big endian: should work, but is untested */
356 struct ring_desc {
357 	__le32 buf;
358 	__le32 flaglen;
359 };
360 
361 struct ring_desc_ex {
362 	__le32 bufhigh;
363 	__le32 buflow;
364 	__le32 txvlan;
365 	__le32 flaglen;
366 };
367 
368 union ring_type {
369 	struct ring_desc *orig;
370 	struct ring_desc_ex *ex;
371 };
372 
373 #define FLAG_MASK_V1 0xffff0000
374 #define FLAG_MASK_V2 0xffffc000
375 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
376 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
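/*
 * Illustrative sketch: a descriptor's flaglen word packs status flags
 * into the high bits and the buffer length into the low bits, so the
 * masks above separate the two:
 *
 *	u32 flaglen = le32_to_cpu(desc->flaglen);
 *	u32 len     = flaglen & LEN_MASK_V2;
 *	u32 flags   = flaglen & FLAG_MASK_V2;
 */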
377 
378 #define NV_TX_LASTPACKET	(1<<16)
379 #define NV_TX_RETRYERROR	(1<<19)
380 #define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
381 #define NV_TX_FORCED_INTERRUPT	(1<<24)
382 #define NV_TX_DEFERRED		(1<<26)
383 #define NV_TX_CARRIERLOST	(1<<27)
384 #define NV_TX_LATECOLLISION	(1<<28)
385 #define NV_TX_UNDERFLOW		(1<<29)
386 #define NV_TX_ERROR		(1<<30)
387 #define NV_TX_VALID		(1<<31)
388 
389 #define NV_TX2_LASTPACKET	(1<<29)
390 #define NV_TX2_RETRYERROR	(1<<18)
391 #define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
392 #define NV_TX2_FORCED_INTERRUPT	(1<<30)
393 #define NV_TX2_DEFERRED		(1<<25)
394 #define NV_TX2_CARRIERLOST	(1<<26)
395 #define NV_TX2_LATECOLLISION	(1<<27)
396 #define NV_TX2_UNDERFLOW	(1<<28)
397 /* error and valid are the same for both */
398 #define NV_TX2_ERROR		(1<<30)
399 #define NV_TX2_VALID		(1<<31)
400 #define NV_TX2_TSO		(1<<28)
401 #define NV_TX2_TSO_SHIFT	14
402 #define NV_TX2_TSO_MAX_SHIFT	14
403 #define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
404 #define NV_TX2_CHECKSUM_L3	(1<<27)
405 #define NV_TX2_CHECKSUM_L4	(1<<26)
406 
407 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
408 
409 #define NV_RX_DESCRIPTORVALID	(1<<16)
410 #define NV_RX_MISSEDFRAME	(1<<17)
411 #define NV_RX_SUBSTRACT1	(1<<18)
412 #define NV_RX_ERROR1		(1<<23)
413 #define NV_RX_ERROR2		(1<<24)
414 #define NV_RX_ERROR3		(1<<25)
415 #define NV_RX_ERROR4		(1<<26)
416 #define NV_RX_CRCERR		(1<<27)
417 #define NV_RX_OVERFLOW		(1<<28)
418 #define NV_RX_FRAMINGERR	(1<<29)
419 #define NV_RX_ERROR		(1<<30)
420 #define NV_RX_AVAIL		(1<<31)
421 #define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
422 
423 #define NV_RX2_CHECKSUMMASK	(0x1C000000)
424 #define NV_RX2_CHECKSUM_IP	(0x10000000)
425 #define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
426 #define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
427 #define NV_RX2_DESCRIPTORVALID	(1<<29)
428 #define NV_RX2_SUBSTRACT1	(1<<25)
429 #define NV_RX2_ERROR1		(1<<18)
430 #define NV_RX2_ERROR2		(1<<19)
431 #define NV_RX2_ERROR3		(1<<20)
432 #define NV_RX2_ERROR4		(1<<21)
433 #define NV_RX2_CRCERR		(1<<22)
434 #define NV_RX2_OVERFLOW		(1<<23)
435 #define NV_RX2_FRAMINGERR	(1<<24)
436 /* error and avail are the same for both */
437 #define NV_RX2_ERROR		(1<<30)
438 #define NV_RX2_AVAIL		(1<<31)
439 #define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
440 
441 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
442 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
443 
444 /* Miscellaneous hardware related defines: */
445 #define NV_PCI_REGSZ_VER1	0x270
446 #define NV_PCI_REGSZ_VER2	0x2d4
447 #define NV_PCI_REGSZ_VER3	0x604
448 #define NV_PCI_REGSZ_MAX	0x604
449 
450 /* various timeout delays: all in usec */
451 #define NV_TXRX_RESET_DELAY	4
452 #define NV_TXSTOP_DELAY1	10
453 #define NV_TXSTOP_DELAY1MAX	500000
454 #define NV_TXSTOP_DELAY2	100
455 #define NV_RXSTOP_DELAY1	10
456 #define NV_RXSTOP_DELAY1MAX	500000
457 #define NV_RXSTOP_DELAY2	100
458 #define NV_SETUP5_DELAY		5
459 #define NV_SETUP5_DELAYMAX	50000
460 #define NV_POWERUP_DELAY	5
461 #define NV_POWERUP_DELAYMAX	5000
462 #define NV_MIIBUSY_DELAY	50
463 #define NV_MIIPHY_DELAY	10
464 #define NV_MIIPHY_DELAYMAX	10000
465 #define NV_MAC_RESET_DELAY	64
466 
467 #define NV_WAKEUPPATTERNS	5
468 #define NV_WAKEUPMASKENTRIES	4
469 
470 /* General driver defaults */
471 #define NV_WATCHDOG_TIMEO	(5*HZ)
472 
473 #define RX_RING_DEFAULT		512
474 #define TX_RING_DEFAULT		256
475 #define RX_RING_MIN		128
476 #define TX_RING_MIN		64
477 #define RING_MAX_DESC_VER_1	1024
478 #define RING_MAX_DESC_VER_2_3	16384
479 
480 /* rx/tx mac addr + type + vlan + align + slack */
481 #define NV_RX_HEADERS		(64)
482 /* even more slack. */
483 #define NV_RX_ALLOC_PAD		(64)
484 
485 /* maximum mtu size */
486 #define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
487 #define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
488 
489 #define OOM_REFILL	(1+HZ/20)
490 #define POLL_WAIT	(1+HZ/100)
491 #define LINK_TIMEOUT	(3*HZ)
492 #define STATS_INTERVAL	(10*HZ)
493 
494 /*
495  * desc_ver values:
496  * The nic supports three different descriptor types:
497  * - DESC_VER_1: Original
498  * - DESC_VER_2: support for jumbo frames.
499  * - DESC_VER_3: 64-bit format.
500  */
501 #define DESC_VER_1	1
502 #define DESC_VER_2	2
503 #define DESC_VER_3	3
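/*
 * DESC_VER_1 and DESC_VER_2 use struct ring_desc; DESC_VER_3 uses the
 * larger struct ring_desc_ex (see union ring_type and nv_optimized()).
 */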
504 
505 /* PHY defines */
506 #define PHY_OUI_MARVELL		0x5043
507 #define PHY_OUI_CICADA		0x03f1
508 #define PHY_OUI_VITESSE		0x01c1
509 #define PHY_OUI_REALTEK		0x0732
510 #define PHY_OUI_REALTEK2	0x0020
511 #define PHYID1_OUI_MASK	0x03ff
512 #define PHYID1_OUI_SHFT	6
513 #define PHYID2_OUI_MASK	0xfc00
514 #define PHYID2_OUI_SHFT	10
515 #define PHYID2_MODEL_MASK		0x03f0
516 #define PHY_MODEL_REALTEK_8211		0x0110
517 #define PHY_REV_MASK			0x0001
518 #define PHY_REV_REALTEK_8211B		0x0000
519 #define PHY_REV_REALTEK_8211C		0x0001
520 #define PHY_MODEL_REALTEK_8201		0x0200
521 #define PHY_MODEL_MARVELL_E3016		0x0220
522 #define PHY_MARVELL_E3016_INITMASK	0x0300
523 #define PHY_CICADA_INIT1	0x0f000
524 #define PHY_CICADA_INIT2	0x0e00
525 #define PHY_CICADA_INIT3	0x01000
526 #define PHY_CICADA_INIT4	0x0200
527 #define PHY_CICADA_INIT5	0x0004
528 #define PHY_CICADA_INIT6	0x02000
529 #define PHY_VITESSE_INIT_REG1	0x1f
530 #define PHY_VITESSE_INIT_REG2	0x10
531 #define PHY_VITESSE_INIT_REG3	0x11
532 #define PHY_VITESSE_INIT_REG4	0x12
533 #define PHY_VITESSE_INIT_MSK1	0xc
534 #define PHY_VITESSE_INIT_MSK2	0x0180
535 #define PHY_VITESSE_INIT1	0x52b5
536 #define PHY_VITESSE_INIT2	0xaf8a
537 #define PHY_VITESSE_INIT3	0x8
538 #define PHY_VITESSE_INIT4	0x8f8a
539 #define PHY_VITESSE_INIT5	0xaf86
540 #define PHY_VITESSE_INIT6	0x8f86
541 #define PHY_VITESSE_INIT7	0xaf82
542 #define PHY_VITESSE_INIT8	0x0100
543 #define PHY_VITESSE_INIT9	0x8f82
544 #define PHY_VITESSE_INIT10	0x0
545 #define PHY_REALTEK_INIT_REG1	0x1f
546 #define PHY_REALTEK_INIT_REG2	0x19
547 #define PHY_REALTEK_INIT_REG3	0x13
548 #define PHY_REALTEK_INIT_REG4	0x14
549 #define PHY_REALTEK_INIT_REG5	0x18
550 #define PHY_REALTEK_INIT_REG6	0x11
551 #define PHY_REALTEK_INIT_REG7	0x01
552 #define PHY_REALTEK_INIT1	0x0000
553 #define PHY_REALTEK_INIT2	0x8e00
554 #define PHY_REALTEK_INIT3	0x0001
555 #define PHY_REALTEK_INIT4	0xad17
556 #define PHY_REALTEK_INIT5	0xfb54
557 #define PHY_REALTEK_INIT6	0xf5c7
558 #define PHY_REALTEK_INIT7	0x1000
559 #define PHY_REALTEK_INIT8	0x0003
560 #define PHY_REALTEK_INIT9	0x0008
561 #define PHY_REALTEK_INIT10	0x0005
562 #define PHY_REALTEK_INIT11	0x0200
563 #define PHY_REALTEK_INIT_MSK1	0x0003
564 
565 #define PHY_GIGABIT	0x0100
566 
567 #define PHY_TIMEOUT	0x1
568 #define PHY_ERROR	0x2
569 
570 #define PHY_100	0x1
571 #define PHY_1000	0x2
572 #define PHY_HALF	0x100
573 
574 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
575 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
576 #define NV_PAUSEFRAME_RX_ENABLE  0x0004
577 #define NV_PAUSEFRAME_TX_ENABLE  0x0008
578 #define NV_PAUSEFRAME_RX_REQ     0x0010
579 #define NV_PAUSEFRAME_TX_REQ     0x0020
580 #define NV_PAUSEFRAME_AUTONEG    0x0040
581 
582 /* MSI/MSI-X defines */
583 #define NV_MSI_X_MAX_VECTORS  8
584 #define NV_MSI_X_VECTORS_MASK 0x000f
585 #define NV_MSI_CAPABLE        0x0010
586 #define NV_MSI_X_CAPABLE      0x0020
587 #define NV_MSI_ENABLED        0x0040
588 #define NV_MSI_X_ENABLED      0x0080
589 
590 #define NV_MSI_X_VECTOR_ALL   0x0
591 #define NV_MSI_X_VECTOR_RX    0x0
592 #define NV_MSI_X_VECTOR_TX    0x1
593 #define NV_MSI_X_VECTOR_OTHER 0x2
594 
595 #define NV_MSI_PRIV_OFFSET 0x68
596 #define NV_MSI_PRIV_VALUE  0xffffffff
597 
598 #define NV_RESTART_TX         0x1
599 #define NV_RESTART_RX         0x2
600 
601 #define NV_TX_LIMIT_COUNT     16
602 
603 #define NV_DYNAMIC_THRESHOLD        4
604 #define NV_DYNAMIC_MAX_QUIET_COUNT  2048
605 
606 /* statistics */
607 struct nv_ethtool_str {
608 	char name[ETH_GSTRING_LEN];
609 };
610 
611 static const struct nv_ethtool_str nv_estats_str[] = {
612 	{ "tx_bytes" },
613 	{ "tx_zero_rexmt" },
614 	{ "tx_one_rexmt" },
615 	{ "tx_many_rexmt" },
616 	{ "tx_late_collision" },
617 	{ "tx_fifo_errors" },
618 	{ "tx_carrier_errors" },
619 	{ "tx_excess_deferral" },
620 	{ "tx_retry_error" },
621 	{ "rx_frame_error" },
622 	{ "rx_extra_byte" },
623 	{ "rx_late_collision" },
624 	{ "rx_runt" },
625 	{ "rx_frame_too_long" },
626 	{ "rx_over_errors" },
627 	{ "rx_crc_errors" },
628 	{ "rx_frame_align_error" },
629 	{ "rx_length_error" },
630 	{ "rx_unicast" },
631 	{ "rx_multicast" },
632 	{ "rx_broadcast" },
633 	{ "rx_packets" },
634 	{ "rx_errors_total" },
635 	{ "tx_errors_total" },
636 
637 	/* version 2 stats */
638 	{ "tx_deferral" },
639 	{ "tx_packets" },
640 	{ "rx_bytes" },
641 	{ "tx_pause" },
642 	{ "rx_pause" },
643 	{ "rx_drop_frame" },
644 
645 	/* version 3 stats */
646 	{ "tx_unicast" },
647 	{ "tx_multicast" },
648 	{ "tx_broadcast" }
649 };
650 
651 struct nv_ethtool_stats {
652 	u64 tx_bytes;
653 	u64 tx_zero_rexmt;
654 	u64 tx_one_rexmt;
655 	u64 tx_many_rexmt;
656 	u64 tx_late_collision;
657 	u64 tx_fifo_errors;
658 	u64 tx_carrier_errors;
659 	u64 tx_excess_deferral;
660 	u64 tx_retry_error;
661 	u64 rx_frame_error;
662 	u64 rx_extra_byte;
663 	u64 rx_late_collision;
664 	u64 rx_runt;
665 	u64 rx_frame_too_long;
666 	u64 rx_over_errors;
667 	u64 rx_crc_errors;
668 	u64 rx_frame_align_error;
669 	u64 rx_length_error;
670 	u64 rx_unicast;
671 	u64 rx_multicast;
672 	u64 rx_broadcast;
673 	u64 rx_packets;
674 	u64 rx_errors_total;
675 	u64 tx_errors_total;
676 
677 	/* version 2 stats */
678 	u64 tx_deferral;
679 	u64 tx_packets;
680 	u64 rx_bytes;
681 	u64 tx_pause;
682 	u64 rx_pause;
683 	u64 rx_drop_frame;
684 
685 	/* version 3 stats */
686 	u64 tx_unicast;
687 	u64 tx_multicast;
688 	u64 tx_broadcast;
689 };
690 
691 #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
692 #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
693 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
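/* With the struct above these work out to 33 (V3), 30 (V2) and
 * 24 (V1) counters. */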
694 
695 /* diagnostics */
696 #define NV_TEST_COUNT_BASE 3
697 #define NV_TEST_COUNT_EXTENDED 4
698 
699 static const struct nv_ethtool_str nv_etests_str[] = {
700 	{ "link      (online/offline)" },
701 	{ "register  (offline)       " },
702 	{ "interrupt (offline)       " },
703 	{ "loopback  (offline)       " }
704 };
705 
706 struct register_test {
707 	__u32 reg;
708 	__u32 mask;
709 };
710 
711 static const struct register_test nv_registers_test[] = {
712 	{ NvRegUnknownSetupReg6, 0x01 },
713 	{ NvRegMisc1, 0x03c },
714 	{ NvRegOffloadConfig, 0x03ff },
715 	{ NvRegMulticastAddrA, 0xffffffff },
716 	{ NvRegTxWatermark, 0x0ff },
717 	{ NvRegWakeUpFlags, 0x07777 },
718 	{ 0, 0 }
719 };
720 
721 struct nv_skb_map {
722 	struct sk_buff *skb;
723 	dma_addr_t dma;
724 	unsigned int dma_len:31;
725 	unsigned int dma_single:1;
726 	struct ring_desc_ex *first_tx_desc;
727 	struct nv_skb_map *next_tx_ctx;
728 };
729 
730 /*
731  * SMP locking:
732  * All hardware access under netdev_priv(dev)->lock, except the performance
733  * critical parts:
734  * - rx is (pseudo-) lockless: it relies on the single-threading provided
735  *	by the arch code for interrupts.
736  * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
737  *	needs netdev_priv(dev)->lock :-(
738  * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
739  */
740 
741 /* in dev: base, irq */
742 struct fe_priv {
743 	spinlock_t lock;
744 
745 	struct net_device *dev;
746 	struct napi_struct napi;
747 
748 	/* General data:
749 	 * Locking: spin_lock(&np->lock); */
750 	struct nv_ethtool_stats estats;
751 	int in_shutdown;
752 	u32 linkspeed;
753 	int duplex;
754 	int autoneg;
755 	int fixed_mode;
756 	int phyaddr;
757 	int wolenabled;
758 	unsigned int phy_oui;
759 	unsigned int phy_model;
760 	unsigned int phy_rev;
761 	u16 gigabit;
762 	int intr_test;
763 	int recover_error;
764 	int quiet_count;
765 
766 	/* General data: RO fields */
767 	dma_addr_t ring_addr;
768 	struct pci_dev *pci_dev;
769 	u32 orig_mac[2];
770 	u32 events;
771 	u32 irqmask;
772 	u32 desc_ver;
773 	u32 txrxctl_bits;
774 	u32 vlanctl_bits;
775 	u32 driver_data;
776 	u32 device_id;
777 	u32 register_size;
778 	u32 mac_in_use;
779 	int mgmt_version;
780 	int mgmt_sema;
781 
782 	void __iomem *base;
783 
784 	/* rx specific fields.
785 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
786 	 */
787 	union ring_type get_rx, put_rx, first_rx, last_rx;
788 	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
789 	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
790 	struct nv_skb_map *rx_skb;
791 
792 	union ring_type rx_ring;
793 	unsigned int rx_buf_sz;
794 	unsigned int pkt_limit;
795 	struct timer_list oom_kick;
796 	struct timer_list nic_poll;
797 	struct timer_list stats_poll;
798 	u32 nic_poll_irq;
799 	int rx_ring_size;
800 
801 	/* media detection workaround.
802 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
803 	 */
804 	int need_linktimer;
805 	unsigned long link_timeout;
806 	/*
807 	 * tx specific fields.
808 	 */
809 	union ring_type get_tx, put_tx, first_tx, last_tx;
810 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
811 	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
812 	struct nv_skb_map *tx_skb;
813 
814 	union ring_type tx_ring;
815 	u32 tx_flags;
816 	int tx_ring_size;
817 	int tx_limit;
818 	u32 tx_pkts_in_progress;
819 	struct nv_skb_map *tx_change_owner;
820 	struct nv_skb_map *tx_end_flip;
821 	int tx_stop;
822 
823 	/* msi/msi-x fields */
824 	u32 msi_flags;
825 	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
826 
827 	/* flow control */
828 	u32 pause_flags;
829 
830 	/* power saved state */
831 	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
832 
833 	/* for different msi-x irq type */
834 	char name_rx[IFNAMSIZ + 3];       /* -rx    */
835 	char name_tx[IFNAMSIZ + 3];       /* -tx    */
836 	char name_other[IFNAMSIZ + 6];    /* -other */
837 };
838 
839 /*
840  * Maximum number of loops until we assume that a bit in the irq mask
841  * is stuck. Overridable with module param.
842  */
843 static int max_interrupt_work = 4;
844 
845 /*
846  * Optimization can be either throughput mode or cpu mode
847  *
848  * Throughput Mode: Every tx and rx packet will generate an interrupt.
849  * CPU Mode: Interrupts are controlled by a timer.
850  */
851 enum {
852 	NV_OPTIMIZATION_MODE_THROUGHPUT,
853 	NV_OPTIMIZATION_MODE_CPU,
854 	NV_OPTIMIZATION_MODE_DYNAMIC
855 };
856 static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
857 
858 /*
859  * Poll interval for timer irq
860  *
861  * This interval determines how frequently an interrupt is generated.
862  * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
863  * Min = 0, and Max = 65535
864  */
865 static int poll_interval = -1;
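/*
 * Worked example (illustrative): for a 1 ms interval,
 * poll_interval = (1000 us * 100) / 2^10 = 97, matching the
 * NVREG_POLL_DEFAULT=97 note above.
 */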
866 
867 /*
868  * MSI interrupts
869  */
870 enum {
871 	NV_MSI_INT_DISABLED,
872 	NV_MSI_INT_ENABLED
873 };
874 static int msi = NV_MSI_INT_ENABLED;
875 
876 /*
877  * MSIX interrupts
878  */
879 enum {
880 	NV_MSIX_INT_DISABLED,
881 	NV_MSIX_INT_ENABLED
882 };
883 static int msix = NV_MSIX_INT_ENABLED;
884 
885 /*
886  * DMA 64bit
887  */
888 enum {
889 	NV_DMA_64BIT_DISABLED,
890 	NV_DMA_64BIT_ENABLED
891 };
892 static int dma_64bit = NV_DMA_64BIT_ENABLED;
893 
894 /*
895  * Crossover Detection
896  * Realtek 8201 phy + some OEM boards do not work properly.
897  */
898 enum {
899 	NV_CROSSOVER_DETECTION_DISABLED,
900 	NV_CROSSOVER_DETECTION_ENABLED
901 };
902 static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
903 
904 /*
905  * Power down phy when interface is down (persists through reboot;
906  * older Linux and other OSes may not power it up again)
907  */
908 static int phy_power_down;
909 
910 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
911 {
912 	return netdev_priv(dev);
913 }
914 
915 static inline u8 __iomem *get_hwbase(struct net_device *dev)
916 {
917 	return ((struct fe_priv *)netdev_priv(dev))->base;
918 }
919 
920 static inline void pci_push(u8 __iomem *base)
921 {
922 	/* force out pending posted writes */
923 	readl(base);
924 }
925 
926 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
927 {
928 	return le32_to_cpu(prd->flaglen)
929 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
930 }
931 
932 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
933 {
934 	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
935 }
936 
937 static bool nv_optimized(struct fe_priv *np)
938 {
939 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
940 		return false;
941 	return true;
942 }
943 
944 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
945 		     int delay, int delaymax)
946 {
947 	u8 __iomem *base = get_hwbase(dev);
948 
949 	pci_push(base);
950 	do {
951 		udelay(delay);
952 		delaymax -= delay;
953 		if (delaymax < 0)
954 			return 1;
955 	} while ((readl(base + offset) & mask) != target);
956 	return 0;
957 }
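/*
 * Usage sketch (illustrative): reg_delay() busy-waits in steps of
 * `delay' usec, up to `delaymax' usec in total, until the masked
 * register bits equal `target', and returns 1 on timeout.  E.g.
 * waiting for the MII unit to go idle:
 *
 *	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
 *		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX))
 *		;	// timed out, INUSE never cleared
 */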
958 
959 #define NV_SETUP_RX_RING 0x01
960 #define NV_SETUP_TX_RING 0x02
961 
962 static inline u32 dma_low(dma_addr_t addr)
963 {
964 	return addr;
965 }
966 
967 static inline u32 dma_high(dma_addr_t addr)
968 {
969 	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
970 }
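/*
 * The double shift avoids undefined behaviour: a single ">> 32" would
 * shift by the full type width when dma_addr_t is only 32 bits wide.
 */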
971 
972 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
973 {
974 	struct fe_priv *np = get_nvpriv(dev);
975 	u8 __iomem *base = get_hwbase(dev);
976 
977 	if (!nv_optimized(np)) {
978 		if (rxtx_flags & NV_SETUP_RX_RING)
979 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
980 		if (rxtx_flags & NV_SETUP_TX_RING)
981 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
982 	} else {
983 		if (rxtx_flags & NV_SETUP_RX_RING) {
984 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
985 			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
986 		}
987 		if (rxtx_flags & NV_SETUP_TX_RING) {
988 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
989 			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
990 		}
991 	}
992 }
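/*
 * Note: the rx and tx rings share one DMA-coherent allocation, rx
 * descriptors first and tx descriptors immediately after; that is why
 * the tx base address above is ring_addr plus rx_ring_size descriptors
 * and why free_rings() below frees both rings with a single call.
 */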
993 
994 static void free_rings(struct net_device *dev)
995 {
996 	struct fe_priv *np = get_nvpriv(dev);
997 
998 	if (!nv_optimized(np)) {
999 		if (np->rx_ring.orig)
1000 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
1001 					    np->rx_ring.orig, np->ring_addr);
1002 	} else {
1003 		if (np->rx_ring.ex)
1004 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1005 					    np->rx_ring.ex, np->ring_addr);
1006 	}
1007 	kfree(np->rx_skb);
1008 	kfree(np->tx_skb);
1009 }
1010 
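/*
 * Multiple interrupt handlers are in use only when MSI-X is enabled
 * with more than one vector; a single MSI-X vector, plain MSI and
 * legacy INTx all share one handler.
 */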
1011 static int using_multi_irqs(struct net_device *dev)
1012 {
1013 	struct fe_priv *np = get_nvpriv(dev);
1014 
1015 	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1016 	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
1017 	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
1018 		return 0;
1019 	else
1020 		return 1;
1021 }
1022 
1023 static void nv_txrx_gate(struct net_device *dev, bool gate)
1024 {
1025 	struct fe_priv *np = get_nvpriv(dev);
1026 	u8 __iomem *base = get_hwbase(dev);
1027 	u32 powerstate;
1028 
1029 	if (!np->mac_in_use &&
1030 	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
1031 		powerstate = readl(base + NvRegPowerState2);
1032 		if (gate)
1033 			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
1034 		else
1035 			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
1036 		writel(powerstate, base + NvRegPowerState2);
1037 	}
1038 }
1039 
1040 static void nv_enable_irq(struct net_device *dev)
1041 {
1042 	struct fe_priv *np = get_nvpriv(dev);
1043 
1044 	if (!using_multi_irqs(dev)) {
1045 		if (np->msi_flags & NV_MSI_X_ENABLED)
1046 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1047 		else
1048 			enable_irq(np->pci_dev->irq);
1049 	} else {
1050 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1051 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1052 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1053 	}
1054 }
1055 
1056 static void nv_disable_irq(struct net_device *dev)
1057 {
1058 	struct fe_priv *np = get_nvpriv(dev);
1059 
1060 	if (!using_multi_irqs(dev)) {
1061 		if (np->msi_flags & NV_MSI_X_ENABLED)
1062 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1063 		else
1064 			disable_irq(np->pci_dev->irq);
1065 	} else {
1066 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1067 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1068 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1069 	}
1070 }
1071 
1072 /* In MSIX mode, a write to irqmask behaves as XOR */
1073 static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1074 {
1075 	u8 __iomem *base = get_hwbase(dev);
1076 
1077 	writel(mask, base + NvRegIrqMask);
1078 }
1079 
1080 static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1081 {
1082 	struct fe_priv *np = get_nvpriv(dev);
1083 	u8 __iomem *base = get_hwbase(dev);
1084 
1085 	if (np->msi_flags & NV_MSI_X_ENABLED) {
1086 		writel(mask, base + NvRegIrqMask);
1087 	} else {
1088 		if (np->msi_flags & NV_MSI_ENABLED)
1089 			writel(0, base + NvRegMSIIrqMask);
1090 		writel(0, base + NvRegIrqMask);
1091 	}
1092 }
1093 
1094 static void nv_napi_enable(struct net_device *dev)
1095 {
1096 	struct fe_priv *np = get_nvpriv(dev);
1097 
1098 	napi_enable(&np->napi);
1099 }
1100 
1101 static void nv_napi_disable(struct net_device *dev)
1102 {
1103 	struct fe_priv *np = get_nvpriv(dev);
1104 
1105 	napi_disable(&np->napi);
1106 }
1107 
1108 #define MII_READ	(-1)
1109 /* mii_rw: read/write a register on the PHY.
1110  *
1111  * Caller must guarantee serialization
1112  */
1113 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1114 {
1115 	u8 __iomem *base = get_hwbase(dev);
1116 	u32 reg;
1117 	int retval;
1118 
1119 	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1120 
1121 	reg = readl(base + NvRegMIIControl);
1122 	if (reg & NVREG_MIICTL_INUSE) {
1123 		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1124 		udelay(NV_MIIBUSY_DELAY);
1125 	}
1126 
1127 	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1128 	if (value != MII_READ) {
1129 		writel(value, base + NvRegMIIData);
1130 		reg |= NVREG_MIICTL_WRITE;
1131 	}
1132 	writel(reg, base + NvRegMIIControl);
1133 
1134 	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1135 			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1136 		retval = -1;
1137 	} else if (value != MII_READ) {
1138 		/* it was a write operation - fewer failures are detectable */
1139 		retval = 0;
1140 	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1141 		retval = -1;
1142 	} else {
1143 		retval = readl(base + NvRegMIIData);
1144 	}
1145 
1146 	return retval;
1147 }
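/*
 * Usage sketch (illustrative): passing MII_READ as the value selects a
 * read cycle, anything else is written.  A read returns the register
 * contents or -1 on failure; a write returns 0 on success:
 *
 *	int bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	if (bmsr == -1)
 *		;	// PHY access failed
 *	mii_rw(dev, np->phyaddr, MII_BMCR, BMCR_ANENABLE);	// write
 */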
1148 
1149 static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1150 {
1151 	struct fe_priv *np = netdev_priv(dev);
1152 	u32 miicontrol;
1153 	unsigned int tries = 0;
1154 
1155 	miicontrol = BMCR_RESET | bmcr_setup;
1156 	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1157 		return -1;
1158 
1159 	/* wait for 500ms */
1160 	msleep(500);
1161 
1162 	/* must wait till reset is deasserted */
1163 	while (miicontrol & BMCR_RESET) {
1164 		usleep_range(10000, 20000);
1165 		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1166 		/* FIXME: 100 tries seem excessive */
1167 		if (tries++ > 100)
1168 			return -1;
1169 	}
1170 	return 0;
1171 }
1172 
1173 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1174 {
1175 	static const struct {
1176 		int reg;
1177 		int init;
1178 	} ri[] = {
1179 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1180 		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1181 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1182 		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1183 		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1184 		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1185 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1186 	};
1187 	int i;
1188 
1189 	for (i = 0; i < ARRAY_SIZE(ri); i++) {
1190 		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1191 			return PHY_ERROR;
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1198 {
1199 	u32 reg;
1200 	u8 __iomem *base = get_hwbase(dev);
1201 	u32 powerstate = readl(base + NvRegPowerState2);
1202 
1203 	/* need to perform hw phy reset */
1204 	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1205 	writel(powerstate, base + NvRegPowerState2);
1206 	msleep(25);
1207 
1208 	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1209 	writel(powerstate, base + NvRegPowerState2);
1210 	msleep(25);
1211 
1212 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1213 	reg |= PHY_REALTEK_INIT9;
1214 	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1215 		return PHY_ERROR;
1216 	if (mii_rw(dev, np->phyaddr,
1217 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1218 		return PHY_ERROR;
1219 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1220 	if (!(reg & PHY_REALTEK_INIT11)) {
1221 		reg |= PHY_REALTEK_INIT11;
1222 		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1223 			return PHY_ERROR;
1224 	}
1225 	if (mii_rw(dev, np->phyaddr,
1226 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1227 		return PHY_ERROR;
1228 
1229 	return 0;
1230 }
1231 
1232 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1233 {
1234 	u32 phy_reserved;
1235 
1236 	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1237 		phy_reserved = mii_rw(dev, np->phyaddr,
1238 				      PHY_REALTEK_INIT_REG6, MII_READ);
1239 		phy_reserved |= PHY_REALTEK_INIT7;
1240 		if (mii_rw(dev, np->phyaddr,
1241 			   PHY_REALTEK_INIT_REG6, phy_reserved))
1242 			return PHY_ERROR;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1249 {
1250 	u32 phy_reserved;
1251 
1252 	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1253 		if (mii_rw(dev, np->phyaddr,
1254 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1255 			return PHY_ERROR;
1256 		phy_reserved = mii_rw(dev, np->phyaddr,
1257 				      PHY_REALTEK_INIT_REG2, MII_READ);
1258 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1259 		phy_reserved |= PHY_REALTEK_INIT3;
1260 		if (mii_rw(dev, np->phyaddr,
1261 			   PHY_REALTEK_INIT_REG2, phy_reserved))
1262 			return PHY_ERROR;
1263 		if (mii_rw(dev, np->phyaddr,
1264 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1265 			return PHY_ERROR;
1266 	}
1267 
1268 	return 0;
1269 }
1270 
1271 static int init_cicada(struct net_device *dev, struct fe_priv *np,
1272 		       u32 phyinterface)
1273 {
1274 	u32 phy_reserved;
1275 
1276 	if (phyinterface & PHY_RGMII) {
1277 		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1278 		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1279 		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1280 		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1281 			return PHY_ERROR;
1282 		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1283 		phy_reserved |= PHY_CICADA_INIT5;
1284 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1285 			return PHY_ERROR;
1286 	}
1287 	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1288 	phy_reserved |= PHY_CICADA_INIT6;
1289 	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1290 		return PHY_ERROR;
1291 
1292 	return 0;
1293 }
1294 
1295 static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1296 {
1297 	u32 phy_reserved;
1298 
1299 	if (mii_rw(dev, np->phyaddr,
1300 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1301 		return PHY_ERROR;
1302 	if (mii_rw(dev, np->phyaddr,
1303 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1304 		return PHY_ERROR;
1305 	phy_reserved = mii_rw(dev, np->phyaddr,
1306 			      PHY_VITESSE_INIT_REG4, MII_READ);
1307 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1308 		return PHY_ERROR;
1309 	phy_reserved = mii_rw(dev, np->phyaddr,
1310 			      PHY_VITESSE_INIT_REG3, MII_READ);
1311 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1312 	phy_reserved |= PHY_VITESSE_INIT3;
1313 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1314 		return PHY_ERROR;
1315 	if (mii_rw(dev, np->phyaddr,
1316 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1317 		return PHY_ERROR;
1318 	if (mii_rw(dev, np->phyaddr,
1319 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1320 		return PHY_ERROR;
1321 	phy_reserved = mii_rw(dev, np->phyaddr,
1322 			      PHY_VITESSE_INIT_REG4, MII_READ);
1323 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1324 	phy_reserved |= PHY_VITESSE_INIT3;
1325 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1326 		return PHY_ERROR;
1327 	phy_reserved = mii_rw(dev, np->phyaddr,
1328 			      PHY_VITESSE_INIT_REG3, MII_READ);
1329 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1330 		return PHY_ERROR;
1331 	if (mii_rw(dev, np->phyaddr,
1332 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1333 		return PHY_ERROR;
1334 	if (mii_rw(dev, np->phyaddr,
1335 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1336 		return PHY_ERROR;
1337 	phy_reserved = mii_rw(dev, np->phyaddr,
1338 			      PHY_VITESSE_INIT_REG4, MII_READ);
1339 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1340 		return PHY_ERROR;
1341 	phy_reserved = mii_rw(dev, np->phyaddr,
1342 			      PHY_VITESSE_INIT_REG3, MII_READ);
1343 	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1344 	phy_reserved |= PHY_VITESSE_INIT8;
1345 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1346 		return PHY_ERROR;
1347 	if (mii_rw(dev, np->phyaddr,
1348 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1349 		return PHY_ERROR;
1350 	if (mii_rw(dev, np->phyaddr,
1351 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1352 		return PHY_ERROR;
1353 
1354 	return 0;
1355 }
1356 
1357 static int phy_init(struct net_device *dev)
1358 {
1359 	struct fe_priv *np = get_nvpriv(dev);
1360 	u8 __iomem *base = get_hwbase(dev);
1361 	u32 phyinterface;
1362 	u32 mii_status, mii_control, mii_control_1000, reg;
1363 
1364 	/* phy errata for E3016 phy */
1365 	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1366 		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1367 		reg &= ~PHY_MARVELL_E3016_INITMASK;
1368 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1369 			netdev_info(dev, "%s: phy write to errata reg failed\n",
1370 				    pci_name(np->pci_dev));
1371 			return PHY_ERROR;
1372 		}
1373 	}
1374 	if (np->phy_oui == PHY_OUI_REALTEK) {
1375 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1376 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1377 			if (init_realtek_8211b(dev, np)) {
1378 				netdev_info(dev, "%s: phy init failed\n",
1379 					    pci_name(np->pci_dev));
1380 				return PHY_ERROR;
1381 			}
1382 		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1383 			   np->phy_rev == PHY_REV_REALTEK_8211C) {
1384 			if (init_realtek_8211c(dev, np)) {
1385 				netdev_info(dev, "%s: phy init failed\n",
1386 					    pci_name(np->pci_dev));
1387 				return PHY_ERROR;
1388 			}
1389 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1390 			if (init_realtek_8201(dev, np)) {
1391 				netdev_info(dev, "%s: phy init failed\n",
1392 					    pci_name(np->pci_dev));
1393 				return PHY_ERROR;
1394 			}
1395 		}
1396 	}
1397 
1398 	/* set advertise register */
1399 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1400 	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1401 		ADVERTISE_100HALF | ADVERTISE_100FULL |
1402 		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1403 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1404 		netdev_info(dev, "%s: phy write to advertise failed\n",
1405 			    pci_name(np->pci_dev));
1406 		return PHY_ERROR;
1407 	}
1408 
1409 	/* get phy interface type */
1410 	phyinterface = readl(base + NvRegPhyInterface);
1411 
1412 	/* see if gigabit phy */
1413 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1414 	if (mii_status & PHY_GIGABIT) {
1415 		np->gigabit = PHY_GIGABIT;
1416 		mii_control_1000 = mii_rw(dev, np->phyaddr,
1417 					  MII_CTRL1000, MII_READ);
1418 		mii_control_1000 &= ~ADVERTISE_1000HALF;
1419 		if (phyinterface & PHY_RGMII)
1420 			mii_control_1000 |= ADVERTISE_1000FULL;
1421 		else
1422 			mii_control_1000 &= ~ADVERTISE_1000FULL;
1423 
1424 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1425 			netdev_info(dev, "%s: phy init failed\n",
1426 				    pci_name(np->pci_dev));
1427 			return PHY_ERROR;
1428 		}
1429 	} else
1430 		np->gigabit = 0;
1431 
1432 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1433 	mii_control |= BMCR_ANENABLE;
1434 
1435 	if (np->phy_oui == PHY_OUI_REALTEK &&
1436 	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
1437 	    np->phy_rev == PHY_REV_REALTEK_8211C) {
1438 		/* start autoneg since we already performed hw reset above */
1439 		mii_control |= BMCR_ANRESTART;
1440 		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1441 			netdev_info(dev, "%s: phy init failed\n",
1442 				    pci_name(np->pci_dev));
1443 			return PHY_ERROR;
1444 		}
1445 	} else {
1446 		/* reset the phy
1447 		 * (certain phys need bmcr to be setup with reset)
1448 		 */
1449 		if (phy_reset(dev, mii_control)) {
1450 			netdev_info(dev, "%s: phy reset failed\n",
1451 				    pci_name(np->pci_dev));
1452 			return PHY_ERROR;
1453 		}
1454 	}
1455 
1456 	/* phy vendor specific configuration */
1457 	if (np->phy_oui == PHY_OUI_CICADA) {
1458 		if (init_cicada(dev, np, phyinterface)) {
1459 			netdev_info(dev, "%s: phy init failed\n",
1460 				    pci_name(np->pci_dev));
1461 			return PHY_ERROR;
1462 		}
1463 	} else if (np->phy_oui == PHY_OUI_VITESSE) {
1464 		if (init_vitesse(dev, np)) {
1465 			netdev_info(dev, "%s: phy init failed\n",
1466 				    pci_name(np->pci_dev));
1467 			return PHY_ERROR;
1468 		}
1469 	} else if (np->phy_oui == PHY_OUI_REALTEK) {
1470 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1471 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1472 			/* reset could have cleared these out, set them back */
1473 			if (init_realtek_8211b(dev, np)) {
1474 				netdev_info(dev, "%s: phy init failed\n",
1475 					    pci_name(np->pci_dev));
1476 				return PHY_ERROR;
1477 			}
1478 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1479 			if (init_realtek_8201(dev, np) ||
1480 			    init_realtek_8201_cross(dev, np)) {
1481 				netdev_info(dev, "%s: phy init failed\n",
1482 					    pci_name(np->pci_dev));
1483 				return PHY_ERROR;
1484 			}
1485 		}
1486 	}
1487 
1488 	/* some phys clear out pause advertisement on reset, set it back */
1489 	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1490 
1491 	/* restart auto negotiation, power down phy */
1492 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1493 	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1494 	if (phy_power_down)
1495 		mii_control |= BMCR_PDOWN;
1496 	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1497 		return PHY_ERROR;
1498 
1499 	return 0;
1500 }
1501 
1502 static void nv_start_rx(struct net_device *dev)
1503 {
1504 	struct fe_priv *np = netdev_priv(dev);
1505 	u8 __iomem *base = get_hwbase(dev);
1506 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1507 
1508 	/* Already running? Stop it. */
1509 	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1510 		rx_ctrl &= ~NVREG_RCVCTL_START;
1511 		writel(rx_ctrl, base + NvRegReceiverControl);
1512 		pci_push(base);
1513 	}
1514 	writel(np->linkspeed, base + NvRegLinkSpeed);
1515 	pci_push(base);
1516 	rx_ctrl |= NVREG_RCVCTL_START;
1517 	if (np->mac_in_use)
1518 		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1519 	writel(rx_ctrl, base + NvRegReceiverControl);
1520 	pci_push(base);
1521 }
1522 
1523 static void nv_stop_rx(struct net_device *dev)
1524 {
1525 	struct fe_priv *np = netdev_priv(dev);
1526 	u8 __iomem *base = get_hwbase(dev);
1527 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1528 
1529 	if (!np->mac_in_use)
1530 		rx_ctrl &= ~NVREG_RCVCTL_START;
1531 	else
1532 		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1533 	writel(rx_ctrl, base + NvRegReceiverControl);
1534 	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1535 		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1536 		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1537 			    __func__);
1538 
1539 	udelay(NV_RXSTOP_DELAY2);
1540 	if (!np->mac_in_use)
1541 		writel(0, base + NvRegLinkSpeed);
1542 }
1543 
1544 static void nv_start_tx(struct net_device *dev)
1545 {
1546 	struct fe_priv *np = netdev_priv(dev);
1547 	u8 __iomem *base = get_hwbase(dev);
1548 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1549 
1550 	tx_ctrl |= NVREG_XMITCTL_START;
1551 	if (np->mac_in_use)
1552 		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1553 	writel(tx_ctrl, base + NvRegTransmitterControl);
1554 	pci_push(base);
1555 }
1556 
1557 static void nv_stop_tx(struct net_device *dev)
1558 {
1559 	struct fe_priv *np = netdev_priv(dev);
1560 	u8 __iomem *base = get_hwbase(dev);
1561 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1562 
1563 	if (!np->mac_in_use)
1564 		tx_ctrl &= ~NVREG_XMITCTL_START;
1565 	else
1566 		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1567 	writel(tx_ctrl, base + NvRegTransmitterControl);
1568 	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1569 		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1570 		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1571 			    __func__);
1572 
1573 	udelay(NV_TXSTOP_DELAY2);
1574 	if (!np->mac_in_use)
1575 		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1576 		       base + NvRegTransmitPoll);
1577 }
1578 
1579 static void nv_start_rxtx(struct net_device *dev)
1580 {
1581 	nv_start_rx(dev);
1582 	nv_start_tx(dev);
1583 }
1584 
1585 static void nv_stop_rxtx(struct net_device *dev)
1586 {
1587 	nv_stop_rx(dev);
1588 	nv_stop_tx(dev);
1589 }
1590 
1591 static void nv_txrx_reset(struct net_device *dev)
1592 {
1593 	struct fe_priv *np = netdev_priv(dev);
1594 	u8 __iomem *base = get_hwbase(dev);
1595 
1596 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1597 	pci_push(base);
1598 	udelay(NV_TXRX_RESET_DELAY);
1599 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1600 	pci_push(base);
1601 }
1602 
1603 static void nv_mac_reset(struct net_device *dev)
1604 {
1605 	struct fe_priv *np = netdev_priv(dev);
1606 	u8 __iomem *base = get_hwbase(dev);
1607 	u32 temp1, temp2, temp3;
1608 
1609 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1610 	pci_push(base);
1611 
1612 	/* save registers since they will be cleared on reset */
1613 	temp1 = readl(base + NvRegMacAddrA);
1614 	temp2 = readl(base + NvRegMacAddrB);
1615 	temp3 = readl(base + NvRegTransmitPoll);
1616 
1617 	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1618 	pci_push(base);
1619 	udelay(NV_MAC_RESET_DELAY);
1620 	writel(0, base + NvRegMacReset);
1621 	pci_push(base);
1622 	udelay(NV_MAC_RESET_DELAY);
1623 
1624 	/* restore saved registers */
1625 	writel(temp1, base + NvRegMacAddrA);
1626 	writel(temp2, base + NvRegMacAddrB);
1627 	writel(temp3, base + NvRegTransmitPoll);
1628 
1629 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1630 	pci_push(base);
1631 }
1632 
1633 static void nv_get_hw_stats(struct net_device *dev)
1634 {
1635 	struct fe_priv *np = netdev_priv(dev);
1636 	u8 __iomem *base = get_hwbase(dev);
1637 
1638 	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1639 	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1640 	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1641 	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1642 	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1643 	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1644 	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1645 	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1646 	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1647 	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1648 	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1649 	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1650 	np->estats.rx_runt += readl(base + NvRegRxRunt);
1651 	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1652 	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1653 	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1654 	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1655 	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1656 	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1657 	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1658 	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1659 	np->estats.rx_packets =
1660 		np->estats.rx_unicast +
1661 		np->estats.rx_multicast +
1662 		np->estats.rx_broadcast;
1663 	np->estats.rx_errors_total =
1664 		np->estats.rx_crc_errors +
1665 		np->estats.rx_over_errors +
1666 		np->estats.rx_frame_error +
1667 		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1668 		np->estats.rx_late_collision +
1669 		np->estats.rx_runt +
1670 		np->estats.rx_frame_too_long;
1671 	np->estats.tx_errors_total =
1672 		np->estats.tx_late_collision +
1673 		np->estats.tx_fifo_errors +
1674 		np->estats.tx_carrier_errors +
1675 		np->estats.tx_excess_deferral +
1676 		np->estats.tx_retry_error;
1677 
1678 	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1679 		np->estats.tx_deferral += readl(base + NvRegTxDef);
1680 		np->estats.tx_packets += readl(base + NvRegTxFrame);
1681 		np->estats.rx_bytes += readl(base + NvRegRxCnt);
1682 		np->estats.tx_pause += readl(base + NvRegTxPause);
1683 		np->estats.rx_pause += readl(base + NvRegRxPause);
1684 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1685 	}
1686 
1687 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1688 		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1689 		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1690 		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1691 	}
1692 }
1693 
1694 /*
1695  * nv_get_stats: dev->get_stats function
1696  * Get the latest stats values from the nic.
1697  * Called with read_lock(&dev_base_lock) held for read -
1698  * only synchronized against unregister_netdevice.
1699  */
1700 static struct net_device_stats *nv_get_stats(struct net_device *dev)
1701 {
1702 	struct fe_priv *np = netdev_priv(dev);
1703 
1704 	/* If the nic supports hw counters then retrieve latest values */
1705 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1706 		nv_get_hw_stats(dev);
1707 
1708 		/* copy to net_device stats */
1709 		dev->stats.tx_bytes = np->estats.tx_bytes;
1710 		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1711 		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1712 		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
1713 		dev->stats.rx_over_errors = np->estats.rx_over_errors;
1714 		dev->stats.rx_errors = np->estats.rx_errors_total;
1715 		dev->stats.tx_errors = np->estats.tx_errors_total;
1716 	}
1717 
1718 	return &dev->stats;
1719 }
1720 
1721 /*
1722  * nv_alloc_rx: fill rx ring entries.
1723  * Return 1 if the skb allocations failed and the
1724  * rx engine is left without available descriptors.
1725  */
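/*
 * Note: the ring is filled only up to one slot short of get_rx (less_rx
 * below), so a completely full ring is never confused with an empty one
 * when the put pointer would otherwise catch up with get.
 */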
1726 static int nv_alloc_rx(struct net_device *dev)
1727 {
1728 	struct fe_priv *np = netdev_priv(dev);
1729 	struct ring_desc *less_rx;
1730 
1731 	less_rx = np->get_rx.orig;
1732 	if (less_rx-- == np->first_rx.orig)
1733 		less_rx = np->last_rx.orig;
1734 
1735 	while (np->put_rx.orig != less_rx) {
1736 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1737 		if (skb) {
1738 			np->put_rx_ctx->skb = skb;
1739 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1740 							     skb->data,
1741 							     skb_tailroom(skb),
1742 							     PCI_DMA_FROMDEVICE);
1743 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1744 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1745 			wmb();
1746 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1747 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1748 				np->put_rx.orig = np->first_rx.orig;
1749 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1750 				np->put_rx_ctx = np->first_rx_ctx;
1751 		} else
1752 			return 1;
1753 	}
1754 	return 0;
1755 }
1756 
1757 static int nv_alloc_rx_optimized(struct net_device *dev)
1758 {
1759 	struct fe_priv *np = netdev_priv(dev);
1760 	struct ring_desc_ex *less_rx;
1761 
1762 	less_rx = np->get_rx.ex;
1763 	if (less_rx-- == np->first_rx.ex)
1764 		less_rx = np->last_rx.ex;
1765 
1766 	while (np->put_rx.ex != less_rx) {
1767 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1768 		if (skb) {
1769 			np->put_rx_ctx->skb = skb;
1770 			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1771 							     skb->data,
1772 							     skb_tailroom(skb),
1773 							     PCI_DMA_FROMDEVICE);
1774 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1775 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1776 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1777 			wmb();
1778 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1779 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1780 				np->put_rx.ex = np->first_rx.ex;
1781 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1782 				np->put_rx_ctx = np->first_rx_ctx;
1783 		} else
1784 			return 1;
1785 	}
1786 	return 0;
1787 }
1788 
1789 /* If rx bufs are exhausted, this is called after 50ms to attempt a refill */
1790 static void nv_do_rx_refill(unsigned long data)
1791 {
1792 	struct net_device *dev = (struct net_device *) data;
1793 	struct fe_priv *np = netdev_priv(dev);
1794 
1795 	/* Just reschedule NAPI rx processing */
1796 	napi_schedule(&np->napi);
1797 }
1798 
1799 static void nv_init_rx(struct net_device *dev)
1800 {
1801 	struct fe_priv *np = netdev_priv(dev);
1802 	int i;
1803 
1804 	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1805 
1806 	if (!nv_optimized(np))
1807 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1808 	else
1809 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1810 	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1811 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1812 
1813 	for (i = 0; i < np->rx_ring_size; i++) {
1814 		if (!nv_optimized(np)) {
1815 			np->rx_ring.orig[i].flaglen = 0;
1816 			np->rx_ring.orig[i].buf = 0;
1817 		} else {
1818 			np->rx_ring.ex[i].flaglen = 0;
1819 			np->rx_ring.ex[i].txvlan = 0;
1820 			np->rx_ring.ex[i].bufhigh = 0;
1821 			np->rx_ring.ex[i].buflow = 0;
1822 		}
1823 		np->rx_skb[i].skb = NULL;
1824 		np->rx_skb[i].dma = 0;
1825 	}
1826 }
1827 
1828 static void nv_init_tx(struct net_device *dev)
1829 {
1830 	struct fe_priv *np = netdev_priv(dev);
1831 	int i;
1832 
1833 	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1834 
1835 	if (!nv_optimized(np))
1836 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1837 	else
1838 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1839 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1840 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1841 	np->tx_pkts_in_progress = 0;
1842 	np->tx_change_owner = NULL;
1843 	np->tx_end_flip = NULL;
1844 	np->tx_stop = 0;
1845 
1846 	for (i = 0; i < np->tx_ring_size; i++) {
1847 		if (!nv_optimized(np)) {
1848 			np->tx_ring.orig[i].flaglen = 0;
1849 			np->tx_ring.orig[i].buf = 0;
1850 		} else {
1851 			np->tx_ring.ex[i].flaglen = 0;
1852 			np->tx_ring.ex[i].txvlan = 0;
1853 			np->tx_ring.ex[i].bufhigh = 0;
1854 			np->tx_ring.ex[i].buflow = 0;
1855 		}
1856 		np->tx_skb[i].skb = NULL;
1857 		np->tx_skb[i].dma = 0;
1858 		np->tx_skb[i].dma_len = 0;
1859 		np->tx_skb[i].dma_single = 0;
1860 		np->tx_skb[i].first_tx_desc = NULL;
1861 		np->tx_skb[i].next_tx_ctx = NULL;
1862 	}
1863 }
1864 
1865 static int nv_init_ring(struct net_device *dev)
1866 {
1867 	struct fe_priv *np = netdev_priv(dev);
1868 
1869 	nv_init_tx(dev);
1870 	nv_init_rx(dev);
1871 
1872 	if (!nv_optimized(np))
1873 		return nv_alloc_rx(dev);
1874 	else
1875 		return nv_alloc_rx_optimized(dev);
1876 }
1877 
1878 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1879 {
1880 	if (tx_skb->dma) {
1881 		if (tx_skb->dma_single)
1882 			pci_unmap_single(np->pci_dev, tx_skb->dma,
1883 					 tx_skb->dma_len,
1884 					 PCI_DMA_TODEVICE);
1885 		else
1886 			pci_unmap_page(np->pci_dev, tx_skb->dma,
1887 				       tx_skb->dma_len,
1888 				       PCI_DMA_TODEVICE);
1889 		tx_skb->dma = 0;
1890 	}
1891 }
1892 
1893 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1894 {
1895 	nv_unmap_txskb(np, tx_skb);
1896 	if (tx_skb->skb) {
1897 		dev_kfree_skb_any(tx_skb->skb);
1898 		tx_skb->skb = NULL;
1899 		return 1;
1900 	}
1901 	return 0;
1902 }
1903 
1904 static void nv_drain_tx(struct net_device *dev)
1905 {
1906 	struct fe_priv *np = netdev_priv(dev);
1907 	unsigned int i;
1908 
1909 	for (i = 0; i < np->tx_ring_size; i++) {
1910 		if (!nv_optimized(np)) {
1911 			np->tx_ring.orig[i].flaglen = 0;
1912 			np->tx_ring.orig[i].buf = 0;
1913 		} else {
1914 			np->tx_ring.ex[i].flaglen = 0;
1915 			np->tx_ring.ex[i].txvlan = 0;
1916 			np->tx_ring.ex[i].bufhigh = 0;
1917 			np->tx_ring.ex[i].buflow = 0;
1918 		}
1919 		if (nv_release_txskb(np, &np->tx_skb[i]))
1920 			dev->stats.tx_dropped++;
1921 		np->tx_skb[i].dma = 0;
1922 		np->tx_skb[i].dma_len = 0;
1923 		np->tx_skb[i].dma_single = 0;
1924 		np->tx_skb[i].first_tx_desc = NULL;
1925 		np->tx_skb[i].next_tx_ctx = NULL;
1926 	}
1927 	np->tx_pkts_in_progress = 0;
1928 	np->tx_change_owner = NULL;
1929 	np->tx_end_flip = NULL;
1930 }
1931 
1932 static void nv_drain_rx(struct net_device *dev)
1933 {
1934 	struct fe_priv *np = netdev_priv(dev);
1935 	int i;
1936 
1937 	for (i = 0; i < np->rx_ring_size; i++) {
1938 		if (!nv_optimized(np)) {
1939 			np->rx_ring.orig[i].flaglen = 0;
1940 			np->rx_ring.orig[i].buf = 0;
1941 		} else {
1942 			np->rx_ring.ex[i].flaglen = 0;
1943 			np->rx_ring.ex[i].txvlan = 0;
1944 			np->rx_ring.ex[i].bufhigh = 0;
1945 			np->rx_ring.ex[i].buflow = 0;
1946 		}
1947 		wmb();
1948 		if (np->rx_skb[i].skb) {
1949 			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1950 					 (skb_end_pointer(np->rx_skb[i].skb) -
1951 					  np->rx_skb[i].skb->data),
1952 					 PCI_DMA_FROMDEVICE);
1953 			dev_kfree_skb(np->rx_skb[i].skb);
1954 			np->rx_skb[i].skb = NULL;
1955 		}
1956 	}
1957 }
1958 
1959 static void nv_drain_rxtx(struct net_device *dev)
1960 {
1961 	nv_drain_tx(dev);
1962 	nv_drain_rx(dev);
1963 }
1964 
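/*
 * The arithmetic below counts the in-flight descriptors between get and
 * put and subtracts them from the ring size. The pointer difference can
 * be negative after a wrap, hence tx_ring_size is added before the
 * modulo. Worked example: with tx_ring_size = 256 and
 * put_tx_ctx - get_tx_ctx = -10 (put has wrapped), (256 + (-10)) % 256
 * = 246 descriptors are in flight, leaving 10 empty slots.
 */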
1965 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1966 {
1967 	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1968 }
1969 
1970 static void nv_legacybackoff_reseed(struct net_device *dev)
1971 {
1972 	u8 __iomem *base = get_hwbase(dev);
1973 	u32 reg;
1974 	u32 low;
1975 	int tx_status = 0;
1976 
1977 	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1978 	get_random_bytes(&low, sizeof(low));
1979 	reg |= low & NVREG_SLOTTIME_MASK;
1980 
1981 	/* Need to stop tx before change takes effect.
1982 	 * Caller has already acquired np->lock.
1983 	 */
1984 	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1985 	if (tx_status)
1986 		nv_stop_tx(dev);
1987 	nv_stop_rx(dev);
1988 	writel(reg, base + NvRegSlotTime);
1989 	if (tx_status)
1990 		nv_start_tx(dev);
1991 	nv_start_rx(dev);
1992 }
1993 
1994 /* Gear Backoff Seeds */
1995 #define BACKOFF_SEEDSET_ROWS	8
1996 #define BACKOFF_SEEDSET_LFSRS	15
1997 
1998 /* Known Good seed sets */
1999 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2000 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2001 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2002 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2003 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2004 	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2005 	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2006 	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2007 	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2008 
2009 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2010 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2011 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2012 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2013 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2014 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2015 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2016 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2017 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2018 
2019 static void nv_gear_backoff_reseed(struct net_device *dev)
2020 {
2021 	u8 __iomem *base = get_hwbase(dev);
2022 	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2023 	u32 temp, seedset, combinedSeed;
2024 	int i;
2025 
2026 	/* Set up the seed for the free running LFSR */
2027 	/* We gather three independent random 12-bit values
2028 	   and swizzle bits around to increase randomness */
2029 	get_random_bytes(&miniseed1, sizeof(miniseed1));
2030 	miniseed1 &= 0x0fff;
2031 	if (miniseed1 == 0)
2032 		miniseed1 = 0xabc;
2033 
2034 	get_random_bytes(&miniseed2, sizeof(miniseed2));
2035 	miniseed2 &= 0x0fff;
2036 	if (miniseed2 == 0)
2037 		miniseed2 = 0xabc;
2038 	miniseed2_reversed =
2039 		((miniseed2 & 0xF00) >> 8) |
2040 		 (miniseed2 & 0x0F0) |
2041 		 ((miniseed2 & 0x00F) << 8);
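	/* The swizzle swaps the top and bottom nibbles of the 12-bit value,
	 * e.g. miniseed2 = 0x123 gives miniseed2_reversed = 0x321
	 * (0x001 | 0x020 | 0x300).
	 */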
2042 
2043 	get_random_bytes(&miniseed3, sizeof(miniseed3));
2044 	miniseed3 &= 0x0fff;
2045 	if (miniseed3 == 0)
2046 		miniseed3 = 0xabc;
2047 	miniseed3_reversed =
2048 		((miniseed3 & 0xF00) >> 8) |
2049 		 (miniseed3 & 0x0F0) |
2050 		 ((miniseed3 & 0x00F) << 8);
2051 
2052 	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2053 		       (miniseed2 ^ miniseed3_reversed);
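	/* combinedSeed is 24 bits wide: bits 23..12 carry
	 * miniseed1 ^ miniseed2_reversed, bits 11..0 carry
	 * miniseed2 ^ miniseed3_reversed.
	 */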
2054 
2055 	/* Seeds cannot be zero */
2056 	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2057 		combinedSeed |= 0x08;
2058 	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2059 		combinedSeed |= 0x8000;
2060 
2061 	/* No need to disable tx here */
2062 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2063 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2064 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2065 	writel(temp, base + NvRegBackOffControl);
2066 
2067 	/* Setup seeds for all gear LFSRs. */
2068 	get_random_bytes(&seedset, sizeof(seedset));
2069 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
2070 	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2071 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2072 		temp |= main_seedset[seedset][i-1] & 0x3ff;
2073 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2074 		writel(temp, base + NvRegBackOffControl);
2075 	}
2076 }
2077 
2078 /*
2079  * nv_start_xmit: dev->hard_start_xmit function
2080  * Called with netif_tx_lock held.
2081  */
2082 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2083 {
2084 	struct fe_priv *np = netdev_priv(dev);
2085 	u32 tx_flags = 0;
2086 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2087 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2088 	unsigned int i;
2089 	u32 offset = 0;
2090 	u32 bcnt;
2091 	u32 size = skb_headlen(skb);
2092 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
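	/* entries is a ceiling division of size by NV_TX2_TSO_MAX_SIZE
	 * (a power of two), i.e. the number of descriptors needed for the
	 * linear part of the skb; equivalent to
	 * DIV_ROUND_UP(size, NV_TX2_TSO_MAX_SIZE).
	 */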
2093 	u32 empty_slots;
2094 	struct ring_desc *put_tx;
2095 	struct ring_desc *start_tx;
2096 	struct ring_desc *prev_tx;
2097 	struct nv_skb_map *prev_tx_ctx;
2098 	unsigned long flags;
2099 
2100 	/* add fragments to entries count */
2101 	for (i = 0; i < fragments; i++) {
2102 		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2103 
2104 		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
2105 			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2106 	}
2107 
2108 	spin_lock_irqsave(&np->lock, flags);
2109 	empty_slots = nv_get_empty_tx_slots(np);
2110 	if (unlikely(empty_slots <= entries)) {
2111 		netif_stop_queue(dev);
2112 		np->tx_stop = 1;
2113 		spin_unlock_irqrestore(&np->lock, flags);
2114 		return NETDEV_TX_BUSY;
2115 	}
2116 	spin_unlock_irqrestore(&np->lock, flags);
2117 
2118 	start_tx = put_tx = np->put_tx.orig;
2119 
2120 	/* setup the header buffer */
2121 	do {
2122 		prev_tx = put_tx;
2123 		prev_tx_ctx = np->put_tx_ctx;
2124 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2125 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2126 						PCI_DMA_TODEVICE);
2127 		np->put_tx_ctx->dma_len = bcnt;
2128 		np->put_tx_ctx->dma_single = 1;
2129 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2130 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2131 
2132 		tx_flags = np->tx_flags;
2133 		offset += bcnt;
2134 		size -= bcnt;
2135 		if (unlikely(put_tx++ == np->last_tx.orig))
2136 			put_tx = np->first_tx.orig;
2137 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2138 			np->put_tx_ctx = np->first_tx_ctx;
2139 	} while (size);
2140 
2141 	/* setup the fragments */
2142 	for (i = 0; i < fragments; i++) {
2143 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2144 		u32 size = skb_frag_size(frag);
2145 		offset = 0;
2146 
2147 		do {
2148 			prev_tx = put_tx;
2149 			prev_tx_ctx = np->put_tx_ctx;
2150 			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2151 			np->put_tx_ctx->dma = skb_frag_dma_map(
2152 							&np->pci_dev->dev,
2153 							frag, offset,
2154 							bcnt,
2155 							DMA_TO_DEVICE);
2156 			np->put_tx_ctx->dma_len = bcnt;
2157 			np->put_tx_ctx->dma_single = 0;
2158 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2159 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2160 
2161 			offset += bcnt;
2162 			size -= bcnt;
2163 			if (unlikely(put_tx++ == np->last_tx.orig))
2164 				put_tx = np->first_tx.orig;
2165 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2166 				np->put_tx_ctx = np->first_tx_ctx;
2167 		} while (size);
2168 	}
2169 
2170 	/* set last fragment flag */
2171 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2172 
2173 	/* save skb in this slot's context area */
2174 	prev_tx_ctx->skb = skb;
2175 
2176 	if (skb_is_gso(skb))
2177 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2178 	else
2179 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2180 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2181 
2182 	spin_lock_irqsave(&np->lock, flags);
2183 
2184 	/* set tx flags */
2185 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2186 	np->put_tx.orig = put_tx;
2187 
2188 	spin_unlock_irqrestore(&np->lock, flags);
2189 
2190 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2191 	return NETDEV_TX_OK;
2192 }
2193 
2194 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2195 					   struct net_device *dev)
2196 {
2197 	struct fe_priv *np = netdev_priv(dev);
2198 	u32 tx_flags = 0;
2199 	u32 tx_flags_extra;
2200 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2201 	unsigned int i;
2202 	u32 offset = 0;
2203 	u32 bcnt;
2204 	u32 size = skb_headlen(skb);
2205 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2206 	u32 empty_slots;
2207 	struct ring_desc_ex *put_tx;
2208 	struct ring_desc_ex *start_tx;
2209 	struct ring_desc_ex *prev_tx;
2210 	struct nv_skb_map *prev_tx_ctx;
2211 	struct nv_skb_map *start_tx_ctx;
2212 	unsigned long flags;
2213 
2214 	/* add fragments to entries count */
2215 	for (i = 0; i < fragments; i++) {
2216 		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2217 
2218 		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
2219 			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2220 	}
2221 
2222 	spin_lock_irqsave(&np->lock, flags);
2223 	empty_slots = nv_get_empty_tx_slots(np);
2224 	if (unlikely(empty_slots <= entries)) {
2225 		netif_stop_queue(dev);
2226 		np->tx_stop = 1;
2227 		spin_unlock_irqrestore(&np->lock, flags);
2228 		return NETDEV_TX_BUSY;
2229 	}
2230 	spin_unlock_irqrestore(&np->lock, flags);
2231 
2232 	start_tx = put_tx = np->put_tx.ex;
2233 	start_tx_ctx = np->put_tx_ctx;
2234 
2235 	/* setup the header buffer */
2236 	do {
2237 		prev_tx = put_tx;
2238 		prev_tx_ctx = np->put_tx_ctx;
2239 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2240 		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2241 						PCI_DMA_TODEVICE);
2242 		np->put_tx_ctx->dma_len = bcnt;
2243 		np->put_tx_ctx->dma_single = 1;
2244 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2245 		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2246 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2247 
2248 		tx_flags = NV_TX2_VALID;
2249 		offset += bcnt;
2250 		size -= bcnt;
2251 		if (unlikely(put_tx++ == np->last_tx.ex))
2252 			put_tx = np->first_tx.ex;
2253 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2254 			np->put_tx_ctx = np->first_tx_ctx;
2255 	} while (size);
2256 
2257 	/* setup the fragments */
2258 	for (i = 0; i < fragments; i++) {
2259 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2260 		u32 size = skb_frag_size(frag);
2261 		offset = 0;
2262 
2263 		do {
2264 			prev_tx = put_tx;
2265 			prev_tx_ctx = np->put_tx_ctx;
2266 			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2267 			np->put_tx_ctx->dma = skb_frag_dma_map(
2268 							&np->pci_dev->dev,
2269 							frag, offset,
2270 							bcnt,
2271 							DMA_TO_DEVICE);
2272 			np->put_tx_ctx->dma_len = bcnt;
2273 			np->put_tx_ctx->dma_single = 0;
2274 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2275 			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2276 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2277 
2278 			offset += bcnt;
2279 			size -= bcnt;
2280 			if (unlikely(put_tx++ == np->last_tx.ex))
2281 				put_tx = np->first_tx.ex;
2282 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2283 				np->put_tx_ctx = np->first_tx_ctx;
2284 		} while (size);
2285 	}
2286 
2287 	/* set last fragment flag */
2288 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2289 
2290 	/* save skb in this slot's context area */
2291 	prev_tx_ctx->skb = skb;
2292 
2293 	if (skb_is_gso(skb))
2294 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2295 	else
2296 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2297 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2298 
2299 	/* vlan tag */
2300 	if (vlan_tx_tag_present(skb))
2301 		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2302 					vlan_tx_tag_get(skb));
2303 	else
2304 		start_tx->txvlan = 0;
2305 
2306 	spin_lock_irqsave(&np->lock, flags);
2307 
2308 	if (np->tx_limit) {
2309 		/* Limit the number of outstanding tx. Set up all fragments, but
2310 		 * do not set the VALID bit on the first descriptor. Save a pointer
2311 		 * to that descriptor and also to the next skb_map element.
2312 		 */
2313 
2314 		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2315 			if (!np->tx_change_owner)
2316 				np->tx_change_owner = start_tx_ctx;
2317 
2318 			/* remove VALID bit */
2319 			tx_flags &= ~NV_TX2_VALID;
2320 			start_tx_ctx->first_tx_desc = start_tx;
2321 			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2322 			np->tx_end_flip = np->put_tx_ctx;
2323 		} else {
2324 			np->tx_pkts_in_progress++;
2325 		}
2326 	}
2327 
2328 	/* set tx flags */
2329 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2330 	np->put_tx.ex = put_tx;
2331 
2332 	spin_unlock_irqrestore(&np->lock, flags);
2333 
2334 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2335 	return NETDEV_TX_OK;
2336 }
2337 
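/*
 * Completion side of the tx limiting scheme above: each completed packet
 * releases the next deferred packet to the hardware by setting the VALID
 * bit on its first descriptor, so at most NV_TX_LIMIT_COUNT packets are
 * visible to the nic at once.
 */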
2338 static inline void nv_tx_flip_ownership(struct net_device *dev)
2339 {
2340 	struct fe_priv *np = netdev_priv(dev);
2341 
2342 	np->tx_pkts_in_progress--;
2343 	if (np->tx_change_owner) {
2344 		np->tx_change_owner->first_tx_desc->flaglen |=
2345 			cpu_to_le32(NV_TX2_VALID);
2346 		np->tx_pkts_in_progress++;
2347 
2348 		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2349 		if (np->tx_change_owner == np->tx_end_flip)
2350 			np->tx_change_owner = NULL;
2351 
2352 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2353 	}
2354 }
2355 
2356 /*
2357  * nv_tx_done: check for completed packets, release the skbs.
2358  *
2359  * Caller must own np->lock.
2360  */
2361 static int nv_tx_done(struct net_device *dev, int limit)
2362 {
2363 	struct fe_priv *np = netdev_priv(dev);
2364 	u32 flags;
2365 	int tx_work = 0;
2366 	struct ring_desc *orig_get_tx = np->get_tx.orig;
2367 
2368 	while ((np->get_tx.orig != np->put_tx.orig) &&
2369 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2370 	       (tx_work < limit)) {
2371 
2372 		nv_unmap_txskb(np, np->get_tx_ctx);
2373 
2374 		if (np->desc_ver == DESC_VER_1) {
2375 			if (flags & NV_TX_LASTPACKET) {
2376 				if (flags & NV_TX_ERROR) {
2377 					if (flags & NV_TX_UNDERFLOW)
2378 						dev->stats.tx_fifo_errors++;
2379 					if (flags & NV_TX_CARRIERLOST)
2380 						dev->stats.tx_carrier_errors++;
2381 					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 						nv_legacybackoff_reseed(dev);
2383 					dev->stats.tx_errors++;
2384 				} else {
2385 					dev->stats.tx_packets++;
2386 					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2387 				}
2388 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2389 				np->get_tx_ctx->skb = NULL;
2390 				tx_work++;
2391 			}
2392 		} else {
2393 			if (flags & NV_TX2_LASTPACKET) {
2394 				if (flags & NV_TX2_ERROR) {
2395 					if (flags & NV_TX2_UNDERFLOW)
2396 						dev->stats.tx_fifo_errors++;
2397 					if (flags & NV_TX2_CARRIERLOST)
2398 						dev->stats.tx_carrier_errors++;
2399 					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2400 						nv_legacybackoff_reseed(dev);
2401 					dev->stats.tx_errors++;
2402 				} else {
2403 					dev->stats.tx_packets++;
2404 					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2405 				}
2406 				dev_kfree_skb_any(np->get_tx_ctx->skb);
2407 				np->get_tx_ctx->skb = NULL;
2408 				tx_work++;
2409 			}
2410 		}
2411 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2412 			np->get_tx.orig = np->first_tx.orig;
2413 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2414 			np->get_tx_ctx = np->first_tx_ctx;
2415 	}
2416 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2417 		np->tx_stop = 0;
2418 		netif_wake_queue(dev);
2419 	}
2420 	return tx_work;
2421 }
2422 
2423 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2424 {
2425 	struct fe_priv *np = netdev_priv(dev);
2426 	u32 flags;
2427 	int tx_work = 0;
2428 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2429 
2430 	while ((np->get_tx.ex != np->put_tx.ex) &&
2431 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2432 	       (tx_work < limit)) {
2433 
2434 		nv_unmap_txskb(np, np->get_tx_ctx);
2435 
2436 		if (flags & NV_TX2_LASTPACKET) {
2437 			if (!(flags & NV_TX2_ERROR))
2438 				dev->stats.tx_packets++;
2439 			else {
2440 				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2441 					if (np->driver_data & DEV_HAS_GEAR_MODE)
2442 						nv_gear_backoff_reseed(dev);
2443 					else
2444 						nv_legacybackoff_reseed(dev);
2445 				}
2446 			}
2447 
2448 			dev_kfree_skb_any(np->get_tx_ctx->skb);
2449 			np->get_tx_ctx->skb = NULL;
2450 			tx_work++;
2451 
2452 			if (np->tx_limit)
2453 				nv_tx_flip_ownership(dev);
2454 		}
2455 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2456 			np->get_tx.ex = np->first_tx.ex;
2457 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2458 			np->get_tx_ctx = np->first_tx_ctx;
2459 	}
2460 	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2461 		np->tx_stop = 0;
2462 		netif_wake_queue(dev);
2463 	}
2464 	return tx_work;
2465 }
2466 
2467 /*
2468  * nv_tx_timeout: dev->tx_timeout function
2469  * Called with netif_tx_lock held.
2470  */
2471 static void nv_tx_timeout(struct net_device *dev)
2472 {
2473 	struct fe_priv *np = netdev_priv(dev);
2474 	u8 __iomem *base = get_hwbase(dev);
2475 	u32 status;
2476 	union ring_type put_tx;
2477 	int saved_tx_limit;
2478 	int i;
2479 
2480 	if (np->msi_flags & NV_MSI_X_ENABLED)
2481 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2482 	else
2483 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2484 
2485 	netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
2486 
2487 	netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2488 	netdev_info(dev, "Dumping tx registers\n");
2489 	for (i = 0; i <= np->register_size; i += 32) {
2490 		netdev_info(dev,
2491 			    "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2492 			    i,
2493 			    readl(base + i + 0), readl(base + i + 4),
2494 			    readl(base + i + 8), readl(base + i + 12),
2495 			    readl(base + i + 16), readl(base + i + 20),
2496 			    readl(base + i + 24), readl(base + i + 28));
2497 	}
2498 	netdev_info(dev, "Dumping tx ring\n");
2499 	for (i = 0; i < np->tx_ring_size; i += 4) {
2500 		if (!nv_optimized(np)) {
2501 			netdev_info(dev,
2502 				    "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2503 				    i,
2504 				    le32_to_cpu(np->tx_ring.orig[i].buf),
2505 				    le32_to_cpu(np->tx_ring.orig[i].flaglen),
2506 				    le32_to_cpu(np->tx_ring.orig[i+1].buf),
2507 				    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2508 				    le32_to_cpu(np->tx_ring.orig[i+2].buf),
2509 				    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2510 				    le32_to_cpu(np->tx_ring.orig[i+3].buf),
2511 				    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2512 		} else {
2513 			netdev_info(dev,
2514 				    "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2515 				    i,
2516 				    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2517 				    le32_to_cpu(np->tx_ring.ex[i].buflow),
2518 				    le32_to_cpu(np->tx_ring.ex[i].flaglen),
2519 				    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2520 				    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2521 				    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2522 				    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2523 				    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2524 				    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2525 				    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2526 				    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2527 				    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2528 		}
2529 	}
2530 
2531 	spin_lock_irq(&np->lock);
2532 
2533 	/* 1) stop tx engine */
2534 	nv_stop_tx(dev);
2535 
2536 	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2537 	saved_tx_limit = np->tx_limit;
2538 	np->tx_limit = 0; /* prevent giving HW any limited pkts */
2539 	np->tx_stop = 0;  /* prevent waking tx queue */
2540 	if (!nv_optimized(np))
2541 		nv_tx_done(dev, np->tx_ring_size);
2542 	else
2543 		nv_tx_done_optimized(dev, np->tx_ring_size);
2544 
2545 	/* save current HW position */
2546 	if (np->tx_change_owner)
2547 		put_tx.ex = np->tx_change_owner->first_tx_desc;
2548 	else
2549 		put_tx = np->put_tx;
2550 
2551 	/* 3) clear all tx state */
2552 	nv_drain_tx(dev);
2553 	nv_init_tx(dev);
2554 
2555 	/* 4) restore state to current HW position */
2556 	np->get_tx = np->put_tx = put_tx;
2557 	np->tx_limit = saved_tx_limit;
2558 
2559 	/* 5) restart tx engine */
2560 	nv_start_tx(dev);
2561 	netif_wake_queue(dev);
2562 	spin_unlock_irq(&np->lock);
2563 }
2564 
2565 /*
2566  * Called when the nic notices a mismatch between the actual data len on the
2567  * wire and the len indicated in the 802 header
2568  */
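/*
 * Worked example: a 70-byte frame whose 802.3 length field reads 46 is
 * trimmed to 46 + ETH_HLEN = 60 bytes; a 60-byte frame whose length
 * field claims 100 bytes of payload takes the short-packet path and is
 * discarded (-1).
 */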
2569 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2570 {
2571 	int hdrlen;	/* length of the 802 header */
2572 	int protolen;	/* length as stored in the proto field */
2573 
2574 	/* 1) calculate len according to header */
2575 	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2576 		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2577 		hdrlen = VLAN_HLEN;
2578 	} else {
2579 		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2580 		hdrlen = ETH_HLEN;
2581 	}
2582 	if (protolen > ETH_DATA_LEN)
2583 		return datalen; /* Value in proto field not a len, no checks possible */
2584 
2585 	protolen += hdrlen;
2586 	/* consistency checks: */
2587 	if (datalen > ETH_ZLEN) {
2588 		if (datalen >= protolen) {
2589 			/* more data on wire than in 802 header, trim off
2590 			 * additional data.
2591 			 */
2592 			return protolen;
2593 		} else {
2594 			/* less data on wire than mentioned in header.
2595 			 * Discard the packet.
2596 			 */
2597 			return -1;
2598 		}
2599 	} else {
2600 		/* short packet. Accept only if 802 values are also short */
2601 		if (protolen > ETH_ZLEN) {
2602 			return -1;
2603 		}
2604 		return datalen;
2605 	}
2606 }
2607 
2608 static int nv_rx_process(struct net_device *dev, int limit)
2609 {
2610 	struct fe_priv *np = netdev_priv(dev);
2611 	u32 flags;
2612 	int rx_work = 0;
2613 	struct sk_buff *skb;
2614 	int len;
2615 
2616 	while ((np->get_rx.orig != np->put_rx.orig) &&
2617 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2618 		(rx_work < limit)) {
2619 
2620 		/*
2621 		 * the packet is for us - immediately tear down the pci mapping.
2622 		 * TODO: check if a prefetch of the first cacheline improves
2623 		 * the performance.
2624 		 */
2625 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2626 				np->get_rx_ctx->dma_len,
2627 				PCI_DMA_FROMDEVICE);
2628 		skb = np->get_rx_ctx->skb;
2629 		np->get_rx_ctx->skb = NULL;
2630 
2631 		/* look at what we actually got: */
2632 		if (np->desc_ver == DESC_VER_1) {
2633 			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2634 				len = flags & LEN_MASK_V1;
2635 				if (unlikely(flags & NV_RX_ERROR)) {
2636 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2637 						len = nv_getlen(dev, skb->data, len);
2638 						if (len < 0) {
2639 							dev->stats.rx_errors++;
2640 							dev_kfree_skb(skb);
2641 							goto next_pkt;
2642 						}
2643 					}
2644 					/* framing errors are soft errors */
2645 					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2646 						if (flags & NV_RX_SUBSTRACT1)
2647 							len--;
2648 					}
2649 					/* the rest are hard errors */
2650 					else {
2651 						if (flags & NV_RX_MISSEDFRAME)
2652 							dev->stats.rx_missed_errors++;
2653 						if (flags & NV_RX_CRCERR)
2654 							dev->stats.rx_crc_errors++;
2655 						if (flags & NV_RX_OVERFLOW)
2656 							dev->stats.rx_over_errors++;
2657 						dev->stats.rx_errors++;
2658 						dev_kfree_skb(skb);
2659 						goto next_pkt;
2660 					}
2661 				}
2662 			} else {
2663 				dev_kfree_skb(skb);
2664 				goto next_pkt;
2665 			}
2666 		} else {
2667 			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2668 				len = flags & LEN_MASK_V2;
2669 				if (unlikely(flags & NV_RX2_ERROR)) {
2670 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2671 						len = nv_getlen(dev, skb->data, len);
2672 						if (len < 0) {
2673 							dev->stats.rx_errors++;
2674 							dev_kfree_skb(skb);
2675 							goto next_pkt;
2676 						}
2677 					}
2678 					/* framing errors are soft errors */
2679 					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2680 						if (flags & NV_RX2_SUBSTRACT1)
2681 							len--;
2682 					}
2683 					/* the rest are hard errors */
2684 					else {
2685 						if (flags & NV_RX2_CRCERR)
2686 							dev->stats.rx_crc_errors++;
2687 						if (flags & NV_RX2_OVERFLOW)
2688 							dev->stats.rx_over_errors++;
2689 						dev->stats.rx_errors++;
2690 						dev_kfree_skb(skb);
2691 						goto next_pkt;
2692 					}
2693 				}
2694 				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
2695 				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
2696 					skb->ip_summed = CHECKSUM_UNNECESSARY;
2697 			} else {
2698 				dev_kfree_skb(skb);
2699 				goto next_pkt;
2700 			}
2701 		}
2702 		/* got a valid packet - forward it to the network core */
2703 		skb_put(skb, len);
2704 		skb->protocol = eth_type_trans(skb, dev);
2705 		napi_gro_receive(&np->napi, skb);
2706 		dev->stats.rx_packets++;
2707 		dev->stats.rx_bytes += len;
2708 next_pkt:
2709 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2710 			np->get_rx.orig = np->first_rx.orig;
2711 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2712 			np->get_rx_ctx = np->first_rx_ctx;
2713 
2714 		rx_work++;
2715 	}
2716 
2717 	return rx_work;
2718 }
2719 
2720 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2721 {
2722 	struct fe_priv *np = netdev_priv(dev);
2723 	u32 flags;
2724 	u32 vlanflags = 0;
2725 	int rx_work = 0;
2726 	struct sk_buff *skb;
2727 	int len;
2728 
2729 	while ((np->get_rx.ex != np->put_rx.ex) &&
2730 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2731 	      (rx_work < limit)) {
2732 
2733 		/*
2734 		 * the packet is for us - immediately tear down the pci mapping.
2735 		 * TODO: check if a prefetch of the first cacheline improves
2736 		 * the performance.
2737 		 */
2738 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2739 				np->get_rx_ctx->dma_len,
2740 				PCI_DMA_FROMDEVICE);
2741 		skb = np->get_rx_ctx->skb;
2742 		np->get_rx_ctx->skb = NULL;
2743 
2744 		/* look at what we actually got: */
2745 		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2746 			len = flags & LEN_MASK_V2;
2747 			if (unlikely(flags & NV_RX2_ERROR)) {
2748 				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2749 					len = nv_getlen(dev, skb->data, len);
2750 					if (len < 0) {
2751 						dev_kfree_skb(skb);
2752 						goto next_pkt;
2753 					}
2754 				}
2755 				/* framing errors are soft errors */
2756 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2757 					if (flags & NV_RX2_SUBSTRACT1)
2758 						len--;
2759 				}
2760 				/* the rest are hard errors */
2761 				else {
2762 					dev_kfree_skb(skb);
2763 					goto next_pkt;
2764 				}
2765 			}
2766 
2767 			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
2768 			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
2769 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2770 
2771 			/* got a valid packet - forward it to the network core */
2772 			skb_put(skb, len);
2773 			skb->protocol = eth_type_trans(skb, dev);
2774 			prefetch(skb->data);
2775 
2776 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2777 
2778 			/*
2779 			 * There is a need to check for NETIF_F_HW_VLAN_RX here.
2780 			 * Even if vlan rx acceleration is disabled,
2781 			 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
2782 			 */
2783 			if (dev->features & NETIF_F_HW_VLAN_RX &&
2784 			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2785 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2786 
2787 				__vlan_hwaccel_put_tag(skb, vid);
2788 			}
2789 			napi_gro_receive(&np->napi, skb);
2790 
2791 			dev->stats.rx_packets++;
2792 			dev->stats.rx_bytes += len;
2793 		} else {
2794 			dev_kfree_skb(skb);
2795 		}
2796 next_pkt:
2797 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2798 			np->get_rx.ex = np->first_rx.ex;
2799 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2800 			np->get_rx_ctx = np->first_rx_ctx;
2801 
2802 		rx_work++;
2803 	}
2804 
2805 	return rx_work;
2806 }
2807 
2808 static void set_bufsize(struct net_device *dev)
2809 {
2810 	struct fe_priv *np = netdev_priv(dev);
2811 
2812 	if (dev->mtu <= ETH_DATA_LEN)
2813 		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2814 	else
2815 		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2816 }
2817 
2818 /*
2819  * nv_change_mtu: dev->change_mtu function
2820  * Called with dev_base_lock held for read.
2821  */
2822 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2823 {
2824 	struct fe_priv *np = netdev_priv(dev);
2825 	int old_mtu;
2826 
2827 	if (new_mtu < 64 || new_mtu > np->pkt_limit)
2828 		return -EINVAL;
2829 
2830 	old_mtu = dev->mtu;
2831 	dev->mtu = new_mtu;
2832 
2833 	/* return early if the buffer sizes will not change */
2834 	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2835 		return 0;
2836 	if (old_mtu == new_mtu)
2837 		return 0;
2838 
2839 	/* synchronized against open : rtnl_lock() held by caller */
2840 	if (netif_running(dev)) {
2841 		u8 __iomem *base = get_hwbase(dev);
2842 		/*
2843 		 * It seems that the nic preloads valid ring entries into an
2844 		 * internal buffer. The procedure for flushing everything is
2845 		 * guessed; there is probably a simpler approach.
2846 		 * Changing the MTU is a rare event, so it shouldn't matter.
2847 		 */
2848 		nv_disable_irq(dev);
2849 		nv_napi_disable(dev);
2850 		netif_tx_lock_bh(dev);
2851 		netif_addr_lock(dev);
2852 		spin_lock(&np->lock);
2853 		/* stop engines */
2854 		nv_stop_rxtx(dev);
2855 		nv_txrx_reset(dev);
2856 		/* drain rx queue */
2857 		nv_drain_rxtx(dev);
2858 		/* reinit driver view of the rx queue */
2859 		set_bufsize(dev);
2860 		if (nv_init_ring(dev)) {
2861 			if (!np->in_shutdown)
2862 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2863 		}
2864 		/* reinit nic view of the rx queue */
2865 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2866 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2867 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2868 			base + NvRegRingSizes);
2869 		pci_push(base);
2870 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2871 		pci_push(base);
2872 
2873 		/* restart rx engine */
2874 		nv_start_rxtx(dev);
2875 		spin_unlock(&np->lock);
2876 		netif_addr_unlock(dev);
2877 		netif_tx_unlock_bh(dev);
2878 		nv_napi_enable(dev);
2879 		nv_enable_irq(dev);
2880 	}
2881 	return 0;
2882 }
2883 
2884 static void nv_copy_mac_to_hw(struct net_device *dev)
2885 {
2886 	u8 __iomem *base = get_hwbase(dev);
2887 	u32 mac[2];
2888 
2889 	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2890 			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2891 	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
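	/* e.g. dev_addr 00:11:22:33:44:55 packs little-endian style as
	 * mac[0] = 0x33221100 and mac[1] = 0x00005544.
	 */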
2892 
2893 	writel(mac[0], base + NvRegMacAddrA);
2894 	writel(mac[1], base + NvRegMacAddrB);
2895 }
2896 
2897 /*
2898  * nv_set_mac_address: dev->set_mac_address function
2899  * Called with rtnl_lock() held.
2900  */
2901 static int nv_set_mac_address(struct net_device *dev, void *addr)
2902 {
2903 	struct fe_priv *np = netdev_priv(dev);
2904 	struct sockaddr *macaddr = (struct sockaddr *)addr;
2905 
2906 	if (!is_valid_ether_addr(macaddr->sa_data))
2907 		return -EADDRNOTAVAIL;
2908 
2909 	/* synchronized against open : rtnl_lock() held by caller */
2910 	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2911 
2912 	if (netif_running(dev)) {
2913 		netif_tx_lock_bh(dev);
2914 		netif_addr_lock(dev);
2915 		spin_lock_irq(&np->lock);
2916 
2917 		/* stop rx engine */
2918 		nv_stop_rx(dev);
2919 
2920 		/* set mac address */
2921 		nv_copy_mac_to_hw(dev);
2922 
2923 		/* restart rx engine */
2924 		nv_start_rx(dev);
2925 		spin_unlock_irq(&np->lock);
2926 		netif_addr_unlock(dev);
2927 		netif_tx_unlock_bh(dev);
2928 	} else {
2929 		nv_copy_mac_to_hw(dev);
2930 	}
2931 	return 0;
2932 }
2933 
2934 /*
2935  * nv_set_multicast: dev->set_multicast function
2936  * Called with netif_tx_lock held.
2937  */
2938 static void nv_set_multicast(struct net_device *dev)
2939 {
2940 	struct fe_priv *np = netdev_priv(dev);
2941 	u8 __iomem *base = get_hwbase(dev);
2942 	u32 addr[2];
2943 	u32 mask[2];
2944 	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2945 
2946 	memset(addr, 0, sizeof(addr));
2947 	memset(mask, 0, sizeof(mask));
2948 
2949 	if (dev->flags & IFF_PROMISC) {
2950 		pff |= NVREG_PFF_PROMISC;
2951 	} else {
2952 		pff |= NVREG_PFF_MYADDR;
2953 
2954 		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
2955 			u32 alwaysOff[2];
2956 			u32 alwaysOn[2];
2957 
2958 			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2959 			if (dev->flags & IFF_ALLMULTI) {
2960 				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2961 			} else {
2962 				struct netdev_hw_addr *ha;
2963 
2964 				netdev_for_each_mc_addr(ha, dev) {
2965 					unsigned char *addr = ha->addr;
2966 					u32 a, b;
2967 
2968 					a = le32_to_cpu(*(__le32 *) addr);
2969 					b = le16_to_cpu(*(__le16 *) (&addr[4]));
2970 					alwaysOn[0] &= a;
2971 					alwaysOff[0] &= ~a;
2972 					alwaysOn[1] &= b;
2973 					alwaysOff[1] &= ~b;
2974 				}
2975 			}
2976 			addr[0] = alwaysOn[0];
2977 			addr[1] = alwaysOn[1];
2978 			mask[0] = alwaysOn[0] | alwaysOff[0];
2979 			mask[1] = alwaysOn[1] | alwaysOff[1];
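			/* addr ends up with the bits set in every list entry
			 * and mask with the bits identical across the list;
			 * e.g. 01:00:5e:00:00:01 and 01:00:5e:00:00:02 differ
			 * only in the low two bits of the final octet, which
			 * drop out of the mask so both still match.
			 */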
2980 		} else {
2981 			mask[0] = NVREG_MCASTMASKA_NONE;
2982 			mask[1] = NVREG_MCASTMASKB_NONE;
2983 		}
2984 	}
2985 	addr[0] |= NVREG_MCASTADDRA_FORCE;
2986 	pff |= NVREG_PFF_ALWAYS;
2987 	spin_lock_irq(&np->lock);
2988 	nv_stop_rx(dev);
2989 	writel(addr[0], base + NvRegMulticastAddrA);
2990 	writel(addr[1], base + NvRegMulticastAddrB);
2991 	writel(mask[0], base + NvRegMulticastMaskA);
2992 	writel(mask[1], base + NvRegMulticastMaskB);
2993 	writel(pff, base + NvRegPacketFilterFlags);
2994 	nv_start_rx(dev);
2995 	spin_unlock_irq(&np->lock);
2996 }
2997 
2998 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2999 {
3000 	struct fe_priv *np = netdev_priv(dev);
3001 	u8 __iomem *base = get_hwbase(dev);
3002 
3003 	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3004 
3005 	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3006 		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3007 		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3008 			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3009 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3010 		} else {
3011 			writel(pff, base + NvRegPacketFilterFlags);
3012 		}
3013 	}
3014 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3015 		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3016 		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3017 			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3018 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3019 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3020 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3021 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3022 				/* limit the number of tx pause frames to a default of 8 */
3023 				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3024 			}
3025 			writel(pause_enable,  base + NvRegTxPauseFrame);
3026 			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3027 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3028 		} else {
3029 			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3030 			writel(regmisc, base + NvRegMisc1);
3031 		}
3032 	}
3033 }
3034 
3035 /**
3036  * nv_update_linkspeed: Set up the MAC according to the link partner
3037  * @dev: Network device to be configured
3038  *
3039  * The function queries the PHY and checks if there is a link partner.
3040  * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3041  * set to 10 MBit HD.
3042  *
3043  * The function returns 0 if there is no link partner and 1 if there is
3044  * a good link partner.
3045  */
3046 static int nv_update_linkspeed(struct net_device *dev)
3047 {
3048 	struct fe_priv *np = netdev_priv(dev);
3049 	u8 __iomem *base = get_hwbase(dev);
3050 	int adv = 0;
3051 	int lpa = 0;
3052 	int adv_lpa, adv_pause, lpa_pause;
3053 	int newls = np->linkspeed;
3054 	int newdup = np->duplex;
3055 	int mii_status;
3056 	int retval = 0;
3057 	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3058 	u32 txrxFlags = 0;
3059 	u32 phy_exp;
3060 
3061 	/* BMSR_LSTATUS is latched, read it twice:
3062 	 * we want the current value.
3063 	 */
3064 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3065 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3066 
3067 	if (!(mii_status & BMSR_LSTATUS)) {
3068 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3069 		newdup = 0;
3070 		retval = 0;
3071 		goto set_speed;
3072 	}
3073 
3074 	if (np->autoneg == 0) {
3075 		if (np->fixed_mode & LPA_100FULL) {
3076 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3077 			newdup = 1;
3078 		} else if (np->fixed_mode & LPA_100HALF) {
3079 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3080 			newdup = 0;
3081 		} else if (np->fixed_mode & LPA_10FULL) {
3082 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3083 			newdup = 1;
3084 		} else {
3085 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3086 			newdup = 0;
3087 		}
3088 		retval = 1;
3089 		goto set_speed;
3090 	}
3091 	/* check auto negotiation is complete */
3092 	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3093 		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3094 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3095 		newdup = 0;
3096 		retval = 0;
3097 		goto set_speed;
3098 	}
3099 
3100 	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3101 	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3102 
3103 	retval = 1;
3104 	if (np->gigabit == PHY_GIGABIT) {
3105 		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3106 		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3107 
3108 		if ((control_1000 & ADVERTISE_1000FULL) &&
3109 			(status_1000 & LPA_1000FULL)) {
3110 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3111 			newdup = 1;
3112 			goto set_speed;
3113 		}
3114 	}
3115 
3116 	/* FIXME: handle parallel detection properly */
3117 	adv_lpa = lpa & adv;
3118 	if (adv_lpa & LPA_100FULL) {
3119 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3120 		newdup = 1;
3121 	} else if (adv_lpa & LPA_100HALF) {
3122 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3123 		newdup = 0;
3124 	} else if (adv_lpa & LPA_10FULL) {
3125 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3126 		newdup = 1;
3127 	} else if (adv_lpa & LPA_10HALF) {
3128 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3129 		newdup = 0;
3130 	} else {
3131 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3132 		newdup = 0;
3133 	}
3134 
3135 set_speed:
3136 	if (np->duplex == newdup && np->linkspeed == newls)
3137 		return retval;
3138 
3139 	np->duplex = newdup;
3140 	np->linkspeed = newls;
3141 
3142 	/* The transmitter and receiver must be restarted for safe update */
3143 	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3144 		txrxFlags |= NV_RESTART_TX;
3145 		nv_stop_tx(dev);
3146 	}
3147 	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3148 		txrxFlags |= NV_RESTART_RX;
3149 		nv_stop_rx(dev);
3150 	}
3151 
3152 	if (np->gigabit == PHY_GIGABIT) {
3153 		phyreg = readl(base + NvRegSlotTime);
3154 		phyreg &= ~(0x3FF00);
3155 		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3156 		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3157 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3158 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3159 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3160 		writel(phyreg, base + NvRegSlotTime);
3161 	}
3162 
3163 	phyreg = readl(base + NvRegPhyInterface);
3164 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3165 	if (np->duplex == 0)
3166 		phyreg |= PHY_HALF;
3167 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3168 		phyreg |= PHY_100;
3169 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3170 		phyreg |= PHY_1000;
3171 	writel(phyreg, base + NvRegPhyInterface);
3172 
3173 	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3174 	if (phyreg & PHY_RGMII) {
3175 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3176 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3177 		} else {
3178 			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3179 				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3180 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3181 				else
3182 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3183 			} else {
3184 				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3185 			}
3186 		}
3187 	} else {
3188 		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3189 			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3190 		else
3191 			txreg = NVREG_TX_DEFERRAL_DEFAULT;
3192 	}
3193 	writel(txreg, base + NvRegTxDeferral);
3194 
3195 	if (np->desc_ver == DESC_VER_1) {
3196 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3197 	} else {
3198 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3199 			txreg = NVREG_TX_WM_DESC2_3_1000;
3200 		else
3201 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3202 	}
3203 	writel(txreg, base + NvRegTxWatermark);
3204 
3205 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3206 		base + NvRegMisc1);
3207 	pci_push(base);
3208 	writel(np->linkspeed, base + NvRegLinkSpeed);
3209 	pci_push(base);
3210 
3211 	pause_flags = 0;
3212 	/* setup pause frame */
3213 	if (np->duplex != 0) {
3214 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3215 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3216 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
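			/* Resolve the pause mode roughly per IEEE 802.3
			 * Annex 28B: symmetric pause on both sides enables
			 * rx pause (and tx pause if requested); asymmetric
			 * combinations enable one direction only.
			 */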
3217 
3218 			switch (adv_pause) {
3219 			case ADVERTISE_PAUSE_CAP:
3220 				if (lpa_pause & LPA_PAUSE_CAP) {
3221 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3222 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3223 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3224 				}
3225 				break;
3226 			case ADVERTISE_PAUSE_ASYM:
3227 				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3228 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3229 				break;
3230 			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3231 				if (lpa_pause & LPA_PAUSE_CAP) {
3232 					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3233 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3234 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3235 				}
3236 				if (lpa_pause == LPA_PAUSE_ASYM)
3237 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3238 				break;
3239 			}
3240 		} else {
3241 			pause_flags = np->pause_flags;
3242 		}
3243 	}
3244 	nv_update_pause(dev, pause_flags);
3245 
3246 	if (txrxFlags & NV_RESTART_TX)
3247 		nv_start_tx(dev);
3248 	if (txrxFlags & NV_RESTART_RX)
3249 		nv_start_rx(dev);
3250 
3251 	return retval;
3252 }
3253 
3254 static void nv_linkchange(struct net_device *dev)
3255 {
3256 	if (nv_update_linkspeed(dev)) {
3257 		if (!netif_carrier_ok(dev)) {
3258 			netif_carrier_on(dev);
3259 			netdev_info(dev, "link up\n");
3260 			nv_txrx_gate(dev, false);
3261 			nv_start_rx(dev);
3262 		}
3263 	} else {
3264 		if (netif_carrier_ok(dev)) {
3265 			netif_carrier_off(dev);
3266 			netdev_info(dev, "link down\n");
3267 			nv_txrx_gate(dev, true);
3268 			nv_stop_rx(dev);
3269 		}
3270 	}
3271 }
3272 
3273 static void nv_link_irq(struct net_device *dev)
3274 {
3275 	u8 __iomem *base = get_hwbase(dev);
3276 	u32 miistat;
3277 
3278 	miistat = readl(base + NvRegMIIStatus);
3279 	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3280 
3281 	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3282 		nv_linkchange(dev);
3283 }
3284 
3285 static void nv_msi_workaround(struct fe_priv *np)
3286 {
3287 
3288 	/* Need to toggle the msi irq mask within the ethernet device;
3289 	 * otherwise, future interrupts will not be detected.
3290 	 */
3291 	if (np->msi_flags & NV_MSI_ENABLED) {
3292 		u8 __iomem *base = np->base;
3293 
3294 		writel(0, base + NvRegMSIIrqMask);
3295 		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3296 	}
3297 }
3298 
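/*
 * Dynamic interrupt moderation: under sustained heavy work, switch
 * np->irqmask to NVREG_IRQMASK_CPU (poll-based interrupts); after
 * NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet passes, switch back to
 * NVREG_IRQMASK_THROUGHPUT (per tx/rx packet interrupts). Returns 1 if
 * the mask changed and the caller must reprogram the nic.
 */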
3299 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3300 {
3301 	struct fe_priv *np = netdev_priv(dev);
3302 
3303 	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3304 		if (total_work > NV_DYNAMIC_THRESHOLD) {
3305 			/* transition to poll based interrupts */
3306 			np->quiet_count = 0;
3307 			if (np->irqmask != NVREG_IRQMASK_CPU) {
3308 				np->irqmask = NVREG_IRQMASK_CPU;
3309 				return 1;
3310 			}
3311 		} else {
3312 			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3313 				np->quiet_count++;
3314 			} else {
3315 				/* reached a period of low activity, switch
3316 				   to per tx/rx packet interrupts */
3317 				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3318 					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3319 					return 1;
3320 				}
3321 			}
3322 		}
3323 	}
3324 	return 0;
3325 }
3326 
3327 static irqreturn_t nv_nic_irq(int foo, void *data)
3328 {
3329 	struct net_device *dev = (struct net_device *) data;
3330 	struct fe_priv *np = netdev_priv(dev);
3331 	u8 __iomem *base = get_hwbase(dev);
3332 
3333 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3334 		np->events = readl(base + NvRegIrqStatus);
3335 		writel(np->events, base + NvRegIrqStatus);
3336 	} else {
3337 		np->events = readl(base + NvRegMSIXIrqStatus);
3338 		writel(np->events, base + NvRegMSIXIrqStatus);
3339 	}
3340 	if (!(np->events & np->irqmask))
3341 		return IRQ_NONE;
3342 
3343 	nv_msi_workaround(np);
3344 
3345 	if (napi_schedule_prep(&np->napi)) {
3346 		/*
		 * Disable further irqs (msix not enabled with napi)
3348 		 */
3349 		writel(0, base + NvRegIrqMask);
3350 		__napi_schedule(&np->napi);
3351 	}
3352 
3353 	return IRQ_HANDLED;
3354 }
3355 
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU usage and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
3361 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3362 {
3363 	struct net_device *dev = (struct net_device *) data;
3364 	struct fe_priv *np = netdev_priv(dev);
3365 	u8 __iomem *base = get_hwbase(dev);
3366 
3367 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3368 		np->events = readl(base + NvRegIrqStatus);
3369 		writel(np->events, base + NvRegIrqStatus);
3370 	} else {
3371 		np->events = readl(base + NvRegMSIXIrqStatus);
3372 		writel(np->events, base + NvRegMSIXIrqStatus);
3373 	}
3374 	if (!(np->events & np->irqmask))
3375 		return IRQ_NONE;
3376 
3377 	nv_msi_workaround(np);
3378 
3379 	if (napi_schedule_prep(&np->napi)) {
3380 		/*
		 * Disable further irqs (msix not enabled with napi)
3382 		 */
3383 		writel(0, base + NvRegIrqMask);
3384 		__napi_schedule(&np->napi);
3385 	}
3386 
3387 	return IRQ_HANDLED;
3388 }
3389 
3390 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3391 {
3392 	struct net_device *dev = (struct net_device *) data;
3393 	struct fe_priv *np = netdev_priv(dev);
3394 	u8 __iomem *base = get_hwbase(dev);
3395 	u32 events;
3396 	int i;
3397 	unsigned long flags;
3398 
3399 	for (i = 0;; i++) {
3400 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3401 		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3402 		if (!(events & np->irqmask))
3403 			break;
3404 
3405 		spin_lock_irqsave(&np->lock, flags);
3406 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3407 		spin_unlock_irqrestore(&np->lock, flags);
3408 
3409 		if (unlikely(i > max_interrupt_work)) {
3410 			spin_lock_irqsave(&np->lock, flags);
3411 			/* disable interrupts on the nic */
3412 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3413 			pci_push(base);
3414 
3415 			if (!np->in_shutdown) {
3416 				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3417 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3418 			}
3419 			spin_unlock_irqrestore(&np->lock, flags);
3420 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3421 				   __func__, i);
3422 			break;
3423 		}
3424 
3425 	}
3426 
3427 	return IRQ_RETVAL(i);
3428 }
3429 
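/*
 * NAPI poll callback.  Each pass reaps completed tx descriptors, then
 * processes at most the remaining rx budget and refills the rx ring,
 * looping while refills succeed and budget remains.  Returning less
 * than the budget lets the NAPI core complete polling so interrupts
 * can be re-enabled.  Rough shape, assuming a budget of 64:
 *
 *	pass 1: 64 rx packets done, budget exhausted -> stay in polling
 *	pass 2: 10 rx packets done, 10 < 64 -> napi_complete() and
 *		writel(np->irqmask, base + NvRegIrqMask)
 */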
3430 static int nv_napi_poll(struct napi_struct *napi, int budget)
3431 {
3432 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3433 	struct net_device *dev = np->dev;
3434 	u8 __iomem *base = get_hwbase(dev);
3435 	unsigned long flags;
3436 	int retcode;
3437 	int rx_count, tx_work = 0, rx_work = 0;
3438 
3439 	do {
3440 		if (!nv_optimized(np)) {
3441 			spin_lock_irqsave(&np->lock, flags);
3442 			tx_work += nv_tx_done(dev, np->tx_ring_size);
3443 			spin_unlock_irqrestore(&np->lock, flags);
3444 
3445 			rx_count = nv_rx_process(dev, budget - rx_work);
3446 			retcode = nv_alloc_rx(dev);
3447 		} else {
3448 			spin_lock_irqsave(&np->lock, flags);
3449 			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3450 			spin_unlock_irqrestore(&np->lock, flags);
3451 
3452 			rx_count = nv_rx_process_optimized(dev,
3453 			    budget - rx_work);
3454 			retcode = nv_alloc_rx_optimized(dev);
3455 		}
3456 	} while (retcode == 0 &&
3457 		 rx_count > 0 && (rx_work += rx_count) < budget);
3458 
3459 	if (retcode) {
3460 		spin_lock_irqsave(&np->lock, flags);
3461 		if (!np->in_shutdown)
3462 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3463 		spin_unlock_irqrestore(&np->lock, flags);
3464 	}
3465 
3466 	nv_change_interrupt_mode(dev, tx_work + rx_work);
3467 
3468 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
3469 		spin_lock_irqsave(&np->lock, flags);
3470 		nv_link_irq(dev);
3471 		spin_unlock_irqrestore(&np->lock, flags);
3472 	}
3473 	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3474 		spin_lock_irqsave(&np->lock, flags);
3475 		nv_linkchange(dev);
3476 		spin_unlock_irqrestore(&np->lock, flags);
3477 		np->link_timeout = jiffies + LINK_TIMEOUT;
3478 	}
3479 	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3480 		spin_lock_irqsave(&np->lock, flags);
3481 		if (!np->in_shutdown) {
3482 			np->nic_poll_irq = np->irqmask;
3483 			np->recover_error = 1;
3484 			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3485 		}
3486 		spin_unlock_irqrestore(&np->lock, flags);
3487 		napi_complete(napi);
3488 		return rx_work;
3489 	}
3490 
3491 	if (rx_work < budget) {
3492 		/* re-enable interrupts
3493 		   (msix not enabled in napi) */
3494 		napi_complete(napi);
3495 
3496 		writel(np->irqmask, base + NvRegIrqMask);
3497 	}
3498 	return rx_work;
3499 }
3500 
3501 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3502 {
3503 	struct net_device *dev = (struct net_device *) data;
3504 	struct fe_priv *np = netdev_priv(dev);
3505 	u8 __iomem *base = get_hwbase(dev);
3506 	u32 events;
3507 	int i;
3508 	unsigned long flags;
3509 
3510 	for (i = 0;; i++) {
3511 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3512 		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3513 		if (!(events & np->irqmask))
3514 			break;
3515 
3516 		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3517 			if (unlikely(nv_alloc_rx_optimized(dev))) {
3518 				spin_lock_irqsave(&np->lock, flags);
3519 				if (!np->in_shutdown)
3520 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3521 				spin_unlock_irqrestore(&np->lock, flags);
3522 			}
3523 		}
3524 
3525 		if (unlikely(i > max_interrupt_work)) {
3526 			spin_lock_irqsave(&np->lock, flags);
3527 			/* disable interrupts on the nic */
3528 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3529 			pci_push(base);
3530 
3531 			if (!np->in_shutdown) {
3532 				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3533 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3534 			}
3535 			spin_unlock_irqrestore(&np->lock, flags);
3536 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3537 				   __func__, i);
3538 			break;
3539 		}
3540 	}
3541 
3542 	return IRQ_RETVAL(i);
3543 }
3544 
3545 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3546 {
3547 	struct net_device *dev = (struct net_device *) data;
3548 	struct fe_priv *np = netdev_priv(dev);
3549 	u8 __iomem *base = get_hwbase(dev);
3550 	u32 events;
3551 	int i;
3552 	unsigned long flags;
3553 
3554 	for (i = 0;; i++) {
3555 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3556 		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3557 		if (!(events & np->irqmask))
3558 			break;
3559 
3560 		/* check tx in case we reached max loop limit in tx isr */
3561 		spin_lock_irqsave(&np->lock, flags);
3562 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3563 		spin_unlock_irqrestore(&np->lock, flags);
3564 
3565 		if (events & NVREG_IRQ_LINK) {
3566 			spin_lock_irqsave(&np->lock, flags);
3567 			nv_link_irq(dev);
3568 			spin_unlock_irqrestore(&np->lock, flags);
3569 		}
3570 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3571 			spin_lock_irqsave(&np->lock, flags);
3572 			nv_linkchange(dev);
3573 			spin_unlock_irqrestore(&np->lock, flags);
3574 			np->link_timeout = jiffies + LINK_TIMEOUT;
3575 		}
3576 		if (events & NVREG_IRQ_RECOVER_ERROR) {
3577 			spin_lock_irq(&np->lock);
3578 			/* disable interrupts on the nic */
3579 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3580 			pci_push(base);
3581 
3582 			if (!np->in_shutdown) {
3583 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3584 				np->recover_error = 1;
3585 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3586 			}
3587 			spin_unlock_irq(&np->lock);
3588 			break;
3589 		}
3590 		if (unlikely(i > max_interrupt_work)) {
3591 			spin_lock_irqsave(&np->lock, flags);
3592 			/* disable interrupts on the nic */
3593 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3594 			pci_push(base);
3595 
3596 			if (!np->in_shutdown) {
3597 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3598 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3599 			}
3600 			spin_unlock_irqrestore(&np->lock, flags);
3601 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3602 				   __func__, i);
3603 			break;
3604 		}
3605 
3606 	}
3607 
3608 	return IRQ_RETVAL(i);
3609 }
3610 
3611 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3612 {
3613 	struct net_device *dev = (struct net_device *) data;
3614 	struct fe_priv *np = netdev_priv(dev);
3615 	u8 __iomem *base = get_hwbase(dev);
3616 	u32 events;
3617 
3618 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3619 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3620 		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3621 	} else {
3622 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3623 		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3624 	}
3625 	pci_push(base);
3626 	if (!(events & NVREG_IRQ_TIMER))
3627 		return IRQ_RETVAL(0);
3628 
3629 	nv_msi_workaround(np);
3630 
3631 	spin_lock(&np->lock);
3632 	np->intr_test = 1;
3633 	spin_unlock(&np->lock);
3634 
3635 	return IRQ_RETVAL(1);
3636 }
3637 
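/*
 * Worked example for the 4-bit map fields below (illustrative numbers,
 * not from the hardware docs): steering interrupt bit 5 to MSI-X
 * vector 2 sets bits 23:20 of NvRegMSIXMap0:
 *
 *	msixmap |= 2 << (5 << 2);	 i.e. 0x00200000
 *
 * Interrupt bits 8..15 are packed the same way into NvRegMSIXMap1.
 */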
3638 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3639 {
3640 	u8 __iomem *base = get_hwbase(dev);
3641 	int i;
3642 	u32 msixmap = 0;
3643 
3644 	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3645 	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3646 	 * the remaining 8 interrupts.
3647 	 */
3648 	for (i = 0; i < 8; i++) {
3649 		if ((irqmask >> i) & 0x1)
3650 			msixmap |= vector << (i << 2);
3651 	}
3652 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3653 
3654 	msixmap = 0;
3655 	for (i = 0; i < 8; i++) {
3656 		if ((irqmask >> (i + 8)) & 0x1)
3657 			msixmap |= vector << (i << 2);
3658 	}
3659 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3660 }
3661 
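/*
 * Interrupt setup tries the richest mechanism the device claims and
 * falls back step by step:
 *
 *	1. MSI-X: in throughput mode, separate vectors for rx, tx and
 *	   link/other events, each steered via set_msix_vector_map();
 *	   otherwise one shared MSI-X vector.
 *	2. MSI: a single message with vector 0 enabled in NvRegMSIIrqMask.
 *	3. Legacy INTx on np->pci_dev->irq.
 *
 * The fallback only happens when enabling MSI-X/MSI fails; once a
 * mechanism is enabled, a request_irq() failure unwinds it and the
 * function returns 1.
 */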
3662 static int nv_request_irq(struct net_device *dev, int intr_test)
3663 {
3664 	struct fe_priv *np = get_nvpriv(dev);
3665 	u8 __iomem *base = get_hwbase(dev);
3666 	int ret = 1;
3667 	int i;
3668 	irqreturn_t (*handler)(int foo, void *data);
3669 
3670 	if (intr_test) {
3671 		handler = nv_nic_irq_test;
3672 	} else {
3673 		if (nv_optimized(np))
3674 			handler = nv_nic_irq_optimized;
3675 		else
3676 			handler = nv_nic_irq;
3677 	}
3678 
3679 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
3680 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3681 			np->msi_x_entry[i].entry = i;
3682 		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3683 		if (ret == 0) {
3684 			np->msi_flags |= NV_MSI_X_ENABLED;
3685 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3686 				/* Request irq for rx handling */
3687 				sprintf(np->name_rx, "%s-rx", dev->name);
				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
				if (ret) {
3690 					netdev_info(dev,
3691 						    "request_irq failed for rx %d\n",
3692 						    ret);
3693 					pci_disable_msix(np->pci_dev);
3694 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3695 					goto out_err;
3696 				}
3697 				/* Request irq for tx handling */
3698 				sprintf(np->name_tx, "%s-tx", dev->name);
				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
				if (ret) {
3701 					netdev_info(dev,
3702 						    "request_irq failed for tx %d\n",
3703 						    ret);
3704 					pci_disable_msix(np->pci_dev);
3705 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3706 					goto out_free_rx;
3707 				}
3708 				/* Request irq for link and timer handling */
3709 				sprintf(np->name_other, "%s-other", dev->name);
				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
				if (ret) {
3712 					netdev_info(dev,
3713 						    "request_irq failed for link %d\n",
3714 						    ret);
3715 					pci_disable_msix(np->pci_dev);
3716 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3717 					goto out_free_tx;
3718 				}
3719 				/* map interrupts to their respective vector */
3720 				writel(0, base + NvRegMSIXMap0);
3721 				writel(0, base + NvRegMSIXMap1);
3722 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3723 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3724 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3725 			} else {
3726 				/* Request irq for all interrupts */
				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
						  handler, IRQF_SHARED, dev->name, dev);
				if (ret) {
3728 					netdev_info(dev,
3729 						    "request_irq failed %d\n",
3730 						    ret);
3731 					pci_disable_msix(np->pci_dev);
3732 					np->msi_flags &= ~NV_MSI_X_ENABLED;
3733 					goto out_err;
3734 				}
3735 
3736 				/* map interrupts to vector 0 */
3737 				writel(0, base + NvRegMSIXMap0);
3738 				writel(0, base + NvRegMSIXMap1);
3739 			}
3740 		}
3741 	}
3742 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3743 		ret = pci_enable_msi(np->pci_dev);
3744 		if (ret == 0) {
3745 			np->msi_flags |= NV_MSI_ENABLED;
3746 			dev->irq = np->pci_dev->irq;
			ret = request_irq(np->pci_dev->irq, handler,
					  IRQF_SHARED, dev->name, dev);
			if (ret) {
3748 				netdev_info(dev, "request_irq failed %d\n",
3749 					    ret);
3750 				pci_disable_msi(np->pci_dev);
3751 				np->msi_flags &= ~NV_MSI_ENABLED;
3752 				dev->irq = np->pci_dev->irq;
3753 				goto out_err;
3754 			}
3755 
3756 			/* map interrupts to vector 0 */
3757 			writel(0, base + NvRegMSIMap0);
3758 			writel(0, base + NvRegMSIMap1);
3759 			/* enable msi vector 0 */
3760 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3761 		}
3762 	}
3763 	if (ret != 0) {
3764 		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3765 			goto out_err;
3766 
3767 	}
3768 
3769 	return 0;
3770 out_free_tx:
3771 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3772 out_free_rx:
3773 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3774 out_err:
3775 	return 1;
3776 }
3777 
3778 static void nv_free_irq(struct net_device *dev)
3779 {
3780 	struct fe_priv *np = get_nvpriv(dev);
3781 	int i;
3782 
3783 	if (np->msi_flags & NV_MSI_X_ENABLED) {
3784 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3785 			free_irq(np->msi_x_entry[i].vector, dev);
3786 		pci_disable_msix(np->pci_dev);
3787 		np->msi_flags &= ~NV_MSI_X_ENABLED;
3788 	} else {
3789 		free_irq(np->pci_dev->irq, dev);
3790 		if (np->msi_flags & NV_MSI_ENABLED) {
3791 			pci_disable_msi(np->pci_dev);
3792 			np->msi_flags &= ~NV_MSI_ENABLED;
3793 		}
3794 	}
3795 }
3796 
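/*
 * Timer-driven recovery path.  The interrupt handlers arm np->nic_poll
 * (with the relevant bits in np->nic_poll_irq) when they hit
 * max_interrupt_work or see NVREG_IRQ_RECOVER_ERROR; this callback then
 * performs the deferred event handling with the host irq(s) disabled,
 * resets the MAC if a recoverable error was flagged, and re-enables the
 * interrupt sources it handled.
 */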
3797 static void nv_do_nic_poll(unsigned long data)
3798 {
3799 	struct net_device *dev = (struct net_device *) data;
3800 	struct fe_priv *np = netdev_priv(dev);
3801 	u8 __iomem *base = get_hwbase(dev);
3802 	u32 mask = 0;
3803 
3804 	/*
	 * First disable the host irq(s), then reenable the interrupt
	 * sources on the nic. This has to happen before calling nv_nic_irq,
	 * which may decide to mask them again.
3808 	 */
3809 
3810 	if (!using_multi_irqs(dev)) {
3811 		if (np->msi_flags & NV_MSI_X_ENABLED)
3812 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3813 		else
3814 			disable_irq_lockdep(np->pci_dev->irq);
3815 		mask = np->irqmask;
3816 	} else {
3817 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3818 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3819 			mask |= NVREG_IRQ_RX_ALL;
3820 		}
3821 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3822 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3823 			mask |= NVREG_IRQ_TX_ALL;
3824 		}
3825 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3826 			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3827 			mask |= NVREG_IRQ_OTHER;
3828 		}
3829 	}
3830 	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */
3831 
3832 	if (np->recover_error) {
3833 		np->recover_error = 0;
3834 		netdev_info(dev, "MAC in recoverable error state\n");
3835 		if (netif_running(dev)) {
3836 			netif_tx_lock_bh(dev);
3837 			netif_addr_lock(dev);
3838 			spin_lock(&np->lock);
3839 			/* stop engines */
3840 			nv_stop_rxtx(dev);
3841 			if (np->driver_data & DEV_HAS_POWER_CNTRL)
3842 				nv_mac_reset(dev);
3843 			nv_txrx_reset(dev);
3844 			/* drain rx queue */
3845 			nv_drain_rxtx(dev);
3846 			/* reinit driver view of the rx queue */
3847 			set_bufsize(dev);
3848 			if (nv_init_ring(dev)) {
3849 				if (!np->in_shutdown)
3850 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3851 			}
3852 			/* reinit nic view of the rx queue */
3853 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3854 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3855 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3856 				base + NvRegRingSizes);
3857 			pci_push(base);
3858 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3859 			pci_push(base);
3860 			/* clear interrupts */
3861 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
3862 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3863 			else
3864 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3865 
3866 			/* restart rx engine */
3867 			nv_start_rxtx(dev);
3868 			spin_unlock(&np->lock);
3869 			netif_addr_unlock(dev);
3870 			netif_tx_unlock_bh(dev);
3871 		}
3872 	}
3873 
3874 	writel(mask, base + NvRegIrqMask);
3875 	pci_push(base);
3876 
3877 	if (!using_multi_irqs(dev)) {
3878 		np->nic_poll_irq = 0;
3879 		if (nv_optimized(np))
3880 			nv_nic_irq_optimized(0, dev);
3881 		else
3882 			nv_nic_irq(0, dev);
3883 		if (np->msi_flags & NV_MSI_X_ENABLED)
3884 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3885 		else
3886 			enable_irq_lockdep(np->pci_dev->irq);
3887 	} else {
3888 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3889 			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
3890 			nv_nic_irq_rx(0, dev);
3891 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3892 		}
3893 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3894 			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
3895 			nv_nic_irq_tx(0, dev);
3896 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3897 		}
3898 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3899 			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
3900 			nv_nic_irq_other(0, dev);
3901 			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3902 		}
3903 	}
3904 
3905 }
3906 
3907 #ifdef CONFIG_NET_POLL_CONTROLLER
3908 static void nv_poll_controller(struct net_device *dev)
3909 {
3910 	nv_do_nic_poll((unsigned long) dev);
3911 }
3912 #endif
3913 
3914 static void nv_do_stats_poll(unsigned long data)
3915 {
3916 	struct net_device *dev = (struct net_device *) data;
3917 	struct fe_priv *np = netdev_priv(dev);
3918 
3919 	nv_get_hw_stats(dev);
3920 
3921 	if (!np->in_shutdown)
3922 		mod_timer(&np->stats_poll,
3923 			round_jiffies(jiffies + STATS_INTERVAL));
3924 }
3925 
3926 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3927 {
3928 	struct fe_priv *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
3932 }
3933 
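/*
 * Wake-on-LAN plumbing for ethtool; only magic-packet wake is
 * supported.  Typical usage from userspace (assuming the usual ethtool
 * option names):
 *
 *	ethtool -s eth0 wol g	  enable magic-packet wake
 *	ethtool -s eth0 wol d	  disable wake-on-LAN
 */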
3934 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3935 {
3936 	struct fe_priv *np = netdev_priv(dev);
3937 	wolinfo->supported = WAKE_MAGIC;
3938 
3939 	spin_lock_irq(&np->lock);
3940 	if (np->wolenabled)
3941 		wolinfo->wolopts = WAKE_MAGIC;
3942 	spin_unlock_irq(&np->lock);
3943 }
3944 
3945 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3946 {
3947 	struct fe_priv *np = netdev_priv(dev);
3948 	u8 __iomem *base = get_hwbase(dev);
3949 	u32 flags = 0;
3950 
3951 	if (wolinfo->wolopts == 0) {
3952 		np->wolenabled = 0;
3953 	} else if (wolinfo->wolopts & WAKE_MAGIC) {
3954 		np->wolenabled = 1;
3955 		flags = NVREG_WAKEUPFLAGS_ENABLE;
3956 	}
3957 	if (netif_running(dev)) {
3958 		spin_lock_irq(&np->lock);
3959 		writel(flags, base + NvRegWakeUpFlags);
3960 		spin_unlock_irq(&np->lock);
3961 	}
3962 	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
3963 	return 0;
3964 }
3965 
3966 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3967 {
3968 	struct fe_priv *np = netdev_priv(dev);
3969 	u32 speed;
3970 	int adv;
3971 
3972 	spin_lock_irq(&np->lock);
3973 	ecmd->port = PORT_MII;
3974 	if (!netif_running(dev)) {
3975 		/* We do not track link speed / duplex setting if the
3976 		 * interface is disabled. Force a link check */
3977 		if (nv_update_linkspeed(dev)) {
3978 			if (!netif_carrier_ok(dev))
3979 				netif_carrier_on(dev);
3980 		} else {
3981 			if (netif_carrier_ok(dev))
3982 				netif_carrier_off(dev);
3983 		}
3984 	}
3985 
3986 	if (netif_carrier_ok(dev)) {
3987 		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3988 		case NVREG_LINKSPEED_10:
3989 			speed = SPEED_10;
3990 			break;
3991 		case NVREG_LINKSPEED_100:
3992 			speed = SPEED_100;
3993 			break;
3994 		case NVREG_LINKSPEED_1000:
3995 			speed = SPEED_1000;
3996 			break;
3997 		default:
3998 			speed = -1;
3999 			break;
4000 		}
4001 		ecmd->duplex = DUPLEX_HALF;
4002 		if (np->duplex)
4003 			ecmd->duplex = DUPLEX_FULL;
4004 	} else {
4005 		speed = -1;
4006 		ecmd->duplex = -1;
4007 	}
4008 	ethtool_cmd_speed_set(ecmd, speed);
4009 	ecmd->autoneg = np->autoneg;
4010 
4011 	ecmd->advertising = ADVERTISED_MII;
4012 	if (np->autoneg) {
4013 		ecmd->advertising |= ADVERTISED_Autoneg;
4014 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4015 		if (adv & ADVERTISE_10HALF)
4016 			ecmd->advertising |= ADVERTISED_10baseT_Half;
4017 		if (adv & ADVERTISE_10FULL)
4018 			ecmd->advertising |= ADVERTISED_10baseT_Full;
4019 		if (adv & ADVERTISE_100HALF)
4020 			ecmd->advertising |= ADVERTISED_100baseT_Half;
4021 		if (adv & ADVERTISE_100FULL)
4022 			ecmd->advertising |= ADVERTISED_100baseT_Full;
4023 		if (np->gigabit == PHY_GIGABIT) {
4024 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4025 			if (adv & ADVERTISE_1000FULL)
4026 				ecmd->advertising |= ADVERTISED_1000baseT_Full;
4027 		}
4028 	}
4029 	ecmd->supported = (SUPPORTED_Autoneg |
4030 		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4031 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4032 		SUPPORTED_MII);
4033 	if (np->gigabit == PHY_GIGABIT)
4034 		ecmd->supported |= SUPPORTED_1000baseT_Full;
4035 
4036 	ecmd->phy_address = np->phyaddr;
4037 	ecmd->transceiver = XCVR_EXTERNAL;
4038 
4039 	/* ignore maxtxpkt, maxrxpkt for now */
4040 	spin_unlock_irq(&np->lock);
4041 	return 0;
4042 }
4043 
4044 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4045 {
4046 	struct fe_priv *np = netdev_priv(dev);
4047 	u32 speed = ethtool_cmd_speed(ecmd);
4048 
4049 	if (ecmd->port != PORT_MII)
4050 		return -EINVAL;
4051 	if (ecmd->transceiver != XCVR_EXTERNAL)
4052 		return -EINVAL;
4053 	if (ecmd->phy_address != np->phyaddr) {
4054 		/* TODO: support switching between multiple phys. Should be
4055 		 * trivial, but not enabled due to lack of test hardware. */
4056 		return -EINVAL;
4057 	}
4058 	if (ecmd->autoneg == AUTONEG_ENABLE) {
4059 		u32 mask;
4060 
4061 		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4062 			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4063 		if (np->gigabit == PHY_GIGABIT)
4064 			mask |= ADVERTISED_1000baseT_Full;
4065 
4066 		if ((ecmd->advertising & mask) == 0)
4067 			return -EINVAL;
4068 
4069 	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: forcing 1000 Mbps with autonegotiation disabled is
		 * intentionally forbidden - no one should need that. */
4072 
4073 		if (speed != SPEED_10 && speed != SPEED_100)
4074 			return -EINVAL;
4075 		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4076 			return -EINVAL;
4077 	} else {
4078 		return -EINVAL;
4079 	}
4080 
4081 	netif_carrier_off(dev);
4082 	if (netif_running(dev)) {
4083 		unsigned long flags;
4084 
4085 		nv_disable_irq(dev);
4086 		netif_tx_lock_bh(dev);
4087 		netif_addr_lock(dev);
4088 		/* with plain spinlock lockdep complains */
4089 		spin_lock_irqsave(&np->lock, flags);
4090 		/* stop engines */
4091 		/* FIXME:
4092 		 * this can take some time, and interrupts are disabled
4093 		 * due to spin_lock_irqsave, but let's hope no daemon
4094 		 * is going to change the settings very often...
4095 		 * Worst case:
4096 		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4097 		 * + some minor delays, which is up to a second approximately
4098 		 */
4099 		nv_stop_rxtx(dev);
4100 		spin_unlock_irqrestore(&np->lock, flags);
4101 		netif_addr_unlock(dev);
4102 		netif_tx_unlock_bh(dev);
4103 	}
4104 
4105 	if (ecmd->autoneg == AUTONEG_ENABLE) {
4106 		int adv, bmcr;
4107 
4108 		np->autoneg = 1;
4109 
4110 		/* advertise only what has been requested */
4111 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4112 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4113 		if (ecmd->advertising & ADVERTISED_10baseT_Half)
4114 			adv |= ADVERTISE_10HALF;
4115 		if (ecmd->advertising & ADVERTISED_10baseT_Full)
4116 			adv |= ADVERTISE_10FULL;
4117 		if (ecmd->advertising & ADVERTISED_100baseT_Half)
4118 			adv |= ADVERTISE_100HALF;
4119 		if (ecmd->advertising & ADVERTISED_100baseT_Full)
4120 			adv |= ADVERTISE_100FULL;
4121 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
4122 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4123 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4124 			adv |=  ADVERTISE_PAUSE_ASYM;
4125 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4126 
4127 		if (np->gigabit == PHY_GIGABIT) {
4128 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4129 			adv &= ~ADVERTISE_1000FULL;
4130 			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4131 				adv |= ADVERTISE_1000FULL;
4132 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4133 		}
4134 
4135 		if (netif_running(dev))
4136 			netdev_info(dev, "link down\n");
4137 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4138 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4139 			bmcr |= BMCR_ANENABLE;
4140 			/* reset the phy in order for settings to stick,
4141 			 * and cause autoneg to start */
4142 			if (phy_reset(dev, bmcr)) {
4143 				netdev_info(dev, "phy reset failed\n");
4144 				return -EINVAL;
4145 			}
4146 		} else {
4147 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4148 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4149 		}
4150 	} else {
4151 		int adv, bmcr;
4152 
4153 		np->autoneg = 0;
4154 
4155 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4156 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4157 		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4158 			adv |= ADVERTISE_10HALF;
4159 		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4160 			adv |= ADVERTISE_10FULL;
4161 		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4162 			adv |= ADVERTISE_100HALF;
4163 		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4164 			adv |= ADVERTISE_100FULL;
4165 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4166 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4167 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4168 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4169 		}
4170 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4171 			adv |=  ADVERTISE_PAUSE_ASYM;
4172 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4173 		}
4174 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4175 		np->fixed_mode = adv;
4176 
4177 		if (np->gigabit == PHY_GIGABIT) {
4178 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4179 			adv &= ~ADVERTISE_1000FULL;
4180 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4181 		}
4182 
4183 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4184 		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4185 		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4186 			bmcr |= BMCR_FULLDPLX;
4187 		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4188 			bmcr |= BMCR_SPEED100;
4189 		if (np->phy_oui == PHY_OUI_MARVELL) {
4190 			/* reset the phy in order for forced mode settings to stick */
4191 			if (phy_reset(dev, bmcr)) {
4192 				netdev_info(dev, "phy reset failed\n");
4193 				return -EINVAL;
4194 			}
4195 		} else {
4196 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4197 			if (netif_running(dev)) {
4198 				/* Wait a bit and then reconfigure the nic. */
4199 				udelay(10);
4200 				nv_linkchange(dev);
4201 			}
4202 		}
4203 	}
4204 
4205 	if (netif_running(dev)) {
4206 		nv_start_rxtx(dev);
4207 		nv_enable_irq(dev);
4208 	}
4209 
4210 	return 0;
4211 }
4212 
4213 #define FORCEDETH_REGS_VER	1
4214 
4215 static int nv_get_regs_len(struct net_device *dev)
4216 {
4217 	struct fe_priv *np = netdev_priv(dev);
4218 	return np->register_size;
4219 }
4220 
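/*
 * Register dump for "ethtool -d".  The ethtool core sizes the user
 * buffer from nv_get_regs_len() (np->register_size bytes), so the copy
 * loop below must stay strictly below register_size/sizeof(u32) words
 * to avoid writing past that buffer.
 */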
4221 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4222 {
4223 	struct fe_priv *np = netdev_priv(dev);
4224 	u8 __iomem *base = get_hwbase(dev);
4225 	u32 *rbuf = buf;
4226 	int i;
4227 
4228 	regs->version = FORCEDETH_REGS_VER;
4229 	spin_lock_irq(&np->lock);
	for (i = 0; i < np->register_size/sizeof(u32); i++)
4231 		rbuf[i] = readl(base + i*sizeof(u32));
4232 	spin_unlock_irq(&np->lock);
4233 }
4234 
4235 static int nv_nway_reset(struct net_device *dev)
4236 {
4237 	struct fe_priv *np = netdev_priv(dev);
4238 	int ret;
4239 
4240 	if (np->autoneg) {
4241 		int bmcr;
4242 
4243 		netif_carrier_off(dev);
4244 		if (netif_running(dev)) {
4245 			nv_disable_irq(dev);
4246 			netif_tx_lock_bh(dev);
4247 			netif_addr_lock(dev);
4248 			spin_lock(&np->lock);
4249 			/* stop engines */
4250 			nv_stop_rxtx(dev);
4251 			spin_unlock(&np->lock);
4252 			netif_addr_unlock(dev);
4253 			netif_tx_unlock_bh(dev);
4254 			netdev_info(dev, "link down\n");
4255 		}
4256 
4257 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4258 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4259 			bmcr |= BMCR_ANENABLE;
4260 			/* reset the phy in order for settings to stick*/
4261 			if (phy_reset(dev, bmcr)) {
4262 				netdev_info(dev, "phy reset failed\n");
4263 				return -EINVAL;
4264 			}
4265 		} else {
4266 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4267 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4268 		}
4269 
4270 		if (netif_running(dev)) {
4271 			nv_start_rxtx(dev);
4272 			nv_enable_irq(dev);
4273 		}
4274 		ret = 0;
4275 	} else {
4276 		ret = -EINVAL;
4277 	}
4278 
4279 	return ret;
4280 }
4281 
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4283 {
4284 	struct fe_priv *np = netdev_priv(dev);
4285 
4286 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4287 	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4288 
4289 	ring->rx_pending = np->rx_ring_size;
4290 	ring->tx_pending = np->tx_ring_size;
4291 }
4292 
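/*
 * Ring resizing, reachable via "ethtool -G eth0 rx N tx N".  The new
 * descriptor ring and skb maps are allocated before the old ones are
 * torn down, so on allocation failure the device keeps running on its
 * current rings and -ENOMEM is returned.
 */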
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4294 {
4295 	struct fe_priv *np = netdev_priv(dev);
4296 	u8 __iomem *base = get_hwbase(dev);
4297 	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4298 	dma_addr_t ring_addr;
4299 
4300 	if (ring->rx_pending < RX_RING_MIN ||
4301 	    ring->tx_pending < TX_RING_MIN ||
4302 	    ring->rx_mini_pending != 0 ||
4303 	    ring->rx_jumbo_pending != 0 ||
4304 	    (np->desc_ver == DESC_VER_1 &&
4305 	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4306 	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4307 	    (np->desc_ver != DESC_VER_1 &&
4308 	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4309 	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4310 		return -EINVAL;
4311 	}
4312 
4313 	/* allocate new rings */
4314 	if (!nv_optimized(np)) {
4315 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4316 					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4317 					    &ring_addr);
4318 	} else {
4319 		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4320 					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4321 					    &ring_addr);
4322 	}
4323 	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4324 	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4325 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4326 		/* fall back to old rings */
4327 		if (!nv_optimized(np)) {
4328 			if (rxtx_ring)
4329 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4330 						    rxtx_ring, ring_addr);
4331 		} else {
4332 			if (rxtx_ring)
4333 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4334 						    rxtx_ring, ring_addr);
4335 		}
4336 
4337 		kfree(rx_skbuff);
4338 		kfree(tx_skbuff);
4339 		goto exit;
4340 	}
4341 
4342 	if (netif_running(dev)) {
4343 		nv_disable_irq(dev);
4344 		nv_napi_disable(dev);
4345 		netif_tx_lock_bh(dev);
4346 		netif_addr_lock(dev);
4347 		spin_lock(&np->lock);
4348 		/* stop engines */
4349 		nv_stop_rxtx(dev);
4350 		nv_txrx_reset(dev);
4351 		/* drain queues */
4352 		nv_drain_rxtx(dev);
4353 		/* delete queues */
4354 		free_rings(dev);
4355 	}
4356 
4357 	/* set new values */
4358 	np->rx_ring_size = ring->rx_pending;
4359 	np->tx_ring_size = ring->tx_pending;
4360 
4361 	if (!nv_optimized(np)) {
4362 		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4363 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4364 	} else {
4365 		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4366 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4367 	}
4368 	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4369 	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4370 	np->ring_addr = ring_addr;
4371 
4372 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4373 	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4374 
4375 	if (netif_running(dev)) {
4376 		/* reinit driver view of the queues */
4377 		set_bufsize(dev);
4378 		if (nv_init_ring(dev)) {
4379 			if (!np->in_shutdown)
4380 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4381 		}
4382 
4383 		/* reinit nic view of the queues */
4384 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4385 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4386 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4387 			base + NvRegRingSizes);
4388 		pci_push(base);
4389 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4390 		pci_push(base);
4391 
4392 		/* restart engines */
4393 		nv_start_rxtx(dev);
4394 		spin_unlock(&np->lock);
4395 		netif_addr_unlock(dev);
4396 		netif_tx_unlock_bh(dev);
4397 		nv_napi_enable(dev);
4398 		nv_enable_irq(dev);
4399 	}
4400 	return 0;
4401 exit:
4402 	return -ENOMEM;
4403 }
4404 
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4406 {
4407 	struct fe_priv *np = netdev_priv(dev);
4408 
4409 	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4410 	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4411 	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4412 }
4413 
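/*
 * Flow-control configuration ("ethtool -A").  With autonegotiation the
 * requested rx/tx pause bits are translated into the standard
 * ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM advertisement and autoneg
 * is restarted; without it the pause state is programmed directly.
 * Example:
 *
 *	ethtool -A eth0 autoneg on rx on tx off
 */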
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4415 {
4416 	struct fe_priv *np = netdev_priv(dev);
4417 	int adv, bmcr;
4418 
4419 	if ((!np->autoneg && np->duplex == 0) ||
4420 	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4421 		netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
4422 		return -EINVAL;
4423 	}
4424 	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4425 		netdev_info(dev, "hardware does not support tx pause frames\n");
4426 		return -EINVAL;
4427 	}
4428 
4429 	netif_carrier_off(dev);
4430 	if (netif_running(dev)) {
4431 		nv_disable_irq(dev);
4432 		netif_tx_lock_bh(dev);
4433 		netif_addr_lock(dev);
4434 		spin_lock(&np->lock);
4435 		/* stop engines */
4436 		nv_stop_rxtx(dev);
4437 		spin_unlock(&np->lock);
4438 		netif_addr_unlock(dev);
4439 		netif_tx_unlock_bh(dev);
4440 	}
4441 
4442 	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4443 	if (pause->rx_pause)
4444 		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4445 	if (pause->tx_pause)
4446 		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4447 
4448 	if (np->autoneg && pause->autoneg) {
4449 		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4450 
4451 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4452 		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4453 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4454 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4455 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4456 			adv |=  ADVERTISE_PAUSE_ASYM;
4457 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4458 
4459 		if (netif_running(dev))
4460 			netdev_info(dev, "link down\n");
4461 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4462 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4463 		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4464 	} else {
4465 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4466 		if (pause->rx_pause)
4467 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4468 		if (pause->tx_pause)
4469 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4470 
4471 		if (!netif_running(dev))
4472 			nv_update_linkspeed(dev);
4473 		else
4474 			nv_update_pause(dev, np->pause_flags);
4475 	}
4476 
4477 	if (netif_running(dev)) {
4478 		nv_start_rxtx(dev);
4479 		nv_enable_irq(dev);
4480 	}
4481 	return 0;
4482 }
4483 
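/*
 * Feature dependency: VLAN tag offload requires rx checksum offload,
 * so requesting either hardware VLAN feature forces NETIF_F_RXCSUM back
 * on.  E.g. (assuming the usual ethtool -K option names)
 * "ethtool -K eth0 rxvlan on rx off" leaves rx checksumming enabled.
 */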
4484 static u32 nv_fix_features(struct net_device *dev, u32 features)
4485 {
4486 	/* vlan is dependent on rx checksum offload */
4487 	if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
4488 		features |= NETIF_F_RXCSUM;
4489 
4490 	return features;
4491 }
4492 
4493 static void nv_vlan_mode(struct net_device *dev, u32 features)
4494 {
4495 	struct fe_priv *np = get_nvpriv(dev);
4496 
4497 	spin_lock_irq(&np->lock);
4498 
4499 	if (features & NETIF_F_HW_VLAN_RX)
4500 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4501 	else
4502 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4503 
4504 	if (features & NETIF_F_HW_VLAN_TX)
4505 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4506 	else
4507 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4508 
4509 	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4510 
4511 	spin_unlock_irq(&np->lock);
4512 }
4513 
4514 static int nv_set_features(struct net_device *dev, u32 features)
4515 {
4516 	struct fe_priv *np = netdev_priv(dev);
4517 	u8 __iomem *base = get_hwbase(dev);
4518 	u32 changed = dev->features ^ features;
4519 
4520 	if (changed & NETIF_F_RXCSUM) {
4521 		spin_lock_irq(&np->lock);
4522 
4523 		if (features & NETIF_F_RXCSUM)
4524 			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4525 		else
4526 			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4527 
4528 		if (netif_running(dev))
4529 			writel(np->txrxctl_bits, base + NvRegTxRxControl);
4530 
4531 		spin_unlock_irq(&np->lock);
4532 	}
4533 
4534 	if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
4535 		nv_vlan_mode(dev, features);
4536 
4537 	return 0;
4538 }
4539 
4540 static int nv_get_sset_count(struct net_device *dev, int sset)
4541 {
4542 	struct fe_priv *np = netdev_priv(dev);
4543 
4544 	switch (sset) {
4545 	case ETH_SS_TEST:
4546 		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4547 			return NV_TEST_COUNT_EXTENDED;
4548 		else
4549 			return NV_TEST_COUNT_BASE;
4550 	case ETH_SS_STATS:
4551 		if (np->driver_data & DEV_HAS_STATISTICS_V3)
4552 			return NV_DEV_STATISTICS_V3_COUNT;
4553 		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4554 			return NV_DEV_STATISTICS_V2_COUNT;
4555 		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4556 			return NV_DEV_STATISTICS_V1_COUNT;
4557 		else
4558 			return 0;
4559 	default:
4560 		return -EOPNOTSUPP;
4561 	}
4562 }
4563 
4564 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4565 {
4566 	struct fe_priv *np = netdev_priv(dev);
4567 
4568 	/* update stats */
4569 	nv_do_stats_poll((unsigned long)dev);
4570 
4571 	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4572 }
4573 
4574 static int nv_link_test(struct net_device *dev)
4575 {
4576 	struct fe_priv *np = netdev_priv(dev);
4577 	int mii_status;
4578 
4579 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4580 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4581 
4582 	/* check phy link status */
4583 	if (!(mii_status & BMSR_LSTATUS))
4584 		return 0;
4585 	else
4586 		return 1;
4587 }
4588 
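/*
 * Register self-test: for each entry of nv_registers_test[], toggle the
 * writable bits via xor, write the result back, and check the toggled
 * bits read back as written before restoring the original.  With
 * illustrative values mask = 0x0000000f and orig_read = 0x12345678:
 *
 *	write 0x12345677, expect (readback & 0xf) == 0x7
 */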
4589 static int nv_register_test(struct net_device *dev)
4590 {
4591 	u8 __iomem *base = get_hwbase(dev);
4592 	int i = 0;
4593 	u32 orig_read, new_read;
4594 
4595 	do {
4596 		orig_read = readl(base + nv_registers_test[i].reg);
4597 
4598 		/* xor with mask to toggle bits */
4599 		orig_read ^= nv_registers_test[i].mask;
4600 
4601 		writel(orig_read, base + nv_registers_test[i].reg);
4602 
4603 		new_read = readl(base + nv_registers_test[i].reg);
4604 
4605 		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4606 			return 0;
4607 
4608 		/* restore original value */
4609 		orig_read ^= nv_registers_test[i].mask;
4610 		writel(orig_read, base + nv_registers_test[i].reg);
4611 
4612 	} while (nv_registers_test[++i].reg != 0);
4613 
4614 	return 1;
4615 }
4616 
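/*
 * Interrupt self-test: temporarily request a single test vector, arm
 * the hardware timer interrupt and sleep 100 ms; nv_nic_irq_test()
 * sets np->intr_test from interrupt context, which is the pass
 * condition.  Returns 1 on pass, 2 if no interrupt fired, 0 if irq
 * setup itself failed.
 */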
4617 static int nv_interrupt_test(struct net_device *dev)
4618 {
4619 	struct fe_priv *np = netdev_priv(dev);
4620 	u8 __iomem *base = get_hwbase(dev);
4621 	int ret = 1;
4622 	int testcnt;
4623 	u32 save_msi_flags, save_poll_interval = 0;
4624 
4625 	if (netif_running(dev)) {
4626 		/* free current irq */
4627 		nv_free_irq(dev);
4628 		save_poll_interval = readl(base+NvRegPollingInterval);
4629 	}
4630 
4631 	/* flag to test interrupt handler */
4632 	np->intr_test = 0;
4633 
4634 	/* setup test irq */
4635 	save_msi_flags = np->msi_flags;
4636 	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4637 	np->msi_flags |= 0x001; /* setup 1 vector */
4638 	if (nv_request_irq(dev, 1))
4639 		return 0;
4640 
4641 	/* setup timer interrupt */
4642 	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4643 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4644 
4645 	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4646 
4647 	/* wait for at least one interrupt */
4648 	msleep(100);
4649 
4650 	spin_lock_irq(&np->lock);
4651 
4652 	/* flag should be set within ISR */
4653 	testcnt = np->intr_test;
4654 	if (!testcnt)
4655 		ret = 2;
4656 
4657 	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4658 	if (!(np->msi_flags & NV_MSI_X_ENABLED))
4659 		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4660 	else
4661 		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4662 
4663 	spin_unlock_irq(&np->lock);
4664 
4665 	nv_free_irq(dev);
4666 
4667 	np->msi_flags = save_msi_flags;
4668 
4669 	if (netif_running(dev)) {
4670 		writel(save_poll_interval, base + NvRegPollingInterval);
4671 		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4672 		/* restore original irq */
4673 		if (nv_request_irq(dev, 0))
4674 			return 0;
4675 	}
4676 
4677 	return ret;
4678 }
4679 
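/*
 * Loopback self-test: put the MAC into internal loopback
 * (NVREG_PFF_LOOPBACK), transmit one ETH_DATA_LEN frame filled with the
 * byte pattern data[i] = i & 0xff, wait for it to circulate, then
 * verify the first rx descriptor carries the same length and payload.
 * Returns 1 on success, 0 on any error or mismatch.
 */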
4680 static int nv_loopback_test(struct net_device *dev)
4681 {
4682 	struct fe_priv *np = netdev_priv(dev);
4683 	u8 __iomem *base = get_hwbase(dev);
4684 	struct sk_buff *tx_skb, *rx_skb;
4685 	dma_addr_t test_dma_addr;
4686 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4687 	u32 flags;
4688 	int len, i, pkt_len;
4689 	u8 *pkt_data;
4690 	u32 filter_flags = 0;
4691 	u32 misc1_flags = 0;
4692 	int ret = 1;
4693 
4694 	if (netif_running(dev)) {
4695 		nv_disable_irq(dev);
4696 		filter_flags = readl(base + NvRegPacketFilterFlags);
4697 		misc1_flags = readl(base + NvRegMisc1);
4698 	} else {
4699 		nv_txrx_reset(dev);
4700 	}
4701 
4702 	/* reinit driver view of the rx queue */
4703 	set_bufsize(dev);
4704 	nv_init_ring(dev);
4705 
4706 	/* setup hardware for loopback */
4707 	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4708 	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4709 
4710 	/* reinit nic view of the rx queue */
4711 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4712 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4713 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4714 		base + NvRegRingSizes);
4715 	pci_push(base);
4716 
4717 	/* restart rx engine */
4718 	nv_start_rxtx(dev);
4719 
4720 	/* setup packet for tx */
4721 	pkt_len = ETH_DATA_LEN;
4722 	tx_skb = dev_alloc_skb(pkt_len);
4723 	if (!tx_skb) {
4724 		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
4725 		ret = 0;
4726 		goto out;
4727 	}
	/* map the tx buffer for device reads; direction matches the
	 * PCI_DMA_TODEVICE unmap at the end of the test */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
4731 	pkt_data = skb_put(tx_skb, pkt_len);
4732 	for (i = 0; i < pkt_len; i++)
4733 		pkt_data[i] = (u8)(i & 0xff);
4734 
4735 	if (!nv_optimized(np)) {
4736 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4737 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4738 	} else {
4739 		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4740 		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4741 		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4742 	}
4743 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4744 	pci_push(get_hwbase(dev));
4745 
4746 	msleep(500);
4747 
4748 	/* check for rx of the packet */
4749 	if (!nv_optimized(np)) {
4750 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4751 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4752 
4753 	} else {
4754 		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4755 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4756 	}
4757 
4758 	if (flags & NV_RX_AVAIL) {
4759 		ret = 0;
4760 	} else if (np->desc_ver == DESC_VER_1) {
4761 		if (flags & NV_RX_ERROR)
4762 			ret = 0;
4763 	} else {
4764 		if (flags & NV_RX2_ERROR)
4765 			ret = 0;
4766 	}
4767 
4768 	if (ret) {
4769 		if (len != pkt_len) {
4770 			ret = 0;
4771 		} else {
4772 			rx_skb = np->rx_skb[0].skb;
4773 			for (i = 0; i < pkt_len; i++) {
4774 				if (rx_skb->data[i] != (u8)(i & 0xff)) {
4775 					ret = 0;
4776 					break;
4777 				}
4778 			}
4779 		}
4780 	}
4781 
4782 	pci_unmap_single(np->pci_dev, test_dma_addr,
4783 		       (skb_end_pointer(tx_skb) - tx_skb->data),
4784 		       PCI_DMA_TODEVICE);
4785 	dev_kfree_skb_any(tx_skb);
4786  out:
4787 	/* stop engines */
4788 	nv_stop_rxtx(dev);
4789 	nv_txrx_reset(dev);
4790 	/* drain rx queue */
4791 	nv_drain_rxtx(dev);
4792 
4793 	if (netif_running(dev)) {
4794 		writel(misc1_flags, base + NvRegMisc1);
4795 		writel(filter_flags, base + NvRegPacketFilterFlags);
4796 		nv_enable_irq(dev);
4797 	}
4798 
4799 	return ret;
4800 }
4801 
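/*
 * ethtool self-test entry point ("ethtool -t eth0 [offline]").  Result
 * slots, 1 meaning the test failed:
 *
 *	buffer[0]  link test	   (always run)
 *	buffer[1]  register test   (offline only)
 *	buffer[2]  interrupt test  (offline only)
 *	buffer[3]  loopback test   (offline only)
 */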
4802 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4803 {
4804 	struct fe_priv *np = netdev_priv(dev);
4805 	u8 __iomem *base = get_hwbase(dev);
4806 	int result;
4807 	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
4808 
4809 	if (!nv_link_test(dev)) {
4810 		test->flags |= ETH_TEST_FL_FAILED;
4811 		buffer[0] = 1;
4812 	}
4813 
4814 	if (test->flags & ETH_TEST_FL_OFFLINE) {
4815 		if (netif_running(dev)) {
4816 			netif_stop_queue(dev);
4817 			nv_napi_disable(dev);
4818 			netif_tx_lock_bh(dev);
4819 			netif_addr_lock(dev);
4820 			spin_lock_irq(&np->lock);
4821 			nv_disable_hw_interrupts(dev, np->irqmask);
4822 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
4823 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4824 			else
4825 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4826 			/* stop engines */
4827 			nv_stop_rxtx(dev);
4828 			nv_txrx_reset(dev);
4829 			/* drain rx queue */
4830 			nv_drain_rxtx(dev);
4831 			spin_unlock_irq(&np->lock);
4832 			netif_addr_unlock(dev);
4833 			netif_tx_unlock_bh(dev);
4834 		}
4835 
4836 		if (!nv_register_test(dev)) {
4837 			test->flags |= ETH_TEST_FL_FAILED;
4838 			buffer[1] = 1;
4839 		}
4840 
4841 		result = nv_interrupt_test(dev);
4842 		if (result != 1) {
4843 			test->flags |= ETH_TEST_FL_FAILED;
4844 			buffer[2] = 1;
4845 		}
4846 		if (result == 0) {
4847 			/* bail out */
4848 			return;
4849 		}
4850 
4851 		if (!nv_loopback_test(dev)) {
4852 			test->flags |= ETH_TEST_FL_FAILED;
4853 			buffer[3] = 1;
4854 		}
4855 
4856 		if (netif_running(dev)) {
4857 			/* reinit driver view of the rx queue */
4858 			set_bufsize(dev);
4859 			if (nv_init_ring(dev)) {
4860 				if (!np->in_shutdown)
4861 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4862 			}
4863 			/* reinit nic view of the rx queue */
4864 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4865 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4866 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4867 				base + NvRegRingSizes);
4868 			pci_push(base);
4869 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4870 			pci_push(base);
4871 			/* restart rx engine */
4872 			nv_start_rxtx(dev);
4873 			netif_start_queue(dev);
4874 			nv_napi_enable(dev);
4875 			nv_enable_hw_interrupts(dev, np->irqmask);
4876 		}
4877 	}
4878 }
4879 
4880 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4881 {
4882 	switch (stringset) {
4883 	case ETH_SS_STATS:
4884 		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
4885 		break;
4886 	case ETH_SS_TEST:
4887 		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
4888 		break;
4889 	}
4890 }
4891 
4892 static const struct ethtool_ops ops = {
4893 	.get_drvinfo = nv_get_drvinfo,
4894 	.get_link = ethtool_op_get_link,
4895 	.get_wol = nv_get_wol,
4896 	.set_wol = nv_set_wol,
4897 	.get_settings = nv_get_settings,
4898 	.set_settings = nv_set_settings,
4899 	.get_regs_len = nv_get_regs_len,
4900 	.get_regs = nv_get_regs,
4901 	.nway_reset = nv_nway_reset,
4902 	.get_ringparam = nv_get_ringparam,
4903 	.set_ringparam = nv_set_ringparam,
4904 	.get_pauseparam = nv_get_pauseparam,
4905 	.set_pauseparam = nv_set_pauseparam,
4906 	.get_strings = nv_get_strings,
4907 	.get_ethtool_stats = nv_get_ethtool_stats,
4908 	.get_sset_count = nv_get_sset_count,
4909 	.self_test = nv_self_test,
4910 };
4911 
4912 /* The mgmt unit and driver use a semaphore to access the phy during init */
4913 static int nv_mgmt_acquire_sema(struct net_device *dev)
4914 {
4915 	struct fe_priv *np = netdev_priv(dev);
4916 	u8 __iomem *base = get_hwbase(dev);
4917 	int i;
4918 	u32 tx_ctrl, mgmt_sema;
4919 
4920 	for (i = 0; i < 10; i++) {
4921 		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
4922 		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
4923 			break;
4924 		msleep(500);
4925 	}
4926 
4927 	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
4928 		return 0;
4929 
4930 	for (i = 0; i < 2; i++) {
4931 		tx_ctrl = readl(base + NvRegTransmitterControl);
4932 		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
4933 		writel(tx_ctrl, base + NvRegTransmitterControl);
4934 
4935 		/* verify that semaphore was acquired */
4936 		tx_ctrl = readl(base + NvRegTransmitterControl);
4937 		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
4938 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
4939 			np->mgmt_sema = 1;
4940 			return 1;
4941 		} else
4942 			udelay(50);
4943 	}
4944 
4945 	return 0;
4946 }
4947 
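/*
 * Sketch of the intended pairing (illustrative, not a quote of the
 * actual call sites elsewhere in the driver):
 *
 *	if (nv_mgmt_acquire_sema(dev)) {
 *		... program the phy ...
 *		nv_mgmt_release_sema(dev);
 *	}
 */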
4948 static void nv_mgmt_release_sema(struct net_device *dev)
4949 {
4950 	struct fe_priv *np = netdev_priv(dev);
4951 	u8 __iomem *base = get_hwbase(dev);
4952 	u32 tx_ctrl;
4953 
4954 	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
4955 		if (np->mgmt_sema) {
4956 			tx_ctrl = readl(base + NvRegTransmitterControl);
4957 			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
4958 			writel(tx_ctrl, base + NvRegTransmitterControl);
4959 		}
4960 	}
4961 }
4962 
4963 
4964 static int nv_mgmt_get_version(struct net_device *dev)
4965 {
4966 	struct fe_priv *np = netdev_priv(dev);
4967 	u8 __iomem *base = get_hwbase(dev);
4968 	u32 data_ready = readl(base + NvRegTransmitterControl);
4969 	u32 data_ready2 = 0;
4970 	unsigned long start;
4971 	int ready = 0;
4972 
4973 	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
4974 	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
4975 	start = jiffies;
4976 	while (time_before(jiffies, start + 5*HZ)) {
4977 		data_ready2 = readl(base + NvRegTransmitterControl);
4978 		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
4979 			ready = 1;
4980 			break;
4981 		}
4982 		schedule_timeout_uninterruptible(1);
4983 	}
4984 
4985 	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
4986 		return 0;
4987 
4988 	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
4989 
4990 	return 1;
4991 }
4992 
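/*
 * Bring-up order in nv_open() matters: power up the phy, wipe stale
 * filter/multicast state, size and publish the rings, program the MAC
 * parameters, unmask interrupts, and only then force one manual
 * nv_update_linkspeed() so the initial link state is not missed (later
 * changes arrive through NVREG_IRQ_LINK and nv_link_irq()).
 */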
4993 static int nv_open(struct net_device *dev)
4994 {
4995 	struct fe_priv *np = netdev_priv(dev);
4996 	u8 __iomem *base = get_hwbase(dev);
4997 	int ret = 1;
4998 	int oom, i;
4999 	u32 low;
5000 
5001 	/* power up phy */
5002 	mii_rw(dev, np->phyaddr, MII_BMCR,
5003 	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5004 
5005 	nv_txrx_gate(dev, false);
5006 	/* erase previous misconfiguration */
5007 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
5008 		nv_mac_reset(dev);
5009 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5010 	writel(0, base + NvRegMulticastAddrB);
5011 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5012 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5013 	writel(0, base + NvRegPacketFilterFlags);
5014 
5015 	writel(0, base + NvRegTransmitterControl);
5016 	writel(0, base + NvRegReceiverControl);
5017 
5018 	writel(0, base + NvRegAdapterControl);
5019 
5020 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5021 		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5022 
5023 	/* initialize descriptor rings */
5024 	set_bufsize(dev);
5025 	oom = nv_init_ring(dev);
5026 
5027 	writel(0, base + NvRegLinkSpeed);
5028 	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5029 	nv_txrx_reset(dev);
5030 	writel(0, base + NvRegUnknownSetupReg6);
5031 
5032 	np->in_shutdown = 0;
5033 
5034 	/* give hw rings */
5035 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5036 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5037 		base + NvRegRingSizes);
5038 
5039 	writel(np->linkspeed, base + NvRegLinkSpeed);
5040 	if (np->desc_ver == DESC_VER_1)
5041 		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5042 	else
5043 		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5044 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
5045 	writel(np->vlanctl_bits, base + NvRegVlanControl);
5046 	pci_push(base);
5047 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5048 	if (reg_delay(dev, NvRegUnknownSetupReg5,
5049 		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5050 		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5051 		netdev_info(dev,
5052 			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
5053 
5054 	writel(0, base + NvRegMIIMask);
5055 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5056 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5057 
5058 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5059 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5060 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5061 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5062 
5063 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5064 
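	/* randomize the initial seed of the collision backoff logic */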
5065 	get_random_bytes(&low, sizeof(low));
5066 	low &= NVREG_SLOTTIME_MASK;
5067 	if (np->desc_ver == DESC_VER_1) {
5068 		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5069 	} else {
5070 		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5071 			/* setup legacy backoff */
5072 			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5073 		} else {
5074 			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5075 			nv_gear_backoff_reseed(dev);
5076 		}
5077 	}
5078 	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5079 	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5080 	if (poll_interval == -1) {
5081 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5082 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5083 		else
5084 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5085 	} else
5086 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5087 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5088 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5089 			base + NvRegAdapterControl);
5090 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5091 	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5092 	if (np->wolenabled)
5093 		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5094 
5095 	i = readl(base + NvRegPowerState);
5096 	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5097 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5098 
5099 	pci_push(base);
5100 	udelay(10);
5101 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5102 
5103 	nv_disable_hw_interrupts(dev, np->irqmask);
5104 	pci_push(base);
5105 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5106 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5107 	pci_push(base);
5108 
5109 	if (nv_request_irq(dev, 0))
5110 		goto out_drain;
5111 
5112 	/* ask for interrupts */
5113 	nv_enable_hw_interrupts(dev, np->irqmask);
5114 
5115 	spin_lock_irq(&np->lock);
5116 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5117 	writel(0, base + NvRegMulticastAddrB);
5118 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5119 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5120 	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5121 	/* One manual link speed update: Interrupts are enabled, future link
5122 	 * speed changes cause interrupts and are handled by nv_link_irq().
5123 	 */
5124 	readl(base + NvRegMIIStatus);	/* the value itself is not needed */
5125 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5129 	/* set linkspeed to invalid value, thus force nv_update_linkspeed
5130 	 * to init hw */
5131 	np->linkspeed = 0;
5132 	ret = nv_update_linkspeed(dev);
5133 	nv_start_rxtx(dev);
5134 	netif_start_queue(dev);
5135 	nv_napi_enable(dev);
5136 
5137 	if (ret) {
5138 		netif_carrier_on(dev);
5139 	} else {
5140 		netdev_info(dev, "no link during initialization\n");
5141 		netif_carrier_off(dev);
5142 	}
5143 	if (oom)
5144 		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5145 
5146 	/* start statistics timer */
5147 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5148 		mod_timer(&np->stats_poll,
5149 			round_jiffies(jiffies + STATS_INTERVAL));
5150 
5151 	spin_unlock_irq(&np->lock);
5152 
5153 	return 0;
5154 out_drain:
5155 	nv_drain_rxtx(dev);
5156 	return ret;
5157 }
5158 
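/*
 * nv_close: tear down in roughly the reverse order of nv_open.  NAPI
 * and the timers are quiesced before the hardware; interrupts are
 * masked under np->lock and the rings drained only after the irq is
 * freed.  If WOL is enabled (or the phy must stay powered), rx is
 * restarted so wakeup packets can still be received.
 */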
5159 static int nv_close(struct net_device *dev)
5160 {
5161 	struct fe_priv *np = netdev_priv(dev);
5162 	u8 __iomem *base;
5163 
5164 	spin_lock_irq(&np->lock);
5165 	np->in_shutdown = 1;
5166 	spin_unlock_irq(&np->lock);
5167 	nv_napi_disable(dev);
5168 	synchronize_irq(np->pci_dev->irq);
5169 
5170 	del_timer_sync(&np->oom_kick);
5171 	del_timer_sync(&np->nic_poll);
5172 	del_timer_sync(&np->stats_poll);
5173 
5174 	netif_stop_queue(dev);
5175 	spin_lock_irq(&np->lock);
5176 	nv_stop_rxtx(dev);
5177 	nv_txrx_reset(dev);
5178 
5179 	/* disable interrupts on the nic or we will lock up */
5180 	base = get_hwbase(dev);
5181 	nv_disable_hw_interrupts(dev, np->irqmask);
5182 	pci_push(base);
5183 
5184 	spin_unlock_irq(&np->lock);
5185 
5186 	nv_free_irq(dev);
5187 
5188 	nv_drain_rxtx(dev);
5189 
5190 	if (np->wolenabled || !phy_power_down) {
5191 		nv_txrx_gate(dev, false);
5192 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5193 		nv_start_rx(dev);
5194 	} else {
5195 		/* power down phy */
5196 		mii_rw(dev, np->phyaddr, MII_BMCR,
5197 		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5198 		nv_txrx_gate(dev, true);
5199 	}
5200 
5201 	/* FIXME: power down nic */
5202 
5203 	return 0;
5204 }
5205 
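/*
 * The two netdev_ops variants differ only in ->ndo_start_xmit:
 * nv_start_xmit serves the legacy descriptor formats, while
 * nv_start_xmit_optimized serves the extended (ring_desc_ex)
 * descriptors selected by nv_optimized().
 */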
5206 static const struct net_device_ops nv_netdev_ops = {
5207 	.ndo_open		= nv_open,
5208 	.ndo_stop		= nv_close,
5209 	.ndo_get_stats		= nv_get_stats,
5210 	.ndo_start_xmit		= nv_start_xmit,
5211 	.ndo_tx_timeout		= nv_tx_timeout,
5212 	.ndo_change_mtu		= nv_change_mtu,
5213 	.ndo_fix_features	= nv_fix_features,
5214 	.ndo_set_features	= nv_set_features,
5215 	.ndo_validate_addr	= eth_validate_addr,
5216 	.ndo_set_mac_address	= nv_set_mac_address,
5217 	.ndo_set_rx_mode	= nv_set_multicast,
5218 #ifdef CONFIG_NET_POLL_CONTROLLER
5219 	.ndo_poll_controller	= nv_poll_controller,
5220 #endif
5221 };
5222 
5223 static const struct net_device_ops nv_netdev_ops_optimized = {
5224 	.ndo_open		= nv_open,
5225 	.ndo_stop		= nv_close,
5226 	.ndo_get_stats		= nv_get_stats,
5227 	.ndo_start_xmit		= nv_start_xmit_optimized,
5228 	.ndo_tx_timeout		= nv_tx_timeout,
5229 	.ndo_change_mtu		= nv_change_mtu,
5230 	.ndo_fix_features	= nv_fix_features,
5231 	.ndo_set_features	= nv_set_features,
5232 	.ndo_validate_addr	= eth_validate_addr,
5233 	.ndo_set_mac_address	= nv_set_mac_address,
5234 	.ndo_set_rx_mode	= nv_set_multicast,
5235 #ifdef CONFIG_NET_POLL_CONTROLLER
5236 	.ndo_poll_controller	= nv_poll_controller,
5237 #endif
5238 };
5239 
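/*
 * nv_probe: one-time device setup.  Maps the register window, picks
 * the descriptor format and DMA mask, decodes the MAC address
 * (fixing the byte order on older chips), checks for an active
 * management unit, scans for the phy and registers the netdev.
 */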
5240 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5241 {
5242 	struct net_device *dev;
5243 	struct fe_priv *np;
5244 	unsigned long addr;
5245 	u8 __iomem *base;
5246 	int err, i;
5247 	u32 powerstate, txreg;
5248 	u32 phystate_orig = 0, phystate;
5249 	int phyinitialized = 0;
5250 	static int printed_version;
5251 
5252 	if (!printed_version++)
5253 		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5254 			FORCEDETH_VERSION);
5255 
5256 	dev = alloc_etherdev(sizeof(struct fe_priv));
5257 	err = -ENOMEM;
5258 	if (!dev)
5259 		goto out;
5260 
5261 	np = netdev_priv(dev);
5262 	np->dev = dev;
5263 	np->pci_dev = pci_dev;
5264 	spin_lock_init(&np->lock);
5265 	SET_NETDEV_DEV(dev, &pci_dev->dev);
5266 
5267 	init_timer(&np->oom_kick);
5268 	np->oom_kick.data = (unsigned long) dev;
5269 	np->oom_kick.function = nv_do_rx_refill;	/* timer handler */
5270 	init_timer(&np->nic_poll);
5271 	np->nic_poll.data = (unsigned long) dev;
5272 	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
5273 	init_timer(&np->stats_poll);
5274 	np->stats_poll.data = (unsigned long) dev;
5275 	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */
5276 
5277 	err = pci_enable_device(pci_dev);
5278 	if (err)
5279 		goto out_free;
5280 
5281 	pci_set_master(pci_dev);
5282 
5283 	err = pci_request_regions(pci_dev, DRV_NAME);
5284 	if (err < 0)
5285 		goto out_disable;
5286 
5287 	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5288 		np->register_size = NV_PCI_REGSZ_VER3;
5289 	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5290 		np->register_size = NV_PCI_REGSZ_VER2;
5291 	else
5292 		np->register_size = NV_PCI_REGSZ_VER1;
5293 
5294 	err = -EINVAL;
5295 	addr = 0;
5296 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5297 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5298 				pci_resource_len(pci_dev, i) >= np->register_size) {
5299 			addr = pci_resource_start(pci_dev, i);
5300 			break;
5301 		}
5302 	}
5303 	if (i == DEVICE_COUNT_RESOURCE) {
5304 		dev_info(&pci_dev->dev, "Couldn't find register window\n");
5305 		goto out_relreg;
5306 	}
5307 
5308 	/* copy of driver data */
5309 	np->driver_data = id->driver_data;
5310 	/* copy of device id */
5311 	np->device_id = id->device;
5312 
5313 	/* handle different descriptor versions */
5314 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
5315 		/* packet format 3: supports 40-bit addressing */
5316 		np->desc_ver = DESC_VER_3;
5317 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
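		/*
		 * Note: a 39-bit mask is requested below although the
		 * messages speak of 64-bit DMA; on failure the driver
		 * logs it and falls back to 32-bit addressing.
		 */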
5318 		if (dma_64bit) {
5319 			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5320 				dev_info(&pci_dev->dev,
5321 					 "64-bit DMA failed, using 32-bit addressing\n");
5322 			else
5323 				dev->features |= NETIF_F_HIGHDMA;
5324 			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5325 				dev_info(&pci_dev->dev,
5326 					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5327 			}
5328 		}
5329 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
5330 		/* packet format 2: supports jumbo frames */
5331 		np->desc_ver = DESC_VER_2;
5332 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5333 	} else {
5334 		/* original packet format */
5335 		np->desc_ver = DESC_VER_1;
5336 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5337 	}
5338 
5339 	np->pkt_limit = NV_PKTLIMIT_1;
5340 	if (id->driver_data & DEV_HAS_LARGEDESC)
5341 		np->pkt_limit = NV_PKTLIMIT_2;
5342 
5343 	if (id->driver_data & DEV_HAS_CHECKSUM) {
5344 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5345 		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5346 			NETIF_F_TSO | NETIF_F_RXCSUM;
5347 	}
5348 
5349 	np->vlanctl_bits = 0;
5350 	if (id->driver_data & DEV_HAS_VLAN) {
5351 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5352 		dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5353 	}
5354 
5355 	dev->features |= dev->hw_features;
5356 
5357 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5358 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5359 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5360 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5361 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5362 	}
5363 
5364 	err = -ENOMEM;
5365 	np->base = ioremap(addr, np->register_size);
5366 	if (!np->base)
5367 		goto out_relreg;
5368 	dev->base_addr = (unsigned long)np->base;
5369 
5370 	dev->irq = pci_dev->irq;
5371 
5372 	np->rx_ring_size = RX_RING_DEFAULT;
5373 	np->tx_ring_size = TX_RING_DEFAULT;
5374 
5375 	if (!nv_optimized(np)) {
5376 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5377 					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5378 					&np->ring_addr);
5379 		if (!np->rx_ring.orig)
5380 			goto out_unmap;
5381 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5382 	} else {
5383 		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5384 					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5385 					&np->ring_addr);
5386 		if (!np->rx_ring.ex)
5387 			goto out_unmap;
5388 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5389 	}
5390 	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5391 	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5392 	if (!np->rx_skb || !np->tx_skb)
5393 		goto out_freering;
5394 
5395 	if (!nv_optimized(np))
5396 		dev->netdev_ops = &nv_netdev_ops;
5397 	else
5398 		dev->netdev_ops = &nv_netdev_ops_optimized;
5399 
5400 	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5401 	SET_ETHTOOL_OPS(dev, &ops);
5402 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5403 
5404 	pci_set_drvdata(pci_dev, dev);
5405 
5406 	/* read the mac address */
5407 	base = get_hwbase(dev);
5408 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
5409 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
5410 
5411 	/* check the workaround bit for correct mac address order */
5412 	txreg = readl(base + NvRegTransmitPoll);
5413 	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5414 		/* mac address is already in correct order */
5415 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5416 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5417 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5418 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5419 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5420 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5421 	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5422 		/* mac address is already in correct order */
5423 		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5424 		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5425 		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5426 		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5427 		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5428 		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5429 		/*
5430 		 * Set orig mac address back to the reversed version.
5431 		 * This flag will be cleared during low power transition.
5432 		 * Therefore, we should always put back the reversed address.
5433 		 */
5434 		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5435 			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5436 		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5437 	} else {
5438 		/* need to reverse mac address to correct order */
5439 		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5440 		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5441 		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5442 		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5443 		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5444 		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5445 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5446 		dev_dbg(&pci_dev->dev,
5447 			"%s: set workaround bit for reversed mac addr\n",
5448 			__func__);
5449 	}
5450 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5451 
5452 	if (!is_valid_ether_addr(dev->perm_addr)) {
5453 		/*
5454 		 * Bad mac address. At least one bios sets the mac address
5455 		 * to 01:23:45:67:89:ab
5456 		 */
5457 		dev_err(&pci_dev->dev,
5458 			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5459 			dev->dev_addr);
5460 		random_ether_addr(dev->dev_addr);
5461 		dev_err(&pci_dev->dev,
5462 			"Using random MAC address: %pM\n", dev->dev_addr);
5463 	}
5464 
5465 	/* set mac address */
5466 	nv_copy_mac_to_hw(dev);
5467 
5468 	/* disable WOL */
5469 	writel(0, base + NvRegWakeUpFlags);
5470 	np->wolenabled = 0;
5471 	device_set_wakeup_enable(&pci_dev->dev, false);
5472 
5473 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5474 
5475 		/* take phy and nic out of low power mode */
5476 		powerstate = readl(base + NvRegPowerState2);
5477 		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5478 		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5479 		    pci_dev->revision >= 0xA3)
5480 			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5481 		writel(powerstate, base + NvRegPowerState2);
5482 	}
5483 
5484 	if (np->desc_ver == DESC_VER_1)
5485 		np->tx_flags = NV_TX_VALID;
5486 	else
5487 		np->tx_flags = NV_TX2_VALID;
5488 
5489 	np->msi_flags = 0;
5490 	if ((id->driver_data & DEV_HAS_MSI) && msi)
5491 		np->msi_flags |= NV_MSI_CAPABLE;
5492 
5493 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5494 		/* MSI-X has had reported issues when the irqmask is
5495 		 * modified (as in the NAPI case), so disable it for now.
5496 		 */
5497 #if 0
5498 		np->msi_flags |= NV_MSI_X_CAPABLE;
5499 #endif
5500 	}
5501 
5502 	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5503 		np->irqmask = NVREG_IRQMASK_CPU;
5504 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5505 			np->msi_flags |= 0x0001;
5506 	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5507 		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5508 		/* start off in throughput mode */
5509 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5510 		/* remove support for msix mode */
5511 		np->msi_flags &= ~NV_MSI_X_CAPABLE;
5512 	} else {
5513 		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5514 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5515 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5516 			np->msi_flags |= 0x0003;
5517 	}
5518 
5519 	if (id->driver_data & DEV_NEED_TIMERIRQ)
5520 		np->irqmask |= NVREG_IRQ_TIMER;
5521 	if (id->driver_data & DEV_NEED_LINKTIMER) {
5522 		np->need_linktimer = 1;
5523 		np->link_timeout = jiffies + LINK_TIMEOUT;
5524 	} else {
5525 		np->need_linktimer = 0;
5526 	}
5527 
5528 	/* Limit the number of tx's outstanding for hw bug */
5529 	if (id->driver_data & DEV_NEED_TX_LIMIT) {
5530 		np->tx_limit = 1;
5531 		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5532 		    pci_dev->revision >= 0xA2)
5533 			np->tx_limit = 0;
5534 	}
5535 
5536 	/* clear phy state and temporarily halt phy interrupts */
5537 	writel(0, base + NvRegMIIMask);
5538 	phystate = readl(base + NvRegAdapterControl);
5539 	if (phystate & NVREG_ADAPTCTL_RUNNING) {
5540 		phystate_orig = 1;
5541 		phystate &= ~NVREG_ADAPTCTL_RUNNING;
5542 		writel(phystate, base + NvRegAdapterControl);
5543 	}
5544 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5545 
5546 	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5547 		/* management unit running on the mac? */
5548 		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5549 		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5550 		    nv_mgmt_acquire_sema(dev) &&
5551 		    nv_mgmt_get_version(dev)) {
5552 			np->mac_in_use = 1;
5553 			if (np->mgmt_version > 0)
5554 				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5555 			/* has the management unit already set up the phy? */
5556 			if (np->mac_in_use &&
5557 			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5558 			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
5559 				/* phy was initialized by the mgmt unit */
5560 				phyinitialized = 1;
5561 			} else {
5562 				/* we need to init the phy */
5563 			}
5564 		}
5565 	}
5566 
5567 	/* find a suitable phy */
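	/* addresses 1..31 are scanned first; i == 32 wraps to phy address 0 */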
5568 	for (i = 1; i <= 32; i++) {
5569 		int id1, id2;
5570 		int phyaddr = i & 0x1F;
5571 
5572 		spin_lock_irq(&np->lock);
5573 		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5574 		spin_unlock_irq(&np->lock);
5575 		if (id1 < 0 || id1 == 0xffff)
5576 			continue;
5577 		spin_lock_irq(&np->lock);
5578 		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5579 		spin_unlock_irq(&np->lock);
5580 		if (id2 < 0 || id2 == 0xffff)
5581 			continue;
5582 
5583 		np->phy_model = id2 & PHYID2_MODEL_MASK;
5584 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5585 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5586 		np->phyaddr = phyaddr;
5587 		np->phy_oui = id1 | id2;
5588 
5589 		/* Realtek hardcoded phy id1 to all zeros on certain phys */
5590 		if (np->phy_oui == PHY_OUI_REALTEK2)
5591 			np->phy_oui = PHY_OUI_REALTEK;
5592 		/* Setup phy revision for Realtek */
5593 		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5594 			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5595 
5596 		break;
5597 	}
5598 	if (i == 33) {
5599 		dev_info(&pci_dev->dev, "probe: Could not find a valid PHY\n");
5600 		goto out_error;
5601 	}
5602 
5603 	if (!phyinitialized) {
5604 		/* reset it */
5605 		phy_init(dev);
5606 	} else {
5607 		/* see if it is a gigabit phy */
5608 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5609 		if (mii_status & PHY_GIGABIT)
5610 			np->gigabit = PHY_GIGABIT;
5611 	}
5612 
5613 	/* set default link speed settings */
5614 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5615 	np->duplex = 0;
5616 	np->autoneg = 1;
5617 
5618 	err = register_netdev(dev);
5619 	if (err) {
5620 		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5621 		goto out_error;
5622 	}
5623 
5624 	if (id->driver_data & DEV_HAS_VLAN)
5625 		nv_vlan_mode(dev, dev->features);
5626 
5627 	netif_carrier_off(dev);
5628 
5629 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5630 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5631 
5632 	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5633 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5634 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5635 			"csum " : "",
5636 		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5637 			"vlan " : "",
5638 		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5639 		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5640 		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5641 		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5642 		 np->need_linktimer ? "lnktim " : "",
5643 		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5644 		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5645 		 np->desc_ver);
5646 
5647 	return 0;
5648 
5649 out_error:
5650 	if (phystate_orig)
5651 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5652 	pci_set_drvdata(pci_dev, NULL);
5653 out_freering:
5654 	free_rings(dev);
5655 out_unmap:
5656 	iounmap(get_hwbase(dev));
5657 out_relreg:
5658 	pci_release_regions(pci_dev);
5659 out_disable:
5660 	pci_disable_device(pci_dev);
5661 out_free:
5662 	free_netdev(dev);
5663 out:
5664 	return err;
5665 }
5666 
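/*
 * Restore the Realtek 8201 phy's crossover-detection setting (changed
 * during init when phy_cross is set to disabled) and restart
 * autonegotiation, so the phy is handed back in a sane state.
 */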
5667 static void nv_restore_phy(struct net_device *dev)
5668 {
5669 	struct fe_priv *np = netdev_priv(dev);
5670 	u16 phy_reserved, mii_control;
5671 
5672 	if (np->phy_oui == PHY_OUI_REALTEK &&
5673 	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
5674 	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5675 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5676 		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5677 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5678 		phy_reserved |= PHY_REALTEK_INIT8;
5679 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5680 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5681 
5682 		/* restart auto negotiation */
5683 		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5684 		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5685 		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5686 	}
5687 }
5688 
5689 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5690 {
5691 	struct net_device *dev = pci_get_drvdata(pci_dev);
5692 	struct fe_priv *np = netdev_priv(dev);
5693 	u8 __iomem *base = get_hwbase(dev);
5694 
5695 	/* special op: write back the misordered MAC address - otherwise
5696 	 * the next nv_probe would see a wrong address.
5697 	 */
5698 	writel(np->orig_mac[0], base + NvRegMacAddrA);
5699 	writel(np->orig_mac[1], base + NvRegMacAddrB);
5700 	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5701 	       base + NvRegTransmitPoll);
5702 }
5703 
5704 static void __devexit nv_remove(struct pci_dev *pci_dev)
5705 {
5706 	struct net_device *dev = pci_get_drvdata(pci_dev);
5707 
5708 	unregister_netdev(dev);
5709 
5710 	nv_restore_mac_addr(pci_dev);
5711 
5712 	/* restore any phy related changes */
5713 	nv_restore_phy(dev);
5714 
5715 	nv_mgmt_release_sema(dev);
5716 
5717 	/* free all structures */
5718 	free_rings(dev);
5719 	iounmap(get_hwbase(dev));
5720 	pci_release_regions(pci_dev);
5721 	pci_disable_device(pci_dev);
5722 	free_netdev(dev);
5723 	pci_set_drvdata(pci_dev, NULL);
5724 }
5725 
5726 #ifdef CONFIG_PM_SLEEP
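/*
 * Suspend saves the whole MMIO register window ("non-pci
 * configuration space") word by word into np->saved_config_space;
 * resume writes it back before re-initializing the phy.  Note the
 * inclusive loop bound: register_size/4 + 1 words are copied.
 */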
5727 static int nv_suspend(struct device *device)
5728 {
5729 	struct pci_dev *pdev = to_pci_dev(device);
5730 	struct net_device *dev = pci_get_drvdata(pdev);
5731 	struct fe_priv *np = netdev_priv(dev);
5732 	u8 __iomem *base = get_hwbase(dev);
5733 	int i;
5734 
5735 	if (netif_running(dev)) {
5736 		/* Gross, but simple: reuse nv_close() to quiesce the device. */
5737 		nv_close(dev);
5738 	}
5739 	netif_device_detach(dev);
5740 
5741 	/* save non-pci configuration space */
5742 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
5743 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
5744 
5745 	return 0;
5746 }
5747 
5748 static int nv_resume(struct device *device)
5749 {
5750 	struct pci_dev *pdev = to_pci_dev(device);
5751 	struct net_device *dev = pci_get_drvdata(pdev);
5752 	struct fe_priv *np = netdev_priv(dev);
5753 	u8 __iomem *base = get_hwbase(dev);
5754 	int i, rc = 0;
5755 
5756 	/* restore non-pci configuration space */
5757 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
5758 		writel(np->saved_config_space[i], base+i*sizeof(u32));
5759 
5760 	if (np->driver_data & DEV_NEED_MSI_FIX)
5761 		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
5762 
5763 	/* restore phy state, including autoneg */
5764 	phy_init(dev);
5765 
5766 	netif_device_attach(dev);
5767 	if (netif_running(dev)) {
5768 		rc = nv_open(dev);
5769 		nv_set_multicast(dev);
5770 	}
5771 	return rc;
5772 }
5773 
5774 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5775 #define NV_PM_OPS (&nv_pm_ops)
5776 
5777 #else
5778 #define NV_PM_OPS NULL
5779 #endif /* CONFIG_PM_SLEEP */
5780 
5781 #ifdef CONFIG_PM
5782 static void nv_shutdown(struct pci_dev *pdev)
5783 {
5784 	struct net_device *dev = pci_get_drvdata(pdev);
5785 	struct fe_priv *np = netdev_priv(dev);
5786 
5787 	if (netif_running(dev))
5788 		nv_close(dev);
5789 
5790 	/*
5791 	 * Restore the MAC so a kernel started by kexec won't get confused.
5792 	 * If we really go for poweroff, we must not restore the MAC,
5793 	 * otherwise the MAC for WOL will be reversed at least on some boards.
5794 	 */
5795 	if (system_state != SYSTEM_POWER_OFF)
5796 		nv_restore_mac_addr(pdev);
5797 
5798 	pci_disable_device(pdev);
5799 	/*
5800 	 * Apparently it is not possible to reinitialise from D3 hot,
5801 	 * only put the device into D3 if we really go for poweroff.
5802 	 */
5803 	if (system_state == SYSTEM_POWER_OFF) {
5804 		pci_wake_from_d3(pdev, np->wolenabled);
5805 		pci_set_power_state(pdev, PCI_D3hot);
5806 	}
5807 }
5808 #else
5809 #define nv_shutdown NULL
5810 #endif /* CONFIG_PM */
5811 
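/*
 * PCI IDs served by this driver.  .driver_data carries the DEV_*
 * feature and workaround flags that select the per-chipset code
 * paths in nv_probe() and friends.
 */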
5812 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
5813 	{	/* nForce Ethernet Controller */
5814 		PCI_DEVICE(0x10DE, 0x01C3),
5815 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5816 	},
5817 	{	/* nForce2 Ethernet Controller */
5818 		PCI_DEVICE(0x10DE, 0x0066),
5819 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5820 	},
5821 	{	/* nForce3 Ethernet Controller */
5822 		PCI_DEVICE(0x10DE, 0x00D6),
5823 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5824 	},
5825 	{	/* nForce3 Ethernet Controller */
5826 		PCI_DEVICE(0x10DE, 0x0086),
5827 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5828 	},
5829 	{	/* nForce3 Ethernet Controller */
5830 		PCI_DEVICE(0x10DE, 0x008C),
5831 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5832 	},
5833 	{	/* nForce3 Ethernet Controller */
5834 		PCI_DEVICE(0x10DE, 0x00E6),
5835 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5836 	},
5837 	{	/* nForce3 Ethernet Controller */
5838 		PCI_DEVICE(0x10DE, 0x00DF),
5839 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5840 	},
5841 	{	/* CK804 Ethernet Controller */
5842 		PCI_DEVICE(0x10DE, 0x0056),
5843 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5844 	},
5845 	{	/* CK804 Ethernet Controller */
5846 		PCI_DEVICE(0x10DE, 0x0057),
5847 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5848 	},
5849 	{	/* MCP04 Ethernet Controller */
5850 		PCI_DEVICE(0x10DE, 0x0037),
5851 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5852 	},
5853 	{	/* MCP04 Ethernet Controller */
5854 		PCI_DEVICE(0x10DE, 0x0038),
5855 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5856 	},
5857 	{	/* MCP51 Ethernet Controller */
5858 		PCI_DEVICE(0x10DE, 0x0268),
5859 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
5860 	},
5861 	{	/* MCP51 Ethernet Controller */
5862 		PCI_DEVICE(0x10DE, 0x0269),
5863 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
5864 	},
5865 	{	/* MCP55 Ethernet Controller */
5866 		PCI_DEVICE(0x10DE, 0x0372),
5867 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
5868 	},
5869 	{	/* MCP55 Ethernet Controller */
5870 		PCI_DEVICE(0x10DE, 0x0373),
5871 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
5872 	},
5873 	{	/* MCP61 Ethernet Controller */
5874 		PCI_DEVICE(0x10DE, 0x03E5),
5875 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5876 	},
5877 	{	/* MCP61 Ethernet Controller */
5878 		PCI_DEVICE(0x10DE, 0x03E6),
5879 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5880 	},
5881 	{	/* MCP61 Ethernet Controller */
5882 		PCI_DEVICE(0x10DE, 0x03EE),
5883 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5884 	},
5885 	{	/* MCP61 Ethernet Controller */
5886 		PCI_DEVICE(0x10DE, 0x03EF),
5887 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
5888 	},
5889 	{	/* MCP65 Ethernet Controller */
5890 		PCI_DEVICE(0x10DE, 0x0450),
5891 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5892 	},
5893 	{	/* MCP65 Ethernet Controller */
5894 		PCI_DEVICE(0x10DE, 0x0451),
5895 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5896 	},
5897 	{	/* MCP65 Ethernet Controller */
5898 		PCI_DEVICE(0x10DE, 0x0452),
5899 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5900 	},
5901 	{	/* MCP65 Ethernet Controller */
5902 		PCI_DEVICE(0x10DE, 0x0453),
5903 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5904 	},
5905 	{	/* MCP67 Ethernet Controller */
5906 		PCI_DEVICE(0x10DE, 0x054C),
5907 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5908 	},
5909 	{	/* MCP67 Ethernet Controller */
5910 		PCI_DEVICE(0x10DE, 0x054D),
5911 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5912 	},
5913 	{	/* MCP67 Ethernet Controller */
5914 		PCI_DEVICE(0x10DE, 0x054E),
5915 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5916 	},
5917 	{	/* MCP67 Ethernet Controller */
5918 		PCI_DEVICE(0x10DE, 0x054F),
5919 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5920 	},
5921 	{	/* MCP73 Ethernet Controller */
5922 		PCI_DEVICE(0x10DE, 0x07DC),
5923 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5924 	},
5925 	{	/* MCP73 Ethernet Controller */
5926 		PCI_DEVICE(0x10DE, 0x07DD),
5927 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5928 	},
5929 	{	/* MCP73 Ethernet Controller */
5930 		PCI_DEVICE(0x10DE, 0x07DE),
5931 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5932 	},
5933 	{	/* MCP73 Ethernet Controller */
5934 		PCI_DEVICE(0x10DE, 0x07DF),
5935 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
5936 	},
5937 	{	/* MCP77 Ethernet Controller */
5938 		PCI_DEVICE(0x10DE, 0x0760),
5939 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5940 	},
5941 	{	/* MCP77 Ethernet Controller */
5942 		PCI_DEVICE(0x10DE, 0x0761),
5943 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5944 	},
5945 	{	/* MCP77 Ethernet Controller */
5946 		PCI_DEVICE(0x10DE, 0x0762),
5947 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5948 	},
5949 	{	/* MCP77 Ethernet Controller */
5950 		PCI_DEVICE(0x10DE, 0x0763),
5951 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5952 	},
5953 	{	/* MCP79 Ethernet Controller */
5954 		PCI_DEVICE(0x10DE, 0x0AB0),
5955 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5956 	},
5957 	{	/* MCP79 Ethernet Controller */
5958 		PCI_DEVICE(0x10DE, 0x0AB1),
5959 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5960 	},
5961 	{	/* MCP79 Ethernet Controller */
5962 		PCI_DEVICE(0x10DE, 0x0AB2),
5963 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5964 	},
5965 	{	/* MCP79 Ethernet Controller */
5966 		PCI_DEVICE(0x10DE, 0x0AB3),
5967 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
5968 	},
5969 	{	/* MCP89 Ethernet Controller */
5970 		PCI_DEVICE(0x10DE, 0x0D7D),
5971 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
5972 	},
5973 	{0,},
5974 };
5975 
5976 static struct pci_driver driver = {
5977 	.name		= DRV_NAME,
5978 	.id_table	= pci_tbl,
5979 	.probe		= nv_probe,
5980 	.remove		= __devexit_p(nv_remove),
5981 	.shutdown	= nv_shutdown,
5982 	.driver.pm	= NV_PM_OPS,
5983 };
5984 
5985 static int __init init_nic(void)
5986 {
5987 	return pci_register_driver(&driver);
5988 }
5989 
5990 static void __exit exit_nic(void)
5991 {
5992 	pci_unregister_driver(&driver);
5993 }
5994 
5995 module_param(max_interrupt_work, int, 0);
5996 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
5997 module_param(optimization_mode, int, 0);
5998 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
5999 module_param(poll_interval, int, 0);
6000 MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
6001 module_param(msi, int, 0);
6002 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6003 module_param(msix, int, 0);
6004 MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6005 module_param(dma_64bit, int, 0);
6006 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6007 module_param(phy_cross, int, 0);
6008 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6009 module_param(phy_power_down, int, 0);
6010 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
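/*
 * Example (illustrative only): load the driver in dynamic interrupt
 * moderation mode with MSI disabled:
 *
 *	modprobe forcedeth optimization_mode=2 msi=0
 */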
6011 
6012 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6013 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6014 MODULE_LICENSE("GPL");
6015 
6016 MODULE_DEVICE_TABLE(pci, pci_tbl);
6017 
6018 module_init(init_nic);
6019 module_exit(exit_nic);
6020