xref: /linux/drivers/net/ethernet/broadcom/bnx2.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/firmware.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 
52 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53 #define BCM_CNIC 1
54 #include "cnic_if.h"
55 #endif
56 #include "bnx2.h"
57 #include "bnx2_fw.h"
58 
59 #define DRV_MODULE_NAME		"bnx2"
60 #define DRV_MODULE_VERSION	"2.1.11"
61 #define DRV_MODULE_RELDATE	"July 20, 2011"
62 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.1.fw"
63 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
64 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1a.fw"
65 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
67 
68 #define RUN_AT(x) (jiffies + (x))
69 
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT  (5*HZ)
72 
73 static char version[] __devinitdata =
74 	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75 
76 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80 MODULE_FIRMWARE(FW_MIPS_FILE_06);
81 MODULE_FIRMWARE(FW_RV2P_FILE_06);
82 MODULE_FIRMWARE(FW_MIPS_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85 
86 static int disable_msi = 0;
87 
88 module_param(disable_msi, int, 0);
89 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 
91 typedef enum {
92 	BCM5706 = 0,
93 	NC370T,
94 	NC370I,
95 	BCM5706S,
96 	NC370F,
97 	BCM5708,
98 	BCM5708S,
99 	BCM5709,
100 	BCM5709S,
101 	BCM5716,
102 	BCM5716S,
103 } board_t;
104 
105 /* indexed by board_t, above */
106 static struct {
107 	char *name;
108 } board_info[] __devinitdata = {
109 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
110 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
111 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
112 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
114 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
115 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
116 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
117 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
118 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
119 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
120 	};
121 
122 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
123 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
129 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
131 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
135 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
137 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
139 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
141 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
142 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
143 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
144 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
145 	{ 0, }
146 };
147 
148 static const struct flash_spec flash_table[] =
149 {
150 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
152 	/* Slow EEPROM */
153 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
154 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
155 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
156 	 "EEPROM - slow"},
157 	/* Expansion entry 0001 */
158 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
159 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
160 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
161 	 "Entry 0001"},
162 	/* Saifun SA25F010 (non-buffered flash) */
163 	/* strap, cfg1, & write1 need updates */
164 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
165 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
167 	 "Non-buffered flash (128kB)"},
168 	/* Saifun SA25F020 (non-buffered flash) */
169 	/* strap, cfg1, & write1 need updates */
170 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
171 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
173 	 "Non-buffered flash (256kB)"},
174 	/* Expansion entry 0100 */
175 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
176 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 	 "Entry 0100"},
179 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
180 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
181 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
183 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
184 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
185 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
186 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
187 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
188 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
189 	/* Saifun SA25F005 (non-buffered flash) */
190 	/* strap, cfg1, & write1 need updates */
191 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
192 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
194 	 "Non-buffered flash (64kB)"},
195 	/* Fast EEPROM */
196 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
197 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
198 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
199 	 "EEPROM - fast"},
200 	/* Expansion entry 1001 */
201 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
202 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 	 "Entry 1001"},
205 	/* Expansion entry 1010 */
206 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
207 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209 	 "Entry 1010"},
210 	/* ATMEL AT45DB011B (buffered flash) */
211 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
212 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
214 	 "Buffered flash (128kB)"},
215 	/* Expansion entry 1100 */
216 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
217 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 	 "Entry 1100"},
220 	/* Expansion entry 1101 */
221 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
222 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
223 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
224 	 "Entry 1101"},
225 	/* Ateml Expansion entry 1110 */
226 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
227 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
229 	 "Entry 1110 (Atmel)"},
230 	/* ATMEL AT45DB021B (buffered flash) */
231 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
232 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
233 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
234 	 "Buffered flash (256kB)"},
235 };
236 
237 static const struct flash_spec flash_5709 = {
238 	.flags		= BNX2_NV_BUFFERED,
239 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
240 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
241 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
242 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
243 	.name		= "5709 Buffered flash (256kB)",
244 };
245 
246 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247 
248 static void bnx2_init_napi(struct bnx2 *bp);
249 static void bnx2_del_napi(struct bnx2 *bp);
250 
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253 	u32 diff;
254 
255 	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
256 	barrier();
257 
258 	/* The ring uses 256 indices for 255 entries, one of them
259 	 * needs to be skipped.
260 	 */
261 	diff = txr->tx_prod - txr->tx_cons;
262 	if (unlikely(diff >= TX_DESC_CNT)) {
263 		diff &= 0xffff;
264 		if (diff == TX_DESC_CNT)
265 			diff = MAX_TX_DESC_CNT;
266 	}
267 	return bp->tx_ring_size - diff;
268 }
269 
270 static u32
271 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272 {
273 	u32 val;
274 
275 	spin_lock_bh(&bp->indirect_lock);
276 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 	spin_unlock_bh(&bp->indirect_lock);
279 	return val;
280 }
281 
282 static void
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284 {
285 	spin_lock_bh(&bp->indirect_lock);
286 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
288 	spin_unlock_bh(&bp->indirect_lock);
289 }
290 
291 static void
292 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
293 {
294 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
295 }
296 
297 static u32
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299 {
300 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
301 }
302 
303 static void
304 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
305 {
306 	offset += cid_addr;
307 	spin_lock_bh(&bp->indirect_lock);
308 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
309 		int i;
310 
311 		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
312 		REG_WR(bp, BNX2_CTX_CTX_CTRL,
313 		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314 		for (i = 0; i < 5; i++) {
315 			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
316 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
317 				break;
318 			udelay(5);
319 		}
320 	} else {
321 		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
322 		REG_WR(bp, BNX2_CTX_DATA, val);
323 	}
324 	spin_unlock_bh(&bp->indirect_lock);
325 }
326 
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331 	struct bnx2 *bp = netdev_priv(dev);
332 	struct drv_ctl_io *io = &info->data.io;
333 
334 	switch (info->cmd) {
335 	case DRV_CTL_IO_WR_CMD:
336 		bnx2_reg_wr_ind(bp, io->offset, io->data);
337 		break;
338 	case DRV_CTL_IO_RD_CMD:
339 		io->data = bnx2_reg_rd_ind(bp, io->offset);
340 		break;
341 	case DRV_CTL_CTX_WR_CMD:
342 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343 		break;
344 	default:
345 		return -EINVAL;
346 	}
347 	return 0;
348 }
349 
350 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351 {
352 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
354 	int sb_id;
355 
356 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
357 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358 		bnapi->cnic_present = 0;
359 		sb_id = bp->irq_nvecs;
360 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
361 	} else {
362 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363 		bnapi->cnic_tag = bnapi->last_status_idx;
364 		bnapi->cnic_present = 1;
365 		sb_id = 0;
366 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
367 	}
368 
369 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370 	cp->irq_arr[0].status_blk = (void *)
371 		((unsigned long) bnapi->status_blk.msi +
372 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373 	cp->irq_arr[0].status_blk_num = sb_id;
374 	cp->num_irq = 1;
375 }
376 
377 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378 			      void *data)
379 {
380 	struct bnx2 *bp = netdev_priv(dev);
381 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382 
383 	if (ops == NULL)
384 		return -EINVAL;
385 
386 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
387 		return -EBUSY;
388 
389 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
390 		return -ENODEV;
391 
392 	bp->cnic_data = data;
393 	rcu_assign_pointer(bp->cnic_ops, ops);
394 
395 	cp->num_irq = 0;
396 	cp->drv_state = CNIC_DRV_STATE_REGD;
397 
398 	bnx2_setup_cnic_irq_info(bp);
399 
400 	return 0;
401 }
402 
403 static int bnx2_unregister_cnic(struct net_device *dev)
404 {
405 	struct bnx2 *bp = netdev_priv(dev);
406 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
407 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
408 
409 	mutex_lock(&bp->cnic_lock);
410 	cp->drv_state = 0;
411 	bnapi->cnic_present = 0;
412 	rcu_assign_pointer(bp->cnic_ops, NULL);
413 	mutex_unlock(&bp->cnic_lock);
414 	synchronize_rcu();
415 	return 0;
416 }
417 
418 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419 {
420 	struct bnx2 *bp = netdev_priv(dev);
421 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422 
423 	if (!cp->max_iscsi_conn)
424 		return NULL;
425 
426 	cp->drv_owner = THIS_MODULE;
427 	cp->chip_id = bp->chip_id;
428 	cp->pdev = bp->pdev;
429 	cp->io_base = bp->regview;
430 	cp->drv_ctl = bnx2_drv_ctl;
431 	cp->drv_register_cnic = bnx2_register_cnic;
432 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
433 
434 	return cp;
435 }
436 EXPORT_SYMBOL(bnx2_cnic_probe);
437 
438 static void
439 bnx2_cnic_stop(struct bnx2 *bp)
440 {
441 	struct cnic_ops *c_ops;
442 	struct cnic_ctl_info info;
443 
444 	mutex_lock(&bp->cnic_lock);
445 	c_ops = rcu_dereference_protected(bp->cnic_ops,
446 					  lockdep_is_held(&bp->cnic_lock));
447 	if (c_ops) {
448 		info.cmd = CNIC_CTL_STOP_CMD;
449 		c_ops->cnic_ctl(bp->cnic_data, &info);
450 	}
451 	mutex_unlock(&bp->cnic_lock);
452 }
453 
454 static void
455 bnx2_cnic_start(struct bnx2 *bp)
456 {
457 	struct cnic_ops *c_ops;
458 	struct cnic_ctl_info info;
459 
460 	mutex_lock(&bp->cnic_lock);
461 	c_ops = rcu_dereference_protected(bp->cnic_ops,
462 					  lockdep_is_held(&bp->cnic_lock));
463 	if (c_ops) {
464 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466 
467 			bnapi->cnic_tag = bnapi->last_status_idx;
468 		}
469 		info.cmd = CNIC_CTL_START_CMD;
470 		c_ops->cnic_ctl(bp->cnic_data, &info);
471 	}
472 	mutex_unlock(&bp->cnic_lock);
473 }
474 
475 #else
476 
477 static void
478 bnx2_cnic_stop(struct bnx2 *bp)
479 {
480 }
481 
482 static void
483 bnx2_cnic_start(struct bnx2 *bp)
484 {
485 }
486 
487 #endif
488 
489 static int
490 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491 {
492 	u32 val1;
493 	int i, ret;
494 
495 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
496 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
497 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
498 
499 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
500 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
501 
502 		udelay(40);
503 	}
504 
505 	val1 = (bp->phy_addr << 21) | (reg << 16) |
506 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507 		BNX2_EMAC_MDIO_COMM_START_BUSY;
508 	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
509 
510 	for (i = 0; i < 50; i++) {
511 		udelay(10);
512 
513 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
514 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
515 			udelay(5);
516 
517 			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
518 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
519 
520 			break;
521 		}
522 	}
523 
524 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525 		*val = 0x0;
526 		ret = -EBUSY;
527 	}
528 	else {
529 		*val = val1;
530 		ret = 0;
531 	}
532 
533 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
534 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
535 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
536 
537 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
539 
540 		udelay(40);
541 	}
542 
543 	return ret;
544 }
545 
546 static int
547 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548 {
549 	u32 val1;
550 	int i, ret;
551 
552 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
553 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
554 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
555 
556 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
558 
559 		udelay(40);
560 	}
561 
562 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
565 	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
566 
567 	for (i = 0; i < 50; i++) {
568 		udelay(10);
569 
570 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
571 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
572 			udelay(5);
573 			break;
574 		}
575 	}
576 
577 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
578         	ret = -EBUSY;
579 	else
580 		ret = 0;
581 
582 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
583 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
584 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
585 
586 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
588 
589 		udelay(40);
590 	}
591 
592 	return ret;
593 }
594 
595 static void
596 bnx2_disable_int(struct bnx2 *bp)
597 {
598 	int i;
599 	struct bnx2_napi *bnapi;
600 
601 	for (i = 0; i < bp->irq_nvecs; i++) {
602 		bnapi = &bp->bnx2_napi[i];
603 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605 	}
606 	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 }
608 
609 static void
610 bnx2_enable_int(struct bnx2 *bp)
611 {
612 	int i;
613 	struct bnx2_napi *bnapi;
614 
615 	for (i = 0; i < bp->irq_nvecs; i++) {
616 		bnapi = &bp->bnx2_napi[i];
617 
618 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619 		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621 		       bnapi->last_status_idx);
622 
623 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624 		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625 		       bnapi->last_status_idx);
626 	}
627 	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
628 }
629 
630 static void
631 bnx2_disable_int_sync(struct bnx2 *bp)
632 {
633 	int i;
634 
635 	atomic_inc(&bp->intr_sem);
636 	if (!netif_running(bp->dev))
637 		return;
638 
639 	bnx2_disable_int(bp);
640 	for (i = 0; i < bp->irq_nvecs; i++)
641 		synchronize_irq(bp->irq_tbl[i].vector);
642 }
643 
644 static void
645 bnx2_napi_disable(struct bnx2 *bp)
646 {
647 	int i;
648 
649 	for (i = 0; i < bp->irq_nvecs; i++)
650 		napi_disable(&bp->bnx2_napi[i].napi);
651 }
652 
653 static void
654 bnx2_napi_enable(struct bnx2 *bp)
655 {
656 	int i;
657 
658 	for (i = 0; i < bp->irq_nvecs; i++)
659 		napi_enable(&bp->bnx2_napi[i].napi);
660 }
661 
662 static void
663 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
664 {
665 	if (stop_cnic)
666 		bnx2_cnic_stop(bp);
667 	if (netif_running(bp->dev)) {
668 		bnx2_napi_disable(bp);
669 		netif_tx_disable(bp->dev);
670 	}
671 	bnx2_disable_int_sync(bp);
672 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
673 }
674 
675 static void
676 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
677 {
678 	if (atomic_dec_and_test(&bp->intr_sem)) {
679 		if (netif_running(bp->dev)) {
680 			netif_tx_wake_all_queues(bp->dev);
681 			spin_lock_bh(&bp->phy_lock);
682 			if (bp->link_up)
683 				netif_carrier_on(bp->dev);
684 			spin_unlock_bh(&bp->phy_lock);
685 			bnx2_napi_enable(bp);
686 			bnx2_enable_int(bp);
687 			if (start_cnic)
688 				bnx2_cnic_start(bp);
689 		}
690 	}
691 }
692 
693 static void
694 bnx2_free_tx_mem(struct bnx2 *bp)
695 {
696 	int i;
697 
698 	for (i = 0; i < bp->num_tx_rings; i++) {
699 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701 
702 		if (txr->tx_desc_ring) {
703 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704 					  txr->tx_desc_ring,
705 					  txr->tx_desc_mapping);
706 			txr->tx_desc_ring = NULL;
707 		}
708 		kfree(txr->tx_buf_ring);
709 		txr->tx_buf_ring = NULL;
710 	}
711 }
712 
713 static void
714 bnx2_free_rx_mem(struct bnx2 *bp)
715 {
716 	int i;
717 
718 	for (i = 0; i < bp->num_rx_rings; i++) {
719 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721 		int j;
722 
723 		for (j = 0; j < bp->rx_max_ring; j++) {
724 			if (rxr->rx_desc_ring[j])
725 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726 						  rxr->rx_desc_ring[j],
727 						  rxr->rx_desc_mapping[j]);
728 			rxr->rx_desc_ring[j] = NULL;
729 		}
730 		vfree(rxr->rx_buf_ring);
731 		rxr->rx_buf_ring = NULL;
732 
733 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
734 			if (rxr->rx_pg_desc_ring[j])
735 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736 						  rxr->rx_pg_desc_ring[j],
737 						  rxr->rx_pg_desc_mapping[j]);
738 			rxr->rx_pg_desc_ring[j] = NULL;
739 		}
740 		vfree(rxr->rx_pg_ring);
741 		rxr->rx_pg_ring = NULL;
742 	}
743 }
744 
745 static int
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
747 {
748 	int i;
749 
750 	for (i = 0; i < bp->num_tx_rings; i++) {
751 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753 
754 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755 		if (txr->tx_buf_ring == NULL)
756 			return -ENOMEM;
757 
758 		txr->tx_desc_ring =
759 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760 					   &txr->tx_desc_mapping, GFP_KERNEL);
761 		if (txr->tx_desc_ring == NULL)
762 			return -ENOMEM;
763 	}
764 	return 0;
765 }
766 
767 static int
768 bnx2_alloc_rx_mem(struct bnx2 *bp)
769 {
770 	int i;
771 
772 	for (i = 0; i < bp->num_rx_rings; i++) {
773 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775 		int j;
776 
777 		rxr->rx_buf_ring =
778 			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779 		if (rxr->rx_buf_ring == NULL)
780 			return -ENOMEM;
781 
782 		for (j = 0; j < bp->rx_max_ring; j++) {
783 			rxr->rx_desc_ring[j] =
784 				dma_alloc_coherent(&bp->pdev->dev,
785 						   RXBD_RING_SIZE,
786 						   &rxr->rx_desc_mapping[j],
787 						   GFP_KERNEL);
788 			if (rxr->rx_desc_ring[j] == NULL)
789 				return -ENOMEM;
790 
791 		}
792 
793 		if (bp->rx_pg_ring_size) {
794 			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
795 						  bp->rx_max_pg_ring);
796 			if (rxr->rx_pg_ring == NULL)
797 				return -ENOMEM;
798 
799 		}
800 
801 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
802 			rxr->rx_pg_desc_ring[j] =
803 				dma_alloc_coherent(&bp->pdev->dev,
804 						   RXBD_RING_SIZE,
805 						   &rxr->rx_pg_desc_mapping[j],
806 						   GFP_KERNEL);
807 			if (rxr->rx_pg_desc_ring[j] == NULL)
808 				return -ENOMEM;
809 
810 		}
811 	}
812 	return 0;
813 }
814 
815 static void
816 bnx2_free_mem(struct bnx2 *bp)
817 {
818 	int i;
819 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
820 
821 	bnx2_free_tx_mem(bp);
822 	bnx2_free_rx_mem(bp);
823 
824 	for (i = 0; i < bp->ctx_pages; i++) {
825 		if (bp->ctx_blk[i]) {
826 			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
827 					  bp->ctx_blk[i],
828 					  bp->ctx_blk_mapping[i]);
829 			bp->ctx_blk[i] = NULL;
830 		}
831 	}
832 	if (bnapi->status_blk.msi) {
833 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834 				  bnapi->status_blk.msi,
835 				  bp->status_blk_mapping);
836 		bnapi->status_blk.msi = NULL;
837 		bp->stats_blk = NULL;
838 	}
839 }
840 
841 static int
842 bnx2_alloc_mem(struct bnx2 *bp)
843 {
844 	int i, status_blk_size, err;
845 	struct bnx2_napi *bnapi;
846 	void *status_blk;
847 
848 	/* Combine status and statistics blocks into one allocation. */
849 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
850 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
851 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
852 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
853 	bp->status_stats_size = status_blk_size +
854 				sizeof(struct statistics_block);
855 
856 	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 					&bp->status_blk_mapping, GFP_KERNEL);
858 	if (status_blk == NULL)
859 		goto alloc_mem_err;
860 
861 	memset(status_blk, 0, bp->status_stats_size);
862 
863 	bnapi = &bp->bnx2_napi[0];
864 	bnapi->status_blk.msi = status_blk;
865 	bnapi->hw_tx_cons_ptr =
866 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
867 	bnapi->hw_rx_cons_ptr =
868 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
869 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
870 		for (i = 1; i < bp->irq_nvecs; i++) {
871 			struct status_block_msix *sblk;
872 
873 			bnapi = &bp->bnx2_napi[i];
874 
875 			sblk = (void *) (status_blk +
876 					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877 			bnapi->status_blk.msix = sblk;
878 			bnapi->hw_tx_cons_ptr =
879 				&sblk->status_tx_quick_consumer_index;
880 			bnapi->hw_rx_cons_ptr =
881 				&sblk->status_rx_quick_consumer_index;
882 			bnapi->int_num = i << 24;
883 		}
884 	}
885 
886 	bp->stats_blk = status_blk + status_blk_size;
887 
888 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889 
890 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
891 		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
892 		if (bp->ctx_pages == 0)
893 			bp->ctx_pages = 1;
894 		for (i = 0; i < bp->ctx_pages; i++) {
895 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896 						BCM_PAGE_SIZE,
897 						&bp->ctx_blk_mapping[i],
898 						GFP_KERNEL);
899 			if (bp->ctx_blk[i] == NULL)
900 				goto alloc_mem_err;
901 		}
902 	}
903 
904 	err = bnx2_alloc_rx_mem(bp);
905 	if (err)
906 		goto alloc_mem_err;
907 
908 	err = bnx2_alloc_tx_mem(bp);
909 	if (err)
910 		goto alloc_mem_err;
911 
912 	return 0;
913 
914 alloc_mem_err:
915 	bnx2_free_mem(bp);
916 	return -ENOMEM;
917 }
918 
919 static void
920 bnx2_report_fw_link(struct bnx2 *bp)
921 {
922 	u32 fw_link_status = 0;
923 
924 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
925 		return;
926 
927 	if (bp->link_up) {
928 		u32 bmsr;
929 
930 		switch (bp->line_speed) {
931 		case SPEED_10:
932 			if (bp->duplex == DUPLEX_HALF)
933 				fw_link_status = BNX2_LINK_STATUS_10HALF;
934 			else
935 				fw_link_status = BNX2_LINK_STATUS_10FULL;
936 			break;
937 		case SPEED_100:
938 			if (bp->duplex == DUPLEX_HALF)
939 				fw_link_status = BNX2_LINK_STATUS_100HALF;
940 			else
941 				fw_link_status = BNX2_LINK_STATUS_100FULL;
942 			break;
943 		case SPEED_1000:
944 			if (bp->duplex == DUPLEX_HALF)
945 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
946 			else
947 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
948 			break;
949 		case SPEED_2500:
950 			if (bp->duplex == DUPLEX_HALF)
951 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
952 			else
953 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
954 			break;
955 		}
956 
957 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
958 
959 		if (bp->autoneg) {
960 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
961 
962 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
964 
965 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
966 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
967 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968 			else
969 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
970 		}
971 	}
972 	else
973 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974 
975 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
976 }
977 
978 static char *
979 bnx2_xceiver_str(struct bnx2 *bp)
980 {
981 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
982 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
983 		 "Copper");
984 }
985 
986 static void
987 bnx2_report_link(struct bnx2 *bp)
988 {
989 	if (bp->link_up) {
990 		netif_carrier_on(bp->dev);
991 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
992 			    bnx2_xceiver_str(bp),
993 			    bp->line_speed,
994 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
995 
996 		if (bp->flow_ctrl) {
997 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
998 				pr_cont(", receive ");
999 				if (bp->flow_ctrl & FLOW_CTRL_TX)
1000 					pr_cont("& transmit ");
1001 			}
1002 			else {
1003 				pr_cont(", transmit ");
1004 			}
1005 			pr_cont("flow control ON");
1006 		}
1007 		pr_cont("\n");
1008 	} else {
1009 		netif_carrier_off(bp->dev);
1010 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1011 			   bnx2_xceiver_str(bp));
1012 	}
1013 
1014 	bnx2_report_fw_link(bp);
1015 }
1016 
1017 static void
1018 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1019 {
1020 	u32 local_adv, remote_adv;
1021 
1022 	bp->flow_ctrl = 0;
1023 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1024 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1025 
1026 		if (bp->duplex == DUPLEX_FULL) {
1027 			bp->flow_ctrl = bp->req_flow_ctrl;
1028 		}
1029 		return;
1030 	}
1031 
1032 	if (bp->duplex != DUPLEX_FULL) {
1033 		return;
1034 	}
1035 
1036 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1037 	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1038 		u32 val;
1039 
1040 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1041 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1042 			bp->flow_ctrl |= FLOW_CTRL_TX;
1043 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1044 			bp->flow_ctrl |= FLOW_CTRL_RX;
1045 		return;
1046 	}
1047 
1048 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1049 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1050 
1051 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1052 		u32 new_local_adv = 0;
1053 		u32 new_remote_adv = 0;
1054 
1055 		if (local_adv & ADVERTISE_1000XPAUSE)
1056 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1057 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1058 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1059 		if (remote_adv & ADVERTISE_1000XPAUSE)
1060 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1061 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1062 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1063 
1064 		local_adv = new_local_adv;
1065 		remote_adv = new_remote_adv;
1066 	}
1067 
1068 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1069 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1070 		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1071 	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073 			}
1074 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1075 				bp->flow_ctrl = FLOW_CTRL_RX;
1076 			}
1077 		}
1078 		else {
1079 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1080 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1081 			}
1082 		}
1083 	}
1084 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1085 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1086 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1087 
1088 			bp->flow_ctrl = FLOW_CTRL_TX;
1089 		}
1090 	}
1091 }
1092 
1093 static int
1094 bnx2_5709s_linkup(struct bnx2 *bp)
1095 {
1096 	u32 val, speed;
1097 
1098 	bp->link_up = 1;
1099 
1100 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103 
1104 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105 		bp->line_speed = bp->req_line_speed;
1106 		bp->duplex = bp->req_duplex;
1107 		return 0;
1108 	}
1109 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110 	switch (speed) {
1111 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1112 			bp->line_speed = SPEED_10;
1113 			break;
1114 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1115 			bp->line_speed = SPEED_100;
1116 			break;
1117 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119 			bp->line_speed = SPEED_1000;
1120 			break;
1121 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122 			bp->line_speed = SPEED_2500;
1123 			break;
1124 	}
1125 	if (val & MII_BNX2_GP_TOP_AN_FD)
1126 		bp->duplex = DUPLEX_FULL;
1127 	else
1128 		bp->duplex = DUPLEX_HALF;
1129 	return 0;
1130 }
1131 
1132 static int
1133 bnx2_5708s_linkup(struct bnx2 *bp)
1134 {
1135 	u32 val;
1136 
1137 	bp->link_up = 1;
1138 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140 		case BCM5708S_1000X_STAT1_SPEED_10:
1141 			bp->line_speed = SPEED_10;
1142 			break;
1143 		case BCM5708S_1000X_STAT1_SPEED_100:
1144 			bp->line_speed = SPEED_100;
1145 			break;
1146 		case BCM5708S_1000X_STAT1_SPEED_1G:
1147 			bp->line_speed = SPEED_1000;
1148 			break;
1149 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1150 			bp->line_speed = SPEED_2500;
1151 			break;
1152 	}
1153 	if (val & BCM5708S_1000X_STAT1_FD)
1154 		bp->duplex = DUPLEX_FULL;
1155 	else
1156 		bp->duplex = DUPLEX_HALF;
1157 
1158 	return 0;
1159 }
1160 
1161 static int
1162 bnx2_5706s_linkup(struct bnx2 *bp)
1163 {
1164 	u32 bmcr, local_adv, remote_adv, common;
1165 
1166 	bp->link_up = 1;
1167 	bp->line_speed = SPEED_1000;
1168 
1169 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170 	if (bmcr & BMCR_FULLDPLX) {
1171 		bp->duplex = DUPLEX_FULL;
1172 	}
1173 	else {
1174 		bp->duplex = DUPLEX_HALF;
1175 	}
1176 
1177 	if (!(bmcr & BMCR_ANENABLE)) {
1178 		return 0;
1179 	}
1180 
1181 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183 
1184 	common = local_adv & remote_adv;
1185 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186 
1187 		if (common & ADVERTISE_1000XFULL) {
1188 			bp->duplex = DUPLEX_FULL;
1189 		}
1190 		else {
1191 			bp->duplex = DUPLEX_HALF;
1192 		}
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 static int
1199 bnx2_copper_linkup(struct bnx2 *bp)
1200 {
1201 	u32 bmcr;
1202 
1203 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1204 	if (bmcr & BMCR_ANENABLE) {
1205 		u32 local_adv, remote_adv, common;
1206 
1207 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209 
1210 		common = local_adv & (remote_adv >> 2);
1211 		if (common & ADVERTISE_1000FULL) {
1212 			bp->line_speed = SPEED_1000;
1213 			bp->duplex = DUPLEX_FULL;
1214 		}
1215 		else if (common & ADVERTISE_1000HALF) {
1216 			bp->line_speed = SPEED_1000;
1217 			bp->duplex = DUPLEX_HALF;
1218 		}
1219 		else {
1220 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222 
1223 			common = local_adv & remote_adv;
1224 			if (common & ADVERTISE_100FULL) {
1225 				bp->line_speed = SPEED_100;
1226 				bp->duplex = DUPLEX_FULL;
1227 			}
1228 			else if (common & ADVERTISE_100HALF) {
1229 				bp->line_speed = SPEED_100;
1230 				bp->duplex = DUPLEX_HALF;
1231 			}
1232 			else if (common & ADVERTISE_10FULL) {
1233 				bp->line_speed = SPEED_10;
1234 				bp->duplex = DUPLEX_FULL;
1235 			}
1236 			else if (common & ADVERTISE_10HALF) {
1237 				bp->line_speed = SPEED_10;
1238 				bp->duplex = DUPLEX_HALF;
1239 			}
1240 			else {
1241 				bp->line_speed = 0;
1242 				bp->link_up = 0;
1243 			}
1244 		}
1245 	}
1246 	else {
1247 		if (bmcr & BMCR_SPEED100) {
1248 			bp->line_speed = SPEED_100;
1249 		}
1250 		else {
1251 			bp->line_speed = SPEED_10;
1252 		}
1253 		if (bmcr & BMCR_FULLDPLX) {
1254 			bp->duplex = DUPLEX_FULL;
1255 		}
1256 		else {
1257 			bp->duplex = DUPLEX_HALF;
1258 		}
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 static void
1265 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266 {
1267 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268 
1269 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1270 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271 	val |= 0x02 << 8;
1272 
1273 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1274 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275 
1276 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277 }
1278 
1279 static void
1280 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281 {
1282 	int i;
1283 	u32 cid;
1284 
1285 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286 		if (i == 1)
1287 			cid = RX_RSS_CID;
1288 		bnx2_init_rx_context(bp, cid);
1289 	}
1290 }
1291 
1292 static void
1293 bnx2_set_mac_link(struct bnx2 *bp)
1294 {
1295 	u32 val;
1296 
1297 	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1298 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1299 		(bp->duplex == DUPLEX_HALF)) {
1300 		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1301 	}
1302 
1303 	/* Configure the EMAC mode register. */
1304 	val = REG_RD(bp, BNX2_EMAC_MODE);
1305 
1306 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1307 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1308 		BNX2_EMAC_MODE_25G_MODE);
1309 
1310 	if (bp->link_up) {
1311 		switch (bp->line_speed) {
1312 			case SPEED_10:
1313 				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1314 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1315 					break;
1316 				}
1317 				/* fall through */
1318 			case SPEED_100:
1319 				val |= BNX2_EMAC_MODE_PORT_MII;
1320 				break;
1321 			case SPEED_2500:
1322 				val |= BNX2_EMAC_MODE_25G_MODE;
1323 				/* fall through */
1324 			case SPEED_1000:
1325 				val |= BNX2_EMAC_MODE_PORT_GMII;
1326 				break;
1327 		}
1328 	}
1329 	else {
1330 		val |= BNX2_EMAC_MODE_PORT_GMII;
1331 	}
1332 
1333 	/* Set the MAC to operate in the appropriate duplex mode. */
1334 	if (bp->duplex == DUPLEX_HALF)
1335 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1336 	REG_WR(bp, BNX2_EMAC_MODE, val);
1337 
1338 	/* Enable/disable rx PAUSE. */
1339 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340 
1341 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1342 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1343 	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344 
1345 	/* Enable/disable tx PAUSE. */
1346 	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1347 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348 
1349 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1350 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1351 	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1352 
1353 	/* Acknowledge the interrupt. */
1354 	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355 
1356 	bnx2_init_all_rx_contexts(bp);
1357 }
1358 
1359 static void
1360 bnx2_enable_bmsr1(struct bnx2 *bp)
1361 {
1362 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363 	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1364 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1366 }
1367 
1368 static void
1369 bnx2_disable_bmsr1(struct bnx2 *bp)
1370 {
1371 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372 	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1373 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375 }
1376 
1377 static int
1378 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379 {
1380 	u32 up1;
1381 	int ret = 1;
1382 
1383 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1384 		return 0;
1385 
1386 	if (bp->autoneg & AUTONEG_SPEED)
1387 		bp->advertising |= ADVERTISED_2500baseX_Full;
1388 
1389 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391 
1392 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1393 	if (!(up1 & BCM5708S_UP1_2G5)) {
1394 		up1 |= BCM5708S_UP1_2G5;
1395 		bnx2_write_phy(bp, bp->mii_up1, up1);
1396 		ret = 0;
1397 	}
1398 
1399 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402 
1403 	return ret;
1404 }
1405 
1406 static int
1407 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408 {
1409 	u32 up1;
1410 	int ret = 0;
1411 
1412 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413 		return 0;
1414 
1415 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417 
1418 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1419 	if (up1 & BCM5708S_UP1_2G5) {
1420 		up1 &= ~BCM5708S_UP1_2G5;
1421 		bnx2_write_phy(bp, bp->mii_up1, up1);
1422 		ret = 1;
1423 	}
1424 
1425 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428 
1429 	return ret;
1430 }
1431 
1432 static void
1433 bnx2_enable_forced_2g5(struct bnx2 *bp)
1434 {
1435 	u32 uninitialized_var(bmcr);
1436 	int err;
1437 
1438 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439 		return;
1440 
1441 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442 		u32 val;
1443 
1444 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1446 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448 			val |= MII_BNX2_SD_MISC1_FORCE |
1449 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1450 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451 		}
1452 
1453 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456 
1457 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1458 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459 		if (!err)
1460 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1461 	} else {
1462 		return;
1463 	}
1464 
1465 	if (err)
1466 		return;
1467 
1468 	if (bp->autoneg & AUTONEG_SPEED) {
1469 		bmcr &= ~BMCR_ANENABLE;
1470 		if (bp->req_duplex == DUPLEX_FULL)
1471 			bmcr |= BMCR_FULLDPLX;
1472 	}
1473 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474 }
1475 
1476 static void
1477 bnx2_disable_forced_2g5(struct bnx2 *bp)
1478 {
1479 	u32 uninitialized_var(bmcr);
1480 	int err;
1481 
1482 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483 		return;
1484 
1485 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486 		u32 val;
1487 
1488 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1490 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1492 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493 		}
1494 
1495 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498 
1499 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1500 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501 		if (!err)
1502 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503 	} else {
1504 		return;
1505 	}
1506 
1507 	if (err)
1508 		return;
1509 
1510 	if (bp->autoneg & AUTONEG_SPEED)
1511 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513 }
1514 
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518 	u32 val;
1519 
1520 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522 	if (start)
1523 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524 	else
1525 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527 
1528 static int
1529 bnx2_set_link(struct bnx2 *bp)
1530 {
1531 	u32 bmsr;
1532 	u8 link_up;
1533 
1534 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1535 		bp->link_up = 1;
1536 		return 0;
1537 	}
1538 
1539 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1540 		return 0;
1541 
1542 	link_up = bp->link_up;
1543 
1544 	bnx2_enable_bmsr1(bp);
1545 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1547 	bnx2_disable_bmsr1(bp);
1548 
1549 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1550 	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1551 		u32 val, an_dbg;
1552 
1553 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1554 			bnx2_5706s_force_link_dn(bp, 0);
1555 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1556 		}
1557 		val = REG_RD(bp, BNX2_EMAC_STATUS);
1558 
1559 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1560 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1561 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1562 
1563 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1564 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1565 			bmsr |= BMSR_LSTATUS;
1566 		else
1567 			bmsr &= ~BMSR_LSTATUS;
1568 	}
1569 
1570 	if (bmsr & BMSR_LSTATUS) {
1571 		bp->link_up = 1;
1572 
1573 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1574 			if (CHIP_NUM(bp) == CHIP_NUM_5706)
1575 				bnx2_5706s_linkup(bp);
1576 			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1577 				bnx2_5708s_linkup(bp);
1578 			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1579 				bnx2_5709s_linkup(bp);
1580 		}
1581 		else {
1582 			bnx2_copper_linkup(bp);
1583 		}
1584 		bnx2_resolve_flow_ctrl(bp);
1585 	}
1586 	else {
1587 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1588 		    (bp->autoneg & AUTONEG_SPEED))
1589 			bnx2_disable_forced_2g5(bp);
1590 
1591 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1592 			u32 bmcr;
1593 
1594 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1595 			bmcr |= BMCR_ANENABLE;
1596 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1597 
1598 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1599 		}
1600 		bp->link_up = 0;
1601 	}
1602 
1603 	if (bp->link_up != link_up) {
1604 		bnx2_report_link(bp);
1605 	}
1606 
1607 	bnx2_set_mac_link(bp);
1608 
1609 	return 0;
1610 }
1611 
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615 	int i;
1616 	u32 reg;
1617 
1618         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619 
1620 #define PHY_RESET_MAX_WAIT 100
1621 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622 		udelay(10);
1623 
1624 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625 		if (!(reg & BMCR_RESET)) {
1626 			udelay(20);
1627 			break;
1628 		}
1629 	}
1630 	if (i == PHY_RESET_MAX_WAIT) {
1631 		return -EBUSY;
1632 	}
1633 	return 0;
1634 }
1635 
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639 	u32 adv = 0;
1640 
1641 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643 
1644 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645 			adv = ADVERTISE_1000XPAUSE;
1646 		}
1647 		else {
1648 			adv = ADVERTISE_PAUSE_CAP;
1649 		}
1650 	}
1651 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653 			adv = ADVERTISE_1000XPSE_ASYM;
1654 		}
1655 		else {
1656 			adv = ADVERTISE_PAUSE_ASYM;
1657 		}
1658 	}
1659 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662 		}
1663 		else {
1664 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665 		}
1666 	}
1667 	return adv;
1668 }
1669 
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671 
1672 static int
1673 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1674 __releases(&bp->phy_lock)
1675 __acquires(&bp->phy_lock)
1676 {
1677 	u32 speed_arg = 0, pause_adv;
1678 
1679 	pause_adv = bnx2_phy_get_pause_adv(bp);
1680 
1681 	if (bp->autoneg & AUTONEG_SPEED) {
1682 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683 		if (bp->advertising & ADVERTISED_10baseT_Half)
1684 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685 		if (bp->advertising & ADVERTISED_10baseT_Full)
1686 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687 		if (bp->advertising & ADVERTISED_100baseT_Half)
1688 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689 		if (bp->advertising & ADVERTISED_100baseT_Full)
1690 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1692 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1694 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695 	} else {
1696 		if (bp->req_line_speed == SPEED_2500)
1697 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698 		else if (bp->req_line_speed == SPEED_1000)
1699 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700 		else if (bp->req_line_speed == SPEED_100) {
1701 			if (bp->req_duplex == DUPLEX_FULL)
1702 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703 			else
1704 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705 		} else if (bp->req_line_speed == SPEED_10) {
1706 			if (bp->req_duplex == DUPLEX_FULL)
1707 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708 			else
1709 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710 		}
1711 	}
1712 
1713 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1715 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1716 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717 
1718 	if (port == PORT_TP)
1719 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721 
1722 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1723 
1724 	spin_unlock_bh(&bp->phy_lock);
1725 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1726 	spin_lock_bh(&bp->phy_lock);
1727 
1728 	return 0;
1729 }
1730 
1731 static int
1732 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1733 __releases(&bp->phy_lock)
1734 __acquires(&bp->phy_lock)
1735 {
1736 	u32 adv, bmcr;
1737 	u32 new_adv = 0;
1738 
1739 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1740 		return bnx2_setup_remote_phy(bp, port);
1741 
1742 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1743 		u32 new_bmcr;
1744 		int force_link_down = 0;
1745 
1746 		if (bp->req_line_speed == SPEED_2500) {
1747 			if (!bnx2_test_and_enable_2g5(bp))
1748 				force_link_down = 1;
1749 		} else if (bp->req_line_speed == SPEED_1000) {
1750 			if (bnx2_test_and_disable_2g5(bp))
1751 				force_link_down = 1;
1752 		}
1753 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1754 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755 
1756 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1757 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1758 		new_bmcr |= BMCR_SPEED1000;
1759 
1760 		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1761 			if (bp->req_line_speed == SPEED_2500)
1762 				bnx2_enable_forced_2g5(bp);
1763 			else if (bp->req_line_speed == SPEED_1000) {
1764 				bnx2_disable_forced_2g5(bp);
1765 				new_bmcr &= ~0x2000;
1766 			}
1767 
1768 		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1769 			if (bp->req_line_speed == SPEED_2500)
1770 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771 			else
1772 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1773 		}
1774 
1775 		if (bp->req_duplex == DUPLEX_FULL) {
1776 			adv |= ADVERTISE_1000XFULL;
1777 			new_bmcr |= BMCR_FULLDPLX;
1778 		}
1779 		else {
1780 			adv |= ADVERTISE_1000XHALF;
1781 			new_bmcr &= ~BMCR_FULLDPLX;
1782 		}
1783 		if ((new_bmcr != bmcr) || (force_link_down)) {
1784 			/* Force a link down visible on the other side */
1785 			if (bp->link_up) {
1786 				bnx2_write_phy(bp, bp->mii_adv, adv &
1787 					       ~(ADVERTISE_1000XFULL |
1788 						 ADVERTISE_1000XHALF));
1789 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1790 					BMCR_ANRESTART | BMCR_ANENABLE);
1791 
1792 				bp->link_up = 0;
1793 				netif_carrier_off(bp->dev);
1794 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1795 				bnx2_report_link(bp);
1796 			}
1797 			bnx2_write_phy(bp, bp->mii_adv, adv);
1798 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1799 		} else {
1800 			bnx2_resolve_flow_ctrl(bp);
1801 			bnx2_set_mac_link(bp);
1802 		}
1803 		return 0;
1804 	}
1805 
1806 	bnx2_test_and_enable_2g5(bp);
1807 
1808 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1809 		new_adv |= ADVERTISE_1000XFULL;
1810 
1811 	new_adv |= bnx2_phy_get_pause_adv(bp);
1812 
1813 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1814 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1815 
1816 	bp->serdes_an_pending = 0;
1817 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1818 		/* Force a link down visible on the other side */
1819 		if (bp->link_up) {
1820 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1821 			spin_unlock_bh(&bp->phy_lock);
1822 			msleep(20);
1823 			spin_lock_bh(&bp->phy_lock);
1824 		}
1825 
1826 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1827 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1828 			BMCR_ANENABLE);
1829 		/* Speed up link-up time when the link partner
1830 		 * does not autonegotiate which is very common
1831 		 * in blade servers. Some blade servers use
1832 		 * IPMI for kerboard input and it's important
1833 		 * to minimize link disruptions. Autoneg. involves
1834 		 * exchanging base pages plus 3 next pages and
1835 		 * normally completes in about 120 msec.
1836 		 */
1837 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1838 		bp->serdes_an_pending = 1;
1839 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1840 	} else {
1841 		bnx2_resolve_flow_ctrl(bp);
1842 		bnx2_set_mac_link(bp);
1843 	}
1844 
1845 	return 0;
1846 }
1847 
1848 #define ETHTOOL_ALL_FIBRE_SPEED						\
1849 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1850 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1851 		(ADVERTISED_1000baseT_Full)
1852 
1853 #define ETHTOOL_ALL_COPPER_SPEED					\
1854 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1855 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1856 	ADVERTISED_1000baseT_Full)
1857 
1858 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1859 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1860 
1861 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862 
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866 	u32 link;
1867 
1868 	if (bp->phy_port == PORT_TP)
1869 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870 	else
1871 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872 
1873 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874 		bp->req_line_speed = 0;
1875 		bp->autoneg |= AUTONEG_SPEED;
1876 		bp->advertising = ADVERTISED_Autoneg;
1877 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878 			bp->advertising |= ADVERTISED_10baseT_Half;
1879 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880 			bp->advertising |= ADVERTISED_10baseT_Full;
1881 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882 			bp->advertising |= ADVERTISED_100baseT_Half;
1883 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884 			bp->advertising |= ADVERTISED_100baseT_Full;
1885 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886 			bp->advertising |= ADVERTISED_1000baseT_Full;
1887 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888 			bp->advertising |= ADVERTISED_2500baseX_Full;
1889 	} else {
1890 		bp->autoneg = 0;
1891 		bp->advertising = 0;
1892 		bp->req_duplex = DUPLEX_FULL;
1893 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894 			bp->req_line_speed = SPEED_10;
1895 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896 				bp->req_duplex = DUPLEX_HALF;
1897 		}
1898 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899 			bp->req_line_speed = SPEED_100;
1900 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901 				bp->req_duplex = DUPLEX_HALF;
1902 		}
1903 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904 			bp->req_line_speed = SPEED_1000;
1905 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906 			bp->req_line_speed = SPEED_2500;
1907 	}
1908 }
1909 
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914 		bnx2_set_default_remote_link(bp);
1915 		return;
1916 	}
1917 
1918 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919 	bp->req_line_speed = 0;
1920 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921 		u32 reg;
1922 
1923 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924 
1925 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928 			bp->autoneg = 0;
1929 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1930 			bp->req_duplex = DUPLEX_FULL;
1931 		}
1932 	} else
1933 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935 
1936 static void
1937 bnx2_send_heart_beat(struct bnx2 *bp)
1938 {
1939 	u32 msg;
1940 	u32 addr;
1941 
1942 	spin_lock(&bp->indirect_lock);
1943 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947 	spin_unlock(&bp->indirect_lock);
1948 }
1949 
1950 static void
1951 bnx2_remote_phy_event(struct bnx2 *bp)
1952 {
1953 	u32 msg;
1954 	u8 link_up = bp->link_up;
1955 	u8 old_port;
1956 
1957 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1958 
1959 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1960 		bnx2_send_heart_beat(bp);
1961 
1962 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963 
1964 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1965 		bp->link_up = 0;
1966 	else {
1967 		u32 speed;
1968 
1969 		bp->link_up = 1;
1970 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1971 		bp->duplex = DUPLEX_FULL;
1972 		switch (speed) {
1973 			case BNX2_LINK_STATUS_10HALF:
1974 				bp->duplex = DUPLEX_HALF;
1975 			case BNX2_LINK_STATUS_10FULL:
1976 				bp->line_speed = SPEED_10;
1977 				break;
1978 			case BNX2_LINK_STATUS_100HALF:
1979 				bp->duplex = DUPLEX_HALF;
1980 			case BNX2_LINK_STATUS_100BASE_T4:
1981 			case BNX2_LINK_STATUS_100FULL:
1982 				bp->line_speed = SPEED_100;
1983 				break;
1984 			case BNX2_LINK_STATUS_1000HALF:
1985 				bp->duplex = DUPLEX_HALF;
1986 			case BNX2_LINK_STATUS_1000FULL:
1987 				bp->line_speed = SPEED_1000;
1988 				break;
1989 			case BNX2_LINK_STATUS_2500HALF:
1990 				bp->duplex = DUPLEX_HALF;
1991 			case BNX2_LINK_STATUS_2500FULL:
1992 				bp->line_speed = SPEED_2500;
1993 				break;
1994 			default:
1995 				bp->line_speed = 0;
1996 				break;
1997 		}
1998 
1999 		bp->flow_ctrl = 0;
2000 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2001 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2002 			if (bp->duplex == DUPLEX_FULL)
2003 				bp->flow_ctrl = bp->req_flow_ctrl;
2004 		} else {
2005 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2006 				bp->flow_ctrl |= FLOW_CTRL_TX;
2007 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2008 				bp->flow_ctrl |= FLOW_CTRL_RX;
2009 		}
2010 
2011 		old_port = bp->phy_port;
2012 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2013 			bp->phy_port = PORT_FIBRE;
2014 		else
2015 			bp->phy_port = PORT_TP;
2016 
2017 		if (old_port != bp->phy_port)
2018 			bnx2_set_default_link(bp);
2019 
2020 	}
2021 	if (bp->link_up != link_up)
2022 		bnx2_report_link(bp);
2023 
2024 	bnx2_set_mac_link(bp);
2025 }
2026 
2027 static int
2028 bnx2_set_remote_link(struct bnx2 *bp)
2029 {
2030 	u32 evt_code;
2031 
2032 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033 	switch (evt_code) {
2034 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2035 			bnx2_remote_phy_event(bp);
2036 			break;
2037 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038 		default:
2039 			bnx2_send_heart_beat(bp);
2040 			break;
2041 	}
2042 	return 0;
2043 }
2044 
2045 static int
2046 bnx2_setup_copper_phy(struct bnx2 *bp)
2047 __releases(&bp->phy_lock)
2048 __acquires(&bp->phy_lock)
2049 {
2050 	u32 bmcr;
2051 	u32 new_bmcr;
2052 
2053 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2054 
2055 	if (bp->autoneg & AUTONEG_SPEED) {
2056 		u32 adv_reg, adv1000_reg;
2057 		u32 new_adv_reg = 0;
2058 		u32 new_adv1000_reg = 0;
2059 
2060 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2061 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2062 			ADVERTISE_PAUSE_ASYM);
2063 
2064 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2065 		adv1000_reg &= PHY_ALL_1000_SPEED;
2066 
2067 		if (bp->advertising & ADVERTISED_10baseT_Half)
2068 			new_adv_reg |= ADVERTISE_10HALF;
2069 		if (bp->advertising & ADVERTISED_10baseT_Full)
2070 			new_adv_reg |= ADVERTISE_10FULL;
2071 		if (bp->advertising & ADVERTISED_100baseT_Half)
2072 			new_adv_reg |= ADVERTISE_100HALF;
2073 		if (bp->advertising & ADVERTISED_100baseT_Full)
2074 			new_adv_reg |= ADVERTISE_100FULL;
2075 		if (bp->advertising & ADVERTISED_1000baseT_Full)
2076 			new_adv1000_reg |= ADVERTISE_1000FULL;
2077 
2078 		new_adv_reg |= ADVERTISE_CSMA;
2079 
2080 		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2081 
2082 		if ((adv1000_reg != new_adv1000_reg) ||
2083 			(adv_reg != new_adv_reg) ||
2084 			((bmcr & BMCR_ANENABLE) == 0)) {
2085 
2086 			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2087 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2088 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2089 				BMCR_ANENABLE);
2090 		}
2091 		else if (bp->link_up) {
2092 			/* Flow ctrl may have changed from auto to forced */
2093 			/* or vice-versa. */
2094 
2095 			bnx2_resolve_flow_ctrl(bp);
2096 			bnx2_set_mac_link(bp);
2097 		}
2098 		return 0;
2099 	}
2100 
2101 	new_bmcr = 0;
2102 	if (bp->req_line_speed == SPEED_100) {
2103 		new_bmcr |= BMCR_SPEED100;
2104 	}
2105 	if (bp->req_duplex == DUPLEX_FULL) {
2106 		new_bmcr |= BMCR_FULLDPLX;
2107 	}
2108 	if (new_bmcr != bmcr) {
2109 		u32 bmsr;
2110 
2111 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2112 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2113 
2114 		if (bmsr & BMSR_LSTATUS) {
2115 			/* Force link down */
2116 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2117 			spin_unlock_bh(&bp->phy_lock);
2118 			msleep(50);
2119 			spin_lock_bh(&bp->phy_lock);
2120 
2121 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2122 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2123 		}
2124 
2125 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2126 
2127 		/* Normally, the new speed is setup after the link has
2128 		 * gone down and up again. In some cases, link will not go
2129 		 * down so we need to set up the new speed here.
2130 		 */
2131 		if (bmsr & BMSR_LSTATUS) {
2132 			bp->line_speed = bp->req_line_speed;
2133 			bp->duplex = bp->req_duplex;
2134 			bnx2_resolve_flow_ctrl(bp);
2135 			bnx2_set_mac_link(bp);
2136 		}
2137 	} else {
2138 		bnx2_resolve_flow_ctrl(bp);
2139 		bnx2_set_mac_link(bp);
2140 	}
2141 	return 0;
2142 }
2143 
2144 static int
2145 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2146 __releases(&bp->phy_lock)
2147 __acquires(&bp->phy_lock)
2148 {
2149 	if (bp->loopback == MAC_LOOPBACK)
2150 		return 0;
2151 
2152 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2153 		return bnx2_setup_serdes_phy(bp, port);
2154 	}
2155 	else {
2156 		return bnx2_setup_copper_phy(bp);
2157 	}
2158 }
2159 
2160 static int
2161 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2162 {
2163 	u32 val;
2164 
2165 	bp->mii_bmcr = MII_BMCR + 0x10;
2166 	bp->mii_bmsr = MII_BMSR + 0x10;
2167 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2168 	bp->mii_adv = MII_ADVERTISE + 0x10;
2169 	bp->mii_lpa = MII_LPA + 0x10;
2170 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2171 
2172 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2173 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2174 
2175 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2176 	if (reset_phy)
2177 		bnx2_reset_phy(bp);
2178 
2179 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2180 
2181 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2182 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2183 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2184 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2185 
2186 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2187 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2188 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2189 		val |= BCM5708S_UP1_2G5;
2190 	else
2191 		val &= ~BCM5708S_UP1_2G5;
2192 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2193 
2194 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2195 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2196 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2197 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2198 
2199 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2200 
2201 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2202 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2203 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2204 
2205 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2206 
2207 	return 0;
2208 }
2209 
2210 static int
2211 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2212 {
2213 	u32 val;
2214 
2215 	if (reset_phy)
2216 		bnx2_reset_phy(bp);
2217 
2218 	bp->mii_up1 = BCM5708S_UP1;
2219 
2220 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2221 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2222 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2223 
2224 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2225 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2226 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2227 
2228 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2229 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2230 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2231 
2232 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2233 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2234 		val |= BCM5708S_UP1_2G5;
2235 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2236 	}
2237 
2238 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2239 	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2240 	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2241 		/* increase tx signal amplitude */
2242 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2243 			       BCM5708S_BLK_ADDR_TX_MISC);
2244 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2245 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2246 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2247 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2248 	}
2249 
2250 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2251 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2252 
2253 	if (val) {
2254 		u32 is_backplane;
2255 
2256 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2257 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2258 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2259 				       BCM5708S_BLK_ADDR_TX_MISC);
2260 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2261 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2262 				       BCM5708S_BLK_ADDR_DIG);
2263 		}
2264 	}
2265 	return 0;
2266 }
2267 
2268 static int
2269 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2270 {
2271 	if (reset_phy)
2272 		bnx2_reset_phy(bp);
2273 
2274 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2275 
2276 	if (CHIP_NUM(bp) == CHIP_NUM_5706)
2277         	REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2278 
2279 	if (bp->dev->mtu > 1500) {
2280 		u32 val;
2281 
2282 		/* Set extended packet length bit */
2283 		bnx2_write_phy(bp, 0x18, 0x7);
2284 		bnx2_read_phy(bp, 0x18, &val);
2285 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2286 
2287 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2288 		bnx2_read_phy(bp, 0x1c, &val);
2289 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2290 	}
2291 	else {
2292 		u32 val;
2293 
2294 		bnx2_write_phy(bp, 0x18, 0x7);
2295 		bnx2_read_phy(bp, 0x18, &val);
2296 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2297 
2298 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2299 		bnx2_read_phy(bp, 0x1c, &val);
2300 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2301 	}
2302 
2303 	return 0;
2304 }
2305 
2306 static int
2307 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2308 {
2309 	u32 val;
2310 
2311 	if (reset_phy)
2312 		bnx2_reset_phy(bp);
2313 
2314 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2315 		bnx2_write_phy(bp, 0x18, 0x0c00);
2316 		bnx2_write_phy(bp, 0x17, 0x000a);
2317 		bnx2_write_phy(bp, 0x15, 0x310b);
2318 		bnx2_write_phy(bp, 0x17, 0x201f);
2319 		bnx2_write_phy(bp, 0x15, 0x9506);
2320 		bnx2_write_phy(bp, 0x17, 0x401f);
2321 		bnx2_write_phy(bp, 0x15, 0x14e2);
2322 		bnx2_write_phy(bp, 0x18, 0x0400);
2323 	}
2324 
2325 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2326 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2327 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2328 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2329 		val &= ~(1 << 8);
2330 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2331 	}
2332 
2333 	if (bp->dev->mtu > 1500) {
2334 		/* Set extended packet length bit */
2335 		bnx2_write_phy(bp, 0x18, 0x7);
2336 		bnx2_read_phy(bp, 0x18, &val);
2337 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2338 
2339 		bnx2_read_phy(bp, 0x10, &val);
2340 		bnx2_write_phy(bp, 0x10, val | 0x1);
2341 	}
2342 	else {
2343 		bnx2_write_phy(bp, 0x18, 0x7);
2344 		bnx2_read_phy(bp, 0x18, &val);
2345 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2346 
2347 		bnx2_read_phy(bp, 0x10, &val);
2348 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2349 	}
2350 
2351 	/* ethernet@wirespeed */
2352 	bnx2_write_phy(bp, 0x18, 0x7007);
2353 	bnx2_read_phy(bp, 0x18, &val);
2354 	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2355 	return 0;
2356 }
2357 
2358 
2359 static int
2360 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2361 __releases(&bp->phy_lock)
2362 __acquires(&bp->phy_lock)
2363 {
2364 	u32 val;
2365 	int rc = 0;
2366 
2367 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2368 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2369 
2370 	bp->mii_bmcr = MII_BMCR;
2371 	bp->mii_bmsr = MII_BMSR;
2372 	bp->mii_bmsr1 = MII_BMSR;
2373 	bp->mii_adv = MII_ADVERTISE;
2374 	bp->mii_lpa = MII_LPA;
2375 
2376         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2377 
2378 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2379 		goto setup_phy;
2380 
2381 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2382 	bp->phy_id = val << 16;
2383 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2384 	bp->phy_id |= val & 0xffff;
2385 
2386 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2387 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
2388 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2389 		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2390 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2391 		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2392 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2393 	}
2394 	else {
2395 		rc = bnx2_init_copper_phy(bp, reset_phy);
2396 	}
2397 
2398 setup_phy:
2399 	if (!rc)
2400 		rc = bnx2_setup_phy(bp, bp->phy_port);
2401 
2402 	return rc;
2403 }
2404 
2405 static int
2406 bnx2_set_mac_loopback(struct bnx2 *bp)
2407 {
2408 	u32 mac_mode;
2409 
2410 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2411 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2412 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2413 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2414 	bp->link_up = 1;
2415 	return 0;
2416 }
2417 
2418 static int bnx2_test_link(struct bnx2 *);
2419 
2420 static int
2421 bnx2_set_phy_loopback(struct bnx2 *bp)
2422 {
2423 	u32 mac_mode;
2424 	int rc, i;
2425 
2426 	spin_lock_bh(&bp->phy_lock);
2427 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2428 			    BMCR_SPEED1000);
2429 	spin_unlock_bh(&bp->phy_lock);
2430 	if (rc)
2431 		return rc;
2432 
2433 	for (i = 0; i < 10; i++) {
2434 		if (bnx2_test_link(bp) == 0)
2435 			break;
2436 		msleep(100);
2437 	}
2438 
2439 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2440 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2441 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2442 		      BNX2_EMAC_MODE_25G_MODE);
2443 
2444 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2445 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2446 	bp->link_up = 1;
2447 	return 0;
2448 }
2449 
2450 static void
2451 bnx2_dump_mcp_state(struct bnx2 *bp)
2452 {
2453 	struct net_device *dev = bp->dev;
2454 	u32 mcp_p0, mcp_p1;
2455 
2456 	netdev_err(dev, "<--- start MCP states dump --->\n");
2457 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2458 		mcp_p0 = BNX2_MCP_STATE_P0;
2459 		mcp_p1 = BNX2_MCP_STATE_P1;
2460 	} else {
2461 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2462 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2463 	}
2464 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2465 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2466 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2467 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2468 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2469 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2470 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2471 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2472 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2473 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2474 	netdev_err(dev, "DEBUG: shmem states:\n");
2475 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2476 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2477 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2478 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2479 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2480 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2481 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2482 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2483 	pr_cont(" condition[%08x]\n",
2484 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2485 	DP_SHMEM_LINE(bp, 0x3cc);
2486 	DP_SHMEM_LINE(bp, 0x3dc);
2487 	DP_SHMEM_LINE(bp, 0x3ec);
2488 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2489 	netdev_err(dev, "<--- end MCP states dump --->\n");
2490 }
2491 
2492 static int
2493 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2494 {
2495 	int i;
2496 	u32 val;
2497 
2498 	bp->fw_wr_seq++;
2499 	msg_data |= bp->fw_wr_seq;
2500 
2501 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2502 
2503 	if (!ack)
2504 		return 0;
2505 
2506 	/* wait for an acknowledgement. */
2507 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2508 		msleep(10);
2509 
2510 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2511 
2512 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2513 			break;
2514 	}
2515 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2516 		return 0;
2517 
2518 	/* If we timed out, inform the firmware that this is the case. */
2519 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2520 		msg_data &= ~BNX2_DRV_MSG_CODE;
2521 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2522 
2523 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2524 		if (!silent) {
2525 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2526 			bnx2_dump_mcp_state(bp);
2527 		}
2528 
2529 		return -EBUSY;
2530 	}
2531 
2532 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2533 		return -EIO;
2534 
2535 	return 0;
2536 }
2537 
2538 static int
2539 bnx2_init_5709_context(struct bnx2 *bp)
2540 {
2541 	int i, ret = 0;
2542 	u32 val;
2543 
2544 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2545 	val |= (BCM_PAGE_BITS - 8) << 16;
2546 	REG_WR(bp, BNX2_CTX_COMMAND, val);
2547 	for (i = 0; i < 10; i++) {
2548 		val = REG_RD(bp, BNX2_CTX_COMMAND);
2549 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2550 			break;
2551 		udelay(2);
2552 	}
2553 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2554 		return -EBUSY;
2555 
2556 	for (i = 0; i < bp->ctx_pages; i++) {
2557 		int j;
2558 
2559 		if (bp->ctx_blk[i])
2560 			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2561 		else
2562 			return -ENOMEM;
2563 
2564 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2565 		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
2566 		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2567 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2568 		       (u64) bp->ctx_blk_mapping[i] >> 32);
2569 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2570 		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2571 		for (j = 0; j < 10; j++) {
2572 
2573 			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2574 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2575 				break;
2576 			udelay(5);
2577 		}
2578 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2579 			ret = -EBUSY;
2580 			break;
2581 		}
2582 	}
2583 	return ret;
2584 }
2585 
2586 static void
2587 bnx2_init_context(struct bnx2 *bp)
2588 {
2589 	u32 vcid;
2590 
2591 	vcid = 96;
2592 	while (vcid) {
2593 		u32 vcid_addr, pcid_addr, offset;
2594 		int i;
2595 
2596 		vcid--;
2597 
2598 		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2599 			u32 new_vcid;
2600 
2601 			vcid_addr = GET_PCID_ADDR(vcid);
2602 			if (vcid & 0x8) {
2603 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2604 			}
2605 			else {
2606 				new_vcid = vcid;
2607 			}
2608 			pcid_addr = GET_PCID_ADDR(new_vcid);
2609 		}
2610 		else {
2611 	    		vcid_addr = GET_CID_ADDR(vcid);
2612 			pcid_addr = vcid_addr;
2613 		}
2614 
2615 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2616 			vcid_addr += (i << PHY_CTX_SHIFT);
2617 			pcid_addr += (i << PHY_CTX_SHIFT);
2618 
2619 			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2620 			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2621 
2622 			/* Zero out the context. */
2623 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2624 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2625 		}
2626 	}
2627 }
2628 
2629 static int
2630 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2631 {
2632 	u16 *good_mbuf;
2633 	u32 good_mbuf_cnt;
2634 	u32 val;
2635 
2636 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2637 	if (good_mbuf == NULL) {
2638 		pr_err("Failed to allocate memory in %s\n", __func__);
2639 		return -ENOMEM;
2640 	}
2641 
2642 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2643 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2644 
2645 	good_mbuf_cnt = 0;
2646 
2647 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2648 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2649 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2650 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2651 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2652 
2653 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2654 
2655 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2656 
2657 		/* The addresses with Bit 9 set are bad memory blocks. */
2658 		if (!(val & (1 << 9))) {
2659 			good_mbuf[good_mbuf_cnt] = (u16) val;
2660 			good_mbuf_cnt++;
2661 		}
2662 
2663 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2664 	}
2665 
2666 	/* Free the good ones back to the mbuf pool thus discarding
2667 	 * all the bad ones. */
2668 	while (good_mbuf_cnt) {
2669 		good_mbuf_cnt--;
2670 
2671 		val = good_mbuf[good_mbuf_cnt];
2672 		val = (val << 9) | val | 1;
2673 
2674 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2675 	}
2676 	kfree(good_mbuf);
2677 	return 0;
2678 }
2679 
2680 static void
2681 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2682 {
2683 	u32 val;
2684 
2685 	val = (mac_addr[0] << 8) | mac_addr[1];
2686 
2687 	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2688 
2689 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2690 		(mac_addr[4] << 8) | mac_addr[5];
2691 
2692 	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2693 }
2694 
2695 static inline int
2696 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2697 {
2698 	dma_addr_t mapping;
2699 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2700 	struct rx_bd *rxbd =
2701 		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2702 	struct page *page = alloc_page(gfp);
2703 
2704 	if (!page)
2705 		return -ENOMEM;
2706 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2707 			       PCI_DMA_FROMDEVICE);
2708 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2709 		__free_page(page);
2710 		return -EIO;
2711 	}
2712 
2713 	rx_pg->page = page;
2714 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2715 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2716 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2717 	return 0;
2718 }
2719 
2720 static void
2721 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2722 {
2723 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2724 	struct page *page = rx_pg->page;
2725 
2726 	if (!page)
2727 		return;
2728 
2729 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2730 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2731 
2732 	__free_page(page);
2733 	rx_pg->page = NULL;
2734 }
2735 
2736 static inline int
2737 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2738 {
2739 	struct sk_buff *skb;
2740 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2741 	dma_addr_t mapping;
2742 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2743 	unsigned long align;
2744 
2745 	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2746 	if (skb == NULL) {
2747 		return -ENOMEM;
2748 	}
2749 
2750 	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2751 		skb_reserve(skb, BNX2_RX_ALIGN - align);
2752 
2753 	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2754 				 PCI_DMA_FROMDEVICE);
2755 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2756 		dev_kfree_skb(skb);
2757 		return -EIO;
2758 	}
2759 
2760 	rx_buf->skb = skb;
2761 	rx_buf->desc = (struct l2_fhdr *) skb->data;
2762 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2763 
2764 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2765 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2766 
2767 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2768 
2769 	return 0;
2770 }
2771 
2772 static int
2773 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2774 {
2775 	struct status_block *sblk = bnapi->status_blk.msi;
2776 	u32 new_link_state, old_link_state;
2777 	int is_set = 1;
2778 
2779 	new_link_state = sblk->status_attn_bits & event;
2780 	old_link_state = sblk->status_attn_bits_ack & event;
2781 	if (new_link_state != old_link_state) {
2782 		if (new_link_state)
2783 			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2784 		else
2785 			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2786 	} else
2787 		is_set = 0;
2788 
2789 	return is_set;
2790 }
2791 
2792 static void
2793 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2794 {
2795 	spin_lock(&bp->phy_lock);
2796 
2797 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2798 		bnx2_set_link(bp);
2799 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2800 		bnx2_set_remote_link(bp);
2801 
2802 	spin_unlock(&bp->phy_lock);
2803 
2804 }
2805 
2806 static inline u16
2807 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2808 {
2809 	u16 cons;
2810 
2811 	/* Tell compiler that status block fields can change. */
2812 	barrier();
2813 	cons = *bnapi->hw_tx_cons_ptr;
2814 	barrier();
2815 	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2816 		cons++;
2817 	return cons;
2818 }
2819 
2820 static int
2821 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2822 {
2823 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2824 	u16 hw_cons, sw_cons, sw_ring_cons;
2825 	int tx_pkt = 0, index;
2826 	struct netdev_queue *txq;
2827 
2828 	index = (bnapi - bp->bnx2_napi);
2829 	txq = netdev_get_tx_queue(bp->dev, index);
2830 
2831 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2832 	sw_cons = txr->tx_cons;
2833 
2834 	while (sw_cons != hw_cons) {
2835 		struct sw_tx_bd *tx_buf;
2836 		struct sk_buff *skb;
2837 		int i, last;
2838 
2839 		sw_ring_cons = TX_RING_IDX(sw_cons);
2840 
2841 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2842 		skb = tx_buf->skb;
2843 
2844 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2845 		prefetch(&skb->end);
2846 
2847 		/* partial BD completions possible with TSO packets */
2848 		if (tx_buf->is_gso) {
2849 			u16 last_idx, last_ring_idx;
2850 
2851 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2852 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2853 			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2854 				last_idx++;
2855 			}
2856 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2857 				break;
2858 			}
2859 		}
2860 
2861 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2862 			skb_headlen(skb), PCI_DMA_TODEVICE);
2863 
2864 		tx_buf->skb = NULL;
2865 		last = tx_buf->nr_frags;
2866 
2867 		for (i = 0; i < last; i++) {
2868 			sw_cons = NEXT_TX_BD(sw_cons);
2869 
2870 			dma_unmap_page(&bp->pdev->dev,
2871 				dma_unmap_addr(
2872 					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2873 					mapping),
2874 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2875 				PCI_DMA_TODEVICE);
2876 		}
2877 
2878 		sw_cons = NEXT_TX_BD(sw_cons);
2879 
2880 		dev_kfree_skb(skb);
2881 		tx_pkt++;
2882 		if (tx_pkt == budget)
2883 			break;
2884 
2885 		if (hw_cons == sw_cons)
2886 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2887 	}
2888 
2889 	txr->hw_tx_cons = hw_cons;
2890 	txr->tx_cons = sw_cons;
2891 
2892 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2893 	 * before checking for netif_tx_queue_stopped().  Without the
2894 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2895 	 * will miss it and cause the queue to be stopped forever.
2896 	 */
2897 	smp_mb();
2898 
2899 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2900 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2901 		__netif_tx_lock(txq, smp_processor_id());
2902 		if ((netif_tx_queue_stopped(txq)) &&
2903 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2904 			netif_tx_wake_queue(txq);
2905 		__netif_tx_unlock(txq);
2906 	}
2907 
2908 	return tx_pkt;
2909 }
2910 
2911 static void
2912 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2913 			struct sk_buff *skb, int count)
2914 {
2915 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
2916 	struct rx_bd *cons_bd, *prod_bd;
2917 	int i;
2918 	u16 hw_prod, prod;
2919 	u16 cons = rxr->rx_pg_cons;
2920 
2921 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2922 
2923 	/* The caller was unable to allocate a new page to replace the
2924 	 * last one in the frags array, so we need to recycle that page
2925 	 * and then free the skb.
2926 	 */
2927 	if (skb) {
2928 		struct page *page;
2929 		struct skb_shared_info *shinfo;
2930 
2931 		shinfo = skb_shinfo(skb);
2932 		shinfo->nr_frags--;
2933 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2934 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2935 
2936 		cons_rx_pg->page = page;
2937 		dev_kfree_skb(skb);
2938 	}
2939 
2940 	hw_prod = rxr->rx_pg_prod;
2941 
2942 	for (i = 0; i < count; i++) {
2943 		prod = RX_PG_RING_IDX(hw_prod);
2944 
2945 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2946 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2947 		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2948 		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2949 
2950 		if (prod != cons) {
2951 			prod_rx_pg->page = cons_rx_pg->page;
2952 			cons_rx_pg->page = NULL;
2953 			dma_unmap_addr_set(prod_rx_pg, mapping,
2954 				dma_unmap_addr(cons_rx_pg, mapping));
2955 
2956 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2957 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2958 
2959 		}
2960 		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2961 		hw_prod = NEXT_RX_BD(hw_prod);
2962 	}
2963 	rxr->rx_pg_prod = hw_prod;
2964 	rxr->rx_pg_cons = cons;
2965 }
2966 
2967 static inline void
2968 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2969 		  struct sk_buff *skb, u16 cons, u16 prod)
2970 {
2971 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2972 	struct rx_bd *cons_bd, *prod_bd;
2973 
2974 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2975 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2976 
2977 	dma_sync_single_for_device(&bp->pdev->dev,
2978 		dma_unmap_addr(cons_rx_buf, mapping),
2979 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2980 
2981 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2982 
2983 	prod_rx_buf->skb = skb;
2984 	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2985 
2986 	if (cons == prod)
2987 		return;
2988 
2989 	dma_unmap_addr_set(prod_rx_buf, mapping,
2990 			dma_unmap_addr(cons_rx_buf, mapping));
2991 
2992 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2993 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2994 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2995 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2996 }
2997 
2998 static int
2999 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3000 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3001 	    u32 ring_idx)
3002 {
3003 	int err;
3004 	u16 prod = ring_idx & 0xffff;
3005 
3006 	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
3007 	if (unlikely(err)) {
3008 		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
3009 		if (hdr_len) {
3010 			unsigned int raw_len = len + 4;
3011 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3012 
3013 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3014 		}
3015 		return err;
3016 	}
3017 
3018 	skb_reserve(skb, BNX2_RX_OFFSET);
3019 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3020 			 PCI_DMA_FROMDEVICE);
3021 
3022 	if (hdr_len == 0) {
3023 		skb_put(skb, len);
3024 		return 0;
3025 	} else {
3026 		unsigned int i, frag_len, frag_size, pages;
3027 		struct sw_pg *rx_pg;
3028 		u16 pg_cons = rxr->rx_pg_cons;
3029 		u16 pg_prod = rxr->rx_pg_prod;
3030 
3031 		frag_size = len + 4 - hdr_len;
3032 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3033 		skb_put(skb, hdr_len);
3034 
3035 		for (i = 0; i < pages; i++) {
3036 			dma_addr_t mapping_old;
3037 
3038 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3039 			if (unlikely(frag_len <= 4)) {
3040 				unsigned int tail = 4 - frag_len;
3041 
3042 				rxr->rx_pg_cons = pg_cons;
3043 				rxr->rx_pg_prod = pg_prod;
3044 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3045 							pages - i);
3046 				skb->len -= tail;
3047 				if (i == 0) {
3048 					skb->tail -= tail;
3049 				} else {
3050 					skb_frag_t *frag =
3051 						&skb_shinfo(skb)->frags[i - 1];
3052 					skb_frag_size_sub(frag, tail);
3053 					skb->data_len -= tail;
3054 				}
3055 				return 0;
3056 			}
3057 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3058 
3059 			/* Don't unmap yet.  If we're unable to allocate a new
3060 			 * page, we need to recycle the page and the DMA addr.
3061 			 */
3062 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3063 			if (i == pages - 1)
3064 				frag_len -= 4;
3065 
3066 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3067 			rx_pg->page = NULL;
3068 
3069 			err = bnx2_alloc_rx_page(bp, rxr,
3070 						 RX_PG_RING_IDX(pg_prod),
3071 						 GFP_ATOMIC);
3072 			if (unlikely(err)) {
3073 				rxr->rx_pg_cons = pg_cons;
3074 				rxr->rx_pg_prod = pg_prod;
3075 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3076 							pages - i);
3077 				return err;
3078 			}
3079 
3080 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3081 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3082 
3083 			frag_size -= frag_len;
3084 			skb->data_len += frag_len;
3085 			skb->truesize += PAGE_SIZE;
3086 			skb->len += frag_len;
3087 
3088 			pg_prod = NEXT_RX_BD(pg_prod);
3089 			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3090 		}
3091 		rxr->rx_pg_prod = pg_prod;
3092 		rxr->rx_pg_cons = pg_cons;
3093 	}
3094 	return 0;
3095 }
3096 
3097 static inline u16
3098 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3099 {
3100 	u16 cons;
3101 
3102 	/* Tell compiler that status block fields can change. */
3103 	barrier();
3104 	cons = *bnapi->hw_rx_cons_ptr;
3105 	barrier();
3106 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3107 		cons++;
3108 	return cons;
3109 }
3110 
3111 static int
3112 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3113 {
3114 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3115 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3116 	struct l2_fhdr *rx_hdr;
3117 	int rx_pkt = 0, pg_ring_used = 0;
3118 
3119 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3120 	sw_cons = rxr->rx_cons;
3121 	sw_prod = rxr->rx_prod;
3122 
3123 	/* Memory barrier necessary as speculative reads of the rx
3124 	 * buffer can be ahead of the index in the status block
3125 	 */
3126 	rmb();
3127 	while (sw_cons != hw_cons) {
3128 		unsigned int len, hdr_len;
3129 		u32 status;
3130 		struct sw_bd *rx_buf, *next_rx_buf;
3131 		struct sk_buff *skb;
3132 		dma_addr_t dma_addr;
3133 
3134 		sw_ring_cons = RX_RING_IDX(sw_cons);
3135 		sw_ring_prod = RX_RING_IDX(sw_prod);
3136 
3137 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3138 		skb = rx_buf->skb;
3139 		prefetchw(skb);
3140 
3141 		next_rx_buf =
3142 			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3143 		prefetch(next_rx_buf->desc);
3144 
3145 		rx_buf->skb = NULL;
3146 
3147 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3148 
3149 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3150 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3151 			PCI_DMA_FROMDEVICE);
3152 
3153 		rx_hdr = rx_buf->desc;
3154 		len = rx_hdr->l2_fhdr_pkt_len;
3155 		status = rx_hdr->l2_fhdr_status;
3156 
3157 		hdr_len = 0;
3158 		if (status & L2_FHDR_STATUS_SPLIT) {
3159 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3160 			pg_ring_used = 1;
3161 		} else if (len > bp->rx_jumbo_thresh) {
3162 			hdr_len = bp->rx_jumbo_thresh;
3163 			pg_ring_used = 1;
3164 		}
3165 
3166 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3167 				       L2_FHDR_ERRORS_PHY_DECODE |
3168 				       L2_FHDR_ERRORS_ALIGNMENT |
3169 				       L2_FHDR_ERRORS_TOO_SHORT |
3170 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3171 
3172 			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3173 					  sw_ring_prod);
3174 			if (pg_ring_used) {
3175 				int pages;
3176 
3177 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3178 
3179 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3180 			}
3181 			goto next_rx;
3182 		}
3183 
3184 		len -= 4;
3185 
3186 		if (len <= bp->rx_copy_thresh) {
3187 			struct sk_buff *new_skb;
3188 
3189 			new_skb = netdev_alloc_skb(bp->dev, len + 6);
3190 			if (new_skb == NULL) {
3191 				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3192 						  sw_ring_prod);
3193 				goto next_rx;
3194 			}
3195 
3196 			/* aligned copy */
3197 			skb_copy_from_linear_data_offset(skb,
3198 							 BNX2_RX_OFFSET - 6,
3199 				      new_skb->data, len + 6);
3200 			skb_reserve(new_skb, 6);
3201 			skb_put(new_skb, len);
3202 
3203 			bnx2_reuse_rx_skb(bp, rxr, skb,
3204 				sw_ring_cons, sw_ring_prod);
3205 
3206 			skb = new_skb;
3207 		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3208 			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3209 			goto next_rx;
3210 
3211 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3212 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3213 			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3214 
3215 		skb->protocol = eth_type_trans(skb, bp->dev);
3216 
3217 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3218 			(ntohs(skb->protocol) != 0x8100)) {
3219 
3220 			dev_kfree_skb(skb);
3221 			goto next_rx;
3222 
3223 		}
3224 
3225 		skb_checksum_none_assert(skb);
3226 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3227 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3228 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3229 
3230 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3231 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3232 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3233 		}
3234 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3235 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3236 		     L2_FHDR_STATUS_USE_RXHASH))
3237 			skb->rxhash = rx_hdr->l2_fhdr_hash;
3238 
3239 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3240 		napi_gro_receive(&bnapi->napi, skb);
3241 		rx_pkt++;
3242 
3243 next_rx:
3244 		sw_cons = NEXT_RX_BD(sw_cons);
3245 		sw_prod = NEXT_RX_BD(sw_prod);
3246 
3247 		if ((rx_pkt == budget))
3248 			break;
3249 
3250 		/* Refresh hw_cons to see if there is new work */
3251 		if (sw_cons == hw_cons) {
3252 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3253 			rmb();
3254 		}
3255 	}
3256 	rxr->rx_cons = sw_cons;
3257 	rxr->rx_prod = sw_prod;
3258 
3259 	if (pg_ring_used)
3260 		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3261 
3262 	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3263 
3264 	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3265 
3266 	mmiowb();
3267 
3268 	return rx_pkt;
3269 
3270 }
3271 
3272 /* MSI ISR - The only difference between this and the INTx ISR
3273  * is that the MSI interrupt is always serviced.
3274  */
3275 static irqreturn_t
3276 bnx2_msi(int irq, void *dev_instance)
3277 {
3278 	struct bnx2_napi *bnapi = dev_instance;
3279 	struct bnx2 *bp = bnapi->bp;
3280 
3281 	prefetch(bnapi->status_blk.msi);
3282 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3283 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3284 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3285 
3286 	/* Return here if interrupt is disabled. */
3287 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3288 		return IRQ_HANDLED;
3289 
3290 	napi_schedule(&bnapi->napi);
3291 
3292 	return IRQ_HANDLED;
3293 }
3294 
3295 static irqreturn_t
3296 bnx2_msi_1shot(int irq, void *dev_instance)
3297 {
3298 	struct bnx2_napi *bnapi = dev_instance;
3299 	struct bnx2 *bp = bnapi->bp;
3300 
3301 	prefetch(bnapi->status_blk.msi);
3302 
3303 	/* Return here if interrupt is disabled. */
3304 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3305 		return IRQ_HANDLED;
3306 
3307 	napi_schedule(&bnapi->napi);
3308 
3309 	return IRQ_HANDLED;
3310 }
3311 
3312 static irqreturn_t
3313 bnx2_interrupt(int irq, void *dev_instance)
3314 {
3315 	struct bnx2_napi *bnapi = dev_instance;
3316 	struct bnx2 *bp = bnapi->bp;
3317 	struct status_block *sblk = bnapi->status_blk.msi;
3318 
3319 	/* When using INTx, it is possible for the interrupt to arrive
3320 	 * at the CPU before the status block posted prior to the
3321 	 * interrupt. Reading a register will flush the status block.
3322 	 * When using MSI, the MSI message will always complete after
3323 	 * the status block write.
3324 	 */
3325 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3326 	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3327 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3328 		return IRQ_NONE;
3329 
3330 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3331 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3332 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3333 
3334 	/* Read back to deassert IRQ immediately to avoid too many
3335 	 * spurious interrupts.
3336 	 */
3337 	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3338 
3339 	/* Return here if interrupt is shared and is disabled. */
3340 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3341 		return IRQ_HANDLED;
3342 
3343 	if (napi_schedule_prep(&bnapi->napi)) {
3344 		bnapi->last_status_idx = sblk->status_idx;
3345 		__napi_schedule(&bnapi->napi);
3346 	}
3347 
3348 	return IRQ_HANDLED;
3349 }
3350 
3351 static inline int
3352 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3353 {
3354 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3355 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3356 
3357 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3358 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3359 		return 1;
3360 	return 0;
3361 }
3362 
3363 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3364 				 STATUS_ATTN_BITS_TIMER_ABORT)
3365 
3366 static inline int
3367 bnx2_has_work(struct bnx2_napi *bnapi)
3368 {
3369 	struct status_block *sblk = bnapi->status_blk.msi;
3370 
3371 	if (bnx2_has_fast_work(bnapi))
3372 		return 1;
3373 
3374 #ifdef BCM_CNIC
3375 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3376 		return 1;
3377 #endif
3378 
3379 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3380 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3381 		return 1;
3382 
3383 	return 0;
3384 }
3385 
3386 static void
3387 bnx2_chk_missed_msi(struct bnx2 *bp)
3388 {
3389 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3390 	u32 msi_ctrl;
3391 
3392 	if (bnx2_has_work(bnapi)) {
3393 		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3394 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3395 			return;
3396 
3397 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3398 			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3399 			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3400 			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3401 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3402 		}
3403 	}
3404 
3405 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3406 }
3407 
3408 #ifdef BCM_CNIC
3409 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3410 {
3411 	struct cnic_ops *c_ops;
3412 
3413 	if (!bnapi->cnic_present)
3414 		return;
3415 
3416 	rcu_read_lock();
3417 	c_ops = rcu_dereference(bp->cnic_ops);
3418 	if (c_ops)
3419 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3420 						      bnapi->status_blk.msi);
3421 	rcu_read_unlock();
3422 }
3423 #endif
3424 
3425 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3426 {
3427 	struct status_block *sblk = bnapi->status_blk.msi;
3428 	u32 status_attn_bits = sblk->status_attn_bits;
3429 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3430 
3431 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3432 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3433 
3434 		bnx2_phy_int(bp, bnapi);
3435 
3436 		/* This is needed to take care of transient status
3437 		 * during link changes.
3438 		 */
3439 		REG_WR(bp, BNX2_HC_COMMAND,
3440 		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3441 		REG_RD(bp, BNX2_HC_COMMAND);
3442 	}
3443 }
3444 
3445 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3446 			  int work_done, int budget)
3447 {
3448 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3449 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3450 
3451 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3452 		bnx2_tx_int(bp, bnapi, 0);
3453 
3454 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3455 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3456 
3457 	return work_done;
3458 }
3459 
3460 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3461 {
3462 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3463 	struct bnx2 *bp = bnapi->bp;
3464 	int work_done = 0;
3465 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3466 
3467 	while (1) {
3468 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3469 		if (unlikely(work_done >= budget))
3470 			break;
3471 
3472 		bnapi->last_status_idx = sblk->status_idx;
3473 		/* status idx must be read before checking for more work. */
3474 		rmb();
3475 		if (likely(!bnx2_has_fast_work(bnapi))) {
3476 
3477 			napi_complete(napi);
3478 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3479 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3480 			       bnapi->last_status_idx);
3481 			break;
3482 		}
3483 	}
3484 	return work_done;
3485 }
3486 
3487 static int bnx2_poll(struct napi_struct *napi, int budget)
3488 {
3489 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3490 	struct bnx2 *bp = bnapi->bp;
3491 	int work_done = 0;
3492 	struct status_block *sblk = bnapi->status_blk.msi;
3493 
3494 	while (1) {
3495 		bnx2_poll_link(bp, bnapi);
3496 
3497 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3498 
3499 #ifdef BCM_CNIC
3500 		bnx2_poll_cnic(bp, bnapi);
3501 #endif
3502 
3503 		/* bnapi->last_status_idx is used below to tell the hw how
3504 		 * much work has been processed, so we must read it before
3505 		 * checking for more work.
3506 		 */
3507 		bnapi->last_status_idx = sblk->status_idx;
3508 
3509 		if (unlikely(work_done >= budget))
3510 			break;
3511 
3512 		rmb();
3513 		if (likely(!bnx2_has_work(bnapi))) {
3514 			napi_complete(napi);
3515 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3516 				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3517 				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3518 				       bnapi->last_status_idx);
3519 				break;
3520 			}
3521 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3522 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3523 			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3524 			       bnapi->last_status_idx);
3525 
3526 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3527 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3528 			       bnapi->last_status_idx);
3529 			break;
3530 		}
3531 	}
3532 
3533 	return work_done;
3534 }
3535 
3536 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3537  * from set_multicast.
3538  */
3539 static void
3540 bnx2_set_rx_mode(struct net_device *dev)
3541 {
3542 	struct bnx2 *bp = netdev_priv(dev);
3543 	u32 rx_mode, sort_mode;
3544 	struct netdev_hw_addr *ha;
3545 	int i;
3546 
3547 	if (!netif_running(dev))
3548 		return;
3549 
3550 	spin_lock_bh(&bp->phy_lock);
3551 
3552 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3553 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3554 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3555 	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3556 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3557 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3558 	if (dev->flags & IFF_PROMISC) {
3559 		/* Promiscuous mode. */
3560 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3561 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3562 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3563 	}
3564 	else if (dev->flags & IFF_ALLMULTI) {
3565 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567 			       0xffffffff);
3568         	}
3569 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3570 	}
3571 	else {
3572 		/* Accept one or more multicast(s). */
3573 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3574 		u32 regidx;
3575 		u32 bit;
3576 		u32 crc;
3577 
3578 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3579 
3580 		netdev_for_each_mc_addr(ha, dev) {
3581 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3582 			bit = crc & 0xff;
3583 			regidx = (bit & 0xe0) >> 5;
3584 			bit &= 0x1f;
3585 			mc_filter[regidx] |= (1 << bit);
3586 		}
3587 
3588 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3589 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3590 			       mc_filter[i]);
3591 		}
3592 
3593 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3594 	}
3595 
3596 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3597 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3598 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3599 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3600 	} else if (!(dev->flags & IFF_PROMISC)) {
3601 		/* Add all entries into to the match filter list */
3602 		i = 0;
3603 		netdev_for_each_uc_addr(ha, dev) {
3604 			bnx2_set_mac_addr(bp, ha->addr,
3605 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3606 			sort_mode |= (1 <<
3607 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3608 			i++;
3609 		}
3610 
3611 	}
3612 
3613 	if (rx_mode != bp->rx_mode) {
3614 		bp->rx_mode = rx_mode;
3615 		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3616 	}
3617 
3618 	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3619 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3620 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3621 
3622 	spin_unlock_bh(&bp->phy_lock);
3623 }
3624 
3625 static int
3626 check_fw_section(const struct firmware *fw,
3627 		 const struct bnx2_fw_file_section *section,
3628 		 u32 alignment, bool non_empty)
3629 {
3630 	u32 offset = be32_to_cpu(section->offset);
3631 	u32 len = be32_to_cpu(section->len);
3632 
3633 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3634 		return -EINVAL;
3635 	if ((non_empty && len == 0) || len > fw->size - offset ||
3636 	    len & (alignment - 1))
3637 		return -EINVAL;
3638 	return 0;
3639 }
3640 
3641 static int
3642 check_mips_fw_entry(const struct firmware *fw,
3643 		    const struct bnx2_mips_fw_file_entry *entry)
3644 {
3645 	if (check_fw_section(fw, &entry->text, 4, true) ||
3646 	    check_fw_section(fw, &entry->data, 4, false) ||
3647 	    check_fw_section(fw, &entry->rodata, 4, false))
3648 		return -EINVAL;
3649 	return 0;
3650 }
3651 
3652 static void bnx2_release_firmware(struct bnx2 *bp)
3653 {
3654 	if (bp->rv2p_firmware) {
3655 		release_firmware(bp->mips_firmware);
3656 		release_firmware(bp->rv2p_firmware);
3657 		bp->rv2p_firmware = NULL;
3658 	}
3659 }
3660 
3661 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3662 {
3663 	const char *mips_fw_file, *rv2p_fw_file;
3664 	const struct bnx2_mips_fw_file *mips_fw;
3665 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3666 	int rc;
3667 
3668 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3669 		mips_fw_file = FW_MIPS_FILE_09;
3670 		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3671 		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3672 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3673 		else
3674 			rv2p_fw_file = FW_RV2P_FILE_09;
3675 	} else {
3676 		mips_fw_file = FW_MIPS_FILE_06;
3677 		rv2p_fw_file = FW_RV2P_FILE_06;
3678 	}
3679 
3680 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3681 	if (rc) {
3682 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3683 		goto out;
3684 	}
3685 
3686 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3687 	if (rc) {
3688 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3689 		goto err_release_mips_firmware;
3690 	}
3691 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3692 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3693 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3694 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3695 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3696 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3697 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3698 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3699 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3700 		rc = -EINVAL;
3701 		goto err_release_firmware;
3702 	}
3703 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3704 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3705 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3706 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3707 		rc = -EINVAL;
3708 		goto err_release_firmware;
3709 	}
3710 out:
3711 	return rc;
3712 
3713 err_release_firmware:
3714 	release_firmware(bp->rv2p_firmware);
3715 	bp->rv2p_firmware = NULL;
3716 err_release_mips_firmware:
3717 	release_firmware(bp->mips_firmware);
3718 	goto out;
3719 }
3720 
3721 static int bnx2_request_firmware(struct bnx2 *bp)
3722 {
3723 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3724 }
3725 
3726 static u32
3727 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3728 {
3729 	switch (idx) {
3730 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3731 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3732 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3733 		break;
3734 	}
3735 	return rv2p_code;
3736 }
3737 
3738 static int
3739 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3740 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3741 {
3742 	u32 rv2p_code_len, file_offset;
3743 	__be32 *rv2p_code;
3744 	int i;
3745 	u32 val, cmd, addr;
3746 
3747 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3748 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3749 
3750 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3751 
3752 	if (rv2p_proc == RV2P_PROC1) {
3753 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3754 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3755 	} else {
3756 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3757 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3758 	}
3759 
3760 	for (i = 0; i < rv2p_code_len; i += 8) {
3761 		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3762 		rv2p_code++;
3763 		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3764 		rv2p_code++;
3765 
3766 		val = (i / 8) | cmd;
3767 		REG_WR(bp, addr, val);
3768 	}
3769 
3770 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3771 	for (i = 0; i < 8; i++) {
3772 		u32 loc, code;
3773 
3774 		loc = be32_to_cpu(fw_entry->fixup[i]);
3775 		if (loc && ((loc * 4) < rv2p_code_len)) {
3776 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3777 			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3778 			code = be32_to_cpu(*(rv2p_code + loc));
3779 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3780 			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3781 
3782 			val = (loc / 2) | cmd;
3783 			REG_WR(bp, addr, val);
3784 		}
3785 	}
3786 
3787 	/* Reset the processor, un-stall is done later. */
3788 	if (rv2p_proc == RV2P_PROC1) {
3789 		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3790 	}
3791 	else {
3792 		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3793 	}
3794 
3795 	return 0;
3796 }
3797 
3798 static int
3799 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3800 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3801 {
3802 	u32 addr, len, file_offset;
3803 	__be32 *data;
3804 	u32 offset;
3805 	u32 val;
3806 
3807 	/* Halt the CPU. */
3808 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3809 	val |= cpu_reg->mode_value_halt;
3810 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3811 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3812 
3813 	/* Load the Text area. */
3814 	addr = be32_to_cpu(fw_entry->text.addr);
3815 	len = be32_to_cpu(fw_entry->text.len);
3816 	file_offset = be32_to_cpu(fw_entry->text.offset);
3817 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3818 
3819 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3820 	if (len) {
3821 		int j;
3822 
3823 		for (j = 0; j < (len / 4); j++, offset += 4)
3824 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3825 	}
3826 
3827 	/* Load the Data area. */
3828 	addr = be32_to_cpu(fw_entry->data.addr);
3829 	len = be32_to_cpu(fw_entry->data.len);
3830 	file_offset = be32_to_cpu(fw_entry->data.offset);
3831 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3832 
3833 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3834 	if (len) {
3835 		int j;
3836 
3837 		for (j = 0; j < (len / 4); j++, offset += 4)
3838 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3839 	}
3840 
3841 	/* Load the Read-Only area. */
3842 	addr = be32_to_cpu(fw_entry->rodata.addr);
3843 	len = be32_to_cpu(fw_entry->rodata.len);
3844 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3845 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3846 
3847 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3848 	if (len) {
3849 		int j;
3850 
3851 		for (j = 0; j < (len / 4); j++, offset += 4)
3852 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3853 	}
3854 
3855 	/* Clear the pre-fetch instruction. */
3856 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3857 
3858 	val = be32_to_cpu(fw_entry->start_addr);
3859 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3860 
3861 	/* Start the CPU. */
3862 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3863 	val &= ~cpu_reg->mode_value_halt;
3864 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3865 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3866 
3867 	return 0;
3868 }
3869 
3870 static int
3871 bnx2_init_cpus(struct bnx2 *bp)
3872 {
3873 	const struct bnx2_mips_fw_file *mips_fw =
3874 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3875 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3876 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3877 	int rc;
3878 
3879 	/* Initialize the RV2P processor. */
3880 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3881 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3882 
3883 	/* Initialize the RX Processor. */
3884 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3885 	if (rc)
3886 		goto init_cpu_err;
3887 
3888 	/* Initialize the TX Processor. */
3889 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3890 	if (rc)
3891 		goto init_cpu_err;
3892 
3893 	/* Initialize the TX Patch-up Processor. */
3894 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3895 	if (rc)
3896 		goto init_cpu_err;
3897 
3898 	/* Initialize the Completion Processor. */
3899 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3900 	if (rc)
3901 		goto init_cpu_err;
3902 
3903 	/* Initialize the Command Processor. */
3904 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3905 
3906 init_cpu_err:
3907 	return rc;
3908 }
3909 
3910 static int
3911 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3912 {
3913 	u16 pmcsr;
3914 
3915 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3916 
3917 	switch (state) {
3918 	case PCI_D0: {
3919 		u32 val;
3920 
3921 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3922 			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3923 			PCI_PM_CTRL_PME_STATUS);
3924 
3925 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3926 			/* delay required during transition out of D3hot */
3927 			msleep(20);
3928 
3929 		val = REG_RD(bp, BNX2_EMAC_MODE);
3930 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3931 		val &= ~BNX2_EMAC_MODE_MPKT;
3932 		REG_WR(bp, BNX2_EMAC_MODE, val);
3933 
3934 		val = REG_RD(bp, BNX2_RPM_CONFIG);
3935 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3936 		REG_WR(bp, BNX2_RPM_CONFIG, val);
3937 		break;
3938 	}
3939 	case PCI_D3hot: {
3940 		int i;
3941 		u32 val, wol_msg;
3942 
3943 		if (bp->wol) {
3944 			u32 advertising;
3945 			u8 autoneg;
3946 
3947 			autoneg = bp->autoneg;
3948 			advertising = bp->advertising;
3949 
3950 			if (bp->phy_port == PORT_TP) {
3951 				bp->autoneg = AUTONEG_SPEED;
3952 				bp->advertising = ADVERTISED_10baseT_Half |
3953 					ADVERTISED_10baseT_Full |
3954 					ADVERTISED_100baseT_Half |
3955 					ADVERTISED_100baseT_Full |
3956 					ADVERTISED_Autoneg;
3957 			}
3958 
3959 			spin_lock_bh(&bp->phy_lock);
3960 			bnx2_setup_phy(bp, bp->phy_port);
3961 			spin_unlock_bh(&bp->phy_lock);
3962 
3963 			bp->autoneg = autoneg;
3964 			bp->advertising = advertising;
3965 
3966 			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3967 
3968 			val = REG_RD(bp, BNX2_EMAC_MODE);
3969 
3970 			/* Enable port mode. */
3971 			val &= ~BNX2_EMAC_MODE_PORT;
3972 			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3973 			       BNX2_EMAC_MODE_ACPI_RCVD |
3974 			       BNX2_EMAC_MODE_MPKT;
3975 			if (bp->phy_port == PORT_TP)
3976 				val |= BNX2_EMAC_MODE_PORT_MII;
3977 			else {
3978 				val |= BNX2_EMAC_MODE_PORT_GMII;
3979 				if (bp->line_speed == SPEED_2500)
3980 					val |= BNX2_EMAC_MODE_25G_MODE;
3981 			}
3982 
3983 			REG_WR(bp, BNX2_EMAC_MODE, val);
3984 
3985 			/* receive all multicast */
3986 			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3987 				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3988 				       0xffffffff);
3989 			}
3990 			REG_WR(bp, BNX2_EMAC_RX_MODE,
3991 			       BNX2_EMAC_RX_MODE_SORT_MODE);
3992 
3993 			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3994 			      BNX2_RPM_SORT_USER0_MC_EN;
3995 			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3996 			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3997 			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3998 			       BNX2_RPM_SORT_USER0_ENA);
3999 
4000 			/* Need to enable EMAC and RPM for WOL. */
4001 			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4002 			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4003 			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4004 			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4005 
4006 			val = REG_RD(bp, BNX2_RPM_CONFIG);
4007 			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4008 			REG_WR(bp, BNX2_RPM_CONFIG, val);
4009 
4010 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4011 		}
4012 		else {
4013 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4014 		}
4015 
4016 		if (!(bp->flags & BNX2_FLAG_NO_WOL))
4017 			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4018 				     1, 0);
4019 
4020 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4021 		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4022 		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4023 
4024 			if (bp->wol)
4025 				pmcsr |= 3;
4026 		}
4027 		else {
4028 			pmcsr |= 3;
4029 		}
4030 		if (bp->wol) {
4031 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4032 		}
4033 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4034 				      pmcsr);
4035 
4036 		/* No more memory access after this point until
4037 		 * device is brought back to D0.
4038 		 */
4039 		udelay(50);
4040 		break;
4041 	}
4042 	default:
4043 		return -EINVAL;
4044 	}
4045 	return 0;
4046 }
4047 
4048 static int
4049 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4050 {
4051 	u32 val;
4052 	int j;
4053 
4054 	/* Request access to the flash interface. */
4055 	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4056 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4057 		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4058 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4059 			break;
4060 
4061 		udelay(5);
4062 	}
4063 
4064 	if (j >= NVRAM_TIMEOUT_COUNT)
4065 		return -EBUSY;
4066 
4067 	return 0;
4068 }
4069 
4070 static int
4071 bnx2_release_nvram_lock(struct bnx2 *bp)
4072 {
4073 	int j;
4074 	u32 val;
4075 
4076 	/* Relinquish nvram interface. */
4077 	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4078 
4079 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4080 		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4081 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4082 			break;
4083 
4084 		udelay(5);
4085 	}
4086 
4087 	if (j >= NVRAM_TIMEOUT_COUNT)
4088 		return -EBUSY;
4089 
4090 	return 0;
4091 }
4092 
4093 
4094 static int
4095 bnx2_enable_nvram_write(struct bnx2 *bp)
4096 {
4097 	u32 val;
4098 
4099 	val = REG_RD(bp, BNX2_MISC_CFG);
4100 	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4101 
4102 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4103 		int j;
4104 
4105 		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4106 		REG_WR(bp, BNX2_NVM_COMMAND,
4107 		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4108 
4109 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4110 			udelay(5);
4111 
4112 			val = REG_RD(bp, BNX2_NVM_COMMAND);
4113 			if (val & BNX2_NVM_COMMAND_DONE)
4114 				break;
4115 		}
4116 
4117 		if (j >= NVRAM_TIMEOUT_COUNT)
4118 			return -EBUSY;
4119 	}
4120 	return 0;
4121 }
4122 
4123 static void
4124 bnx2_disable_nvram_write(struct bnx2 *bp)
4125 {
4126 	u32 val;
4127 
4128 	val = REG_RD(bp, BNX2_MISC_CFG);
4129 	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4130 }
4131 
4132 
4133 static void
4134 bnx2_enable_nvram_access(struct bnx2 *bp)
4135 {
4136 	u32 val;
4137 
4138 	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4139 	/* Enable both bits, even on read. */
4140 	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4141 	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4142 }
4143 
4144 static void
4145 bnx2_disable_nvram_access(struct bnx2 *bp)
4146 {
4147 	u32 val;
4148 
4149 	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4150 	/* Disable both bits, even after read. */
4151 	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4152 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4153 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4154 }
4155 
4156 static int
4157 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4158 {
4159 	u32 cmd;
4160 	int j;
4161 
4162 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4163 		/* Buffered flash, no erase needed */
4164 		return 0;
4165 
4166 	/* Build an erase command */
4167 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4168 	      BNX2_NVM_COMMAND_DOIT;
4169 
4170 	/* Need to clear DONE bit separately. */
4171 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4172 
4173 	/* Address of the NVRAM to read from. */
4174 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4175 
4176 	/* Issue an erase command. */
4177 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4178 
4179 	/* Wait for completion. */
4180 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4181 		u32 val;
4182 
4183 		udelay(5);
4184 
4185 		val = REG_RD(bp, BNX2_NVM_COMMAND);
4186 		if (val & BNX2_NVM_COMMAND_DONE)
4187 			break;
4188 	}
4189 
4190 	if (j >= NVRAM_TIMEOUT_COUNT)
4191 		return -EBUSY;
4192 
4193 	return 0;
4194 }
4195 
4196 static int
4197 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4198 {
4199 	u32 cmd;
4200 	int j;
4201 
4202 	/* Build the command word. */
4203 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4204 
4205 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4206 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4207 		offset = ((offset / bp->flash_info->page_size) <<
4208 			   bp->flash_info->page_bits) +
4209 			  (offset % bp->flash_info->page_size);
4210 	}
4211 
4212 	/* Need to clear DONE bit separately. */
4213 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4214 
4215 	/* Address of the NVRAM to read from. */
4216 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4217 
4218 	/* Issue a read command. */
4219 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4220 
4221 	/* Wait for completion. */
4222 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4223 		u32 val;
4224 
4225 		udelay(5);
4226 
4227 		val = REG_RD(bp, BNX2_NVM_COMMAND);
4228 		if (val & BNX2_NVM_COMMAND_DONE) {
4229 			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4230 			memcpy(ret_val, &v, 4);
4231 			break;
4232 		}
4233 	}
4234 	if (j >= NVRAM_TIMEOUT_COUNT)
4235 		return -EBUSY;
4236 
4237 	return 0;
4238 }
4239 
4240 
4241 static int
4242 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4243 {
4244 	u32 cmd;
4245 	__be32 val32;
4246 	int j;
4247 
4248 	/* Build the command word. */
4249 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4250 
4251 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4252 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4253 		offset = ((offset / bp->flash_info->page_size) <<
4254 			  bp->flash_info->page_bits) +
4255 			 (offset % bp->flash_info->page_size);
4256 	}
4257 
4258 	/* Need to clear DONE bit separately. */
4259 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4260 
4261 	memcpy(&val32, val, 4);
4262 
4263 	/* Write the data. */
4264 	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4265 
4266 	/* Address of the NVRAM to write to. */
4267 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4268 
4269 	/* Issue the write command. */
4270 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4271 
4272 	/* Wait for completion. */
4273 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4274 		udelay(5);
4275 
4276 		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4277 			break;
4278 	}
4279 	if (j >= NVRAM_TIMEOUT_COUNT)
4280 		return -EBUSY;
4281 
4282 	return 0;
4283 }
4284 
4285 static int
4286 bnx2_init_nvram(struct bnx2 *bp)
4287 {
4288 	u32 val;
4289 	int j, entry_count, rc = 0;
4290 	const struct flash_spec *flash;
4291 
4292 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4293 		bp->flash_info = &flash_5709;
4294 		goto get_flash_size;
4295 	}
4296 
4297 	/* Determine the selected interface. */
4298 	val = REG_RD(bp, BNX2_NVM_CFG1);
4299 
4300 	entry_count = ARRAY_SIZE(flash_table);
4301 
4302 	if (val & 0x40000000) {
4303 
4304 		/* Flash interface has been reconfigured */
4305 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4306 		     j++, flash++) {
4307 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4308 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4309 				bp->flash_info = flash;
4310 				break;
4311 			}
4312 		}
4313 	}
4314 	else {
4315 		u32 mask;
4316 		/* Not yet been reconfigured */
4317 
4318 		if (val & (1 << 23))
4319 			mask = FLASH_BACKUP_STRAP_MASK;
4320 		else
4321 			mask = FLASH_STRAP_MASK;
4322 
4323 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4324 			j++, flash++) {
4325 
4326 			if ((val & mask) == (flash->strapping & mask)) {
4327 				bp->flash_info = flash;
4328 
4329 				/* Request access to the flash interface. */
4330 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4331 					return rc;
4332 
4333 				/* Enable access to flash interface */
4334 				bnx2_enable_nvram_access(bp);
4335 
4336 				/* Reconfigure the flash interface */
4337 				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4338 				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4339 				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4340 				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4341 
4342 				/* Disable access to flash interface */
4343 				bnx2_disable_nvram_access(bp);
4344 				bnx2_release_nvram_lock(bp);
4345 
4346 				break;
4347 			}
4348 		}
4349 	} /* if (val & 0x40000000) */
4350 
4351 	if (j == entry_count) {
4352 		bp->flash_info = NULL;
4353 		pr_alert("Unknown flash/EEPROM type\n");
4354 		return -ENODEV;
4355 	}
4356 
4357 get_flash_size:
4358 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4359 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4360 	if (val)
4361 		bp->flash_size = val;
4362 	else
4363 		bp->flash_size = bp->flash_info->total_size;
4364 
4365 	return rc;
4366 }
4367 
4368 static int
4369 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4370 		int buf_size)
4371 {
4372 	int rc = 0;
4373 	u32 cmd_flags, offset32, len32, extra;
4374 
4375 	if (buf_size == 0)
4376 		return 0;
4377 
4378 	/* Request access to the flash interface. */
4379 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4380 		return rc;
4381 
4382 	/* Enable access to flash interface */
4383 	bnx2_enable_nvram_access(bp);
4384 
4385 	len32 = buf_size;
4386 	offset32 = offset;
4387 	extra = 0;
4388 
4389 	cmd_flags = 0;
4390 
4391 	if (offset32 & 3) {
4392 		u8 buf[4];
4393 		u32 pre_len;
4394 
4395 		offset32 &= ~3;
4396 		pre_len = 4 - (offset & 3);
4397 
4398 		if (pre_len >= len32) {
4399 			pre_len = len32;
4400 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4401 				    BNX2_NVM_COMMAND_LAST;
4402 		}
4403 		else {
4404 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4405 		}
4406 
4407 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4408 
4409 		if (rc)
4410 			return rc;
4411 
4412 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4413 
4414 		offset32 += 4;
4415 		ret_buf += pre_len;
4416 		len32 -= pre_len;
4417 	}
4418 	if (len32 & 3) {
4419 		extra = 4 - (len32 & 3);
4420 		len32 = (len32 + 4) & ~3;
4421 	}
4422 
4423 	if (len32 == 4) {
4424 		u8 buf[4];
4425 
4426 		if (cmd_flags)
4427 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4428 		else
4429 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4430 				    BNX2_NVM_COMMAND_LAST;
4431 
4432 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4433 
4434 		memcpy(ret_buf, buf, 4 - extra);
4435 	}
4436 	else if (len32 > 0) {
4437 		u8 buf[4];
4438 
4439 		/* Read the first word. */
4440 		if (cmd_flags)
4441 			cmd_flags = 0;
4442 		else
4443 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4444 
4445 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4446 
4447 		/* Advance to the next dword. */
4448 		offset32 += 4;
4449 		ret_buf += 4;
4450 		len32 -= 4;
4451 
4452 		while (len32 > 4 && rc == 0) {
4453 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4454 
4455 			/* Advance to the next dword. */
4456 			offset32 += 4;
4457 			ret_buf += 4;
4458 			len32 -= 4;
4459 		}
4460 
4461 		if (rc)
4462 			return rc;
4463 
4464 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4465 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4466 
4467 		memcpy(ret_buf, buf, 4 - extra);
4468 	}
4469 
4470 	/* Disable access to flash interface */
4471 	bnx2_disable_nvram_access(bp);
4472 
4473 	bnx2_release_nvram_lock(bp);
4474 
4475 	return rc;
4476 }
4477 
4478 static int
4479 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4480 		int buf_size)
4481 {
4482 	u32 written, offset32, len32;
4483 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4484 	int rc = 0;
4485 	int align_start, align_end;
4486 
4487 	buf = data_buf;
4488 	offset32 = offset;
4489 	len32 = buf_size;
4490 	align_start = align_end = 0;
4491 
4492 	if ((align_start = (offset32 & 3))) {
4493 		offset32 &= ~3;
4494 		len32 += align_start;
4495 		if (len32 < 4)
4496 			len32 = 4;
4497 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4498 			return rc;
4499 	}
4500 
4501 	if (len32 & 3) {
4502 		align_end = 4 - (len32 & 3);
4503 		len32 += align_end;
4504 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4505 			return rc;
4506 	}
4507 
4508 	if (align_start || align_end) {
4509 		align_buf = kmalloc(len32, GFP_KERNEL);
4510 		if (align_buf == NULL)
4511 			return -ENOMEM;
4512 		if (align_start) {
4513 			memcpy(align_buf, start, 4);
4514 		}
4515 		if (align_end) {
4516 			memcpy(align_buf + len32 - 4, end, 4);
4517 		}
4518 		memcpy(align_buf + align_start, data_buf, buf_size);
4519 		buf = align_buf;
4520 	}
4521 
4522 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4523 		flash_buffer = kmalloc(264, GFP_KERNEL);
4524 		if (flash_buffer == NULL) {
4525 			rc = -ENOMEM;
4526 			goto nvram_write_end;
4527 		}
4528 	}
4529 
4530 	written = 0;
4531 	while ((written < len32) && (rc == 0)) {
4532 		u32 page_start, page_end, data_start, data_end;
4533 		u32 addr, cmd_flags;
4534 		int i;
4535 
4536 	        /* Find the page_start addr */
4537 		page_start = offset32 + written;
4538 		page_start -= (page_start % bp->flash_info->page_size);
4539 		/* Find the page_end addr */
4540 		page_end = page_start + bp->flash_info->page_size;
4541 		/* Find the data_start addr */
4542 		data_start = (written == 0) ? offset32 : page_start;
4543 		/* Find the data_end addr */
4544 		data_end = (page_end > offset32 + len32) ?
4545 			(offset32 + len32) : page_end;
4546 
4547 		/* Request access to the flash interface. */
4548 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4549 			goto nvram_write_end;
4550 
4551 		/* Enable access to flash interface */
4552 		bnx2_enable_nvram_access(bp);
4553 
4554 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4555 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4556 			int j;
4557 
4558 			/* Read the whole page into the buffer
4559 			 * (non-buffer flash only) */
4560 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4561 				if (j == (bp->flash_info->page_size - 4)) {
4562 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4563 				}
4564 				rc = bnx2_nvram_read_dword(bp,
4565 					page_start + j,
4566 					&flash_buffer[j],
4567 					cmd_flags);
4568 
4569 				if (rc)
4570 					goto nvram_write_end;
4571 
4572 				cmd_flags = 0;
4573 			}
4574 		}
4575 
4576 		/* Enable writes to flash interface (unlock write-protect) */
4577 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4578 			goto nvram_write_end;
4579 
4580 		/* Loop to write back the buffer data from page_start to
4581 		 * data_start */
4582 		i = 0;
4583 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4584 			/* Erase the page */
4585 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4586 				goto nvram_write_end;
4587 
4588 			/* Re-enable the write again for the actual write */
4589 			bnx2_enable_nvram_write(bp);
4590 
4591 			for (addr = page_start; addr < data_start;
4592 				addr += 4, i += 4) {
4593 
4594 				rc = bnx2_nvram_write_dword(bp, addr,
4595 					&flash_buffer[i], cmd_flags);
4596 
4597 				if (rc != 0)
4598 					goto nvram_write_end;
4599 
4600 				cmd_flags = 0;
4601 			}
4602 		}
4603 
4604 		/* Loop to write the new data from data_start to data_end */
4605 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4606 			if ((addr == page_end - 4) ||
4607 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4608 				 (addr == data_end - 4))) {
4609 
4610 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4611 			}
4612 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4613 				cmd_flags);
4614 
4615 			if (rc != 0)
4616 				goto nvram_write_end;
4617 
4618 			cmd_flags = 0;
4619 			buf += 4;
4620 		}
4621 
4622 		/* Loop to write back the buffer data from data_end
4623 		 * to page_end */
4624 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4625 			for (addr = data_end; addr < page_end;
4626 				addr += 4, i += 4) {
4627 
4628 				if (addr == page_end-4) {
4629 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4630                 		}
4631 				rc = bnx2_nvram_write_dword(bp, addr,
4632 					&flash_buffer[i], cmd_flags);
4633 
4634 				if (rc != 0)
4635 					goto nvram_write_end;
4636 
4637 				cmd_flags = 0;
4638 			}
4639 		}
4640 
4641 		/* Disable writes to flash interface (lock write-protect) */
4642 		bnx2_disable_nvram_write(bp);
4643 
4644 		/* Disable access to flash interface */
4645 		bnx2_disable_nvram_access(bp);
4646 		bnx2_release_nvram_lock(bp);
4647 
4648 		/* Increment written */
4649 		written += data_end - data_start;
4650 	}
4651 
4652 nvram_write_end:
4653 	kfree(flash_buffer);
4654 	kfree(align_buf);
4655 	return rc;
4656 }
4657 
4658 static void
4659 bnx2_init_fw_cap(struct bnx2 *bp)
4660 {
4661 	u32 val, sig = 0;
4662 
4663 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4664 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4665 
4666 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4667 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4668 
4669 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4670 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4671 		return;
4672 
4673 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4674 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4675 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4676 	}
4677 
4678 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4679 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4680 		u32 link;
4681 
4682 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4683 
4684 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4685 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4686 			bp->phy_port = PORT_FIBRE;
4687 		else
4688 			bp->phy_port = PORT_TP;
4689 
4690 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4691 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4692 	}
4693 
4694 	if (netif_running(bp->dev) && sig)
4695 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4696 }
4697 
4698 static void
4699 bnx2_setup_msix_tbl(struct bnx2 *bp)
4700 {
4701 	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4702 
4703 	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4704 	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4705 }
4706 
4707 static int
4708 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4709 {
4710 	u32 val;
4711 	int i, rc = 0;
4712 	u8 old_port;
4713 
4714 	/* Wait for the current PCI transaction to complete before
4715 	 * issuing a reset. */
4716 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4717 	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4718 		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4719 		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4720 		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4721 		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4722 		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4723 		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4724 		udelay(5);
4725 	} else {  /* 5709 */
4726 		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4727 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4728 		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4729 		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4730 
4731 		for (i = 0; i < 100; i++) {
4732 			msleep(1);
4733 			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4734 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4735 				break;
4736 		}
4737 	}
4738 
4739 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4740 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4741 
4742 	/* Deposit a driver reset signature so the firmware knows that
4743 	 * this is a soft reset. */
4744 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4745 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4746 
4747 	/* Do a dummy read to force the chip to complete all current transaction
4748 	 * before we issue a reset. */
4749 	val = REG_RD(bp, BNX2_MISC_ID);
4750 
4751 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4752 		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4753 		REG_RD(bp, BNX2_MISC_COMMAND);
4754 		udelay(5);
4755 
4756 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4757 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4758 
4759 		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4760 
4761 	} else {
4762 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4763 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4764 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4765 
4766 		/* Chip reset. */
4767 		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4768 
4769 		/* Reading back any register after chip reset will hang the
4770 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4771 		 * of margin for write posting.
4772 		 */
4773 		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4774 		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
4775 			msleep(20);
4776 
4777 		/* Reset takes approximate 30 usec */
4778 		for (i = 0; i < 10; i++) {
4779 			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4780 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4781 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4782 				break;
4783 			udelay(10);
4784 		}
4785 
4786 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4787 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4788 			pr_err("Chip reset did not complete\n");
4789 			return -EBUSY;
4790 		}
4791 	}
4792 
4793 	/* Make sure byte swapping is properly configured. */
4794 	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4795 	if (val != 0x01020304) {
4796 		pr_err("Chip not in correct endian mode\n");
4797 		return -ENODEV;
4798 	}
4799 
4800 	/* Wait for the firmware to finish its initialization. */
4801 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4802 	if (rc)
4803 		return rc;
4804 
4805 	spin_lock_bh(&bp->phy_lock);
4806 	old_port = bp->phy_port;
4807 	bnx2_init_fw_cap(bp);
4808 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4809 	    old_port != bp->phy_port)
4810 		bnx2_set_default_remote_link(bp);
4811 	spin_unlock_bh(&bp->phy_lock);
4812 
4813 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4814 		/* Adjust the voltage regular to two steps lower.  The default
4815 		 * of this register is 0x0000000e. */
4816 		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4817 
4818 		/* Remove bad rbuf memory from the free pool. */
4819 		rc = bnx2_alloc_bad_rbuf(bp);
4820 	}
4821 
4822 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4823 		bnx2_setup_msix_tbl(bp);
4824 		/* Prevent MSIX table reads and write from timing out */
4825 		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4826 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4827 	}
4828 
4829 	return rc;
4830 }
4831 
4832 static int
4833 bnx2_init_chip(struct bnx2 *bp)
4834 {
4835 	u32 val, mtu;
4836 	int rc, i;
4837 
4838 	/* Make sure the interrupt is not active. */
4839 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4840 
4841 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4842 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4843 #ifdef __BIG_ENDIAN
4844 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4845 #endif
4846 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4847 	      DMA_READ_CHANS << 12 |
4848 	      DMA_WRITE_CHANS << 16;
4849 
4850 	val |= (0x2 << 20) | (1 << 11);
4851 
4852 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4853 		val |= (1 << 23);
4854 
4855 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4856 	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4857 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4858 
4859 	REG_WR(bp, BNX2_DMA_CONFIG, val);
4860 
4861 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4862 		val = REG_RD(bp, BNX2_TDMA_CONFIG);
4863 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4864 		REG_WR(bp, BNX2_TDMA_CONFIG, val);
4865 	}
4866 
4867 	if (bp->flags & BNX2_FLAG_PCIX) {
4868 		u16 val16;
4869 
4870 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4871 				     &val16);
4872 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4873 				      val16 & ~PCI_X_CMD_ERO);
4874 	}
4875 
4876 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4877 	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4878 	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4879 	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4880 
4881 	/* Initialize context mapping and zero out the quick contexts.  The
4882 	 * context block must have already been enabled. */
4883 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4884 		rc = bnx2_init_5709_context(bp);
4885 		if (rc)
4886 			return rc;
4887 	} else
4888 		bnx2_init_context(bp);
4889 
4890 	if ((rc = bnx2_init_cpus(bp)) != 0)
4891 		return rc;
4892 
4893 	bnx2_init_nvram(bp);
4894 
4895 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4896 
4897 	val = REG_RD(bp, BNX2_MQ_CONFIG);
4898 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4899 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4900 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4901 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4902 		if (CHIP_REV(bp) == CHIP_REV_Ax)
4903 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4904 	}
4905 
4906 	REG_WR(bp, BNX2_MQ_CONFIG, val);
4907 
4908 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4909 	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4910 	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4911 
4912 	val = (BCM_PAGE_BITS - 8) << 24;
4913 	REG_WR(bp, BNX2_RV2P_CONFIG, val);
4914 
4915 	/* Configure page size. */
4916 	val = REG_RD(bp, BNX2_TBDR_CONFIG);
4917 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4918 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4919 	REG_WR(bp, BNX2_TBDR_CONFIG, val);
4920 
4921 	val = bp->mac_addr[0] +
4922 	      (bp->mac_addr[1] << 8) +
4923 	      (bp->mac_addr[2] << 16) +
4924 	      bp->mac_addr[3] +
4925 	      (bp->mac_addr[4] << 8) +
4926 	      (bp->mac_addr[5] << 16);
4927 	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4928 
4929 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4930 	mtu = bp->dev->mtu;
4931 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4932 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4933 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4934 	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4935 
4936 	if (mtu < 1500)
4937 		mtu = 1500;
4938 
4939 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4940 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4941 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4942 
4943 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4944 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4945 		bp->bnx2_napi[i].last_status_idx = 0;
4946 
4947 	bp->idle_chk_status_idx = 0xffff;
4948 
4949 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4950 
4951 	/* Set up how to generate a link change interrupt. */
4952 	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4953 
4954 	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4955 	       (u64) bp->status_blk_mapping & 0xffffffff);
4956 	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4957 
4958 	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4959 	       (u64) bp->stats_blk_mapping & 0xffffffff);
4960 	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4961 	       (u64) bp->stats_blk_mapping >> 32);
4962 
4963 	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4964 	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4965 
4966 	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4967 	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4968 
4969 	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4970 	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4971 
4972 	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4973 
4974 	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4975 
4976 	REG_WR(bp, BNX2_HC_COM_TICKS,
4977 	       (bp->com_ticks_int << 16) | bp->com_ticks);
4978 
4979 	REG_WR(bp, BNX2_HC_CMD_TICKS,
4980 	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4981 
4982 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4983 		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4984 	else
4985 		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4986 	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4987 
4988 	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4989 		val = BNX2_HC_CONFIG_COLLECT_STATS;
4990 	else {
4991 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4992 		      BNX2_HC_CONFIG_COLLECT_STATS;
4993 	}
4994 
4995 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4996 		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4997 		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
4998 
4999 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5000 	}
5001 
5002 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5003 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5004 
5005 	REG_WR(bp, BNX2_HC_CONFIG, val);
5006 
5007 	if (bp->rx_ticks < 25)
5008 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5009 	else
5010 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5011 
5012 	for (i = 1; i < bp->irq_nvecs; i++) {
5013 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5014 			   BNX2_HC_SB_CONFIG_1;
5015 
5016 		REG_WR(bp, base,
5017 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5018 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5019 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5020 
5021 		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5022 			(bp->tx_quick_cons_trip_int << 16) |
5023 			 bp->tx_quick_cons_trip);
5024 
5025 		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5026 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5027 
5028 		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5029 		       (bp->rx_quick_cons_trip_int << 16) |
5030 			bp->rx_quick_cons_trip);
5031 
5032 		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5033 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5034 	}
5035 
5036 	/* Clear internal stats counters. */
5037 	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5038 
5039 	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5040 
5041 	/* Initialize the receive filter. */
5042 	bnx2_set_rx_mode(bp->dev);
5043 
5044 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5045 		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5046 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5047 		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5048 	}
5049 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5050 			  1, 0);
5051 
5052 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5053 	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5054 
5055 	udelay(20);
5056 
5057 	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5058 
5059 	return rc;
5060 }
5061 
5062 static void
5063 bnx2_clear_ring_states(struct bnx2 *bp)
5064 {
5065 	struct bnx2_napi *bnapi;
5066 	struct bnx2_tx_ring_info *txr;
5067 	struct bnx2_rx_ring_info *rxr;
5068 	int i;
5069 
5070 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5071 		bnapi = &bp->bnx2_napi[i];
5072 		txr = &bnapi->tx_ring;
5073 		rxr = &bnapi->rx_ring;
5074 
5075 		txr->tx_cons = 0;
5076 		txr->hw_tx_cons = 0;
5077 		rxr->rx_prod_bseq = 0;
5078 		rxr->rx_prod = 0;
5079 		rxr->rx_cons = 0;
5080 		rxr->rx_pg_prod = 0;
5081 		rxr->rx_pg_cons = 0;
5082 	}
5083 }
5084 
5085 static void
5086 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5087 {
5088 	u32 val, offset0, offset1, offset2, offset3;
5089 	u32 cid_addr = GET_CID_ADDR(cid);
5090 
5091 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5092 		offset0 = BNX2_L2CTX_TYPE_XI;
5093 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5094 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5095 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5096 	} else {
5097 		offset0 = BNX2_L2CTX_TYPE;
5098 		offset1 = BNX2_L2CTX_CMD_TYPE;
5099 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5100 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5101 	}
5102 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5103 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5104 
5105 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5106 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5107 
5108 	val = (u64) txr->tx_desc_mapping >> 32;
5109 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5110 
5111 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5112 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5113 }
5114 
5115 static void
5116 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5117 {
5118 	struct tx_bd *txbd;
5119 	u32 cid = TX_CID;
5120 	struct bnx2_napi *bnapi;
5121 	struct bnx2_tx_ring_info *txr;
5122 
5123 	bnapi = &bp->bnx2_napi[ring_num];
5124 	txr = &bnapi->tx_ring;
5125 
5126 	if (ring_num == 0)
5127 		cid = TX_CID;
5128 	else
5129 		cid = TX_TSS_CID + ring_num - 1;
5130 
5131 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5132 
5133 	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5134 
5135 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5136 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5137 
5138 	txr->tx_prod = 0;
5139 	txr->tx_prod_bseq = 0;
5140 
5141 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5142 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5143 
5144 	bnx2_init_tx_context(bp, cid, txr);
5145 }
5146 
5147 static void
5148 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5149 		     int num_rings)
5150 {
5151 	int i;
5152 	struct rx_bd *rxbd;
5153 
5154 	for (i = 0; i < num_rings; i++) {
5155 		int j;
5156 
5157 		rxbd = &rx_ring[i][0];
5158 		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5159 			rxbd->rx_bd_len = buf_size;
5160 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5161 		}
5162 		if (i == (num_rings - 1))
5163 			j = 0;
5164 		else
5165 			j = i + 1;
5166 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5167 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5168 	}
5169 }
5170 
5171 static void
5172 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5173 {
5174 	int i;
5175 	u16 prod, ring_prod;
5176 	u32 cid, rx_cid_addr, val;
5177 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5178 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5179 
5180 	if (ring_num == 0)
5181 		cid = RX_CID;
5182 	else
5183 		cid = RX_RSS_CID + ring_num - 1;
5184 
5185 	rx_cid_addr = GET_CID_ADDR(cid);
5186 
5187 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5188 			     bp->rx_buf_use_size, bp->rx_max_ring);
5189 
5190 	bnx2_init_rx_context(bp, cid);
5191 
5192 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5193 		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5194 		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5195 	}
5196 
5197 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5198 	if (bp->rx_pg_ring_size) {
5199 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5200 				     rxr->rx_pg_desc_mapping,
5201 				     PAGE_SIZE, bp->rx_max_pg_ring);
5202 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5203 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5204 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5205 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5206 
5207 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5208 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5209 
5210 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5211 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5212 
5213 		if (CHIP_NUM(bp) == CHIP_NUM_5709)
5214 			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5215 	}
5216 
5217 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5218 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5219 
5220 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5221 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5222 
5223 	ring_prod = prod = rxr->rx_pg_prod;
5224 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5225 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5226 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5227 				    ring_num, i, bp->rx_pg_ring_size);
5228 			break;
5229 		}
5230 		prod = NEXT_RX_BD(prod);
5231 		ring_prod = RX_PG_RING_IDX(prod);
5232 	}
5233 	rxr->rx_pg_prod = prod;
5234 
5235 	ring_prod = prod = rxr->rx_prod;
5236 	for (i = 0; i < bp->rx_ring_size; i++) {
5237 		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5238 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5239 				    ring_num, i, bp->rx_ring_size);
5240 			break;
5241 		}
5242 		prod = NEXT_RX_BD(prod);
5243 		ring_prod = RX_RING_IDX(prod);
5244 	}
5245 	rxr->rx_prod = prod;
5246 
5247 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5248 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5249 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5250 
5251 	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5252 	REG_WR16(bp, rxr->rx_bidx_addr, prod);
5253 
5254 	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5255 }
5256 
5257 static void
5258 bnx2_init_all_rings(struct bnx2 *bp)
5259 {
5260 	int i;
5261 	u32 val;
5262 
5263 	bnx2_clear_ring_states(bp);
5264 
5265 	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5266 	for (i = 0; i < bp->num_tx_rings; i++)
5267 		bnx2_init_tx_ring(bp, i);
5268 
5269 	if (bp->num_tx_rings > 1)
5270 		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5271 		       (TX_TSS_CID << 7));
5272 
5273 	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5274 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5275 
5276 	for (i = 0; i < bp->num_rx_rings; i++)
5277 		bnx2_init_rx_ring(bp, i);
5278 
5279 	if (bp->num_rx_rings > 1) {
5280 		u32 tbl_32 = 0;
5281 
5282 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5283 			int shift = (i % 8) << 2;
5284 
5285 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5286 			if ((i % 8) == 7) {
5287 				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5288 				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5289 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5290 					BNX2_RLUP_RSS_COMMAND_WRITE |
5291 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5292 				tbl_32 = 0;
5293 			}
5294 		}
5295 
5296 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5297 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5298 
5299 		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5300 
5301 	}
5302 }
5303 
5304 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5305 {
5306 	u32 max, num_rings = 1;
5307 
5308 	while (ring_size > MAX_RX_DESC_CNT) {
5309 		ring_size -= MAX_RX_DESC_CNT;
5310 		num_rings++;
5311 	}
5312 	/* round to next power of 2 */
5313 	max = max_size;
5314 	while ((max & num_rings) == 0)
5315 		max >>= 1;
5316 
5317 	if (num_rings != max)
5318 		max <<= 1;
5319 
5320 	return max;
5321 }
5322 
5323 static void
5324 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5325 {
5326 	u32 rx_size, rx_space, jumbo_size;
5327 
5328 	/* 8 for CRC and VLAN */
5329 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5330 
5331 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5332 		sizeof(struct skb_shared_info);
5333 
5334 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5335 	bp->rx_pg_ring_size = 0;
5336 	bp->rx_max_pg_ring = 0;
5337 	bp->rx_max_pg_ring_idx = 0;
5338 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5339 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5340 
5341 		jumbo_size = size * pages;
5342 		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5343 			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5344 
5345 		bp->rx_pg_ring_size = jumbo_size;
5346 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5347 							MAX_RX_PG_RINGS);
5348 		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5349 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5350 		bp->rx_copy_thresh = 0;
5351 	}
5352 
5353 	bp->rx_buf_use_size = rx_size;
5354 	/* hw alignment */
5355 	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5356 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5357 	bp->rx_ring_size = size;
5358 	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5359 	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5360 }
5361 
5362 static void
5363 bnx2_free_tx_skbs(struct bnx2 *bp)
5364 {
5365 	int i;
5366 
5367 	for (i = 0; i < bp->num_tx_rings; i++) {
5368 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5369 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5370 		int j;
5371 
5372 		if (txr->tx_buf_ring == NULL)
5373 			continue;
5374 
5375 		for (j = 0; j < TX_DESC_CNT; ) {
5376 			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5377 			struct sk_buff *skb = tx_buf->skb;
5378 			int k, last;
5379 
5380 			if (skb == NULL) {
5381 				j++;
5382 				continue;
5383 			}
5384 
5385 			dma_unmap_single(&bp->pdev->dev,
5386 					 dma_unmap_addr(tx_buf, mapping),
5387 					 skb_headlen(skb),
5388 					 PCI_DMA_TODEVICE);
5389 
5390 			tx_buf->skb = NULL;
5391 
5392 			last = tx_buf->nr_frags;
5393 			j++;
5394 			for (k = 0; k < last; k++, j++) {
5395 				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5396 				dma_unmap_page(&bp->pdev->dev,
5397 					dma_unmap_addr(tx_buf, mapping),
5398 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5399 					PCI_DMA_TODEVICE);
5400 			}
5401 			dev_kfree_skb(skb);
5402 		}
5403 	}
5404 }
5405 
5406 static void
5407 bnx2_free_rx_skbs(struct bnx2 *bp)
5408 {
5409 	int i;
5410 
5411 	for (i = 0; i < bp->num_rx_rings; i++) {
5412 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5413 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5414 		int j;
5415 
5416 		if (rxr->rx_buf_ring == NULL)
5417 			return;
5418 
5419 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5420 			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5421 			struct sk_buff *skb = rx_buf->skb;
5422 
5423 			if (skb == NULL)
5424 				continue;
5425 
5426 			dma_unmap_single(&bp->pdev->dev,
5427 					 dma_unmap_addr(rx_buf, mapping),
5428 					 bp->rx_buf_use_size,
5429 					 PCI_DMA_FROMDEVICE);
5430 
5431 			rx_buf->skb = NULL;
5432 
5433 			dev_kfree_skb(skb);
5434 		}
5435 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5436 			bnx2_free_rx_page(bp, rxr, j);
5437 	}
5438 }
5439 
5440 static void
5441 bnx2_free_skbs(struct bnx2 *bp)
5442 {
5443 	bnx2_free_tx_skbs(bp);
5444 	bnx2_free_rx_skbs(bp);
5445 }
5446 
5447 static int
5448 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5449 {
5450 	int rc;
5451 
5452 	rc = bnx2_reset_chip(bp, reset_code);
5453 	bnx2_free_skbs(bp);
5454 	if (rc)
5455 		return rc;
5456 
5457 	if ((rc = bnx2_init_chip(bp)) != 0)
5458 		return rc;
5459 
5460 	bnx2_init_all_rings(bp);
5461 	return 0;
5462 }
5463 
5464 static int
5465 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5466 {
5467 	int rc;
5468 
5469 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5470 		return rc;
5471 
5472 	spin_lock_bh(&bp->phy_lock);
5473 	bnx2_init_phy(bp, reset_phy);
5474 	bnx2_set_link(bp);
5475 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5476 		bnx2_remote_phy_event(bp);
5477 	spin_unlock_bh(&bp->phy_lock);
5478 	return 0;
5479 }
5480 
5481 static int
5482 bnx2_shutdown_chip(struct bnx2 *bp)
5483 {
5484 	u32 reset_code;
5485 
5486 	if (bp->flags & BNX2_FLAG_NO_WOL)
5487 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5488 	else if (bp->wol)
5489 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5490 	else
5491 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5492 
5493 	return bnx2_reset_chip(bp, reset_code);
5494 }
5495 
5496 static int
5497 bnx2_test_registers(struct bnx2 *bp)
5498 {
5499 	int ret;
5500 	int i, is_5709;
5501 	static const struct {
5502 		u16   offset;
5503 		u16   flags;
5504 #define BNX2_FL_NOT_5709	1
5505 		u32   rw_mask;
5506 		u32   ro_mask;
5507 	} reg_tbl[] = {
5508 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5509 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5510 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5511 
5512 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5513 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5514 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5515 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5516 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5517 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5518 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5519 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5520 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5521 
5522 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5523 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5524 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5525 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5526 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5527 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5528 
5529 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5530 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5531 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5532 
5533 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5534 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5535 
5536 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5537 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5538 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5539 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5540 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5541 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5542 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5543 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5544 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5545 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5546 
5547 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5548 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5549 
5550 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5551 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5552 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5553 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5554 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5555 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5556 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5557 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5558 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5559 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5560 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5561 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5562 
5563 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5564 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5565 
5566 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5567 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5568 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5569 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5570 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5571 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5572 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5573 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5574 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5575 
5576 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5577 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5578 
5579 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5580 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5581 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5582 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5583 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5584 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5585 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5586 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5587 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5588 
5589 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5590 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5591 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5592 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5593 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5594 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5595 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5596 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5597 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5598 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5599 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5600 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5601 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5602 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5603 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5604 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5605 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5606 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5607 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5608 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5609 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5610 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5611 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5612 
5613 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5614 	};
5615 
5616 	ret = 0;
5617 	is_5709 = 0;
5618 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5619 		is_5709 = 1;
5620 
5621 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5622 		u32 offset, rw_mask, ro_mask, save_val, val;
5623 		u16 flags = reg_tbl[i].flags;
5624 
5625 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5626 			continue;
5627 
5628 		offset = (u32) reg_tbl[i].offset;
5629 		rw_mask = reg_tbl[i].rw_mask;
5630 		ro_mask = reg_tbl[i].ro_mask;
5631 
5632 		save_val = readl(bp->regview + offset);
5633 
5634 		writel(0, bp->regview + offset);
5635 
5636 		val = readl(bp->regview + offset);
5637 		if ((val & rw_mask) != 0) {
5638 			goto reg_test_err;
5639 		}
5640 
5641 		if ((val & ro_mask) != (save_val & ro_mask)) {
5642 			goto reg_test_err;
5643 		}
5644 
5645 		writel(0xffffffff, bp->regview + offset);
5646 
5647 		val = readl(bp->regview + offset);
5648 		if ((val & rw_mask) != rw_mask) {
5649 			goto reg_test_err;
5650 		}
5651 
5652 		if ((val & ro_mask) != (save_val & ro_mask)) {
5653 			goto reg_test_err;
5654 		}
5655 
5656 		writel(save_val, bp->regview + offset);
5657 		continue;
5658 
5659 reg_test_err:
5660 		writel(save_val, bp->regview + offset);
5661 		ret = -ENODEV;
5662 		break;
5663 	}
5664 	return ret;
5665 }
5666 
5667 static int
5668 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5669 {
5670 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5671 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5672 	int i;
5673 
5674 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5675 		u32 offset;
5676 
5677 		for (offset = 0; offset < size; offset += 4) {
5678 
5679 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5680 
5681 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5682 				test_pattern[i]) {
5683 				return -ENODEV;
5684 			}
5685 		}
5686 	}
5687 	return 0;
5688 }
5689 
5690 static int
5691 bnx2_test_memory(struct bnx2 *bp)
5692 {
5693 	int ret = 0;
5694 	int i;
5695 	static struct mem_entry {
5696 		u32   offset;
5697 		u32   len;
5698 	} mem_tbl_5706[] = {
5699 		{ 0x60000,  0x4000 },
5700 		{ 0xa0000,  0x3000 },
5701 		{ 0xe0000,  0x4000 },
5702 		{ 0x120000, 0x4000 },
5703 		{ 0x1a0000, 0x4000 },
5704 		{ 0x160000, 0x4000 },
5705 		{ 0xffffffff, 0    },
5706 	},
5707 	mem_tbl_5709[] = {
5708 		{ 0x60000,  0x4000 },
5709 		{ 0xa0000,  0x3000 },
5710 		{ 0xe0000,  0x4000 },
5711 		{ 0x120000, 0x4000 },
5712 		{ 0x1a0000, 0x4000 },
5713 		{ 0xffffffff, 0    },
5714 	};
5715 	struct mem_entry *mem_tbl;
5716 
5717 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5718 		mem_tbl = mem_tbl_5709;
5719 	else
5720 		mem_tbl = mem_tbl_5706;
5721 
5722 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5723 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5724 			mem_tbl[i].len)) != 0) {
5725 			return ret;
5726 		}
5727 	}
5728 
5729 	return ret;
5730 }
5731 
5732 #define BNX2_MAC_LOOPBACK	0
5733 #define BNX2_PHY_LOOPBACK	1
5734 
5735 static int
5736 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5737 {
5738 	unsigned int pkt_size, num_pkts, i;
5739 	struct sk_buff *skb, *rx_skb;
5740 	unsigned char *packet;
5741 	u16 rx_start_idx, rx_idx;
5742 	dma_addr_t map;
5743 	struct tx_bd *txbd;
5744 	struct sw_bd *rx_buf;
5745 	struct l2_fhdr *rx_hdr;
5746 	int ret = -ENODEV;
5747 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5748 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5749 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5750 
5751 	tx_napi = bnapi;
5752 
5753 	txr = &tx_napi->tx_ring;
5754 	rxr = &bnapi->rx_ring;
5755 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5756 		bp->loopback = MAC_LOOPBACK;
5757 		bnx2_set_mac_loopback(bp);
5758 	}
5759 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5760 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5761 			return 0;
5762 
5763 		bp->loopback = PHY_LOOPBACK;
5764 		bnx2_set_phy_loopback(bp);
5765 	}
5766 	else
5767 		return -EINVAL;
5768 
5769 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5770 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5771 	if (!skb)
5772 		return -ENOMEM;
5773 	packet = skb_put(skb, pkt_size);
5774 	memcpy(packet, bp->dev->dev_addr, 6);
5775 	memset(packet + 6, 0x0, 8);
5776 	for (i = 14; i < pkt_size; i++)
5777 		packet[i] = (unsigned char) (i & 0xff);
5778 
5779 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5780 			     PCI_DMA_TODEVICE);
5781 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5782 		dev_kfree_skb(skb);
5783 		return -EIO;
5784 	}
5785 
5786 	REG_WR(bp, BNX2_HC_COMMAND,
5787 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5788 
5789 	REG_RD(bp, BNX2_HC_COMMAND);
5790 
5791 	udelay(5);
5792 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5793 
5794 	num_pkts = 0;
5795 
5796 	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5797 
5798 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5799 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5800 	txbd->tx_bd_mss_nbytes = pkt_size;
5801 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5802 
5803 	num_pkts++;
5804 	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5805 	txr->tx_prod_bseq += pkt_size;
5806 
5807 	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5808 	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5809 
5810 	udelay(100);
5811 
5812 	REG_WR(bp, BNX2_HC_COMMAND,
5813 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5814 
5815 	REG_RD(bp, BNX2_HC_COMMAND);
5816 
5817 	udelay(5);
5818 
5819 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5820 	dev_kfree_skb(skb);
5821 
5822 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5823 		goto loopback_test_done;
5824 
5825 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5826 	if (rx_idx != rx_start_idx + num_pkts) {
5827 		goto loopback_test_done;
5828 	}
5829 
5830 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5831 	rx_skb = rx_buf->skb;
5832 
5833 	rx_hdr = rx_buf->desc;
5834 	skb_reserve(rx_skb, BNX2_RX_OFFSET);
5835 
5836 	dma_sync_single_for_cpu(&bp->pdev->dev,
5837 		dma_unmap_addr(rx_buf, mapping),
5838 		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5839 
5840 	if (rx_hdr->l2_fhdr_status &
5841 		(L2_FHDR_ERRORS_BAD_CRC |
5842 		L2_FHDR_ERRORS_PHY_DECODE |
5843 		L2_FHDR_ERRORS_ALIGNMENT |
5844 		L2_FHDR_ERRORS_TOO_SHORT |
5845 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5846 
5847 		goto loopback_test_done;
5848 	}
5849 
5850 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5851 		goto loopback_test_done;
5852 	}
5853 
5854 	for (i = 14; i < pkt_size; i++) {
5855 		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5856 			goto loopback_test_done;
5857 		}
5858 	}
5859 
5860 	ret = 0;
5861 
5862 loopback_test_done:
5863 	bp->loopback = 0;
5864 	return ret;
5865 }
5866 
5867 #define BNX2_MAC_LOOPBACK_FAILED	1
5868 #define BNX2_PHY_LOOPBACK_FAILED	2
5869 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5870 					 BNX2_PHY_LOOPBACK_FAILED)
5871 
5872 static int
5873 bnx2_test_loopback(struct bnx2 *bp)
5874 {
5875 	int rc = 0;
5876 
5877 	if (!netif_running(bp->dev))
5878 		return BNX2_LOOPBACK_FAILED;
5879 
5880 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5881 	spin_lock_bh(&bp->phy_lock);
5882 	bnx2_init_phy(bp, 1);
5883 	spin_unlock_bh(&bp->phy_lock);
5884 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5885 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5886 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5887 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5888 	return rc;
5889 }
5890 
5891 #define NVRAM_SIZE 0x200
5892 #define CRC32_RESIDUAL 0xdebb20e3
5893 
5894 static int
5895 bnx2_test_nvram(struct bnx2 *bp)
5896 {
5897 	__be32 buf[NVRAM_SIZE / 4];
5898 	u8 *data = (u8 *) buf;
5899 	int rc = 0;
5900 	u32 magic, csum;
5901 
5902 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5903 		goto test_nvram_done;
5904 
5905         magic = be32_to_cpu(buf[0]);
5906 	if (magic != 0x669955aa) {
5907 		rc = -ENODEV;
5908 		goto test_nvram_done;
5909 	}
5910 
5911 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5912 		goto test_nvram_done;
5913 
5914 	csum = ether_crc_le(0x100, data);
5915 	if (csum != CRC32_RESIDUAL) {
5916 		rc = -ENODEV;
5917 		goto test_nvram_done;
5918 	}
5919 
5920 	csum = ether_crc_le(0x100, data + 0x100);
5921 	if (csum != CRC32_RESIDUAL) {
5922 		rc = -ENODEV;
5923 	}
5924 
5925 test_nvram_done:
5926 	return rc;
5927 }
5928 
5929 static int
5930 bnx2_test_link(struct bnx2 *bp)
5931 {
5932 	u32 bmsr;
5933 
5934 	if (!netif_running(bp->dev))
5935 		return -ENODEV;
5936 
5937 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5938 		if (bp->link_up)
5939 			return 0;
5940 		return -ENODEV;
5941 	}
5942 	spin_lock_bh(&bp->phy_lock);
5943 	bnx2_enable_bmsr1(bp);
5944 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5945 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5946 	bnx2_disable_bmsr1(bp);
5947 	spin_unlock_bh(&bp->phy_lock);
5948 
5949 	if (bmsr & BMSR_LSTATUS) {
5950 		return 0;
5951 	}
5952 	return -ENODEV;
5953 }
5954 
5955 static int
5956 bnx2_test_intr(struct bnx2 *bp)
5957 {
5958 	int i;
5959 	u16 status_idx;
5960 
5961 	if (!netif_running(bp->dev))
5962 		return -ENODEV;
5963 
5964 	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5965 
5966 	/* This register is not touched during run-time. */
5967 	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5968 	REG_RD(bp, BNX2_HC_COMMAND);
5969 
5970 	for (i = 0; i < 10; i++) {
5971 		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5972 			status_idx) {
5973 
5974 			break;
5975 		}
5976 
5977 		msleep_interruptible(10);
5978 	}
5979 	if (i < 10)
5980 		return 0;
5981 
5982 	return -ENODEV;
5983 }
5984 
5985 /* Determining link for parallel detection. */
5986 static int
5987 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5988 {
5989 	u32 mode_ctl, an_dbg, exp;
5990 
5991 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5992 		return 0;
5993 
5994 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5995 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5996 
5997 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5998 		return 0;
5999 
6000 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6001 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6002 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6003 
6004 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6005 		return 0;
6006 
6007 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6008 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6009 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6010 
6011 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6012 		return 0;
6013 
6014 	return 1;
6015 }
6016 
6017 static void
6018 bnx2_5706_serdes_timer(struct bnx2 *bp)
6019 {
6020 	int check_link = 1;
6021 
6022 	spin_lock(&bp->phy_lock);
6023 	if (bp->serdes_an_pending) {
6024 		bp->serdes_an_pending--;
6025 		check_link = 0;
6026 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6027 		u32 bmcr;
6028 
6029 		bp->current_interval = BNX2_TIMER_INTERVAL;
6030 
6031 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6032 
6033 		if (bmcr & BMCR_ANENABLE) {
6034 			if (bnx2_5706_serdes_has_link(bp)) {
6035 				bmcr &= ~BMCR_ANENABLE;
6036 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6038 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6039 			}
6040 		}
6041 	}
6042 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6043 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6044 		u32 phy2;
6045 
6046 		bnx2_write_phy(bp, 0x17, 0x0f01);
6047 		bnx2_read_phy(bp, 0x15, &phy2);
6048 		if (phy2 & 0x20) {
6049 			u32 bmcr;
6050 
6051 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6052 			bmcr |= BMCR_ANENABLE;
6053 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6054 
6055 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6056 		}
6057 	} else
6058 		bp->current_interval = BNX2_TIMER_INTERVAL;
6059 
6060 	if (check_link) {
6061 		u32 val;
6062 
6063 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6064 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6065 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6066 
6067 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6068 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6069 				bnx2_5706s_force_link_dn(bp, 1);
6070 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6071 			} else
6072 				bnx2_set_link(bp);
6073 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6074 			bnx2_set_link(bp);
6075 	}
6076 	spin_unlock(&bp->phy_lock);
6077 }
6078 
6079 static void
6080 bnx2_5708_serdes_timer(struct bnx2 *bp)
6081 {
6082 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6083 		return;
6084 
6085 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6086 		bp->serdes_an_pending = 0;
6087 		return;
6088 	}
6089 
6090 	spin_lock(&bp->phy_lock);
6091 	if (bp->serdes_an_pending)
6092 		bp->serdes_an_pending--;
6093 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6094 		u32 bmcr;
6095 
6096 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6097 		if (bmcr & BMCR_ANENABLE) {
6098 			bnx2_enable_forced_2g5(bp);
6099 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6100 		} else {
6101 			bnx2_disable_forced_2g5(bp);
6102 			bp->serdes_an_pending = 2;
6103 			bp->current_interval = BNX2_TIMER_INTERVAL;
6104 		}
6105 
6106 	} else
6107 		bp->current_interval = BNX2_TIMER_INTERVAL;
6108 
6109 	spin_unlock(&bp->phy_lock);
6110 }
6111 
6112 static void
6113 bnx2_timer(unsigned long data)
6114 {
6115 	struct bnx2 *bp = (struct bnx2 *) data;
6116 
6117 	if (!netif_running(bp->dev))
6118 		return;
6119 
6120 	if (atomic_read(&bp->intr_sem) != 0)
6121 		goto bnx2_restart_timer;
6122 
6123 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6124 	     BNX2_FLAG_USING_MSI)
6125 		bnx2_chk_missed_msi(bp);
6126 
6127 	bnx2_send_heart_beat(bp);
6128 
6129 	bp->stats_blk->stat_FwRxDrop =
6130 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6131 
6132 	/* workaround occasional corrupted counters */
6133 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6134 		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6135 					    BNX2_HC_COMMAND_STATS_NOW);
6136 
6137 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6138 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
6139 			bnx2_5706_serdes_timer(bp);
6140 		else
6141 			bnx2_5708_serdes_timer(bp);
6142 	}
6143 
6144 bnx2_restart_timer:
6145 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6146 }
6147 
6148 static int
6149 bnx2_request_irq(struct bnx2 *bp)
6150 {
6151 	unsigned long flags;
6152 	struct bnx2_irq *irq;
6153 	int rc = 0, i;
6154 
6155 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6156 		flags = 0;
6157 	else
6158 		flags = IRQF_SHARED;
6159 
6160 	for (i = 0; i < bp->irq_nvecs; i++) {
6161 		irq = &bp->irq_tbl[i];
6162 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6163 				 &bp->bnx2_napi[i]);
6164 		if (rc)
6165 			break;
6166 		irq->requested = 1;
6167 	}
6168 	return rc;
6169 }
6170 
6171 static void
6172 __bnx2_free_irq(struct bnx2 *bp)
6173 {
6174 	struct bnx2_irq *irq;
6175 	int i;
6176 
6177 	for (i = 0; i < bp->irq_nvecs; i++) {
6178 		irq = &bp->irq_tbl[i];
6179 		if (irq->requested)
6180 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6181 		irq->requested = 0;
6182 	}
6183 }
6184 
6185 static void
6186 bnx2_free_irq(struct bnx2 *bp)
6187 {
6188 
6189 	__bnx2_free_irq(bp);
6190 	if (bp->flags & BNX2_FLAG_USING_MSI)
6191 		pci_disable_msi(bp->pdev);
6192 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6193 		pci_disable_msix(bp->pdev);
6194 
6195 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6196 }
6197 
6198 static void
6199 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6200 {
6201 	int i, total_vecs, rc;
6202 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6203 	struct net_device *dev = bp->dev;
6204 	const int len = sizeof(bp->irq_tbl[0].name);
6205 
6206 	bnx2_setup_msix_tbl(bp);
6207 	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6208 	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6209 	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6210 
6211 	/*  Need to flush the previous three writes to ensure MSI-X
6212 	 *  is setup properly */
6213 	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6214 
6215 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6216 		msix_ent[i].entry = i;
6217 		msix_ent[i].vector = 0;
6218 	}
6219 
6220 	total_vecs = msix_vecs;
6221 #ifdef BCM_CNIC
6222 	total_vecs++;
6223 #endif
6224 	rc = -ENOSPC;
6225 	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6226 		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6227 		if (rc <= 0)
6228 			break;
6229 		if (rc > 0)
6230 			total_vecs = rc;
6231 	}
6232 
6233 	if (rc != 0)
6234 		return;
6235 
6236 	msix_vecs = total_vecs;
6237 #ifdef BCM_CNIC
6238 	msix_vecs--;
6239 #endif
6240 	bp->irq_nvecs = msix_vecs;
6241 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6242 	for (i = 0; i < total_vecs; i++) {
6243 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6244 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6245 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6246 	}
6247 }
6248 
6249 static int
6250 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6251 {
6252 	int cpus = num_online_cpus();
6253 	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6254 
6255 	bp->irq_tbl[0].handler = bnx2_interrupt;
6256 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6257 	bp->irq_nvecs = 1;
6258 	bp->irq_tbl[0].vector = bp->pdev->irq;
6259 
6260 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6261 		bnx2_enable_msix(bp, msix_vecs);
6262 
6263 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6264 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6265 		if (pci_enable_msi(bp->pdev) == 0) {
6266 			bp->flags |= BNX2_FLAG_USING_MSI;
6267 			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6268 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6269 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6270 			} else
6271 				bp->irq_tbl[0].handler = bnx2_msi;
6272 
6273 			bp->irq_tbl[0].vector = bp->pdev->irq;
6274 		}
6275 	}
6276 
6277 	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6278 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6279 
6280 	bp->num_rx_rings = bp->irq_nvecs;
6281 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6282 }
6283 
6284 /* Called with rtnl_lock */
6285 static int
6286 bnx2_open(struct net_device *dev)
6287 {
6288 	struct bnx2 *bp = netdev_priv(dev);
6289 	int rc;
6290 
6291 	rc = bnx2_request_firmware(bp);
6292 	if (rc < 0)
6293 		goto out;
6294 
6295 	netif_carrier_off(dev);
6296 
6297 	bnx2_set_power_state(bp, PCI_D0);
6298 	bnx2_disable_int(bp);
6299 
6300 	rc = bnx2_setup_int_mode(bp, disable_msi);
6301 	if (rc)
6302 		goto open_err;
6303 	bnx2_init_napi(bp);
6304 	bnx2_napi_enable(bp);
6305 	rc = bnx2_alloc_mem(bp);
6306 	if (rc)
6307 		goto open_err;
6308 
6309 	rc = bnx2_request_irq(bp);
6310 	if (rc)
6311 		goto open_err;
6312 
6313 	rc = bnx2_init_nic(bp, 1);
6314 	if (rc)
6315 		goto open_err;
6316 
6317 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6318 
6319 	atomic_set(&bp->intr_sem, 0);
6320 
6321 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6322 
6323 	bnx2_enable_int(bp);
6324 
6325 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6326 		/* Test MSI to make sure it is working
6327 		 * If MSI test fails, go back to INTx mode
6328 		 */
6329 		if (bnx2_test_intr(bp) != 0) {
6330 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6331 
6332 			bnx2_disable_int(bp);
6333 			bnx2_free_irq(bp);
6334 
6335 			bnx2_setup_int_mode(bp, 1);
6336 
6337 			rc = bnx2_init_nic(bp, 0);
6338 
6339 			if (!rc)
6340 				rc = bnx2_request_irq(bp);
6341 
6342 			if (rc) {
6343 				del_timer_sync(&bp->timer);
6344 				goto open_err;
6345 			}
6346 			bnx2_enable_int(bp);
6347 		}
6348 	}
6349 	if (bp->flags & BNX2_FLAG_USING_MSI)
6350 		netdev_info(dev, "using MSI\n");
6351 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6352 		netdev_info(dev, "using MSIX\n");
6353 
6354 	netif_tx_start_all_queues(dev);
6355 out:
6356 	return rc;
6357 
6358 open_err:
6359 	bnx2_napi_disable(bp);
6360 	bnx2_free_skbs(bp);
6361 	bnx2_free_irq(bp);
6362 	bnx2_free_mem(bp);
6363 	bnx2_del_napi(bp);
6364 	bnx2_release_firmware(bp);
6365 	goto out;
6366 }
6367 
6368 static void
6369 bnx2_reset_task(struct work_struct *work)
6370 {
6371 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6372 	int rc;
6373 
6374 	rtnl_lock();
6375 	if (!netif_running(bp->dev)) {
6376 		rtnl_unlock();
6377 		return;
6378 	}
6379 
6380 	bnx2_netif_stop(bp, true);
6381 
6382 	rc = bnx2_init_nic(bp, 1);
6383 	if (rc) {
6384 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6385 		bnx2_napi_enable(bp);
6386 		dev_close(bp->dev);
6387 		rtnl_unlock();
6388 		return;
6389 	}
6390 
6391 	atomic_set(&bp->intr_sem, 1);
6392 	bnx2_netif_start(bp, true);
6393 	rtnl_unlock();
6394 }
6395 
6396 static void
6397 bnx2_dump_state(struct bnx2 *bp)
6398 {
6399 	struct net_device *dev = bp->dev;
6400 	u32 val1, val2;
6401 
6402 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6403 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6404 		   atomic_read(&bp->intr_sem), val1);
6405 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6406 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6407 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6408 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6409 		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
6410 		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
6411 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6412 		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6413 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6414 		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6415 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6416 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6417 			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6418 }
6419 
6420 static void
6421 bnx2_tx_timeout(struct net_device *dev)
6422 {
6423 	struct bnx2 *bp = netdev_priv(dev);
6424 
6425 	bnx2_dump_state(bp);
6426 	bnx2_dump_mcp_state(bp);
6427 
6428 	/* This allows the netif to be shutdown gracefully before resetting */
6429 	schedule_work(&bp->reset_task);
6430 }
6431 
6432 /* Called with netif_tx_lock.
6433  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6434  * netif_wake_queue().
6435  */
6436 static netdev_tx_t
6437 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6438 {
6439 	struct bnx2 *bp = netdev_priv(dev);
6440 	dma_addr_t mapping;
6441 	struct tx_bd *txbd;
6442 	struct sw_tx_bd *tx_buf;
6443 	u32 len, vlan_tag_flags, last_frag, mss;
6444 	u16 prod, ring_prod;
6445 	int i;
6446 	struct bnx2_napi *bnapi;
6447 	struct bnx2_tx_ring_info *txr;
6448 	struct netdev_queue *txq;
6449 
6450 	/*  Determine which tx ring we will be placed on */
6451 	i = skb_get_queue_mapping(skb);
6452 	bnapi = &bp->bnx2_napi[i];
6453 	txr = &bnapi->tx_ring;
6454 	txq = netdev_get_tx_queue(dev, i);
6455 
6456 	if (unlikely(bnx2_tx_avail(bp, txr) <
6457 	    (skb_shinfo(skb)->nr_frags + 1))) {
6458 		netif_tx_stop_queue(txq);
6459 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6460 
6461 		return NETDEV_TX_BUSY;
6462 	}
6463 	len = skb_headlen(skb);
6464 	prod = txr->tx_prod;
6465 	ring_prod = TX_RING_IDX(prod);
6466 
6467 	vlan_tag_flags = 0;
6468 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6469 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6470 	}
6471 
6472 	if (vlan_tx_tag_present(skb)) {
6473 		vlan_tag_flags |=
6474 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6475 	}
6476 
6477 	if ((mss = skb_shinfo(skb)->gso_size)) {
6478 		u32 tcp_opt_len;
6479 		struct iphdr *iph;
6480 
6481 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6482 
6483 		tcp_opt_len = tcp_optlen(skb);
6484 
6485 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6486 			u32 tcp_off = skb_transport_offset(skb) -
6487 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6488 
6489 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6490 					  TX_BD_FLAGS_SW_FLAGS;
6491 			if (likely(tcp_off == 0))
6492 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6493 			else {
6494 				tcp_off >>= 3;
6495 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6496 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6497 						  ((tcp_off & 0x10) <<
6498 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6499 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6500 			}
6501 		} else {
6502 			iph = ip_hdr(skb);
6503 			if (tcp_opt_len || (iph->ihl > 5)) {
6504 				vlan_tag_flags |= ((iph->ihl - 5) +
6505 						   (tcp_opt_len >> 2)) << 8;
6506 			}
6507 		}
6508 	} else
6509 		mss = 0;
6510 
6511 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6512 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6513 		dev_kfree_skb(skb);
6514 		return NETDEV_TX_OK;
6515 	}
6516 
6517 	tx_buf = &txr->tx_buf_ring[ring_prod];
6518 	tx_buf->skb = skb;
6519 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6520 
6521 	txbd = &txr->tx_desc_ring[ring_prod];
6522 
6523 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6524 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6525 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6526 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6527 
6528 	last_frag = skb_shinfo(skb)->nr_frags;
6529 	tx_buf->nr_frags = last_frag;
6530 	tx_buf->is_gso = skb_is_gso(skb);
6531 
6532 	for (i = 0; i < last_frag; i++) {
6533 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6534 
6535 		prod = NEXT_TX_BD(prod);
6536 		ring_prod = TX_RING_IDX(prod);
6537 		txbd = &txr->tx_desc_ring[ring_prod];
6538 
6539 		len = skb_frag_size(frag);
6540 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6541 					   DMA_TO_DEVICE);
6542 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6543 			goto dma_error;
6544 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6545 				   mapping);
6546 
6547 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6548 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6549 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6550 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6551 
6552 	}
6553 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6554 
6555 	prod = NEXT_TX_BD(prod);
6556 	txr->tx_prod_bseq += skb->len;
6557 
6558 	REG_WR16(bp, txr->tx_bidx_addr, prod);
6559 	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6560 
6561 	mmiowb();
6562 
6563 	txr->tx_prod = prod;
6564 
6565 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6566 		netif_tx_stop_queue(txq);
6567 
6568 		/* netif_tx_stop_queue() must be done before checking
6569 		 * tx index in bnx2_tx_avail() below, because in
6570 		 * bnx2_tx_int(), we update tx index before checking for
6571 		 * netif_tx_queue_stopped().
6572 		 */
6573 		smp_mb();
6574 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6575 			netif_tx_wake_queue(txq);
6576 	}
6577 
6578 	return NETDEV_TX_OK;
6579 dma_error:
6580 	/* save value of frag that failed */
6581 	last_frag = i;
6582 
6583 	/* start back at beginning and unmap skb */
6584 	prod = txr->tx_prod;
6585 	ring_prod = TX_RING_IDX(prod);
6586 	tx_buf = &txr->tx_buf_ring[ring_prod];
6587 	tx_buf->skb = NULL;
6588 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6589 			 skb_headlen(skb), PCI_DMA_TODEVICE);
6590 
6591 	/* unmap remaining mapped pages */
6592 	for (i = 0; i < last_frag; i++) {
6593 		prod = NEXT_TX_BD(prod);
6594 		ring_prod = TX_RING_IDX(prod);
6595 		tx_buf = &txr->tx_buf_ring[ring_prod];
6596 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6597 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6598 			       PCI_DMA_TODEVICE);
6599 	}
6600 
6601 	dev_kfree_skb(skb);
6602 	return NETDEV_TX_OK;
6603 }
6604 
6605 /* Called with rtnl_lock */
6606 static int
6607 bnx2_close(struct net_device *dev)
6608 {
6609 	struct bnx2 *bp = netdev_priv(dev);
6610 
6611 	bnx2_disable_int_sync(bp);
6612 	bnx2_napi_disable(bp);
6613 	del_timer_sync(&bp->timer);
6614 	bnx2_shutdown_chip(bp);
6615 	bnx2_free_irq(bp);
6616 	bnx2_free_skbs(bp);
6617 	bnx2_free_mem(bp);
6618 	bnx2_del_napi(bp);
6619 	bp->link_up = 0;
6620 	netif_carrier_off(bp->dev);
6621 	bnx2_set_power_state(bp, PCI_D3hot);
6622 	return 0;
6623 }
6624 
6625 static void
6626 bnx2_save_stats(struct bnx2 *bp)
6627 {
6628 	u32 *hw_stats = (u32 *) bp->stats_blk;
6629 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6630 	int i;
6631 
6632 	/* The 1st 10 counters are 64-bit counters */
6633 	for (i = 0; i < 20; i += 2) {
6634 		u32 hi;
6635 		u64 lo;
6636 
6637 		hi = temp_stats[i] + hw_stats[i];
6638 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6639 		if (lo > 0xffffffff)
6640 			hi++;
6641 		temp_stats[i] = hi;
6642 		temp_stats[i + 1] = lo & 0xffffffff;
6643 	}
6644 
6645 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6646 		temp_stats[i] += hw_stats[i];
6647 }
6648 
6649 #define GET_64BIT_NET_STATS64(ctr)		\
6650 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6651 
6652 #define GET_64BIT_NET_STATS(ctr)				\
6653 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6654 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6655 
6656 #define GET_32BIT_NET_STATS(ctr)				\
6657 	(unsigned long) (bp->stats_blk->ctr +			\
6658 			 bp->temp_stats_blk->ctr)
6659 
6660 static struct rtnl_link_stats64 *
6661 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6662 {
6663 	struct bnx2 *bp = netdev_priv(dev);
6664 
6665 	if (bp->stats_blk == NULL)
6666 		return net_stats;
6667 
6668 	net_stats->rx_packets =
6669 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6670 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6671 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6672 
6673 	net_stats->tx_packets =
6674 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6675 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6676 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6677 
6678 	net_stats->rx_bytes =
6679 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6680 
6681 	net_stats->tx_bytes =
6682 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6683 
6684 	net_stats->multicast =
6685 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6686 
6687 	net_stats->collisions =
6688 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6689 
6690 	net_stats->rx_length_errors =
6691 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6692 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6693 
6694 	net_stats->rx_over_errors =
6695 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6696 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6697 
6698 	net_stats->rx_frame_errors =
6699 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6700 
6701 	net_stats->rx_crc_errors =
6702 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6703 
6704 	net_stats->rx_errors = net_stats->rx_length_errors +
6705 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6706 		net_stats->rx_crc_errors;
6707 
6708 	net_stats->tx_aborted_errors =
6709 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6710 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6711 
6712 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6713 	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
6714 		net_stats->tx_carrier_errors = 0;
6715 	else {
6716 		net_stats->tx_carrier_errors =
6717 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6718 	}
6719 
6720 	net_stats->tx_errors =
6721 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6722 		net_stats->tx_aborted_errors +
6723 		net_stats->tx_carrier_errors;
6724 
6725 	net_stats->rx_missed_errors =
6726 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6727 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6728 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6729 
6730 	return net_stats;
6731 }
6732 
6733 /* All ethtool functions called with rtnl_lock */
6734 
6735 static int
6736 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6737 {
6738 	struct bnx2 *bp = netdev_priv(dev);
6739 	int support_serdes = 0, support_copper = 0;
6740 
6741 	cmd->supported = SUPPORTED_Autoneg;
6742 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6743 		support_serdes = 1;
6744 		support_copper = 1;
6745 	} else if (bp->phy_port == PORT_FIBRE)
6746 		support_serdes = 1;
6747 	else
6748 		support_copper = 1;
6749 
6750 	if (support_serdes) {
6751 		cmd->supported |= SUPPORTED_1000baseT_Full |
6752 			SUPPORTED_FIBRE;
6753 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6754 			cmd->supported |= SUPPORTED_2500baseX_Full;
6755 
6756 	}
6757 	if (support_copper) {
6758 		cmd->supported |= SUPPORTED_10baseT_Half |
6759 			SUPPORTED_10baseT_Full |
6760 			SUPPORTED_100baseT_Half |
6761 			SUPPORTED_100baseT_Full |
6762 			SUPPORTED_1000baseT_Full |
6763 			SUPPORTED_TP;
6764 
6765 	}
6766 
6767 	spin_lock_bh(&bp->phy_lock);
6768 	cmd->port = bp->phy_port;
6769 	cmd->advertising = bp->advertising;
6770 
6771 	if (bp->autoneg & AUTONEG_SPEED) {
6772 		cmd->autoneg = AUTONEG_ENABLE;
6773 	} else {
6774 		cmd->autoneg = AUTONEG_DISABLE;
6775 	}
6776 
6777 	if (netif_carrier_ok(dev)) {
6778 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6779 		cmd->duplex = bp->duplex;
6780 	}
6781 	else {
6782 		ethtool_cmd_speed_set(cmd, -1);
6783 		cmd->duplex = -1;
6784 	}
6785 	spin_unlock_bh(&bp->phy_lock);
6786 
6787 	cmd->transceiver = XCVR_INTERNAL;
6788 	cmd->phy_address = bp->phy_addr;
6789 
6790 	return 0;
6791 }
6792 
6793 static int
6794 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6795 {
6796 	struct bnx2 *bp = netdev_priv(dev);
6797 	u8 autoneg = bp->autoneg;
6798 	u8 req_duplex = bp->req_duplex;
6799 	u16 req_line_speed = bp->req_line_speed;
6800 	u32 advertising = bp->advertising;
6801 	int err = -EINVAL;
6802 
6803 	spin_lock_bh(&bp->phy_lock);
6804 
6805 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6806 		goto err_out_unlock;
6807 
6808 	if (cmd->port != bp->phy_port &&
6809 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6810 		goto err_out_unlock;
6811 
6812 	/* If device is down, we can store the settings only if the user
6813 	 * is setting the currently active port.
6814 	 */
6815 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6816 		goto err_out_unlock;
6817 
6818 	if (cmd->autoneg == AUTONEG_ENABLE) {
6819 		autoneg |= AUTONEG_SPEED;
6820 
6821 		advertising = cmd->advertising;
6822 		if (cmd->port == PORT_TP) {
6823 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6824 			if (!advertising)
6825 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6826 		} else {
6827 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6828 			if (!advertising)
6829 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6830 		}
6831 		advertising |= ADVERTISED_Autoneg;
6832 	}
6833 	else {
6834 		u32 speed = ethtool_cmd_speed(cmd);
6835 		if (cmd->port == PORT_FIBRE) {
6836 			if ((speed != SPEED_1000 &&
6837 			     speed != SPEED_2500) ||
6838 			    (cmd->duplex != DUPLEX_FULL))
6839 				goto err_out_unlock;
6840 
6841 			if (speed == SPEED_2500 &&
6842 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6843 				goto err_out_unlock;
6844 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6845 			goto err_out_unlock;
6846 
6847 		autoneg &= ~AUTONEG_SPEED;
6848 		req_line_speed = speed;
6849 		req_duplex = cmd->duplex;
6850 		advertising = 0;
6851 	}
6852 
6853 	bp->autoneg = autoneg;
6854 	bp->advertising = advertising;
6855 	bp->req_line_speed = req_line_speed;
6856 	bp->req_duplex = req_duplex;
6857 
6858 	err = 0;
6859 	/* If device is down, the new settings will be picked up when it is
6860 	 * brought up.
6861 	 */
6862 	if (netif_running(dev))
6863 		err = bnx2_setup_phy(bp, cmd->port);
6864 
6865 err_out_unlock:
6866 	spin_unlock_bh(&bp->phy_lock);
6867 
6868 	return err;
6869 }
6870 
6871 static void
6872 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6873 {
6874 	struct bnx2 *bp = netdev_priv(dev);
6875 
6876 	strcpy(info->driver, DRV_MODULE_NAME);
6877 	strcpy(info->version, DRV_MODULE_VERSION);
6878 	strcpy(info->bus_info, pci_name(bp->pdev));
6879 	strcpy(info->fw_version, bp->fw_version);
6880 }
6881 
6882 #define BNX2_REGDUMP_LEN		(32 * 1024)
6883 
6884 static int
6885 bnx2_get_regs_len(struct net_device *dev)
6886 {
6887 	return BNX2_REGDUMP_LEN;
6888 }
6889 
6890 static void
6891 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6892 {
6893 	u32 *p = _p, i, offset;
6894 	u8 *orig_p = _p;
6895 	struct bnx2 *bp = netdev_priv(dev);
6896 	static const u32 reg_boundaries[] = {
6897 		0x0000, 0x0098, 0x0400, 0x045c,
6898 		0x0800, 0x0880, 0x0c00, 0x0c10,
6899 		0x0c30, 0x0d08, 0x1000, 0x101c,
6900 		0x1040, 0x1048, 0x1080, 0x10a4,
6901 		0x1400, 0x1490, 0x1498, 0x14f0,
6902 		0x1500, 0x155c, 0x1580, 0x15dc,
6903 		0x1600, 0x1658, 0x1680, 0x16d8,
6904 		0x1800, 0x1820, 0x1840, 0x1854,
6905 		0x1880, 0x1894, 0x1900, 0x1984,
6906 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6907 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
6908 		0x2000, 0x2030, 0x23c0, 0x2400,
6909 		0x2800, 0x2820, 0x2830, 0x2850,
6910 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
6911 		0x3c00, 0x3c94, 0x4000, 0x4010,
6912 		0x4080, 0x4090, 0x43c0, 0x4458,
6913 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
6914 		0x4fc0, 0x5010, 0x53c0, 0x5444,
6915 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
6916 		0x5fc0, 0x6000, 0x6400, 0x6428,
6917 		0x6800, 0x6848, 0x684c, 0x6860,
6918 		0x6888, 0x6910, 0x8000
6919 	};
6920 
6921 	regs->version = 0;
6922 
6923 	memset(p, 0, BNX2_REGDUMP_LEN);
6924 
6925 	if (!netif_running(bp->dev))
6926 		return;
6927 
6928 	i = 0;
6929 	offset = reg_boundaries[0];
6930 	p += offset;
6931 	while (offset < BNX2_REGDUMP_LEN) {
6932 		*p++ = REG_RD(bp, offset);
6933 		offset += 4;
6934 		if (offset == reg_boundaries[i + 1]) {
6935 			offset = reg_boundaries[i + 2];
6936 			p = (u32 *) (orig_p + offset);
6937 			i += 2;
6938 		}
6939 	}
6940 }
6941 
6942 static void
6943 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6944 {
6945 	struct bnx2 *bp = netdev_priv(dev);
6946 
6947 	if (bp->flags & BNX2_FLAG_NO_WOL) {
6948 		wol->supported = 0;
6949 		wol->wolopts = 0;
6950 	}
6951 	else {
6952 		wol->supported = WAKE_MAGIC;
6953 		if (bp->wol)
6954 			wol->wolopts = WAKE_MAGIC;
6955 		else
6956 			wol->wolopts = 0;
6957 	}
6958 	memset(&wol->sopass, 0, sizeof(wol->sopass));
6959 }
6960 
6961 static int
6962 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6963 {
6964 	struct bnx2 *bp = netdev_priv(dev);
6965 
6966 	if (wol->wolopts & ~WAKE_MAGIC)
6967 		return -EINVAL;
6968 
6969 	if (wol->wolopts & WAKE_MAGIC) {
6970 		if (bp->flags & BNX2_FLAG_NO_WOL)
6971 			return -EINVAL;
6972 
6973 		bp->wol = 1;
6974 	}
6975 	else {
6976 		bp->wol = 0;
6977 	}
6978 	return 0;
6979 }
6980 
6981 static int
6982 bnx2_nway_reset(struct net_device *dev)
6983 {
6984 	struct bnx2 *bp = netdev_priv(dev);
6985 	u32 bmcr;
6986 
6987 	if (!netif_running(dev))
6988 		return -EAGAIN;
6989 
6990 	if (!(bp->autoneg & AUTONEG_SPEED)) {
6991 		return -EINVAL;
6992 	}
6993 
6994 	spin_lock_bh(&bp->phy_lock);
6995 
6996 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6997 		int rc;
6998 
6999 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7000 		spin_unlock_bh(&bp->phy_lock);
7001 		return rc;
7002 	}
7003 
7004 	/* Force a link down visible on the other side */
7005 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7006 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7007 		spin_unlock_bh(&bp->phy_lock);
7008 
7009 		msleep(20);
7010 
7011 		spin_lock_bh(&bp->phy_lock);
7012 
7013 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7014 		bp->serdes_an_pending = 1;
7015 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7016 	}
7017 
7018 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7019 	bmcr &= ~BMCR_LOOPBACK;
7020 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7021 
7022 	spin_unlock_bh(&bp->phy_lock);
7023 
7024 	return 0;
7025 }
7026 
7027 static u32
7028 bnx2_get_link(struct net_device *dev)
7029 {
7030 	struct bnx2 *bp = netdev_priv(dev);
7031 
7032 	return bp->link_up;
7033 }
7034 
7035 static int
7036 bnx2_get_eeprom_len(struct net_device *dev)
7037 {
7038 	struct bnx2 *bp = netdev_priv(dev);
7039 
7040 	if (bp->flash_info == NULL)
7041 		return 0;
7042 
7043 	return (int) bp->flash_size;
7044 }
7045 
7046 static int
7047 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7048 		u8 *eebuf)
7049 {
7050 	struct bnx2 *bp = netdev_priv(dev);
7051 	int rc;
7052 
7053 	if (!netif_running(dev))
7054 		return -EAGAIN;
7055 
7056 	/* parameters already validated in ethtool_get_eeprom */
7057 
7058 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7059 
7060 	return rc;
7061 }
7062 
7063 static int
7064 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7065 		u8 *eebuf)
7066 {
7067 	struct bnx2 *bp = netdev_priv(dev);
7068 	int rc;
7069 
7070 	if (!netif_running(dev))
7071 		return -EAGAIN;
7072 
7073 	/* parameters already validated in ethtool_set_eeprom */
7074 
7075 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7076 
7077 	return rc;
7078 }
7079 
7080 static int
7081 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7082 {
7083 	struct bnx2 *bp = netdev_priv(dev);
7084 
7085 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7086 
7087 	coal->rx_coalesce_usecs = bp->rx_ticks;
7088 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7089 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7090 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7091 
7092 	coal->tx_coalesce_usecs = bp->tx_ticks;
7093 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7094 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7095 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7096 
7097 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7098 
7099 	return 0;
7100 }
7101 
7102 static int
7103 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7104 {
7105 	struct bnx2 *bp = netdev_priv(dev);
7106 
7107 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7108 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7109 
7110 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7111 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7112 
7113 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7114 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7115 
7116 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7117 	if (bp->rx_quick_cons_trip_int > 0xff)
7118 		bp->rx_quick_cons_trip_int = 0xff;
7119 
7120 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7121 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7122 
7123 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7124 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7125 
7126 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7127 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7128 
7129 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7130 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7131 		0xff;
7132 
7133 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7134 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7135 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7136 			bp->stats_ticks = USEC_PER_SEC;
7137 	}
7138 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7139 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7140 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7141 
7142 	if (netif_running(bp->dev)) {
7143 		bnx2_netif_stop(bp, true);
7144 		bnx2_init_nic(bp, 0);
7145 		bnx2_netif_start(bp, true);
7146 	}
7147 
7148 	return 0;
7149 }
7150 
7151 static void
7152 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7153 {
7154 	struct bnx2 *bp = netdev_priv(dev);
7155 
7156 	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7157 	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7158 
7159 	ering->rx_pending = bp->rx_ring_size;
7160 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7161 
7162 	ering->tx_max_pending = MAX_TX_DESC_CNT;
7163 	ering->tx_pending = bp->tx_ring_size;
7164 }
7165 
7166 static int
7167 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7168 {
7169 	if (netif_running(bp->dev)) {
7170 		/* Reset will erase chipset stats; save them */
7171 		bnx2_save_stats(bp);
7172 
7173 		bnx2_netif_stop(bp, true);
7174 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7175 		__bnx2_free_irq(bp);
7176 		bnx2_free_skbs(bp);
7177 		bnx2_free_mem(bp);
7178 	}
7179 
7180 	bnx2_set_rx_ring_size(bp, rx);
7181 	bp->tx_ring_size = tx;
7182 
7183 	if (netif_running(bp->dev)) {
7184 		int rc;
7185 
7186 		rc = bnx2_alloc_mem(bp);
7187 		if (!rc)
7188 			rc = bnx2_request_irq(bp);
7189 
7190 		if (!rc)
7191 			rc = bnx2_init_nic(bp, 0);
7192 
7193 		if (rc) {
7194 			bnx2_napi_enable(bp);
7195 			dev_close(bp->dev);
7196 			return rc;
7197 		}
7198 #ifdef BCM_CNIC
7199 		mutex_lock(&bp->cnic_lock);
7200 		/* Let cnic know about the new status block. */
7201 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7202 			bnx2_setup_cnic_irq_info(bp);
7203 		mutex_unlock(&bp->cnic_lock);
7204 #endif
7205 		bnx2_netif_start(bp, true);
7206 	}
7207 	return 0;
7208 }
7209 
7210 static int
7211 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7212 {
7213 	struct bnx2 *bp = netdev_priv(dev);
7214 	int rc;
7215 
7216 	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7217 		(ering->tx_pending > MAX_TX_DESC_CNT) ||
7218 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7219 
7220 		return -EINVAL;
7221 	}
7222 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7223 	return rc;
7224 }
7225 
7226 static void
7227 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7228 {
7229 	struct bnx2 *bp = netdev_priv(dev);
7230 
7231 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7232 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7233 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7234 }
7235 
7236 static int
7237 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7238 {
7239 	struct bnx2 *bp = netdev_priv(dev);
7240 
7241 	bp->req_flow_ctrl = 0;
7242 	if (epause->rx_pause)
7243 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7244 	if (epause->tx_pause)
7245 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7246 
7247 	if (epause->autoneg) {
7248 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7249 	}
7250 	else {
7251 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7252 	}
7253 
7254 	if (netif_running(dev)) {
7255 		spin_lock_bh(&bp->phy_lock);
7256 		bnx2_setup_phy(bp, bp->phy_port);
7257 		spin_unlock_bh(&bp->phy_lock);
7258 	}
7259 
7260 	return 0;
7261 }
7262 
7263 static struct {
7264 	char string[ETH_GSTRING_LEN];
7265 } bnx2_stats_str_arr[] = {
7266 	{ "rx_bytes" },
7267 	{ "rx_error_bytes" },
7268 	{ "tx_bytes" },
7269 	{ "tx_error_bytes" },
7270 	{ "rx_ucast_packets" },
7271 	{ "rx_mcast_packets" },
7272 	{ "rx_bcast_packets" },
7273 	{ "tx_ucast_packets" },
7274 	{ "tx_mcast_packets" },
7275 	{ "tx_bcast_packets" },
7276 	{ "tx_mac_errors" },
7277 	{ "tx_carrier_errors" },
7278 	{ "rx_crc_errors" },
7279 	{ "rx_align_errors" },
7280 	{ "tx_single_collisions" },
7281 	{ "tx_multi_collisions" },
7282 	{ "tx_deferred" },
7283 	{ "tx_excess_collisions" },
7284 	{ "tx_late_collisions" },
7285 	{ "tx_total_collisions" },
7286 	{ "rx_fragments" },
7287 	{ "rx_jabbers" },
7288 	{ "rx_undersize_packets" },
7289 	{ "rx_oversize_packets" },
7290 	{ "rx_64_byte_packets" },
7291 	{ "rx_65_to_127_byte_packets" },
7292 	{ "rx_128_to_255_byte_packets" },
7293 	{ "rx_256_to_511_byte_packets" },
7294 	{ "rx_512_to_1023_byte_packets" },
7295 	{ "rx_1024_to_1522_byte_packets" },
7296 	{ "rx_1523_to_9022_byte_packets" },
7297 	{ "tx_64_byte_packets" },
7298 	{ "tx_65_to_127_byte_packets" },
7299 	{ "tx_128_to_255_byte_packets" },
7300 	{ "tx_256_to_511_byte_packets" },
7301 	{ "tx_512_to_1023_byte_packets" },
7302 	{ "tx_1024_to_1522_byte_packets" },
7303 	{ "tx_1523_to_9022_byte_packets" },
7304 	{ "rx_xon_frames" },
7305 	{ "rx_xoff_frames" },
7306 	{ "tx_xon_frames" },
7307 	{ "tx_xoff_frames" },
7308 	{ "rx_mac_ctrl_frames" },
7309 	{ "rx_filtered_packets" },
7310 	{ "rx_ftq_discards" },
7311 	{ "rx_discards" },
7312 	{ "rx_fw_discards" },
7313 };
7314 
7315 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7316 			sizeof(bnx2_stats_str_arr[0]))
7317 
7318 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7319 
7320 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7321     STATS_OFFSET32(stat_IfHCInOctets_hi),
7322     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7323     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7324     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7325     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7326     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7327     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7328     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7329     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7330     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7331     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7332     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7333     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7334     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7335     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7336     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7337     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7338     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7339     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7340     STATS_OFFSET32(stat_EtherStatsCollisions),
7341     STATS_OFFSET32(stat_EtherStatsFragments),
7342     STATS_OFFSET32(stat_EtherStatsJabbers),
7343     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7344     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7345     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7346     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7347     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7348     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7349     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7350     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7351     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7352     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7353     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7354     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7355     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7356     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7357     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7358     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7359     STATS_OFFSET32(stat_XonPauseFramesReceived),
7360     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7361     STATS_OFFSET32(stat_OutXonSent),
7362     STATS_OFFSET32(stat_OutXoffSent),
7363     STATS_OFFSET32(stat_MacControlFramesReceived),
7364     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7365     STATS_OFFSET32(stat_IfInFTQDiscards),
7366     STATS_OFFSET32(stat_IfInMBUFDiscards),
7367     STATS_OFFSET32(stat_FwRxDrop),
7368 };
7369 
7370 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7371  * skipped because of errata.
7372  */
7373 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7374 	8,0,8,8,8,8,8,8,8,8,
7375 	4,0,4,4,4,4,4,4,4,4,
7376 	4,4,4,4,4,4,4,4,4,4,
7377 	4,4,4,4,4,4,4,4,4,4,
7378 	4,4,4,4,4,4,4,
7379 };
7380 
7381 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7382 	8,0,8,8,8,8,8,8,8,8,
7383 	4,4,4,4,4,4,4,4,4,4,
7384 	4,4,4,4,4,4,4,4,4,4,
7385 	4,4,4,4,4,4,4,4,4,4,
7386 	4,4,4,4,4,4,4,
7387 };
7388 
7389 #define BNX2_NUM_TESTS 6
7390 
7391 static struct {
7392 	char string[ETH_GSTRING_LEN];
7393 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7394 	{ "register_test (offline)" },
7395 	{ "memory_test (offline)" },
7396 	{ "loopback_test (offline)" },
7397 	{ "nvram_test (online)" },
7398 	{ "interrupt_test (online)" },
7399 	{ "link_test (online)" },
7400 };
7401 
7402 static int
7403 bnx2_get_sset_count(struct net_device *dev, int sset)
7404 {
7405 	switch (sset) {
7406 	case ETH_SS_TEST:
7407 		return BNX2_NUM_TESTS;
7408 	case ETH_SS_STATS:
7409 		return BNX2_NUM_STATS;
7410 	default:
7411 		return -EOPNOTSUPP;
7412 	}
7413 }
7414 
7415 static void
7416 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7417 {
7418 	struct bnx2 *bp = netdev_priv(dev);
7419 
7420 	bnx2_set_power_state(bp, PCI_D0);
7421 
7422 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7423 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7424 		int i;
7425 
7426 		bnx2_netif_stop(bp, true);
7427 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7428 		bnx2_free_skbs(bp);
7429 
7430 		if (bnx2_test_registers(bp) != 0) {
7431 			buf[0] = 1;
7432 			etest->flags |= ETH_TEST_FL_FAILED;
7433 		}
7434 		if (bnx2_test_memory(bp) != 0) {
7435 			buf[1] = 1;
7436 			etest->flags |= ETH_TEST_FL_FAILED;
7437 		}
7438 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7439 			etest->flags |= ETH_TEST_FL_FAILED;
7440 
7441 		if (!netif_running(bp->dev))
7442 			bnx2_shutdown_chip(bp);
7443 		else {
7444 			bnx2_init_nic(bp, 1);
7445 			bnx2_netif_start(bp, true);
7446 		}
7447 
7448 		/* wait for link up */
7449 		for (i = 0; i < 7; i++) {
7450 			if (bp->link_up)
7451 				break;
7452 			msleep_interruptible(1000);
7453 		}
7454 	}
7455 
7456 	if (bnx2_test_nvram(bp) != 0) {
7457 		buf[3] = 1;
7458 		etest->flags |= ETH_TEST_FL_FAILED;
7459 	}
7460 	if (bnx2_test_intr(bp) != 0) {
7461 		buf[4] = 1;
7462 		etest->flags |= ETH_TEST_FL_FAILED;
7463 	}
7464 
7465 	if (bnx2_test_link(bp) != 0) {
7466 		buf[5] = 1;
7467 		etest->flags |= ETH_TEST_FL_FAILED;
7468 
7469 	}
7470 	if (!netif_running(bp->dev))
7471 		bnx2_set_power_state(bp, PCI_D3hot);
7472 }
7473 
7474 static void
7475 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7476 {
7477 	switch (stringset) {
7478 	case ETH_SS_STATS:
7479 		memcpy(buf, bnx2_stats_str_arr,
7480 			sizeof(bnx2_stats_str_arr));
7481 		break;
7482 	case ETH_SS_TEST:
7483 		memcpy(buf, bnx2_tests_str_arr,
7484 			sizeof(bnx2_tests_str_arr));
7485 		break;
7486 	}
7487 }
7488 
7489 static void
7490 bnx2_get_ethtool_stats(struct net_device *dev,
7491 		struct ethtool_stats *stats, u64 *buf)
7492 {
7493 	struct bnx2 *bp = netdev_priv(dev);
7494 	int i;
7495 	u32 *hw_stats = (u32 *) bp->stats_blk;
7496 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7497 	u8 *stats_len_arr = NULL;
7498 
7499 	if (hw_stats == NULL) {
7500 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7501 		return;
7502 	}
7503 
7504 	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7505 	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7506 	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7507 	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
7508 		stats_len_arr = bnx2_5706_stats_len_arr;
7509 	else
7510 		stats_len_arr = bnx2_5708_stats_len_arr;
7511 
7512 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7513 		unsigned long offset;
7514 
7515 		if (stats_len_arr[i] == 0) {
7516 			/* skip this counter */
7517 			buf[i] = 0;
7518 			continue;
7519 		}
7520 
7521 		offset = bnx2_stats_offset_arr[i];
7522 		if (stats_len_arr[i] == 4) {
7523 			/* 4-byte counter */
7524 			buf[i] = (u64) *(hw_stats + offset) +
7525 				 *(temp_stats + offset);
7526 			continue;
7527 		}
7528 		/* 8-byte counter */
7529 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7530 			 *(hw_stats + offset + 1) +
7531 			 (((u64) *(temp_stats + offset)) << 32) +
7532 			 *(temp_stats + offset + 1);
7533 	}
7534 }
7535 
7536 static int
7537 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7538 {
7539 	struct bnx2 *bp = netdev_priv(dev);
7540 
7541 	switch (state) {
7542 	case ETHTOOL_ID_ACTIVE:
7543 		bnx2_set_power_state(bp, PCI_D0);
7544 
7545 		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7546 		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7547 		return 1;	/* cycle on/off once per second */
7548 
7549 	case ETHTOOL_ID_ON:
7550 		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7551 		       BNX2_EMAC_LED_1000MB_OVERRIDE |
7552 		       BNX2_EMAC_LED_100MB_OVERRIDE |
7553 		       BNX2_EMAC_LED_10MB_OVERRIDE |
7554 		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7555 		       BNX2_EMAC_LED_TRAFFIC);
7556 		break;
7557 
7558 	case ETHTOOL_ID_OFF:
7559 		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7560 		break;
7561 
7562 	case ETHTOOL_ID_INACTIVE:
7563 		REG_WR(bp, BNX2_EMAC_LED, 0);
7564 		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7565 
7566 		if (!netif_running(dev))
7567 			bnx2_set_power_state(bp, PCI_D3hot);
7568 		break;
7569 	}
7570 
7571 	return 0;
7572 }
7573 
7574 static u32
7575 bnx2_fix_features(struct net_device *dev, u32 features)
7576 {
7577 	struct bnx2 *bp = netdev_priv(dev);
7578 
7579 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7580 		features |= NETIF_F_HW_VLAN_RX;
7581 
7582 	return features;
7583 }
7584 
7585 static int
7586 bnx2_set_features(struct net_device *dev, u32 features)
7587 {
7588 	struct bnx2 *bp = netdev_priv(dev);
7589 
7590 	/* TSO with VLAN tag won't work with current firmware */
7591 	if (features & NETIF_F_HW_VLAN_TX)
7592 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7593 	else
7594 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7595 
7596 	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7597 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7598 	    netif_running(dev)) {
7599 		bnx2_netif_stop(bp, false);
7600 		dev->features = features;
7601 		bnx2_set_rx_mode(dev);
7602 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7603 		bnx2_netif_start(bp, false);
7604 		return 1;
7605 	}
7606 
7607 	return 0;
7608 }
7609 
7610 static const struct ethtool_ops bnx2_ethtool_ops = {
7611 	.get_settings		= bnx2_get_settings,
7612 	.set_settings		= bnx2_set_settings,
7613 	.get_drvinfo		= bnx2_get_drvinfo,
7614 	.get_regs_len		= bnx2_get_regs_len,
7615 	.get_regs		= bnx2_get_regs,
7616 	.get_wol		= bnx2_get_wol,
7617 	.set_wol		= bnx2_set_wol,
7618 	.nway_reset		= bnx2_nway_reset,
7619 	.get_link		= bnx2_get_link,
7620 	.get_eeprom_len		= bnx2_get_eeprom_len,
7621 	.get_eeprom		= bnx2_get_eeprom,
7622 	.set_eeprom		= bnx2_set_eeprom,
7623 	.get_coalesce		= bnx2_get_coalesce,
7624 	.set_coalesce		= bnx2_set_coalesce,
7625 	.get_ringparam		= bnx2_get_ringparam,
7626 	.set_ringparam		= bnx2_set_ringparam,
7627 	.get_pauseparam		= bnx2_get_pauseparam,
7628 	.set_pauseparam		= bnx2_set_pauseparam,
7629 	.self_test		= bnx2_self_test,
7630 	.get_strings		= bnx2_get_strings,
7631 	.set_phys_id		= bnx2_set_phys_id,
7632 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7633 	.get_sset_count		= bnx2_get_sset_count,
7634 };
7635 
7636 /* Called with rtnl_lock */
7637 static int
7638 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7639 {
7640 	struct mii_ioctl_data *data = if_mii(ifr);
7641 	struct bnx2 *bp = netdev_priv(dev);
7642 	int err;
7643 
7644 	switch(cmd) {
7645 	case SIOCGMIIPHY:
7646 		data->phy_id = bp->phy_addr;
7647 
7648 		/* fallthru */
7649 	case SIOCGMIIREG: {
7650 		u32 mii_regval;
7651 
7652 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7653 			return -EOPNOTSUPP;
7654 
7655 		if (!netif_running(dev))
7656 			return -EAGAIN;
7657 
7658 		spin_lock_bh(&bp->phy_lock);
7659 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7660 		spin_unlock_bh(&bp->phy_lock);
7661 
7662 		data->val_out = mii_regval;
7663 
7664 		return err;
7665 	}
7666 
7667 	case SIOCSMIIREG:
7668 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7669 			return -EOPNOTSUPP;
7670 
7671 		if (!netif_running(dev))
7672 			return -EAGAIN;
7673 
7674 		spin_lock_bh(&bp->phy_lock);
7675 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7676 		spin_unlock_bh(&bp->phy_lock);
7677 
7678 		return err;
7679 
7680 	default:
7681 		/* do nothing */
7682 		break;
7683 	}
7684 	return -EOPNOTSUPP;
7685 }
7686 
7687 /* Called with rtnl_lock */
7688 static int
7689 bnx2_change_mac_addr(struct net_device *dev, void *p)
7690 {
7691 	struct sockaddr *addr = p;
7692 	struct bnx2 *bp = netdev_priv(dev);
7693 
7694 	if (!is_valid_ether_addr(addr->sa_data))
7695 		return -EINVAL;
7696 
7697 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7698 	if (netif_running(dev))
7699 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7700 
7701 	return 0;
7702 }
7703 
7704 /* Called with rtnl_lock */
7705 static int
7706 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7707 {
7708 	struct bnx2 *bp = netdev_priv(dev);
7709 
7710 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7711 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7712 		return -EINVAL;
7713 
7714 	dev->mtu = new_mtu;
7715 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7716 }
7717 
7718 #ifdef CONFIG_NET_POLL_CONTROLLER
7719 static void
7720 poll_bnx2(struct net_device *dev)
7721 {
7722 	struct bnx2 *bp = netdev_priv(dev);
7723 	int i;
7724 
7725 	for (i = 0; i < bp->irq_nvecs; i++) {
7726 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7727 
7728 		disable_irq(irq->vector);
7729 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7730 		enable_irq(irq->vector);
7731 	}
7732 }
7733 #endif
7734 
7735 static void __devinit
7736 bnx2_get_5709_media(struct bnx2 *bp)
7737 {
7738 	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7739 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7740 	u32 strap;
7741 
7742 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7743 		return;
7744 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7745 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7746 		return;
7747 	}
7748 
7749 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7750 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7751 	else
7752 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7753 
7754 	if (PCI_FUNC(bp->pdev->devfn) == 0) {
7755 		switch (strap) {
7756 		case 0x4:
7757 		case 0x5:
7758 		case 0x6:
7759 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7760 			return;
7761 		}
7762 	} else {
7763 		switch (strap) {
7764 		case 0x1:
7765 		case 0x2:
7766 		case 0x4:
7767 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7768 			return;
7769 		}
7770 	}
7771 }
7772 
7773 static void __devinit
7774 bnx2_get_pci_speed(struct bnx2 *bp)
7775 {
7776 	u32 reg;
7777 
7778 	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7779 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7780 		u32 clkreg;
7781 
7782 		bp->flags |= BNX2_FLAG_PCIX;
7783 
7784 		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7785 
7786 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7787 		switch (clkreg) {
7788 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7789 			bp->bus_speed_mhz = 133;
7790 			break;
7791 
7792 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7793 			bp->bus_speed_mhz = 100;
7794 			break;
7795 
7796 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7797 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7798 			bp->bus_speed_mhz = 66;
7799 			break;
7800 
7801 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7802 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7803 			bp->bus_speed_mhz = 50;
7804 			break;
7805 
7806 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7807 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7808 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7809 			bp->bus_speed_mhz = 33;
7810 			break;
7811 		}
7812 	}
7813 	else {
7814 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7815 			bp->bus_speed_mhz = 66;
7816 		else
7817 			bp->bus_speed_mhz = 33;
7818 	}
7819 
7820 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7821 		bp->flags |= BNX2_FLAG_PCI_32BIT;
7822 
7823 }
7824 
7825 static void __devinit
7826 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7827 {
7828 	int rc, i, j;
7829 	u8 *data;
7830 	unsigned int block_end, rosize, len;
7831 
7832 #define BNX2_VPD_NVRAM_OFFSET	0x300
7833 #define BNX2_VPD_LEN		128
7834 #define BNX2_MAX_VER_SLEN	30
7835 
7836 	data = kmalloc(256, GFP_KERNEL);
7837 	if (!data)
7838 		return;
7839 
7840 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7841 			     BNX2_VPD_LEN);
7842 	if (rc)
7843 		goto vpd_done;
7844 
7845 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7846 		data[i] = data[i + BNX2_VPD_LEN + 3];
7847 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7848 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7849 		data[i + 3] = data[i + BNX2_VPD_LEN];
7850 	}
7851 
7852 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7853 	if (i < 0)
7854 		goto vpd_done;
7855 
7856 	rosize = pci_vpd_lrdt_size(&data[i]);
7857 	i += PCI_VPD_LRDT_TAG_SIZE;
7858 	block_end = i + rosize;
7859 
7860 	if (block_end > BNX2_VPD_LEN)
7861 		goto vpd_done;
7862 
7863 	j = pci_vpd_find_info_keyword(data, i, rosize,
7864 				      PCI_VPD_RO_KEYWORD_MFR_ID);
7865 	if (j < 0)
7866 		goto vpd_done;
7867 
7868 	len = pci_vpd_info_field_size(&data[j]);
7869 
7870 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7871 	if (j + len > block_end || len != 4 ||
7872 	    memcmp(&data[j], "1028", 4))
7873 		goto vpd_done;
7874 
7875 	j = pci_vpd_find_info_keyword(data, i, rosize,
7876 				      PCI_VPD_RO_KEYWORD_VENDOR0);
7877 	if (j < 0)
7878 		goto vpd_done;
7879 
7880 	len = pci_vpd_info_field_size(&data[j]);
7881 
7882 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7883 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7884 		goto vpd_done;
7885 
7886 	memcpy(bp->fw_version, &data[j], len);
7887 	bp->fw_version[len] = ' ';
7888 
7889 vpd_done:
7890 	kfree(data);
7891 }
7892 
7893 static int __devinit
7894 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7895 {
7896 	struct bnx2 *bp;
7897 	unsigned long mem_len;
7898 	int rc, i, j;
7899 	u32 reg;
7900 	u64 dma_mask, persist_dma_mask;
7901 	int err;
7902 
7903 	SET_NETDEV_DEV(dev, &pdev->dev);
7904 	bp = netdev_priv(dev);
7905 
7906 	bp->flags = 0;
7907 	bp->phy_flags = 0;
7908 
7909 	bp->temp_stats_blk =
7910 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7911 
7912 	if (bp->temp_stats_blk == NULL) {
7913 		rc = -ENOMEM;
7914 		goto err_out;
7915 	}
7916 
7917 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
7918 	rc = pci_enable_device(pdev);
7919 	if (rc) {
7920 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7921 		goto err_out;
7922 	}
7923 
7924 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7925 		dev_err(&pdev->dev,
7926 			"Cannot find PCI device base address, aborting\n");
7927 		rc = -ENODEV;
7928 		goto err_out_disable;
7929 	}
7930 
7931 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7932 	if (rc) {
7933 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7934 		goto err_out_disable;
7935 	}
7936 
7937 	pci_set_master(pdev);
7938 
7939 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7940 	if (bp->pm_cap == 0) {
7941 		dev_err(&pdev->dev,
7942 			"Cannot find power management capability, aborting\n");
7943 		rc = -EIO;
7944 		goto err_out_release;
7945 	}
7946 
7947 	bp->dev = dev;
7948 	bp->pdev = pdev;
7949 
7950 	spin_lock_init(&bp->phy_lock);
7951 	spin_lock_init(&bp->indirect_lock);
7952 #ifdef BCM_CNIC
7953 	mutex_init(&bp->cnic_lock);
7954 #endif
7955 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
7956 
7957 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7958 	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7959 	dev->mem_end = dev->mem_start + mem_len;
7960 	dev->irq = pdev->irq;
7961 
7962 	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7963 
7964 	if (!bp->regview) {
7965 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7966 		rc = -ENOMEM;
7967 		goto err_out_release;
7968 	}
7969 
7970 	bnx2_set_power_state(bp, PCI_D0);
7971 
7972 	/* Configure byte swap and enable write to the reg_window registers.
7973 	 * Rely on CPU to do target byte swapping on big endian systems
7974 	 * The chip's target access swapping will not swap all accesses
7975 	 */
7976 	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7977 		   BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7978 		   BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7979 
7980 	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7981 
7982 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7983 		if (!pci_is_pcie(pdev)) {
7984 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
7985 			rc = -EIO;
7986 			goto err_out_unmap;
7987 		}
7988 		bp->flags |= BNX2_FLAG_PCIE;
7989 		if (CHIP_REV(bp) == CHIP_REV_Ax)
7990 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7991 
7992 		/* AER (Advanced Error Reporting) hooks */
7993 		err = pci_enable_pcie_error_reporting(pdev);
7994 		if (!err)
7995 			bp->flags |= BNX2_FLAG_AER_ENABLED;
7996 
7997 	} else {
7998 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7999 		if (bp->pcix_cap == 0) {
8000 			dev_err(&pdev->dev,
8001 				"Cannot find PCIX capability, aborting\n");
8002 			rc = -EIO;
8003 			goto err_out_unmap;
8004 		}
8005 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8006 	}
8007 
8008 	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8009 		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8010 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8011 	}
8012 
8013 	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8014 		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8015 			bp->flags |= BNX2_FLAG_MSI_CAP;
8016 	}
8017 
8018 	/* 5708 cannot support DMA addresses > 40-bit.  */
8019 	if (CHIP_NUM(bp) == CHIP_NUM_5708)
8020 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8021 	else
8022 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8023 
8024 	/* Configure DMA attributes. */
8025 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8026 		dev->features |= NETIF_F_HIGHDMA;
8027 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8028 		if (rc) {
8029 			dev_err(&pdev->dev,
8030 				"pci_set_consistent_dma_mask failed, aborting\n");
8031 			goto err_out_unmap;
8032 		}
8033 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8034 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8035 		goto err_out_unmap;
8036 	}
8037 
8038 	if (!(bp->flags & BNX2_FLAG_PCIE))
8039 		bnx2_get_pci_speed(bp);
8040 
8041 	/* 5706A0 may falsely detect SERR and PERR. */
8042 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8043 		reg = REG_RD(bp, PCI_COMMAND);
8044 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8045 		REG_WR(bp, PCI_COMMAND, reg);
8046 	}
8047 	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8048 		!(bp->flags & BNX2_FLAG_PCIX)) {
8049 
8050 		dev_err(&pdev->dev,
8051 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8052 		goto err_out_unmap;
8053 	}
8054 
8055 	bnx2_init_nvram(bp);
8056 
8057 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8058 
8059 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8060 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8061 		u32 off = PCI_FUNC(pdev->devfn) << 2;
8062 
8063 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8064 	} else
8065 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8066 
8067 	/* Get the permanent MAC address.  First we need to make sure the
8068 	 * firmware is actually running.
8069 	 */
8070 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8071 
8072 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8073 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8074 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8075 		rc = -ENODEV;
8076 		goto err_out_unmap;
8077 	}
8078 
8079 	bnx2_read_vpd_fw_ver(bp);
8080 
8081 	j = strlen(bp->fw_version);
8082 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8083 	for (i = 0; i < 3 && j < 24; i++) {
8084 		u8 num, k, skip0;
8085 
8086 		if (i == 0) {
8087 			bp->fw_version[j++] = 'b';
8088 			bp->fw_version[j++] = 'c';
8089 			bp->fw_version[j++] = ' ';
8090 		}
8091 		num = (u8) (reg >> (24 - (i * 8)));
8092 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8093 			if (num >= k || !skip0 || k == 1) {
8094 				bp->fw_version[j++] = (num / k) + '0';
8095 				skip0 = 0;
8096 			}
8097 		}
8098 		if (i != 2)
8099 			bp->fw_version[j++] = '.';
8100 	}
8101 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8102 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8103 		bp->wol = 1;
8104 
8105 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8106 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8107 
8108 		for (i = 0; i < 30; i++) {
8109 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8110 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8111 				break;
8112 			msleep(10);
8113 		}
8114 	}
8115 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8116 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8117 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8118 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8119 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8120 
8121 		if (j < 32)
8122 			bp->fw_version[j++] = ' ';
8123 		for (i = 0; i < 3 && j < 28; i++) {
8124 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8125 			reg = be32_to_cpu(reg);
8126 			memcpy(&bp->fw_version[j], &reg, 4);
8127 			j += 4;
8128 		}
8129 	}
8130 
8131 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8132 	bp->mac_addr[0] = (u8) (reg >> 8);
8133 	bp->mac_addr[1] = (u8) reg;
8134 
8135 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8136 	bp->mac_addr[2] = (u8) (reg >> 24);
8137 	bp->mac_addr[3] = (u8) (reg >> 16);
8138 	bp->mac_addr[4] = (u8) (reg >> 8);
8139 	bp->mac_addr[5] = (u8) reg;
8140 
8141 	bp->tx_ring_size = MAX_TX_DESC_CNT;
8142 	bnx2_set_rx_ring_size(bp, 255);
8143 
8144 	bp->tx_quick_cons_trip_int = 2;
8145 	bp->tx_quick_cons_trip = 20;
8146 	bp->tx_ticks_int = 18;
8147 	bp->tx_ticks = 80;
8148 
8149 	bp->rx_quick_cons_trip_int = 2;
8150 	bp->rx_quick_cons_trip = 12;
8151 	bp->rx_ticks_int = 18;
8152 	bp->rx_ticks = 18;
8153 
8154 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8155 
8156 	bp->current_interval = BNX2_TIMER_INTERVAL;
8157 
8158 	bp->phy_addr = 1;
8159 
8160 	/* Disable WOL support if we are running on a SERDES chip. */
8161 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8162 		bnx2_get_5709_media(bp);
8163 	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8164 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8165 
8166 	bp->phy_port = PORT_TP;
8167 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8168 		bp->phy_port = PORT_FIBRE;
8169 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8170 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8171 			bp->flags |= BNX2_FLAG_NO_WOL;
8172 			bp->wol = 0;
8173 		}
8174 		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8175 			/* Don't do parallel detect on this board because of
8176 			 * some board problems.  The link will not go down
8177 			 * if we do parallel detect.
8178 			 */
8179 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8180 			    pdev->subsystem_device == 0x310c)
8181 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8182 		} else {
8183 			bp->phy_addr = 2;
8184 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8185 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8186 		}
8187 	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8188 		   CHIP_NUM(bp) == CHIP_NUM_5708)
8189 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8190 	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8191 		 (CHIP_REV(bp) == CHIP_REV_Ax ||
8192 		  CHIP_REV(bp) == CHIP_REV_Bx))
8193 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8194 
8195 	bnx2_init_fw_cap(bp);
8196 
8197 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8198 	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8199 	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8200 	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8201 		bp->flags |= BNX2_FLAG_NO_WOL;
8202 		bp->wol = 0;
8203 	}
8204 
8205 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8206 		bp->tx_quick_cons_trip_int =
8207 			bp->tx_quick_cons_trip;
8208 		bp->tx_ticks_int = bp->tx_ticks;
8209 		bp->rx_quick_cons_trip_int =
8210 			bp->rx_quick_cons_trip;
8211 		bp->rx_ticks_int = bp->rx_ticks;
8212 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8213 		bp->com_ticks_int = bp->com_ticks;
8214 		bp->cmd_ticks_int = bp->cmd_ticks;
8215 	}
8216 
8217 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8218 	 *
8219 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8220 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8221 	 * but causes problems on the AMD 8132 which will eventually stop
8222 	 * responding after a while.
8223 	 *
8224 	 * AMD believes this incompatibility is unique to the 5706, and
8225 	 * prefers to locally disable MSI rather than globally disabling it.
8226 	 */
8227 	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8228 		struct pci_dev *amd_8132 = NULL;
8229 
8230 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8231 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8232 						  amd_8132))) {
8233 
8234 			if (amd_8132->revision >= 0x10 &&
8235 			    amd_8132->revision <= 0x13) {
8236 				disable_msi = 1;
8237 				pci_dev_put(amd_8132);
8238 				break;
8239 			}
8240 		}
8241 	}
8242 
8243 	bnx2_set_default_link(bp);
8244 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8245 
8246 	init_timer(&bp->timer);
8247 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8248 	bp->timer.data = (unsigned long) bp;
8249 	bp->timer.function = bnx2_timer;
8250 
8251 #ifdef BCM_CNIC
8252 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8253 		bp->cnic_eth_dev.max_iscsi_conn =
8254 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8255 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8256 #endif
8257 	pci_save_state(pdev);
8258 
8259 	return 0;
8260 
8261 err_out_unmap:
8262 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8263 		pci_disable_pcie_error_reporting(pdev);
8264 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8265 	}
8266 
8267 	if (bp->regview) {
8268 		iounmap(bp->regview);
8269 		bp->regview = NULL;
8270 	}
8271 
8272 err_out_release:
8273 	pci_release_regions(pdev);
8274 
8275 err_out_disable:
8276 	pci_disable_device(pdev);
8277 	pci_set_drvdata(pdev, NULL);
8278 
8279 err_out:
8280 	return rc;
8281 }
8282 
8283 static char * __devinit
8284 bnx2_bus_string(struct bnx2 *bp, char *str)
8285 {
8286 	char *s = str;
8287 
8288 	if (bp->flags & BNX2_FLAG_PCIE) {
8289 		s += sprintf(s, "PCI Express");
8290 	} else {
8291 		s += sprintf(s, "PCI");
8292 		if (bp->flags & BNX2_FLAG_PCIX)
8293 			s += sprintf(s, "-X");
8294 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8295 			s += sprintf(s, " 32-bit");
8296 		else
8297 			s += sprintf(s, " 64-bit");
8298 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8299 	}
8300 	return str;
8301 }
8302 
8303 static void
8304 bnx2_del_napi(struct bnx2 *bp)
8305 {
8306 	int i;
8307 
8308 	for (i = 0; i < bp->irq_nvecs; i++)
8309 		netif_napi_del(&bp->bnx2_napi[i].napi);
8310 }
8311 
8312 static void
8313 bnx2_init_napi(struct bnx2 *bp)
8314 {
8315 	int i;
8316 
8317 	for (i = 0; i < bp->irq_nvecs; i++) {
8318 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8319 		int (*poll)(struct napi_struct *, int);
8320 
8321 		if (i == 0)
8322 			poll = bnx2_poll;
8323 		else
8324 			poll = bnx2_poll_msix;
8325 
8326 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8327 		bnapi->bp = bp;
8328 	}
8329 }
8330 
8331 static const struct net_device_ops bnx2_netdev_ops = {
8332 	.ndo_open		= bnx2_open,
8333 	.ndo_start_xmit		= bnx2_start_xmit,
8334 	.ndo_stop		= bnx2_close,
8335 	.ndo_get_stats64	= bnx2_get_stats64,
8336 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8337 	.ndo_do_ioctl		= bnx2_ioctl,
8338 	.ndo_validate_addr	= eth_validate_addr,
8339 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8340 	.ndo_change_mtu		= bnx2_change_mtu,
8341 	.ndo_fix_features	= bnx2_fix_features,
8342 	.ndo_set_features	= bnx2_set_features,
8343 	.ndo_tx_timeout		= bnx2_tx_timeout,
8344 #ifdef CONFIG_NET_POLL_CONTROLLER
8345 	.ndo_poll_controller	= poll_bnx2,
8346 #endif
8347 };
8348 
8349 static int __devinit
8350 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8351 {
8352 	static int version_printed = 0;
8353 	struct net_device *dev = NULL;
8354 	struct bnx2 *bp;
8355 	int rc;
8356 	char str[40];
8357 
8358 	if (version_printed++ == 0)
8359 		pr_info("%s", version);
8360 
8361 	/* dev zeroed in init_etherdev */
8362 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8363 
8364 	if (!dev)
8365 		return -ENOMEM;
8366 
8367 	rc = bnx2_init_board(pdev, dev);
8368 	if (rc < 0) {
8369 		free_netdev(dev);
8370 		return rc;
8371 	}
8372 
8373 	dev->netdev_ops = &bnx2_netdev_ops;
8374 	dev->watchdog_timeo = TX_TIMEOUT;
8375 	dev->ethtool_ops = &bnx2_ethtool_ops;
8376 
8377 	bp = netdev_priv(dev);
8378 
8379 	pci_set_drvdata(pdev, dev);
8380 
8381 	memcpy(dev->dev_addr, bp->mac_addr, 6);
8382 	memcpy(dev->perm_addr, bp->mac_addr, 6);
8383 
8384 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8385 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8386 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8387 
8388 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8389 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8390 
8391 	dev->vlan_features = dev->hw_features;
8392 	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8393 	dev->features |= dev->hw_features;
8394 	dev->priv_flags |= IFF_UNICAST_FLT;
8395 
8396 	if ((rc = register_netdev(dev))) {
8397 		dev_err(&pdev->dev, "Cannot register net device\n");
8398 		goto error;
8399 	}
8400 
8401 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8402 		    board_info[ent->driver_data].name,
8403 		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8404 		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
8405 		    bnx2_bus_string(bp, str),
8406 		    dev->base_addr,
8407 		    bp->pdev->irq, dev->dev_addr);
8408 
8409 	return 0;
8410 
8411 error:
8412 	if (bp->regview)
8413 		iounmap(bp->regview);
8414 	pci_release_regions(pdev);
8415 	pci_disable_device(pdev);
8416 	pci_set_drvdata(pdev, NULL);
8417 	free_netdev(dev);
8418 	return rc;
8419 }
8420 
8421 static void __devexit
8422 bnx2_remove_one(struct pci_dev *pdev)
8423 {
8424 	struct net_device *dev = pci_get_drvdata(pdev);
8425 	struct bnx2 *bp = netdev_priv(dev);
8426 
8427 	unregister_netdev(dev);
8428 
8429 	del_timer_sync(&bp->timer);
8430 	cancel_work_sync(&bp->reset_task);
8431 
8432 	if (bp->regview)
8433 		iounmap(bp->regview);
8434 
8435 	kfree(bp->temp_stats_blk);
8436 
8437 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8438 		pci_disable_pcie_error_reporting(pdev);
8439 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8440 	}
8441 
8442 	bnx2_release_firmware(bp);
8443 
8444 	free_netdev(dev);
8445 
8446 	pci_release_regions(pdev);
8447 	pci_disable_device(pdev);
8448 	pci_set_drvdata(pdev, NULL);
8449 }
8450 
8451 static int
8452 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8453 {
8454 	struct net_device *dev = pci_get_drvdata(pdev);
8455 	struct bnx2 *bp = netdev_priv(dev);
8456 
8457 	/* PCI register 4 needs to be saved whether netif_running() or not.
8458 	 * MSI address and data need to be saved if using MSI and
8459 	 * netif_running().
8460 	 */
8461 	pci_save_state(pdev);
8462 	if (!netif_running(dev))
8463 		return 0;
8464 
8465 	cancel_work_sync(&bp->reset_task);
8466 	bnx2_netif_stop(bp, true);
8467 	netif_device_detach(dev);
8468 	del_timer_sync(&bp->timer);
8469 	bnx2_shutdown_chip(bp);
8470 	bnx2_free_skbs(bp);
8471 	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8472 	return 0;
8473 }
8474 
8475 static int
8476 bnx2_resume(struct pci_dev *pdev)
8477 {
8478 	struct net_device *dev = pci_get_drvdata(pdev);
8479 	struct bnx2 *bp = netdev_priv(dev);
8480 
8481 	pci_restore_state(pdev);
8482 	if (!netif_running(dev))
8483 		return 0;
8484 
8485 	bnx2_set_power_state(bp, PCI_D0);
8486 	netif_device_attach(dev);
8487 	bnx2_init_nic(bp, 1);
8488 	bnx2_netif_start(bp, true);
8489 	return 0;
8490 }
8491 
8492 /**
8493  * bnx2_io_error_detected - called when PCI error is detected
8494  * @pdev: Pointer to PCI device
8495  * @state: The current pci connection state
8496  *
8497  * This function is called after a PCI bus error affecting
8498  * this device has been detected.
8499  */
8500 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8501 					       pci_channel_state_t state)
8502 {
8503 	struct net_device *dev = pci_get_drvdata(pdev);
8504 	struct bnx2 *bp = netdev_priv(dev);
8505 
8506 	rtnl_lock();
8507 	netif_device_detach(dev);
8508 
8509 	if (state == pci_channel_io_perm_failure) {
8510 		rtnl_unlock();
8511 		return PCI_ERS_RESULT_DISCONNECT;
8512 	}
8513 
8514 	if (netif_running(dev)) {
8515 		bnx2_netif_stop(bp, true);
8516 		del_timer_sync(&bp->timer);
8517 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8518 	}
8519 
8520 	pci_disable_device(pdev);
8521 	rtnl_unlock();
8522 
8523 	/* Request a slot slot reset. */
8524 	return PCI_ERS_RESULT_NEED_RESET;
8525 }
8526 
8527 /**
8528  * bnx2_io_slot_reset - called after the pci bus has been reset.
8529  * @pdev: Pointer to PCI device
8530  *
8531  * Restart the card from scratch, as if from a cold-boot.
8532  */
8533 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8534 {
8535 	struct net_device *dev = pci_get_drvdata(pdev);
8536 	struct bnx2 *bp = netdev_priv(dev);
8537 	pci_ers_result_t result;
8538 	int err;
8539 
8540 	rtnl_lock();
8541 	if (pci_enable_device(pdev)) {
8542 		dev_err(&pdev->dev,
8543 			"Cannot re-enable PCI device after reset\n");
8544 		result = PCI_ERS_RESULT_DISCONNECT;
8545 	} else {
8546 		pci_set_master(pdev);
8547 		pci_restore_state(pdev);
8548 		pci_save_state(pdev);
8549 
8550 		if (netif_running(dev)) {
8551 			bnx2_set_power_state(bp, PCI_D0);
8552 			bnx2_init_nic(bp, 1);
8553 		}
8554 		result = PCI_ERS_RESULT_RECOVERED;
8555 	}
8556 	rtnl_unlock();
8557 
8558 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8559 		return result;
8560 
8561 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8562 	if (err) {
8563 		dev_err(&pdev->dev,
8564 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8565 			 err); /* non-fatal, continue */
8566 	}
8567 
8568 	return result;
8569 }
8570 
8571 /**
8572  * bnx2_io_resume - called when traffic can start flowing again.
8573  * @pdev: Pointer to PCI device
8574  *
8575  * This callback is called when the error recovery driver tells us that
8576  * its OK to resume normal operation.
8577  */
8578 static void bnx2_io_resume(struct pci_dev *pdev)
8579 {
8580 	struct net_device *dev = pci_get_drvdata(pdev);
8581 	struct bnx2 *bp = netdev_priv(dev);
8582 
8583 	rtnl_lock();
8584 	if (netif_running(dev))
8585 		bnx2_netif_start(bp, true);
8586 
8587 	netif_device_attach(dev);
8588 	rtnl_unlock();
8589 }
8590 
8591 static struct pci_error_handlers bnx2_err_handler = {
8592 	.error_detected	= bnx2_io_error_detected,
8593 	.slot_reset	= bnx2_io_slot_reset,
8594 	.resume		= bnx2_io_resume,
8595 };
8596 
8597 static struct pci_driver bnx2_pci_driver = {
8598 	.name		= DRV_MODULE_NAME,
8599 	.id_table	= bnx2_pci_tbl,
8600 	.probe		= bnx2_init_one,
8601 	.remove		= __devexit_p(bnx2_remove_one),
8602 	.suspend	= bnx2_suspend,
8603 	.resume		= bnx2_resume,
8604 	.err_handler	= &bnx2_err_handler,
8605 };
8606 
8607 static int __init bnx2_init(void)
8608 {
8609 	return pci_register_driver(&bnx2_pci_driver);
8610 }
8611 
8612 static void __exit bnx2_cleanup(void)
8613 {
8614 	pci_unregister_driver(&bnx2_pci_driver);
8615 }
8616 
8617 module_init(bnx2_init);
8618 module_exit(bnx2_cleanup);
8619 
8620 
8621 
8622