/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.1"
#define DRV_MODULE_RELDATE	"Dec 18, 2011"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

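/* NVRAM device table.  The first field of each entry is compared
 * against the flash strapping value reported by the chip to select
 * the attached flash/EEPROM part and its access parameters.
 */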
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

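/* Return the number of free tx descriptors, accounting for the one
 * ring entry that must always remain unused.
 */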
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

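/* Indirect register access: the target offset is posted to the PCI
 * config window address register, then the data is read from (or
 * written to) the window data register.  indirect_lock serializes
 * users of the shared window.
 */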
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

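/* Write a value into on-chip context memory.  The 5709 posts the
 * write through BNX2_CTX_CTX_CTRL and polls briefly for the
 * WRITE_REQ bit to clear; earlier chips take the address/data pair
 * directly.
 */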
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

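/* MDIO access to the PHY.  Auto-polling is temporarily turned off
 * around the transaction, the command is posted to EMAC_MDIO_COMM,
 * and completion is detected by polling the START_BUSY bit (up to
 * 50 iterations of 10 usec).
 */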
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

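/* Build the pause (flow control) advertisement bits from the
 * requested flow control settings, using the 1000BASE-X encoding
 * for SerDes PHYs and the standard copper encoding otherwise.
 */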
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639 	u32 adv = 0;
1640 
1641 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643 
1644 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645 			adv = ADVERTISE_1000XPAUSE;
1646 		}
1647 		else {
1648 			adv = ADVERTISE_PAUSE_CAP;
1649 		}
1650 	}
1651 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653 			adv = ADVERTISE_1000XPSE_ASYM;
1654 		}
1655 		else {
1656 			adv = ADVERTISE_PAUSE_ASYM;
1657 		}
1658 	}
1659 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662 		}
1663 		else {
1664 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665 		}
1666 	}
1667 	return adv;
1668 }
1669 
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671 
1672 static int
1673 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1674 __releases(&bp->phy_lock)
1675 __acquires(&bp->phy_lock)
1676 {
1677 	u32 speed_arg = 0, pause_adv;
1678 
1679 	pause_adv = bnx2_phy_get_pause_adv(bp);
1680 
1681 	if (bp->autoneg & AUTONEG_SPEED) {
1682 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683 		if (bp->advertising & ADVERTISED_10baseT_Half)
1684 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685 		if (bp->advertising & ADVERTISED_10baseT_Full)
1686 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687 		if (bp->advertising & ADVERTISED_100baseT_Half)
1688 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689 		if (bp->advertising & ADVERTISED_100baseT_Full)
1690 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1692 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1694 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695 	} else {
1696 		if (bp->req_line_speed == SPEED_2500)
1697 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698 		else if (bp->req_line_speed == SPEED_1000)
1699 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700 		else if (bp->req_line_speed == SPEED_100) {
1701 			if (bp->req_duplex == DUPLEX_FULL)
1702 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703 			else
1704 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705 		} else if (bp->req_line_speed == SPEED_10) {
1706 			if (bp->req_duplex == DUPLEX_FULL)
1707 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708 			else
1709 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710 		}
1711 	}
1712 
1713 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1715 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1716 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717 
1718 	if (port == PORT_TP)
1719 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721 
1722 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1723 
1724 	spin_unlock_bh(&bp->phy_lock);
1725 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1726 	spin_lock_bh(&bp->phy_lock);
1727 
1728 	return 0;
1729 }
1730 
1731 static int
1732 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1733 __releases(&bp->phy_lock)
1734 __acquires(&bp->phy_lock)
1735 {
1736 	u32 adv, bmcr;
1737 	u32 new_adv = 0;
1738 
1739 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1740 		return bnx2_setup_remote_phy(bp, port);
1741 
1742 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1743 		u32 new_bmcr;
1744 		int force_link_down = 0;
1745 
1746 		if (bp->req_line_speed == SPEED_2500) {
1747 			if (!bnx2_test_and_enable_2g5(bp))
1748 				force_link_down = 1;
1749 		} else if (bp->req_line_speed == SPEED_1000) {
1750 			if (bnx2_test_and_disable_2g5(bp))
1751 				force_link_down = 1;
1752 		}
1753 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1754 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755 
1756 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1757 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1758 		new_bmcr |= BMCR_SPEED1000;
1759 
1760 		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1761 			if (bp->req_line_speed == SPEED_2500)
1762 				bnx2_enable_forced_2g5(bp);
1763 			else if (bp->req_line_speed == SPEED_1000) {
1764 				bnx2_disable_forced_2g5(bp);
1765 				new_bmcr &= ~0x2000;
1766 			}
1767 
1768 		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1769 			if (bp->req_line_speed == SPEED_2500)
1770 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771 			else
1772 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1773 		}
1774 
1775 		if (bp->req_duplex == DUPLEX_FULL) {
1776 			adv |= ADVERTISE_1000XFULL;
1777 			new_bmcr |= BMCR_FULLDPLX;
1778 		}
1779 		else {
1780 			adv |= ADVERTISE_1000XHALF;
1781 			new_bmcr &= ~BMCR_FULLDPLX;
1782 		}
1783 		if ((new_bmcr != bmcr) || (force_link_down)) {
1784 			/* Force a link down visible on the other side */
1785 			if (bp->link_up) {
1786 				bnx2_write_phy(bp, bp->mii_adv, adv &
1787 					       ~(ADVERTISE_1000XFULL |
1788 						 ADVERTISE_1000XHALF));
1789 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1790 					BMCR_ANRESTART | BMCR_ANENABLE);
1791 
1792 				bp->link_up = 0;
1793 				netif_carrier_off(bp->dev);
1794 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1795 				bnx2_report_link(bp);
1796 			}
1797 			bnx2_write_phy(bp, bp->mii_adv, adv);
1798 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1799 		} else {
1800 			bnx2_resolve_flow_ctrl(bp);
1801 			bnx2_set_mac_link(bp);
1802 		}
1803 		return 0;
1804 	}
1805 
1806 	bnx2_test_and_enable_2g5(bp);
1807 
1808 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1809 		new_adv |= ADVERTISE_1000XFULL;
1810 
1811 	new_adv |= bnx2_phy_get_pause_adv(bp);
1812 
1813 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1814 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1815 
1816 	bp->serdes_an_pending = 0;
1817 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1818 		/* Force a link down visible on the other side */
1819 		if (bp->link_up) {
1820 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1821 			spin_unlock_bh(&bp->phy_lock);
1822 			msleep(20);
1823 			spin_lock_bh(&bp->phy_lock);
1824 		}
1825 
1826 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1827 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1828 			BMCR_ANENABLE);
1829 		/* Speed up link-up time when the link partner
1830 		 * does not autonegotiate which is very common
1831 		 * in blade servers. Some blade servers use
1832 		 * IPMI for kerboard input and it's important
1833 		 * to minimize link disruptions. Autoneg. involves
1834 		 * exchanging base pages plus 3 next pages and
1835 		 * normally completes in about 120 msec.
1836 		 */
1837 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1838 		bp->serdes_an_pending = 1;
1839 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1840 	} else {
1841 		bnx2_resolve_flow_ctrl(bp);
1842 		bnx2_set_mac_link(bp);
1843 	}
1844 
1845 	return 0;
1846 }
1847 
1848 #define ETHTOOL_ALL_FIBRE_SPEED						\
1849 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1850 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1851 		(ADVERTISED_1000baseT_Full)
1852 
1853 #define ETHTOOL_ALL_COPPER_SPEED					\
1854 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1855 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1856 	ADVERTISED_1000baseT_Full)
1857 
1858 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1859 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1860 
1861 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862 
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866 	u32 link;
1867 
1868 	if (bp->phy_port == PORT_TP)
1869 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870 	else
1871 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872 
1873 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874 		bp->req_line_speed = 0;
1875 		bp->autoneg |= AUTONEG_SPEED;
1876 		bp->advertising = ADVERTISED_Autoneg;
1877 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878 			bp->advertising |= ADVERTISED_10baseT_Half;
1879 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880 			bp->advertising |= ADVERTISED_10baseT_Full;
1881 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882 			bp->advertising |= ADVERTISED_100baseT_Half;
1883 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884 			bp->advertising |= ADVERTISED_100baseT_Full;
1885 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886 			bp->advertising |= ADVERTISED_1000baseT_Full;
1887 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888 			bp->advertising |= ADVERTISED_2500baseX_Full;
1889 	} else {
1890 		bp->autoneg = 0;
1891 		bp->advertising = 0;
1892 		bp->req_duplex = DUPLEX_FULL;
1893 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894 			bp->req_line_speed = SPEED_10;
1895 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896 				bp->req_duplex = DUPLEX_HALF;
1897 		}
1898 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899 			bp->req_line_speed = SPEED_100;
1900 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901 				bp->req_duplex = DUPLEX_HALF;
1902 		}
1903 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904 			bp->req_line_speed = SPEED_1000;
1905 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906 			bp->req_line_speed = SPEED_2500;
1907 	}
1908 }
1909 
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914 		bnx2_set_default_remote_link(bp);
1915 		return;
1916 	}
1917 
1918 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919 	bp->req_line_speed = 0;
1920 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921 		u32 reg;
1922 
1923 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924 
1925 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928 			bp->autoneg = 0;
1929 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1930 			bp->req_duplex = DUPLEX_FULL;
1931 		}
1932 	} else
1933 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935 
1936 static void
1937 bnx2_send_heart_beat(struct bnx2 *bp)
1938 {
1939 	u32 msg;
1940 	u32 addr;
1941 
1942 	spin_lock(&bp->indirect_lock);
1943 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947 	spin_unlock(&bp->indirect_lock);
1948 }
1949 
1950 static void
1951 bnx2_remote_phy_event(struct bnx2 *bp)
1952 {
1953 	u32 msg;
1954 	u8 link_up = bp->link_up;
1955 	u8 old_port;
1956 
1957 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1958 
1959 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1960 		bnx2_send_heart_beat(bp);
1961 
1962 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963 
1964 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1965 		bp->link_up = 0;
1966 	else {
1967 		u32 speed;
1968 
1969 		bp->link_up = 1;
1970 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1971 		bp->duplex = DUPLEX_FULL;
1972 		switch (speed) {
1973 			case BNX2_LINK_STATUS_10HALF:
1974 				bp->duplex = DUPLEX_HALF;
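				/* fall through */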
1975 			case BNX2_LINK_STATUS_10FULL:
1976 				bp->line_speed = SPEED_10;
1977 				break;
1978 			case BNX2_LINK_STATUS_100HALF:
1979 				bp->duplex = DUPLEX_HALF;
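				/* fall through */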
1980 			case BNX2_LINK_STATUS_100BASE_T4:
1981 			case BNX2_LINK_STATUS_100FULL:
1982 				bp->line_speed = SPEED_100;
1983 				break;
1984 			case BNX2_LINK_STATUS_1000HALF:
1985 				bp->duplex = DUPLEX_HALF;
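				/* fall through */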
1986 			case BNX2_LINK_STATUS_1000FULL:
1987 				bp->line_speed = SPEED_1000;
1988 				break;
1989 			case BNX2_LINK_STATUS_2500HALF:
1990 				bp->duplex = DUPLEX_HALF;
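				/* fall through */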
1991 			case BNX2_LINK_STATUS_2500FULL:
1992 				bp->line_speed = SPEED_2500;
1993 				break;
1994 			default:
1995 				bp->line_speed = 0;
1996 				break;
1997 		}
1998 
1999 		bp->flow_ctrl = 0;
2000 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2001 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2002 			if (bp->duplex == DUPLEX_FULL)
2003 				bp->flow_ctrl = bp->req_flow_ctrl;
2004 		} else {
2005 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2006 				bp->flow_ctrl |= FLOW_CTRL_TX;
2007 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2008 				bp->flow_ctrl |= FLOW_CTRL_RX;
2009 		}
2010 
2011 		old_port = bp->phy_port;
2012 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2013 			bp->phy_port = PORT_FIBRE;
2014 		else
2015 			bp->phy_port = PORT_TP;
2016 
2017 		if (old_port != bp->phy_port)
2018 			bnx2_set_default_link(bp);
2019 
2020 	}
2021 	if (bp->link_up != link_up)
2022 		bnx2_report_link(bp);
2023 
2024 	bnx2_set_mac_link(bp);
2025 }
2026 
2027 static int
2028 bnx2_set_remote_link(struct bnx2 *bp)
2029 {
2030 	u32 evt_code;
2031 
2032 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033 	switch (evt_code) {
2034 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2035 			bnx2_remote_phy_event(bp);
2036 			break;
2037 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038 		default:
2039 			bnx2_send_heart_beat(bp);
2040 			break;
2041 	}
2042 	return 0;
2043 }
2044 
2045 static int
2046 bnx2_setup_copper_phy(struct bnx2 *bp)
2047 __releases(&bp->phy_lock)
2048 __acquires(&bp->phy_lock)
2049 {
2050 	u32 bmcr;
2051 	u32 new_bmcr;
2052 
2053 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2054 
2055 	if (bp->autoneg & AUTONEG_SPEED) {
2056 		u32 adv_reg, adv1000_reg;
2057 		u32 new_adv = 0;
2058 		u32 new_adv1000 = 0;
2059 
2060 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2061 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2062 			ADVERTISE_PAUSE_ASYM);
2063 
2064 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2065 		adv1000_reg &= PHY_ALL_1000_SPEED;
2066 
2067 		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2068 		new_adv |= ADVERTISE_CSMA;
2069 		new_adv |= bnx2_phy_get_pause_adv(bp);
2070 
2071 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2072 
2073 		if ((adv1000_reg != new_adv1000) ||
2074 			(adv_reg != new_adv) ||
2075 			((bmcr & BMCR_ANENABLE) == 0)) {
2076 
2077 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2078 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2079 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2080 				BMCR_ANENABLE);
2081 		}
2082 		else if (bp->link_up) {
2083 			/* Flow ctrl may have changed from auto to forced
2084 			 * or vice-versa. */
2085 
2086 			bnx2_resolve_flow_ctrl(bp);
2087 			bnx2_set_mac_link(bp);
2088 		}
2089 		return 0;
2090 	}
2091 
2092 	new_bmcr = 0;
2093 	if (bp->req_line_speed == SPEED_100) {
2094 		new_bmcr |= BMCR_SPEED100;
2095 	}
2096 	if (bp->req_duplex == DUPLEX_FULL) {
2097 		new_bmcr |= BMCR_FULLDPLX;
2098 	}
2099 	if (new_bmcr != bmcr) {
2100 		u32 bmsr;
2101 
2102 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2103 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2104 
2105 		if (bmsr & BMSR_LSTATUS) {
2106 			/* Force link down */
2107 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2108 			spin_unlock_bh(&bp->phy_lock);
2109 			msleep(50);
2110 			spin_lock_bh(&bp->phy_lock);
2111 
2112 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2113 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2114 		}
2115 
2116 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2117 
2118 		/* Normally, the new speed is set up after the link has
2119 		 * gone down and up again. In some cases, link will not go
2120 		 * down so we need to set up the new speed here.
2121 		 */
2122 		if (bmsr & BMSR_LSTATUS) {
2123 			bp->line_speed = bp->req_line_speed;
2124 			bp->duplex = bp->req_duplex;
2125 			bnx2_resolve_flow_ctrl(bp);
2126 			bnx2_set_mac_link(bp);
2127 		}
2128 	} else {
2129 		bnx2_resolve_flow_ctrl(bp);
2130 		bnx2_set_mac_link(bp);
2131 	}
2132 	return 0;
2133 }
2134 
2135 static int
2136 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2137 __releases(&bp->phy_lock)
2138 __acquires(&bp->phy_lock)
2139 {
2140 	if (bp->loopback == MAC_LOOPBACK)
2141 		return 0;
2142 
2143 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2144 		return bnx2_setup_serdes_phy(bp, port);
2145 	}
2146 	else {
2147 		return bnx2_setup_copper_phy(bp);
2148 	}
2149 }
2150 
2151 static int
2152 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2153 {
2154 	u32 val;
2155 
2156 	bp->mii_bmcr = MII_BMCR + 0x10;
2157 	bp->mii_bmsr = MII_BMSR + 0x10;
2158 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2159 	bp->mii_adv = MII_ADVERTISE + 0x10;
2160 	bp->mii_lpa = MII_LPA + 0x10;
2161 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2162 
2163 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2164 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2165 
2166 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2167 	if (reset_phy)
2168 		bnx2_reset_phy(bp);
2169 
2170 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2171 
2172 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2173 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2174 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2175 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2176 
2177 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2178 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2179 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2180 		val |= BCM5708S_UP1_2G5;
2181 	else
2182 		val &= ~BCM5708S_UP1_2G5;
2183 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2184 
2185 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2186 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2187 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2188 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2189 
2190 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2191 
2192 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2193 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2194 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2195 
2196 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2197 
2198 	return 0;
2199 }
2200 
2201 static int
2202 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2203 {
2204 	u32 val;
2205 
2206 	if (reset_phy)
2207 		bnx2_reset_phy(bp);
2208 
2209 	bp->mii_up1 = BCM5708S_UP1;
2210 
2211 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2212 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2213 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2214 
2215 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2216 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2217 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2218 
2219 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2220 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2221 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2222 
2223 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2224 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2225 		val |= BCM5708S_UP1_2G5;
2226 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2227 	}
2228 
2229 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2230 	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2231 	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2232 		/* increase tx signal amplitude */
2233 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2234 			       BCM5708S_BLK_ADDR_TX_MISC);
2235 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2236 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2237 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2238 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2239 	}
2240 
2241 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2242 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2243 
2244 	if (val) {
2245 		u32 is_backplane;
2246 
2247 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2248 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2249 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2250 				       BCM5708S_BLK_ADDR_TX_MISC);
2251 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2252 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2253 				       BCM5708S_BLK_ADDR_DIG);
2254 		}
2255 	}
2256 	return 0;
2257 }
2258 
2259 static int
2260 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2261 {
2262 	if (reset_phy)
2263 		bnx2_reset_phy(bp);
2264 
2265 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2266 
2267 	if (CHIP_NUM(bp) == CHIP_NUM_5706)
2268 		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2269 
2270 	if (bp->dev->mtu > 1500) {
2271 		u32 val;
2272 
2273 		/* Set extended packet length bit */
2274 		bnx2_write_phy(bp, 0x18, 0x7);
2275 		bnx2_read_phy(bp, 0x18, &val);
2276 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2277 
2278 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2279 		bnx2_read_phy(bp, 0x1c, &val);
2280 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2281 	}
2282 	else {
2283 		u32 val;
2284 
2285 		bnx2_write_phy(bp, 0x18, 0x7);
2286 		bnx2_read_phy(bp, 0x18, &val);
2287 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2288 
2289 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2290 		bnx2_read_phy(bp, 0x1c, &val);
2291 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2292 	}
2293 
2294 	return 0;
2295 }
2296 
2297 static int
2298 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2299 {
2300 	u32 val;
2301 
2302 	if (reset_phy)
2303 		bnx2_reset_phy(bp);
2304 
2305 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2306 		bnx2_write_phy(bp, 0x18, 0x0c00);
2307 		bnx2_write_phy(bp, 0x17, 0x000a);
2308 		bnx2_write_phy(bp, 0x15, 0x310b);
2309 		bnx2_write_phy(bp, 0x17, 0x201f);
2310 		bnx2_write_phy(bp, 0x15, 0x9506);
2311 		bnx2_write_phy(bp, 0x17, 0x401f);
2312 		bnx2_write_phy(bp, 0x15, 0x14e2);
2313 		bnx2_write_phy(bp, 0x18, 0x0400);
2314 	}
2315 
2316 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2317 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2318 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2319 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2320 		val &= ~(1 << 8);
2321 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2322 	}
2323 
2324 	if (bp->dev->mtu > 1500) {
2325 		/* Set extended packet length bit */
2326 		bnx2_write_phy(bp, 0x18, 0x7);
2327 		bnx2_read_phy(bp, 0x18, &val);
2328 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2329 
2330 		bnx2_read_phy(bp, 0x10, &val);
2331 		bnx2_write_phy(bp, 0x10, val | 0x1);
2332 	}
2333 	else {
2334 		bnx2_write_phy(bp, 0x18, 0x7);
2335 		bnx2_read_phy(bp, 0x18, &val);
2336 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2337 
2338 		bnx2_read_phy(bp, 0x10, &val);
2339 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2340 	}
2341 
2342 	/* ethernet@wirespeed */
2343 	bnx2_write_phy(bp, 0x18, 0x7007);
2344 	bnx2_read_phy(bp, 0x18, &val);
2345 	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2346 	return 0;
2347 }
2348 
2349 
2350 static int
2351 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2352 __releases(&bp->phy_lock)
2353 __acquires(&bp->phy_lock)
2354 {
2355 	u32 val;
2356 	int rc = 0;
2357 
2358 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2359 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2360 
2361 	bp->mii_bmcr = MII_BMCR;
2362 	bp->mii_bmsr = MII_BMSR;
2363 	bp->mii_bmsr1 = MII_BMSR;
2364 	bp->mii_adv = MII_ADVERTISE;
2365 	bp->mii_lpa = MII_LPA;
2366 
2367 	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368 
2369 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2370 		goto setup_phy;
2371 
2372 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2373 	bp->phy_id = val << 16;
2374 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2375 	bp->phy_id |= val & 0xffff;
2376 
2377 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2378 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
2379 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2380 		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2381 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2382 		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2383 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2384 	}
2385 	else {
2386 		rc = bnx2_init_copper_phy(bp, reset_phy);
2387 	}
2388 
2389 setup_phy:
2390 	if (!rc)
2391 		rc = bnx2_setup_phy(bp, bp->phy_port);
2392 
2393 	return rc;
2394 }
2395 
2396 static int
2397 bnx2_set_mac_loopback(struct bnx2 *bp)
2398 {
2399 	u32 mac_mode;
2400 
2401 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405 	bp->link_up = 1;
2406 	return 0;
2407 }
2408 
2409 static int bnx2_test_link(struct bnx2 *);
2410 
2411 static int
2412 bnx2_set_phy_loopback(struct bnx2 *bp)
2413 {
2414 	u32 mac_mode;
2415 	int rc, i;
2416 
2417 	spin_lock_bh(&bp->phy_lock);
2418 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2419 			    BMCR_SPEED1000);
2420 	spin_unlock_bh(&bp->phy_lock);
2421 	if (rc)
2422 		return rc;
2423 
2424 	for (i = 0; i < 10; i++) {
2425 		if (bnx2_test_link(bp) == 0)
2426 			break;
2427 		msleep(100);
2428 	}
2429 
2430 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2432 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2433 		      BNX2_EMAC_MODE_25G_MODE);
2434 
2435 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2436 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2437 	bp->link_up = 1;
2438 	return 0;
2439 }
2440 
2441 static void
2442 bnx2_dump_mcp_state(struct bnx2 *bp)
2443 {
2444 	struct net_device *dev = bp->dev;
2445 	u32 mcp_p0, mcp_p1;
2446 
2447 	netdev_err(dev, "<--- start MCP states dump --->\n");
2448 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2449 		mcp_p0 = BNX2_MCP_STATE_P0;
2450 		mcp_p1 = BNX2_MCP_STATE_P1;
2451 	} else {
2452 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2453 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2454 	}
2455 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2456 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2457 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2458 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2459 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2460 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
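	/* The program counter is read twice; if the two values differ,
	 * the MCP is still executing.
	 */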
2461 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2462 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2463 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2464 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2465 	netdev_err(dev, "DEBUG: shmem states:\n");
2466 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2467 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2468 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2469 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2470 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2471 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2472 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2473 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2474 	pr_cont(" condition[%08x]\n",
2475 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2476 	DP_SHMEM_LINE(bp, 0x3cc);
2477 	DP_SHMEM_LINE(bp, 0x3dc);
2478 	DP_SHMEM_LINE(bp, 0x3ec);
2479 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2480 	netdev_err(dev, "<--- end MCP states dump --->\n");
2481 }
2482 
2483 static int
2484 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2485 {
2486 	int i;
2487 	u32 val;
2488 
2489 	bp->fw_wr_seq++;
2490 	msg_data |= bp->fw_wr_seq;
2491 
2492 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2493 
2494 	if (!ack)
2495 		return 0;
2496 
2497 	/* wait for an acknowledgement. */
2498 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2499 		msleep(10);
2500 
2501 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2502 
2503 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2504 			break;
2505 	}
2506 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2507 		return 0;
2508 
2509 	/* If we timed out, inform the firmware that this is the case. */
2510 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2511 		msg_data &= ~BNX2_DRV_MSG_CODE;
2512 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2513 
2514 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2515 		if (!silent) {
2516 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2517 			bnx2_dump_mcp_state(bp);
2518 		}
2519 
2520 		return -EBUSY;
2521 	}
2522 
2523 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2524 		return -EIO;
2525 
2526 	return 0;
2527 }
2528 
2529 static int
2530 bnx2_init_5709_context(struct bnx2 *bp)
2531 {
2532 	int i, ret = 0;
2533 	u32 val;
2534 
2535 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
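	/* Encode the host page size as log2(BCM_PAGE_SIZE) - 8 in the
	 * field starting at bit 16.
	 */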
2536 	val |= (BCM_PAGE_BITS - 8) << 16;
2537 	REG_WR(bp, BNX2_CTX_COMMAND, val);
2538 	for (i = 0; i < 10; i++) {
2539 		val = REG_RD(bp, BNX2_CTX_COMMAND);
2540 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2541 			break;
2542 		udelay(2);
2543 	}
2544 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2545 		return -EBUSY;
2546 
2547 	for (i = 0; i < bp->ctx_pages; i++) {
2548 		int j;
2549 
2550 		if (bp->ctx_blk[i])
2551 			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2552 		else
2553 			return -ENOMEM;
2554 
2555 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2556 		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
2557 		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2558 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2559 		       (u64) bp->ctx_blk_mapping[i] >> 32);
2560 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2561 		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2562 		for (j = 0; j < 10; j++) {
2563 
2564 			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2565 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2566 				break;
2567 			udelay(5);
2568 		}
2569 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2570 			ret = -EBUSY;
2571 			break;
2572 		}
2573 	}
2574 	return ret;
2575 }
2576 
2577 static void
2578 bnx2_init_context(struct bnx2 *bp)
2579 {
2580 	u32 vcid;
2581 
2582 	vcid = 96;
2583 	while (vcid) {
2584 		u32 vcid_addr, pcid_addr, offset;
2585 		int i;
2586 
2587 		vcid--;
2588 
2589 		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2590 			u32 new_vcid;
2591 
2592 			vcid_addr = GET_PCID_ADDR(vcid);
2593 			if (vcid & 0x8) {
2594 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2595 			}
2596 			else {
2597 				new_vcid = vcid;
2598 			}
2599 			pcid_addr = GET_PCID_ADDR(new_vcid);
2600 		}
2601 		else {
2602 			vcid_addr = GET_CID_ADDR(vcid);
2603 			pcid_addr = vcid_addr;
2604 		}
2605 
2606 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2607 			vcid_addr += (i << PHY_CTX_SHIFT);
2608 			pcid_addr += (i << PHY_CTX_SHIFT);
2609 
2610 			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2611 			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2612 
2613 			/* Zero out the context. */
2614 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2615 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2616 		}
2617 	}
2618 }
2619 
2620 static int
2621 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2622 {
2623 	u16 *good_mbuf;
2624 	u32 good_mbuf_cnt;
2625 	u32 val;
2626 
2627 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2628 	if (good_mbuf == NULL) {
2629 		pr_err("Failed to allocate memory in %s\n", __func__);
2630 		return -ENOMEM;
2631 	}
2632 
2633 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2634 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2635 
2636 	good_mbuf_cnt = 0;
2637 
2638 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2639 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2640 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2641 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2642 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2643 
2644 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2645 
2646 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2647 
2648 		/* The addresses with Bit 9 set are bad memory blocks. */
2649 		if (!(val & (1 << 9))) {
2650 			good_mbuf[good_mbuf_cnt] = (u16) val;
2651 			good_mbuf_cnt++;
2652 		}
2653 
2654 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2655 	}
2656 
2657 	/* Free the good ones back to the mbuf pool, thus discarding
2658 	 * all the bad ones. */
2659 	while (good_mbuf_cnt) {
2660 		good_mbuf_cnt--;
2661 
2662 		val = good_mbuf[good_mbuf_cnt];
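		/* Free-command encoding: the mbuf value replicated in both
		 * halves with bit 0 set (encoding as used by this driver).
		 */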
2663 		val = (val << 9) | val | 1;
2664 
2665 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2666 	}
2667 	kfree(good_mbuf);
2668 	return 0;
2669 }
2670 
2671 static void
2672 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2673 {
2674 	u32 val;
2675 
2676 	val = (mac_addr[0] << 8) | mac_addr[1];
2677 
2678 	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2679 
2680 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2681 		(mac_addr[4] << 8) | mac_addr[5];
2682 
2683 	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2684 }
2685 
2686 static inline int
2687 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2688 {
2689 	dma_addr_t mapping;
2690 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2691 	struct rx_bd *rxbd =
2692 		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2693 	struct page *page = alloc_page(gfp);
2694 
2695 	if (!page)
2696 		return -ENOMEM;
2697 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2698 			       PCI_DMA_FROMDEVICE);
2699 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2700 		__free_page(page);
2701 		return -EIO;
2702 	}
2703 
2704 	rx_pg->page = page;
2705 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2706 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2707 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2708 	return 0;
2709 }
2710 
2711 static void
2712 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2713 {
2714 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2715 	struct page *page = rx_pg->page;
2716 
2717 	if (!page)
2718 		return;
2719 
2720 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2721 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2722 
2723 	__free_page(page);
2724 	rx_pg->page = NULL;
2725 }
2726 
2727 static inline int
2728 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2729 {
2730 	u8 *data;
2731 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2732 	dma_addr_t mapping;
2733 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2734 
2735 	data = kmalloc(bp->rx_buf_size, gfp);
2736 	if (!data)
2737 		return -ENOMEM;
2738 
2739 	mapping = dma_map_single(&bp->pdev->dev,
2740 				 get_l2_fhdr(data),
2741 				 bp->rx_buf_use_size,
2742 				 PCI_DMA_FROMDEVICE);
2743 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2744 		kfree(data);
2745 		return -EIO;
2746 	}
2747 
2748 	rx_buf->data = data;
2749 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2750 
2751 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2752 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2753 
2754 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2755 
2756 	return 0;
2757 }
2758 
2759 static int
2760 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2761 {
2762 	struct status_block *sblk = bnapi->status_blk.msi;
2763 	u32 new_link_state, old_link_state;
2764 	int is_set = 1;
2765 
2766 	new_link_state = sblk->status_attn_bits & event;
2767 	old_link_state = sblk->status_attn_bits_ack & event;
2768 	if (new_link_state != old_link_state) {
2769 		if (new_link_state)
2770 			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2771 		else
2772 			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2773 	} else
2774 		is_set = 0;
2775 
2776 	return is_set;
2777 }
2778 
2779 static void
2780 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2781 {
2782 	spin_lock(&bp->phy_lock);
2783 
2784 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2785 		bnx2_set_link(bp);
2786 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2787 		bnx2_set_remote_link(bp);
2788 
2789 	spin_unlock(&bp->phy_lock);
2790 
2791 }
2792 
2793 static inline u16
2794 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2795 {
2796 	u16 cons;
2797 
2798 	/* Tell compiler that status block fields can change. */
2799 	barrier();
2800 	cons = *bnapi->hw_tx_cons_ptr;
2801 	barrier();
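	/* The last entry of each ring page is a next-page pointer, not a
	 * packet BD, so the hardware index skips over it.
	 */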
2802 	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2803 		cons++;
2804 	return cons;
2805 }
2806 
2807 static int
2808 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2809 {
2810 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2811 	u16 hw_cons, sw_cons, sw_ring_cons;
2812 	int tx_pkt = 0, index;
2813 	unsigned int tx_bytes = 0;
2814 	struct netdev_queue *txq;
2815 
2816 	index = (bnapi - bp->bnx2_napi);
2817 	txq = netdev_get_tx_queue(bp->dev, index);
2818 
2819 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2820 	sw_cons = txr->tx_cons;
2821 
2822 	while (sw_cons != hw_cons) {
2823 		struct sw_tx_bd *tx_buf;
2824 		struct sk_buff *skb;
2825 		int i, last;
2826 
2827 		sw_ring_cons = TX_RING_IDX(sw_cons);
2828 
2829 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2830 		skb = tx_buf->skb;
2831 
2832 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2833 		prefetch(&skb->end);
2834 
2835 		/* partial BD completions possible with TSO packets */
2836 		if (tx_buf->is_gso) {
2837 			u16 last_idx, last_ring_idx;
2838 
2839 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2840 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2841 			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2842 				last_idx++;
2843 			}
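			/* Signed 16-bit arithmetic handles ring-index
			 * wraparound.
			 */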
2844 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2845 				break;
2846 			}
2847 		}
2848 
2849 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2850 			skb_headlen(skb), PCI_DMA_TODEVICE);
2851 
2852 		tx_buf->skb = NULL;
2853 		last = tx_buf->nr_frags;
2854 
2855 		for (i = 0; i < last; i++) {
2856 			sw_cons = NEXT_TX_BD(sw_cons);
2857 
2858 			dma_unmap_page(&bp->pdev->dev,
2859 				dma_unmap_addr(
2860 					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2861 					mapping),
2862 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2863 				PCI_DMA_TODEVICE);
2864 		}
2865 
2866 		sw_cons = NEXT_TX_BD(sw_cons);
2867 
2868 		tx_bytes += skb->len;
2869 		dev_kfree_skb(skb);
2870 		tx_pkt++;
2871 		if (tx_pkt == budget)
2872 			break;
2873 
2874 		if (hw_cons == sw_cons)
2875 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2876 	}
2877 
2878 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2879 	txr->hw_tx_cons = hw_cons;
2880 	txr->tx_cons = sw_cons;
2881 
2882 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2883 	 * before checking for netif_tx_queue_stopped().  Without the
2884 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2885 	 * will miss it and cause the queue to be stopped forever.
2886 	 */
2887 	smp_mb();
2888 
2889 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2890 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2891 		__netif_tx_lock(txq, smp_processor_id());
2892 		if ((netif_tx_queue_stopped(txq)) &&
2893 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2894 			netif_tx_wake_queue(txq);
2895 		__netif_tx_unlock(txq);
2896 	}
2897 
2898 	return tx_pkt;
2899 }
2900 
2901 static void
2902 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2903 			struct sk_buff *skb, int count)
2904 {
2905 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
2906 	struct rx_bd *cons_bd, *prod_bd;
2907 	int i;
2908 	u16 hw_prod, prod;
2909 	u16 cons = rxr->rx_pg_cons;
2910 
2911 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2912 
2913 	/* The caller was unable to allocate a new page to replace the
2914 	 * last one in the frags array, so we need to recycle that page
2915 	 * and then free the skb.
2916 	 */
2917 	if (skb) {
2918 		struct page *page;
2919 		struct skb_shared_info *shinfo;
2920 
2921 		shinfo = skb_shinfo(skb);
2922 		shinfo->nr_frags--;
2923 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2924 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2925 
2926 		cons_rx_pg->page = page;
2927 		dev_kfree_skb(skb);
2928 	}
2929 
2930 	hw_prod = rxr->rx_pg_prod;
2931 
2932 	for (i = 0; i < count; i++) {
2933 		prod = RX_PG_RING_IDX(hw_prod);
2934 
2935 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2936 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2937 		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2938 		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2939 
2940 		if (prod != cons) {
2941 			prod_rx_pg->page = cons_rx_pg->page;
2942 			cons_rx_pg->page = NULL;
2943 			dma_unmap_addr_set(prod_rx_pg, mapping,
2944 				dma_unmap_addr(cons_rx_pg, mapping));
2945 
2946 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2947 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2948 
2949 		}
2950 		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2951 		hw_prod = NEXT_RX_BD(hw_prod);
2952 	}
2953 	rxr->rx_pg_prod = hw_prod;
2954 	rxr->rx_pg_cons = cons;
2955 }
2956 
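/* Recycle an rx buffer that could not be replaced: move the data pointer
 * and DMA mapping from the consumer slot to the producer slot.
 */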
2957 static inline void
2958 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2959 		   u8 *data, u16 cons, u16 prod)
2960 {
2961 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2962 	struct rx_bd *cons_bd, *prod_bd;
2963 
2964 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2965 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2966 
2967 	dma_sync_single_for_device(&bp->pdev->dev,
2968 		dma_unmap_addr(cons_rx_buf, mapping),
2969 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2970 
2971 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2972 
2973 	prod_rx_buf->data = data;
2974 
2975 	if (cons == prod)
2976 		return;
2977 
2978 	dma_unmap_addr_set(prod_rx_buf, mapping,
2979 			dma_unmap_addr(cons_rx_buf, mapping));
2980 
2981 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2982 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2983 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2984 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2985 }
2986 
2987 static struct sk_buff *
2988 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2989 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2990 	    u32 ring_idx)
2991 {
2992 	int err;
2993 	u16 prod = ring_idx & 0xffff;
2994 	struct sk_buff *skb;
2995 
2996 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
2997 	if (unlikely(err)) {
2998 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
2999 error:
3000 		if (hdr_len) {
3001 			unsigned int raw_len = len + 4;
3002 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3003 
3004 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3005 		}
3006 		return NULL;
3007 	}
3008 
3009 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3010 			 PCI_DMA_FROMDEVICE);
3011 	skb = build_skb(data);
3012 	if (!skb) {
3013 		kfree(data);
3014 		goto error;
3015 	}
3016 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3017 	if (hdr_len == 0) {
3018 		skb_put(skb, len);
3019 		return skb;
3020 	} else {
3021 		unsigned int i, frag_len, frag_size, pages;
3022 		struct sw_pg *rx_pg;
3023 		u16 pg_cons = rxr->rx_pg_cons;
3024 		u16 pg_prod = rxr->rx_pg_prod;
3025 
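		/* frag_size still includes the 4-byte CRC; it is trimmed
		 * from the last page below.
		 */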
3026 		frag_size = len + 4 - hdr_len;
3027 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3028 		skb_put(skb, hdr_len);
3029 
3030 		for (i = 0; i < pages; i++) {
3031 			dma_addr_t mapping_old;
3032 
3033 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3034 			if (unlikely(frag_len <= 4)) {
3035 				unsigned int tail = 4 - frag_len;
3036 
3037 				rxr->rx_pg_cons = pg_cons;
3038 				rxr->rx_pg_prod = pg_prod;
3039 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3040 							pages - i);
3041 				skb->len -= tail;
3042 				if (i == 0) {
3043 					skb->tail -= tail;
3044 				} else {
3045 					skb_frag_t *frag =
3046 						&skb_shinfo(skb)->frags[i - 1];
3047 					skb_frag_size_sub(frag, tail);
3048 					skb->data_len -= tail;
3049 				}
3050 				return skb;
3051 			}
3052 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3053 
3054 			/* Don't unmap yet.  If we're unable to allocate a new
3055 			 * page, we need to recycle the page and the DMA addr.
3056 			 */
3057 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3058 			if (i == pages - 1)
3059 				frag_len -= 4;
3060 
3061 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3062 			rx_pg->page = NULL;
3063 
3064 			err = bnx2_alloc_rx_page(bp, rxr,
3065 						 RX_PG_RING_IDX(pg_prod),
3066 						 GFP_ATOMIC);
3067 			if (unlikely(err)) {
3068 				rxr->rx_pg_cons = pg_cons;
3069 				rxr->rx_pg_prod = pg_prod;
3070 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3071 							pages - i);
3072 				return NULL;
3073 			}
3074 
3075 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3076 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3077 
3078 			frag_size -= frag_len;
3079 			skb->data_len += frag_len;
3080 			skb->truesize += PAGE_SIZE;
3081 			skb->len += frag_len;
3082 
3083 			pg_prod = NEXT_RX_BD(pg_prod);
3084 			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3085 		}
3086 		rxr->rx_pg_prod = pg_prod;
3087 		rxr->rx_pg_cons = pg_cons;
3088 	}
3089 	return skb;
3090 }
3091 
3092 static inline u16
3093 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3094 {
3095 	u16 cons;
3096 
3097 	/* Tell compiler that status block fields can change. */
3098 	barrier();
3099 	cons = *bnapi->hw_rx_cons_ptr;
3100 	barrier();
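	/* As on the tx side, skip the next-page pointer entry at the end
	 * of each ring page.
	 */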
3101 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3102 		cons++;
3103 	return cons;
3104 }
3105 
3106 static int
3107 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3108 {
3109 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3110 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3111 	struct l2_fhdr *rx_hdr;
3112 	int rx_pkt = 0, pg_ring_used = 0;
3113 
3114 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3115 	sw_cons = rxr->rx_cons;
3116 	sw_prod = rxr->rx_prod;
3117 
3118 	/* Memory barrier necessary as speculative reads of the rx
3119 	 * buffer can be ahead of the index in the status block
3120 	 */
3121 	rmb();
3122 	while (sw_cons != hw_cons) {
3123 		unsigned int len, hdr_len;
3124 		u32 status;
3125 		struct sw_bd *rx_buf, *next_rx_buf;
3126 		struct sk_buff *skb;
3127 		dma_addr_t dma_addr;
3128 		u8 *data;
3129 
3130 		sw_ring_cons = RX_RING_IDX(sw_cons);
3131 		sw_ring_prod = RX_RING_IDX(sw_prod);
3132 
3133 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3134 		data = rx_buf->data;
3135 		rx_buf->data = NULL;
3136 
3137 		rx_hdr = get_l2_fhdr(data);
3138 		prefetch(rx_hdr);
3139 
3140 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3141 
3142 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3143 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3144 			PCI_DMA_FROMDEVICE);
3145 
3146 		next_rx_buf =
3147 			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3148 		prefetch(get_l2_fhdr(next_rx_buf->data));
3149 
3150 		len = rx_hdr->l2_fhdr_pkt_len;
3151 		status = rx_hdr->l2_fhdr_status;
3152 
3153 		hdr_len = 0;
3154 		if (status & L2_FHDR_STATUS_SPLIT) {
3155 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3156 			pg_ring_used = 1;
3157 		} else if (len > bp->rx_jumbo_thresh) {
3158 			hdr_len = bp->rx_jumbo_thresh;
3159 			pg_ring_used = 1;
3160 		}
3161 
3162 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3163 				       L2_FHDR_ERRORS_PHY_DECODE |
3164 				       L2_FHDR_ERRORS_ALIGNMENT |
3165 				       L2_FHDR_ERRORS_TOO_SHORT |
3166 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3167 
3168 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3169 					  sw_ring_prod);
3170 			if (pg_ring_used) {
3171 				int pages;
3172 
3173 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3174 
3175 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3176 			}
3177 			goto next_rx;
3178 		}
3179 
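		/* Strip the 4-byte frame CRC included in the hardware length. */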
3180 		len -= 4;
3181 
3182 		if (len <= bp->rx_copy_thresh) {
3183 			skb = netdev_alloc_skb(bp->dev, len + 6);
3184 			if (skb == NULL) {
3185 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3186 						  sw_ring_prod);
3187 				goto next_rx;
3188 			}
3189 
3190 			/* Aligned copy: 6 extra leading bytes keep the memcpy aligned. */
3191 			memcpy(skb->data,
3192 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3193 			       len + 6);
3194 			skb_reserve(skb, 6);
3195 			skb_put(skb, len);
3196 
3197 			bnx2_reuse_rx_data(bp, rxr, data,
3198 				sw_ring_cons, sw_ring_prod);
3199 
3200 		} else {
3201 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3202 					  (sw_ring_cons << 16) | sw_ring_prod);
3203 			if (!skb)
3204 				goto next_rx;
3205 		}
3206 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3207 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3208 			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3209 
3210 		skb->protocol = eth_type_trans(skb, bp->dev);
3211 
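		/* Drop frames longer than the MTU unless VLAN-tagged
		 * (0x8100 is ETH_P_8021Q).
		 */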
3212 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3213 			(ntohs(skb->protocol) != 0x8100)) {
3214 
3215 			dev_kfree_skb(skb);
3216 			goto next_rx;
3217 
3218 		}
3219 
3220 		skb_checksum_none_assert(skb);
3221 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3222 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3223 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3224 
3225 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3226 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3227 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3228 		}
3229 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3230 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3231 		     L2_FHDR_STATUS_USE_RXHASH))
3232 			skb->rxhash = rx_hdr->l2_fhdr_hash;
3233 
3234 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3235 		napi_gro_receive(&bnapi->napi, skb);
3236 		rx_pkt++;
3237 
3238 next_rx:
3239 		sw_cons = NEXT_RX_BD(sw_cons);
3240 		sw_prod = NEXT_RX_BD(sw_prod);
3241 
3242 		if (rx_pkt == budget)
3243 			break;
3244 
3245 		/* Refresh hw_cons to see if there is new work */
3246 		if (sw_cons == hw_cons) {
3247 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3248 			rmb();
3249 		}
3250 	}
3251 	rxr->rx_cons = sw_cons;
3252 	rxr->rx_prod = sw_prod;
3253 
3254 	if (pg_ring_used)
3255 		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3256 
3257 	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3258 
3259 	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3260 
3261 	mmiowb();
3262 
3263 	return rx_pkt;
3264 
3265 }
3266 
3267 /* MSI ISR - The only difference between this and the INTx ISR
3268  * is that the MSI interrupt is always serviced.
3269  */
3270 static irqreturn_t
3271 bnx2_msi(int irq, void *dev_instance)
3272 {
3273 	struct bnx2_napi *bnapi = dev_instance;
3274 	struct bnx2 *bp = bnapi->bp;
3275 
3276 	prefetch(bnapi->status_blk.msi);
3277 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3278 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3279 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3280 
3281 	/* Return here if interrupt is disabled. */
3282 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3283 		return IRQ_HANDLED;
3284 
3285 	napi_schedule(&bnapi->napi);
3286 
3287 	return IRQ_HANDLED;
3288 }
3289 
3290 static irqreturn_t
3291 bnx2_msi_1shot(int irq, void *dev_instance)
3292 {
3293 	struct bnx2_napi *bnapi = dev_instance;
3294 	struct bnx2 *bp = bnapi->bp;
3295 
3296 	prefetch(bnapi->status_blk.msi);
3297 
3298 	/* Return here if interrupt is disabled. */
3299 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3300 		return IRQ_HANDLED;
3301 
3302 	napi_schedule(&bnapi->napi);
3303 
3304 	return IRQ_HANDLED;
3305 }
3306 
3307 static irqreturn_t
3308 bnx2_interrupt(int irq, void *dev_instance)
3309 {
3310 	struct bnx2_napi *bnapi = dev_instance;
3311 	struct bnx2 *bp = bnapi->bp;
3312 	struct status_block *sblk = bnapi->status_blk.msi;
3313 
3314 	/* When using INTx, it is possible for the interrupt to arrive
3315 	 * at the CPU before the status block write that preceded it.
3316 	 * Reading a register will flush the status block.
3317 	 * When using MSI, the MSI message will always complete after
3318 	 * the status block write.
3319 	 */
3320 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3321 	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3322 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3323 		return IRQ_NONE;
3324 
3325 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3326 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3327 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3328 
3329 	/* Read back to deassert IRQ immediately to avoid too many
3330 	 * spurious interrupts.
3331 	 */
3332 	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3333 
3334 	/* Return here if interrupt is shared and is disabled. */
3335 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3336 		return IRQ_HANDLED;
3337 
3338 	if (napi_schedule_prep(&bnapi->napi)) {
3339 		bnapi->last_status_idx = sblk->status_idx;
3340 		__napi_schedule(&bnapi->napi);
3341 	}
3342 
3343 	return IRQ_HANDLED;
3344 }
3345 
3346 static inline int
3347 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3348 {
3349 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3350 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3351 
3352 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3353 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3354 		return 1;
3355 	return 0;
3356 }
3357 
3358 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3359 				 STATUS_ATTN_BITS_TIMER_ABORT)
3360 
3361 static inline int
3362 bnx2_has_work(struct bnx2_napi *bnapi)
3363 {
3364 	struct status_block *sblk = bnapi->status_blk.msi;
3365 
3366 	if (bnx2_has_fast_work(bnapi))
3367 		return 1;
3368 
3369 #ifdef BCM_CNIC
3370 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3371 		return 1;
3372 #endif
3373 
3374 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3375 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3376 		return 1;
3377 
3378 	return 0;
3379 }
3380 
3381 static void
3382 bnx2_chk_missed_msi(struct bnx2 *bp)
3383 {
3384 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3385 	u32 msi_ctrl;
3386 
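	/* If work is pending but the status index has not advanced since
	 * the last check, assume an MSI was lost and pulse the MSI enable
	 * bit to regenerate the interrupt.
	 */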
3387 	if (bnx2_has_work(bnapi)) {
3388 		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3389 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3390 			return;
3391 
3392 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3393 			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3394 			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3395 			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3396 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3397 		}
3398 	}
3399 
3400 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3401 }
3402 
3403 #ifdef BCM_CNIC
3404 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3405 {
3406 	struct cnic_ops *c_ops;
3407 
3408 	if (!bnapi->cnic_present)
3409 		return;
3410 
3411 	rcu_read_lock();
3412 	c_ops = rcu_dereference(bp->cnic_ops);
3413 	if (c_ops)
3414 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3415 						      bnapi->status_blk.msi);
3416 	rcu_read_unlock();
3417 }
3418 #endif
3419 
3420 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3421 {
3422 	struct status_block *sblk = bnapi->status_blk.msi;
3423 	u32 status_attn_bits = sblk->status_attn_bits;
3424 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3425 
3426 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3427 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3428 
3429 		bnx2_phy_int(bp, bnapi);
3430 
3431 		/* This is needed to take care of transient status
3432 		 * during link changes.
3433 		 */
3434 		REG_WR(bp, BNX2_HC_COMMAND,
3435 		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3436 		REG_RD(bp, BNX2_HC_COMMAND);
3437 	}
3438 }
3439 
3440 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3441 			  int work_done, int budget)
3442 {
3443 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3444 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3445 
3446 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3447 		bnx2_tx_int(bp, bnapi, 0);
3448 
3449 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3450 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3451 
3452 	return work_done;
3453 }
3454 
3455 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3456 {
3457 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3458 	struct bnx2 *bp = bnapi->bp;
3459 	int work_done = 0;
3460 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3461 
3462 	while (1) {
3463 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3464 		if (unlikely(work_done >= budget))
3465 			break;
3466 
3467 		bnapi->last_status_idx = sblk->status_idx;
3468 		/* status idx must be read before checking for more work. */
3469 		rmb();
3470 		if (likely(!bnx2_has_fast_work(bnapi))) {
3471 
3472 			napi_complete(napi);
3473 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3474 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3475 			       bnapi->last_status_idx);
3476 			break;
3477 		}
3478 	}
3479 	return work_done;
3480 }
3481 
3482 static int bnx2_poll(struct napi_struct *napi, int budget)
3483 {
3484 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3485 	struct bnx2 *bp = bnapi->bp;
3486 	int work_done = 0;
3487 	struct status_block *sblk = bnapi->status_blk.msi;
3488 
3489 	while (1) {
3490 		bnx2_poll_link(bp, bnapi);
3491 
3492 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3493 
3494 #ifdef BCM_CNIC
3495 		bnx2_poll_cnic(bp, bnapi);
3496 #endif
3497 
3498 		/* bnapi->last_status_idx is used below to tell the hw how
3499 		 * much work has been processed, so we must read it before
3500 		 * checking for more work.
3501 		 */
3502 		bnapi->last_status_idx = sblk->status_idx;
3503 
3504 		if (unlikely(work_done >= budget))
3505 			break;
3506 
3507 		rmb();
3508 		if (likely(!bnx2_has_work(bnapi))) {
3509 			napi_complete(napi);
3510 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3511 				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3512 				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3513 				       bnapi->last_status_idx);
3514 				break;
3515 			}
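			/* With INTx, update the index with the interrupt
			 * still masked, then unmask it with a second write.
			 */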
3516 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3517 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3518 			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3519 			       bnapi->last_status_idx);
3520 
3521 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3522 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3523 			       bnapi->last_status_idx);
3524 			break;
3525 		}
3526 	}
3527 
3528 	return work_done;
3529 }
3530 
3531 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3532  * from set_multicast.
3533  */
3534 static void
3535 bnx2_set_rx_mode(struct net_device *dev)
3536 {
3537 	struct bnx2 *bp = netdev_priv(dev);
3538 	u32 rx_mode, sort_mode;
3539 	struct netdev_hw_addr *ha;
3540 	int i;
3541 
3542 	if (!netif_running(dev))
3543 		return;
3544 
3545 	spin_lock_bh(&bp->phy_lock);
3546 
3547 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3548 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3549 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3550 	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3551 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3552 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3553 	if (dev->flags & IFF_PROMISC) {
3554 		/* Promiscuous mode. */
3555 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3556 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3557 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3558 	}
3559 	else if (dev->flags & IFF_ALLMULTI) {
3560 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3561 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3562 			       0xffffffff);
3563 		}
3564 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3565 	}
3566 	else {
3567 		/* Accept one or more multicast addresses. */
3568 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3569 		u32 regidx;
3570 		u32 bit;
3571 		u32 crc;
3572 
3573 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3574 
3575 		netdev_for_each_mc_addr(ha, dev) {
3576 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3577 			bit = crc & 0xff;
3578 			regidx = (bit & 0xe0) >> 5;
3579 			bit &= 0x1f;
3580 			mc_filter[regidx] |= (1 << bit);
3581 		}
3582 
3583 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3584 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3585 			       mc_filter[i]);
3586 		}
3587 
3588 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3589 	}
3590 
3591 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3592 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3593 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3594 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3595 	} else if (!(dev->flags & IFF_PROMISC)) {
3596 		/* Add all entries to the match filter list */
3597 		i = 0;
3598 		netdev_for_each_uc_addr(ha, dev) {
3599 			bnx2_set_mac_addr(bp, ha->addr,
3600 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3601 			sort_mode |= (1 <<
3602 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3603 			i++;
3604 		}
3605 
3606 	}
3607 
3608 	if (rx_mode != bp->rx_mode) {
3609 		bp->rx_mode = rx_mode;
3610 		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3611 	}
3612 
3613 	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3614 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3615 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3616 
3617 	spin_unlock_bh(&bp->phy_lock);
3618 }
3619 
3620 static int
3621 check_fw_section(const struct firmware *fw,
3622 		 const struct bnx2_fw_file_section *section,
3623 		 u32 alignment, bool non_empty)
3624 {
3625 	u32 offset = be32_to_cpu(section->offset);
3626 	u32 len = be32_to_cpu(section->len);
3627 
3628 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3629 		return -EINVAL;
3630 	if ((non_empty && len == 0) || len > fw->size - offset ||
3631 	    len & (alignment - 1))
3632 		return -EINVAL;
3633 	return 0;
3634 }
3635 
3636 static int
3637 check_mips_fw_entry(const struct firmware *fw,
3638 		    const struct bnx2_mips_fw_file_entry *entry)
3639 {
3640 	if (check_fw_section(fw, &entry->text, 4, true) ||
3641 	    check_fw_section(fw, &entry->data, 4, false) ||
3642 	    check_fw_section(fw, &entry->rodata, 4, false))
3643 		return -EINVAL;
3644 	return 0;
3645 }
3646 
3647 static void bnx2_release_firmware(struct bnx2 *bp)
3648 {
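	/* rv2p_firmware is the sentinel: both firmware images are
	 * requested and released together.
	 */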
3649 	if (bp->rv2p_firmware) {
3650 		release_firmware(bp->mips_firmware);
3651 		release_firmware(bp->rv2p_firmware);
3652 		bp->rv2p_firmware = NULL;
3653 	}
3654 }
3655 
3656 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3657 {
3658 	const char *mips_fw_file, *rv2p_fw_file;
3659 	const struct bnx2_mips_fw_file *mips_fw;
3660 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3661 	int rc;
3662 
3663 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3664 		mips_fw_file = FW_MIPS_FILE_09;
3665 		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3666 		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3667 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3668 		else
3669 			rv2p_fw_file = FW_RV2P_FILE_09;
3670 	} else {
3671 		mips_fw_file = FW_MIPS_FILE_06;
3672 		rv2p_fw_file = FW_RV2P_FILE_06;
3673 	}
3674 
3675 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3676 	if (rc) {
3677 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3678 		goto out;
3679 	}
3680 
3681 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3682 	if (rc) {
3683 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3684 		goto err_release_mips_firmware;
3685 	}
3686 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3687 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3688 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3689 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3690 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3691 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3692 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3693 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3694 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3695 		rc = -EINVAL;
3696 		goto err_release_firmware;
3697 	}
3698 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3699 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3700 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3701 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3702 		rc = -EINVAL;
3703 		goto err_release_firmware;
3704 	}
3705 out:
3706 	return rc;
3707 
3708 err_release_firmware:
3709 	release_firmware(bp->rv2p_firmware);
3710 	bp->rv2p_firmware = NULL;
3711 err_release_mips_firmware:
3712 	release_firmware(bp->mips_firmware);
3713 	goto out;
3714 }
3715 
3716 static int bnx2_request_firmware(struct bnx2 *bp)
3717 {
3718 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3719 }
3720 
3721 static u32
3722 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3723 {
3724 	switch (idx) {
3725 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3726 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3727 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3728 		break;
3729 	}
3730 	return rv2p_code;
3731 }
3732 
3733 static int
3734 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3735 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3736 {
3737 	u32 rv2p_code_len, file_offset;
3738 	__be32 *rv2p_code;
3739 	int i;
3740 	u32 val, cmd, addr;
3741 
3742 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3743 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3744 
3745 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3746 
3747 	if (rv2p_proc == RV2P_PROC1) {
3748 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3749 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3750 	} else {
3751 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3752 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3753 	}
3754 
3755 	for (i = 0; i < rv2p_code_len; i += 8) {
3756 		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3757 		rv2p_code++;
3758 		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3759 		rv2p_code++;
3760 
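		/* Each RV2P instruction is 64 bits, written as two 32-bit
		 * halves; i / 8 is the instruction index.
		 */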
3761 		val = (i / 8) | cmd;
3762 		REG_WR(bp, addr, val);
3763 	}
3764 
3765 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
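	/* Apply up to 8 fixups; each fixup gives a 32-bit word index into
	 * the code, and loc / 2 is the 64-bit instruction to rewrite.
	 */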
3766 	for (i = 0; i < 8; i++) {
3767 		u32 loc, code;
3768 
3769 		loc = be32_to_cpu(fw_entry->fixup[i]);
3770 		if (loc && ((loc * 4) < rv2p_code_len)) {
3771 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3772 			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3773 			code = be32_to_cpu(*(rv2p_code + loc));
3774 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3775 			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3776 
3777 			val = (loc / 2) | cmd;
3778 			REG_WR(bp, addr, val);
3779 		}
3780 	}
3781 
3782 	/* Reset the processor; the un-stall is done later. */
3783 	if (rv2p_proc == RV2P_PROC1) {
3784 		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3785 	}
3786 	else {
3787 		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3788 	}
3789 
3790 	return 0;
3791 }
3792 
3793 static int
3794 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3795 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3796 {
3797 	u32 addr, len, file_offset;
3798 	__be32 *data;
3799 	u32 offset;
3800 	u32 val;
3801 
3802 	/* Halt the CPU. */
3803 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3804 	val |= cpu_reg->mode_value_halt;
3805 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3806 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3807 
3808 	/* Load the Text area. */
3809 	addr = be32_to_cpu(fw_entry->text.addr);
3810 	len = be32_to_cpu(fw_entry->text.len);
3811 	file_offset = be32_to_cpu(fw_entry->text.offset);
3812 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3813 
3814 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3815 	if (len) {
3816 		int j;
3817 
3818 		for (j = 0; j < (len / 4); j++, offset += 4)
3819 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3820 	}
3821 
3822 	/* Load the Data area. */
3823 	addr = be32_to_cpu(fw_entry->data.addr);
3824 	len = be32_to_cpu(fw_entry->data.len);
3825 	file_offset = be32_to_cpu(fw_entry->data.offset);
3826 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3827 
3828 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3829 	if (len) {
3830 		int j;
3831 
3832 		for (j = 0; j < (len / 4); j++, offset += 4)
3833 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3834 	}
3835 
3836 	/* Load the Read-Only area. */
3837 	addr = be32_to_cpu(fw_entry->rodata.addr);
3838 	len = be32_to_cpu(fw_entry->rodata.len);
3839 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3840 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3841 
3842 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3843 	if (len) {
3844 		int j;
3845 
3846 		for (j = 0; j < (len / 4); j++, offset += 4)
3847 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3848 	}
3849 
3850 	/* Clear the pre-fetch instruction. */
3851 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3852 
3853 	val = be32_to_cpu(fw_entry->start_addr);
3854 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3855 
3856 	/* Start the CPU. */
3857 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3858 	val &= ~cpu_reg->mode_value_halt;
3859 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3860 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3861 
3862 	return 0;
3863 }
3864 
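/* Load firmware into both RV2P processors and all five MIPS CPUs. */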
3865 static int
3866 bnx2_init_cpus(struct bnx2 *bp)
3867 {
3868 	const struct bnx2_mips_fw_file *mips_fw =
3869 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3870 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3871 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3872 	int rc;
3873 
3874 	/* Initialize the RV2P processor. */
3875 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3876 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3877 
3878 	/* Initialize the RX Processor. */
3879 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3880 	if (rc)
3881 		goto init_cpu_err;
3882 
3883 	/* Initialize the TX Processor. */
3884 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3885 	if (rc)
3886 		goto init_cpu_err;
3887 
3888 	/* Initialize the TX Patch-up Processor. */
3889 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3890 	if (rc)
3891 		goto init_cpu_err;
3892 
3893 	/* Initialize the Completion Processor. */
3894 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3895 	if (rc)
3896 		goto init_cpu_err;
3897 
3898 	/* Initialize the Command Processor. */
3899 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3900 
3901 init_cpu_err:
3902 	return rc;
3903 }
3904 
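/* Transition the device between D0 and D3hot.  Entering D0 clears the
 * PME status; entering D3hot optionally programs the MAC for
 * Wake-on-LAN before PMCSR is written.  Other states are rejected.
 */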
3905 static int
3906 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3907 {
3908 	u16 pmcsr;
3909 
3910 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3911 
3912 	switch (state) {
3913 	case PCI_D0: {
3914 		u32 val;
3915 
3916 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3917 			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3918 			PCI_PM_CTRL_PME_STATUS);
3919 
3920 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3921 			/* delay required during transition out of D3hot */
3922 			msleep(20);
3923 
3924 		val = REG_RD(bp, BNX2_EMAC_MODE);
3925 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3926 		val &= ~BNX2_EMAC_MODE_MPKT;
3927 		REG_WR(bp, BNX2_EMAC_MODE, val);
3928 
3929 		val = REG_RD(bp, BNX2_RPM_CONFIG);
3930 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3931 		REG_WR(bp, BNX2_RPM_CONFIG, val);
3932 		break;
3933 	}
3934 	case PCI_D3hot: {
3935 		int i;
3936 		u32 val, wol_msg;
3937 
3938 		if (bp->wol) {
3939 			u32 advertising;
3940 			u8 autoneg;
3941 
3942 			autoneg = bp->autoneg;
3943 			advertising = bp->advertising;
3944 
3945 			if (bp->phy_port == PORT_TP) {
3946 				bp->autoneg = AUTONEG_SPEED;
3947 				bp->advertising = ADVERTISED_10baseT_Half |
3948 					ADVERTISED_10baseT_Full |
3949 					ADVERTISED_100baseT_Half |
3950 					ADVERTISED_100baseT_Full |
3951 					ADVERTISED_Autoneg;
3952 			}
3953 
3954 			spin_lock_bh(&bp->phy_lock);
3955 			bnx2_setup_phy(bp, bp->phy_port);
3956 			spin_unlock_bh(&bp->phy_lock);
3957 
3958 			bp->autoneg = autoneg;
3959 			bp->advertising = advertising;
3960 
3961 			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3962 
3963 			val = REG_RD(bp, BNX2_EMAC_MODE);
3964 
3965 			/* Enable port mode. */
3966 			val &= ~BNX2_EMAC_MODE_PORT;
3967 			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3968 			       BNX2_EMAC_MODE_ACPI_RCVD |
3969 			       BNX2_EMAC_MODE_MPKT;
3970 			if (bp->phy_port == PORT_TP)
3971 				val |= BNX2_EMAC_MODE_PORT_MII;
3972 			else {
3973 				val |= BNX2_EMAC_MODE_PORT_GMII;
3974 				if (bp->line_speed == SPEED_2500)
3975 					val |= BNX2_EMAC_MODE_25G_MODE;
3976 			}
3977 
3978 			REG_WR(bp, BNX2_EMAC_MODE, val);
3979 
3980 			/* receive all multicast */
3981 			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3982 				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3983 				       0xffffffff);
3984 			}
3985 			REG_WR(bp, BNX2_EMAC_RX_MODE,
3986 			       BNX2_EMAC_RX_MODE_SORT_MODE);
3987 
3988 			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3989 			      BNX2_RPM_SORT_USER0_MC_EN;
3990 			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3991 			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3992 			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3993 			       BNX2_RPM_SORT_USER0_ENA);
3994 
3995 			/* Need to enable EMAC and RPM for WOL. */
3996 			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3997 			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3998 			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3999 			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4000 
4001 			val = REG_RD(bp, BNX2_RPM_CONFIG);
4002 			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4003 			REG_WR(bp, BNX2_RPM_CONFIG, val);
4004 
4005 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}
4010 
4011 		if (!(bp->flags & BNX2_FLAG_NO_WOL))
4012 			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4013 				     1, 0);
4014 
4015 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			if (bp->wol)
				pmcsr |= 3;
		} else {
			pmcsr |= 3;
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4028 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4029 				      pmcsr);
4030 
4031 		/* No more memory access after this point until
4032 		 * device is brought back to D0.
4033 		 */
4034 		udelay(50);
4035 		break;
4036 	}
4037 	default:
4038 		return -EINVAL;
4039 	}
4040 	return 0;
4041 }
4042 
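/* Arbitrate for the NVRAM interface by setting our request bit and
 * polling until the hardware grants ownership.
 */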
4043 static int
4044 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4045 {
4046 	u32 val;
4047 	int j;
4048 
4049 	/* Request access to the flash interface. */
4050 	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4051 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4052 		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4053 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4054 			break;
4055 
4056 		udelay(5);
4057 	}
4058 
4059 	if (j >= NVRAM_TIMEOUT_COUNT)
4060 		return -EBUSY;
4061 
4062 	return 0;
4063 }
4064 
4065 static int
4066 bnx2_release_nvram_lock(struct bnx2 *bp)
4067 {
4068 	int j;
4069 	u32 val;
4070 
4071 	/* Relinquish nvram interface. */
4072 	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4073 
4074 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4075 		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4076 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4077 			break;
4078 
4079 		udelay(5);
4080 	}
4081 
4082 	if (j >= NVRAM_TIMEOUT_COUNT)
4083 		return -EBUSY;
4084 
4085 	return 0;
4086 }
4088 
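/* Enable NVRAM writes.  Flash parts with the WREN flag also require
 * an explicit write-enable command.
 */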
4089 static int
4090 bnx2_enable_nvram_write(struct bnx2 *bp)
4091 {
4092 	u32 val;
4093 
4094 	val = REG_RD(bp, BNX2_MISC_CFG);
4095 	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4096 
4097 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4098 		int j;
4099 
4100 		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4101 		REG_WR(bp, BNX2_NVM_COMMAND,
4102 		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4103 
4104 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4105 			udelay(5);
4106 
4107 			val = REG_RD(bp, BNX2_NVM_COMMAND);
4108 			if (val & BNX2_NVM_COMMAND_DONE)
4109 				break;
4110 		}
4111 
4112 		if (j >= NVRAM_TIMEOUT_COUNT)
4113 			return -EBUSY;
4114 	}
4115 	return 0;
4116 }
4117 
4118 static void
4119 bnx2_disable_nvram_write(struct bnx2 *bp)
4120 {
4121 	u32 val;
4122 
4123 	val = REG_RD(bp, BNX2_MISC_CFG);
4124 	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4125 }
4126 
4128 static void
4129 bnx2_enable_nvram_access(struct bnx2 *bp)
4130 {
4131 	u32 val;
4132 
4133 	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4134 	/* Enable both bits, even on read. */
4135 	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4136 	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4137 }
4138 
4139 static void
4140 bnx2_disable_nvram_access(struct bnx2 *bp)
4141 {
4142 	u32 val;
4143 
4144 	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4145 	/* Disable both bits, even after read. */
4146 	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4147 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4148 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4149 }
4150 
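/* Erase the flash page containing @offset.  Buffered flash needs no
 * explicit erase.
 */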
4151 static int
4152 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4153 {
4154 	u32 cmd;
4155 	int j;
4156 
4157 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4158 		/* Buffered flash, no erase needed */
4159 		return 0;
4160 
4161 	/* Build an erase command */
4162 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4163 	      BNX2_NVM_COMMAND_DOIT;
4164 
4165 	/* Need to clear DONE bit separately. */
4166 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4167 
	/* Address of the NVRAM page to erase. */
4169 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4170 
4171 	/* Issue an erase command. */
4172 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4173 
4174 	/* Wait for completion. */
4175 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4176 		u32 val;
4177 
4178 		udelay(5);
4179 
4180 		val = REG_RD(bp, BNX2_NVM_COMMAND);
4181 		if (val & BNX2_NVM_COMMAND_DONE)
4182 			break;
4183 	}
4184 
4185 	if (j >= NVRAM_TIMEOUT_COUNT)
4186 		return -EBUSY;
4187 
4188 	return 0;
4189 }
4190 
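/* Read one 32-bit word of NVRAM at @offset into @ret_val, polling the
 * command register until the controller reports completion.
 */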
4191 static int
4192 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4193 {
4194 	u32 cmd;
4195 	int j;
4196 
4197 	/* Build the command word. */
4198 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4199 
	/* Translate the offset for buffered flash; not needed for 5709. */
4201 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4202 		offset = ((offset / bp->flash_info->page_size) <<
4203 			   bp->flash_info->page_bits) +
4204 			  (offset % bp->flash_info->page_size);
4205 	}
4206 
4207 	/* Need to clear DONE bit separately. */
4208 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4209 
4210 	/* Address of the NVRAM to read from. */
4211 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4212 
4213 	/* Issue a read command. */
4214 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4215 
4216 	/* Wait for completion. */
4217 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4218 		u32 val;
4219 
4220 		udelay(5);
4221 
4222 		val = REG_RD(bp, BNX2_NVM_COMMAND);
4223 		if (val & BNX2_NVM_COMMAND_DONE) {
4224 			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4225 			memcpy(ret_val, &v, 4);
4226 			break;
4227 		}
4228 	}
4229 	if (j >= NVRAM_TIMEOUT_COUNT)
4230 		return -EBUSY;
4231 
4232 	return 0;
4233 }
4234 
4235 
4236 static int
4237 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4238 {
4239 	u32 cmd;
4240 	__be32 val32;
4241 	int j;
4242 
4243 	/* Build the command word. */
4244 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4245 
	/* Translate the offset for buffered flash; not needed for 5709. */
4247 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4248 		offset = ((offset / bp->flash_info->page_size) <<
4249 			  bp->flash_info->page_bits) +
4250 			 (offset % bp->flash_info->page_size);
4251 	}
4252 
4253 	/* Need to clear DONE bit separately. */
4254 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4255 
4256 	memcpy(&val32, val, 4);
4257 
4258 	/* Write the data. */
4259 	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4260 
4261 	/* Address of the NVRAM to write to. */
4262 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4263 
4264 	/* Issue the write command. */
4265 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4266 
4267 	/* Wait for completion. */
4268 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4269 		udelay(5);
4270 
4271 		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4272 			break;
4273 	}
4274 	if (j >= NVRAM_TIMEOUT_COUNT)
4275 		return -EBUSY;
4276 
4277 	return 0;
4278 }
4279 
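/* Identify the attached flash/EEPROM device from the strapping value
 * in BNX2_NVM_CFG1, reconfigure the NVRAM interface for it if that
 * has not been done yet, and record the flash size.
 */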
4280 static int
4281 bnx2_init_nvram(struct bnx2 *bp)
4282 {
4283 	u32 val;
4284 	int j, entry_count, rc = 0;
4285 	const struct flash_spec *flash;
4286 
4287 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4288 		bp->flash_info = &flash_5709;
4289 		goto get_flash_size;
4290 	}
4291 
4292 	/* Determine the selected interface. */
4293 	val = REG_RD(bp, BNX2_NVM_CFG1);
4294 
4295 	entry_count = ARRAY_SIZE(flash_table);
4296 
4297 	if (val & 0x40000000) {
4298 
4299 		/* Flash interface has been reconfigured */
4300 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4301 		     j++, flash++) {
4302 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4303 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4304 				bp->flash_info = flash;
4305 				break;
4306 			}
4307 		}
	} else {
		u32 mask;
		/* Not yet reconfigured */
4312 
4313 		if (val & (1 << 23))
4314 			mask = FLASH_BACKUP_STRAP_MASK;
4315 		else
4316 			mask = FLASH_STRAP_MASK;
4317 
4318 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4319 			j++, flash++) {
4320 
4321 			if ((val & mask) == (flash->strapping & mask)) {
4322 				bp->flash_info = flash;
4323 
4324 				/* Request access to the flash interface. */
4325 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4326 					return rc;
4327 
4328 				/* Enable access to flash interface */
4329 				bnx2_enable_nvram_access(bp);
4330 
4331 				/* Reconfigure the flash interface */
4332 				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4333 				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4334 				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4335 				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4336 
4337 				/* Disable access to flash interface */
4338 				bnx2_disable_nvram_access(bp);
4339 				bnx2_release_nvram_lock(bp);
4340 
4341 				break;
4342 			}
4343 		}
4344 	} /* if (val & 0x40000000) */
4345 
4346 	if (j == entry_count) {
4347 		bp->flash_info = NULL;
4348 		pr_alert("Unknown flash/EEPROM type\n");
4349 		return -ENODEV;
4350 	}
4351 
4352 get_flash_size:
4353 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4354 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4355 	if (val)
4356 		bp->flash_size = val;
4357 	else
4358 		bp->flash_size = bp->flash_info->total_size;
4359 
4360 	return rc;
4361 }
4362 
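/* Read an arbitrary byte range from NVRAM, using bounce buffers to
 * handle start and end offsets that are not dword aligned.
 */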
4363 static int
4364 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4365 		int buf_size)
4366 {
4367 	int rc = 0;
4368 	u32 cmd_flags, offset32, len32, extra;
4369 
4370 	if (buf_size == 0)
4371 		return 0;
4372 
4373 	/* Request access to the flash interface. */
4374 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4375 		return rc;
4376 
4377 	/* Enable access to flash interface */
4378 	bnx2_enable_nvram_access(bp);
4379 
4380 	len32 = buf_size;
4381 	offset32 = offset;
4382 	extra = 0;
4383 
4384 	cmd_flags = 0;
4385 
4386 	if (offset32 & 3) {
4387 		u8 buf[4];
4388 		u32 pre_len;
4389 
4390 		offset32 &= ~3;
4391 		pre_len = 4 - (offset & 3);
4392 
4393 		if (pre_len >= len32) {
4394 			pre_len = len32;
4395 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4396 				    BNX2_NVM_COMMAND_LAST;
		} else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}
4401 
4402 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4403 
4404 		if (rc)
4405 			return rc;
4406 
4407 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4408 
4409 		offset32 += 4;
4410 		ret_buf += pre_len;
4411 		len32 -= pre_len;
4412 	}
4413 	if (len32 & 3) {
4414 		extra = 4 - (len32 & 3);
4415 		len32 = (len32 + 4) & ~3;
4416 	}
4417 
4418 	if (len32 == 4) {
4419 		u8 buf[4];
4420 
4421 		if (cmd_flags)
4422 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4423 		else
4424 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4425 				    BNX2_NVM_COMMAND_LAST;
4426 
4427 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4428 
4429 		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
4432 		u8 buf[4];
4433 
4434 		/* Read the first word. */
4435 		if (cmd_flags)
4436 			cmd_flags = 0;
4437 		else
4438 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4439 
4440 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4441 
4442 		/* Advance to the next dword. */
4443 		offset32 += 4;
4444 		ret_buf += 4;
4445 		len32 -= 4;
4446 
4447 		while (len32 > 4 && rc == 0) {
4448 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4449 
4450 			/* Advance to the next dword. */
4451 			offset32 += 4;
4452 			ret_buf += 4;
4453 			len32 -= 4;
4454 		}
4455 
4456 		if (rc)
4457 			return rc;
4458 
4459 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4460 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4461 
4462 		memcpy(ret_buf, buf, 4 - extra);
4463 	}
4464 
4465 	/* Disable access to flash interface */
4466 	bnx2_disable_nvram_access(bp);
4467 
4468 	bnx2_release_nvram_lock(bp);
4469 
4470 	return rc;
4471 }
4472 
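/* Write an arbitrary byte range to NVRAM.  Unaligned edges are merged
 * with existing data, and on non-buffered flash each affected page is
 * read, erased, and rewritten in full.
 */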
4473 static int
4474 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4475 		int buf_size)
4476 {
4477 	u32 written, offset32, len32;
4478 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4479 	int rc = 0;
4480 	int align_start, align_end;
4481 
4482 	buf = data_buf;
4483 	offset32 = offset;
4484 	len32 = buf_size;
4485 	align_start = align_end = 0;
4486 
4487 	if ((align_start = (offset32 & 3))) {
4488 		offset32 &= ~3;
4489 		len32 += align_start;
4490 		if (len32 < 4)
4491 			len32 = 4;
4492 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4493 			return rc;
4494 	}
4495 
4496 	if (len32 & 3) {
4497 		align_end = 4 - (len32 & 3);
4498 		len32 += align_end;
4499 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4500 			return rc;
4501 	}
4502 
4503 	if (align_start || align_end) {
4504 		align_buf = kmalloc(len32, GFP_KERNEL);
4505 		if (align_buf == NULL)
4506 			return -ENOMEM;
4507 		if (align_start) {
4508 			memcpy(align_buf, start, 4);
4509 		}
4510 		if (align_end) {
4511 			memcpy(align_buf + len32 - 4, end, 4);
4512 		}
4513 		memcpy(align_buf + align_start, data_buf, buf_size);
4514 		buf = align_buf;
4515 	}
4516 
4517 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4518 		flash_buffer = kmalloc(264, GFP_KERNEL);
4519 		if (flash_buffer == NULL) {
4520 			rc = -ENOMEM;
4521 			goto nvram_write_end;
4522 		}
4523 	}
4524 
4525 	written = 0;
4526 	while ((written < len32) && (rc == 0)) {
4527 		u32 page_start, page_end, data_start, data_end;
4528 		u32 addr, cmd_flags;
4529 		int i;
4530 
		/* Find the page_start addr */
4532 		page_start = offset32 + written;
4533 		page_start -= (page_start % bp->flash_info->page_size);
4534 		/* Find the page_end addr */
4535 		page_end = page_start + bp->flash_info->page_size;
4536 		/* Find the data_start addr */
4537 		data_start = (written == 0) ? offset32 : page_start;
4538 		/* Find the data_end addr */
4539 		data_end = (page_end > offset32 + len32) ?
4540 			(offset32 + len32) : page_end;
4541 
4542 		/* Request access to the flash interface. */
4543 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4544 			goto nvram_write_end;
4545 
4546 		/* Enable access to flash interface */
4547 		bnx2_enable_nvram_access(bp);
4548 
4549 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4550 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4551 			int j;
4552 
4553 			/* Read the whole page into the buffer
			 * (non-buffered flash only). */
4555 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4556 				if (j == (bp->flash_info->page_size - 4)) {
4557 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4558 				}
4559 				rc = bnx2_nvram_read_dword(bp,
4560 					page_start + j,
4561 					&flash_buffer[j],
4562 					cmd_flags);
4563 
4564 				if (rc)
4565 					goto nvram_write_end;
4566 
4567 				cmd_flags = 0;
4568 			}
4569 		}
4570 
4571 		/* Enable writes to flash interface (unlock write-protect) */
4572 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4573 			goto nvram_write_end;
4574 
4575 		/* Loop to write back the buffer data from page_start to
4576 		 * data_start */
4577 		i = 0;
4578 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4579 			/* Erase the page */
4580 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4581 				goto nvram_write_end;
4582 
4583 			/* Re-enable the write again for the actual write */
4584 			bnx2_enable_nvram_write(bp);
4585 
4586 			for (addr = page_start; addr < data_start;
4587 				addr += 4, i += 4) {
4588 
4589 				rc = bnx2_nvram_write_dword(bp, addr,
4590 					&flash_buffer[i], cmd_flags);
4591 
4592 				if (rc != 0)
4593 					goto nvram_write_end;
4594 
4595 				cmd_flags = 0;
4596 			}
4597 		}
4598 
4599 		/* Loop to write the new data from data_start to data_end */
4600 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4601 			if ((addr == page_end - 4) ||
4602 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4603 				 (addr == data_end - 4))) {
4604 
4605 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4606 			}
4607 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4608 				cmd_flags);
4609 
4610 			if (rc != 0)
4611 				goto nvram_write_end;
4612 
4613 			cmd_flags = 0;
4614 			buf += 4;
4615 		}
4616 
4617 		/* Loop to write back the buffer data from data_end
4618 		 * to page_end */
4619 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4620 			for (addr = data_end; addr < page_end;
4621 				addr += 4, i += 4) {
4622 
				if (addr == page_end - 4)
					cmd_flags = BNX2_NVM_COMMAND_LAST;
4626 				rc = bnx2_nvram_write_dword(bp, addr,
4627 					&flash_buffer[i], cmd_flags);
4628 
4629 				if (rc != 0)
4630 					goto nvram_write_end;
4631 
4632 				cmd_flags = 0;
4633 			}
4634 		}
4635 
4636 		/* Disable writes to flash interface (lock write-protect) */
4637 		bnx2_disable_nvram_write(bp);
4638 
4639 		/* Disable access to flash interface */
4640 		bnx2_disable_nvram_access(bp);
4641 		bnx2_release_nvram_lock(bp);
4642 
4643 		/* Increment written */
4644 		written += data_end - data_start;
4645 	}
4646 
4647 nvram_write_end:
4648 	kfree(flash_buffer);
4649 	kfree(align_buf);
4650 	return rc;
4651 }
4652 
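/* Query the firmware capability mailbox to learn whether the firmware
 * can preserve VLAN tags and whether a remote PHY is in use, and
 * acknowledge the capabilities the driver supports.
 */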
4653 static void
4654 bnx2_init_fw_cap(struct bnx2 *bp)
4655 {
4656 	u32 val, sig = 0;
4657 
4658 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4659 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4660 
4661 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4662 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4663 
4664 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4665 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4666 		return;
4667 
4668 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4669 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4670 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4671 	}
4672 
4673 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4674 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4675 		u32 link;
4676 
4677 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4678 
4679 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4680 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4681 			bp->phy_port = PORT_FIBRE;
4682 		else
4683 			bp->phy_port = PORT_TP;
4684 
4685 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4686 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4687 	}
4688 
4689 	if (netif_running(bp->dev) && sig)
4690 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4691 }
4692 
4693 static void
4694 bnx2_setup_msix_tbl(struct bnx2 *bp)
4695 {
4696 	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4697 
4698 	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4699 	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4700 }
4701 
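/* Perform a coordinated soft reset: quiesce DMA, handshake with the
 * firmware, reset the core, verify the endian configuration, and wait
 * for the firmware to finish initializing.
 */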
4702 static int
4703 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4704 {
4705 	u32 val;
4706 	int i, rc = 0;
4707 	u8 old_port;
4708 
4709 	/* Wait for the current PCI transaction to complete before
4710 	 * issuing a reset. */
4711 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4712 	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4713 		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4714 		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4715 		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4716 		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4717 		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4718 		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4719 		udelay(5);
4720 	} else {  /* 5709 */
4721 		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4722 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4723 		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4724 		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4725 
4726 		for (i = 0; i < 100; i++) {
4727 			msleep(1);
4728 			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4729 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4730 				break;
4731 		}
4732 	}
4733 
4734 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4735 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4736 
4737 	/* Deposit a driver reset signature so the firmware knows that
4738 	 * this is a soft reset. */
4739 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4740 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4741 
	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
4744 	val = REG_RD(bp, BNX2_MISC_ID);
4745 
4746 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4747 		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4748 		REG_RD(bp, BNX2_MISC_COMMAND);
4749 		udelay(5);
4750 
4751 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4752 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4753 
4754 		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4755 
4756 	} else {
4757 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4758 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4759 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4760 
4761 		/* Chip reset. */
4762 		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4763 
4764 		/* Reading back any register after chip reset will hang the
4765 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4766 		 * of margin for write posting.
4767 		 */
4768 		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4769 		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
4770 			msleep(20);
4771 
		/* Reset takes approximately 30 usec */
4773 		for (i = 0; i < 10; i++) {
4774 			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4775 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4776 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4777 				break;
4778 			udelay(10);
4779 		}
4780 
4781 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4782 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4783 			pr_err("Chip reset did not complete\n");
4784 			return -EBUSY;
4785 		}
4786 	}
4787 
4788 	/* Make sure byte swapping is properly configured. */
4789 	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4790 	if (val != 0x01020304) {
4791 		pr_err("Chip not in correct endian mode\n");
4792 		return -ENODEV;
4793 	}
4794 
4795 	/* Wait for the firmware to finish its initialization. */
4796 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4797 	if (rc)
4798 		return rc;
4799 
4800 	spin_lock_bh(&bp->phy_lock);
4801 	old_port = bp->phy_port;
4802 	bnx2_init_fw_cap(bp);
4803 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4804 	    old_port != bp->phy_port)
4805 		bnx2_set_default_remote_link(bp);
4806 	spin_unlock_bh(&bp->phy_lock);
4807 
4808 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * value of this register is 0x0000000e. */
4811 		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4812 
4813 		/* Remove bad rbuf memory from the free pool. */
4814 		rc = bnx2_alloc_bad_rbuf(bp);
4815 	}
4816 
4817 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4818 		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
4820 		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4821 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4822 	}
4823 
4824 	return rc;
4825 }
4826 
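/* Bring the freshly reset chip to an operational state: program the
 * DMA and context configuration, load the CPU firmware, set the MAC
 * address and MTU, and set up host coalescing for every vector.
 */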
4827 static int
4828 bnx2_init_chip(struct bnx2 *bp)
4829 {
4830 	u32 val, mtu;
4831 	int rc, i;
4832 
4833 	/* Make sure the interrupt is not active. */
4834 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4835 
4836 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4837 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4838 #ifdef __BIG_ENDIAN
4839 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4840 #endif
4841 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4842 	      DMA_READ_CHANS << 12 |
4843 	      DMA_WRITE_CHANS << 16;
4844 
4845 	val |= (0x2 << 20) | (1 << 11);
4846 
4847 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4848 		val |= (1 << 23);
4849 
4850 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4851 	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4852 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4853 
4854 	REG_WR(bp, BNX2_DMA_CONFIG, val);
4855 
4856 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4857 		val = REG_RD(bp, BNX2_TDMA_CONFIG);
4858 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4859 		REG_WR(bp, BNX2_TDMA_CONFIG, val);
4860 	}
4861 
4862 	if (bp->flags & BNX2_FLAG_PCIX) {
4863 		u16 val16;
4864 
4865 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4866 				     &val16);
4867 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4868 				      val16 & ~PCI_X_CMD_ERO);
4869 	}
4870 
4871 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4872 	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4873 	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4874 	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4875 
4876 	/* Initialize context mapping and zero out the quick contexts.  The
4877 	 * context block must have already been enabled. */
4878 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4879 		rc = bnx2_init_5709_context(bp);
4880 		if (rc)
4881 			return rc;
4882 	} else
4883 		bnx2_init_context(bp);
4884 
4885 	if ((rc = bnx2_init_cpus(bp)) != 0)
4886 		return rc;
4887 
4888 	bnx2_init_nvram(bp);
4889 
4890 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4891 
4892 	val = REG_RD(bp, BNX2_MQ_CONFIG);
4893 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4894 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4895 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4896 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4897 		if (CHIP_REV(bp) == CHIP_REV_Ax)
4898 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4899 	}
4900 
4901 	REG_WR(bp, BNX2_MQ_CONFIG, val);
4902 
4903 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4904 	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4905 	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4906 
4907 	val = (BCM_PAGE_BITS - 8) << 24;
4908 	REG_WR(bp, BNX2_RV2P_CONFIG, val);
4909 
4910 	/* Configure page size. */
4911 	val = REG_RD(bp, BNX2_TBDR_CONFIG);
4912 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4913 	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4914 	REG_WR(bp, BNX2_TBDR_CONFIG, val);
4915 
4916 	val = bp->mac_addr[0] +
4917 	      (bp->mac_addr[1] << 8) +
4918 	      (bp->mac_addr[2] << 16) +
4919 	      bp->mac_addr[3] +
4920 	      (bp->mac_addr[4] << 8) +
4921 	      (bp->mac_addr[5] << 16);
4922 	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4923 
4924 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4925 	mtu = bp->dev->mtu;
4926 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4927 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4928 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4929 	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4930 
4931 	if (mtu < 1500)
4932 		mtu = 1500;
4933 
4934 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4935 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4936 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4937 
4938 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4939 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4940 		bp->bnx2_napi[i].last_status_idx = 0;
4941 
4942 	bp->idle_chk_status_idx = 0xffff;
4943 
4944 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4945 
4946 	/* Set up how to generate a link change interrupt. */
4947 	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4948 
4949 	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4950 	       (u64) bp->status_blk_mapping & 0xffffffff);
4951 	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4952 
4953 	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4954 	       (u64) bp->stats_blk_mapping & 0xffffffff);
4955 	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4956 	       (u64) bp->stats_blk_mapping >> 32);
4957 
4958 	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4959 	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4960 
4961 	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4962 	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4963 
4964 	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4965 	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4966 
4967 	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4968 
4969 	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4970 
4971 	REG_WR(bp, BNX2_HC_COM_TICKS,
4972 	       (bp->com_ticks_int << 16) | bp->com_ticks);
4973 
4974 	REG_WR(bp, BNX2_HC_CMD_TICKS,
4975 	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4976 
4977 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4978 		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4979 	else
4980 		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4981 	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4982 
	if (CHIP_ID(bp) == CHIP_ID_5706_A1) {
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	} else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}
4989 
4990 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4991 		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4992 		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
4993 
4994 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4995 	}
4996 
4997 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4998 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4999 
5000 	REG_WR(bp, BNX2_HC_CONFIG, val);
5001 
5002 	if (bp->rx_ticks < 25)
5003 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5004 	else
5005 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5006 
5007 	for (i = 1; i < bp->irq_nvecs; i++) {
5008 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5009 			   BNX2_HC_SB_CONFIG_1;
5010 
5011 		REG_WR(bp, base,
5012 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5013 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5014 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5015 
5016 		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5017 			(bp->tx_quick_cons_trip_int << 16) |
5018 			 bp->tx_quick_cons_trip);
5019 
5020 		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5021 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5022 
5023 		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5024 		       (bp->rx_quick_cons_trip_int << 16) |
5025 			bp->rx_quick_cons_trip);
5026 
5027 		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5028 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5029 	}
5030 
5031 	/* Clear internal stats counters. */
5032 	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5033 
5034 	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5035 
5036 	/* Initialize the receive filter. */
5037 	bnx2_set_rx_mode(bp->dev);
5038 
5039 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5040 		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5041 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5042 		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5043 	}
5044 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5045 			  1, 0);
5046 
5047 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5048 	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5049 
5050 	udelay(20);
5051 
5052 	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5053 
5054 	return rc;
5055 }
5056 
5057 static void
5058 bnx2_clear_ring_states(struct bnx2 *bp)
5059 {
5060 	struct bnx2_napi *bnapi;
5061 	struct bnx2_tx_ring_info *txr;
5062 	struct bnx2_rx_ring_info *rxr;
5063 	int i;
5064 
5065 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5066 		bnapi = &bp->bnx2_napi[i];
5067 		txr = &bnapi->tx_ring;
5068 		rxr = &bnapi->rx_ring;
5069 
5070 		txr->tx_cons = 0;
5071 		txr->hw_tx_cons = 0;
5072 		rxr->rx_prod_bseq = 0;
5073 		rxr->rx_prod = 0;
5074 		rxr->rx_cons = 0;
5075 		rxr->rx_pg_prod = 0;
5076 		rxr->rx_pg_cons = 0;
5077 	}
5078 }
5079 
5080 static void
5081 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5082 {
5083 	u32 val, offset0, offset1, offset2, offset3;
5084 	u32 cid_addr = GET_CID_ADDR(cid);
5085 
5086 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5087 		offset0 = BNX2_L2CTX_TYPE_XI;
5088 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5089 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5090 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5091 	} else {
5092 		offset0 = BNX2_L2CTX_TYPE;
5093 		offset1 = BNX2_L2CTX_CMD_TYPE;
5094 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5095 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5096 	}
5097 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5098 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5099 
5100 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5101 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5102 
5103 	val = (u64) txr->tx_desc_mapping >> 32;
5104 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5105 
5106 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5107 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5108 }
5109 
5110 static void
5111 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5112 {
5113 	struct tx_bd *txbd;
5114 	u32 cid = TX_CID;
5115 	struct bnx2_napi *bnapi;
5116 	struct bnx2_tx_ring_info *txr;
5117 
5118 	bnapi = &bp->bnx2_napi[ring_num];
5119 	txr = &bnapi->tx_ring;
5120 
5121 	if (ring_num == 0)
5122 		cid = TX_CID;
5123 	else
5124 		cid = TX_TSS_CID + ring_num - 1;
5125 
5126 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5127 
5128 	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5129 
5130 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5131 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5132 
5133 	txr->tx_prod = 0;
5134 	txr->tx_prod_bseq = 0;
5135 
5136 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5137 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5138 
5139 	bnx2_init_tx_context(bp, cid, txr);
5140 }
5141 
5142 static void
5143 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5144 		     int num_rings)
5145 {
5146 	int i;
5147 	struct rx_bd *rxbd;
5148 
5149 	for (i = 0; i < num_rings; i++) {
5150 		int j;
5151 
5152 		rxbd = &rx_ring[i][0];
5153 		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5154 			rxbd->rx_bd_len = buf_size;
5155 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5156 		}
5157 		if (i == (num_rings - 1))
5158 			j = 0;
5159 		else
5160 			j = i + 1;
5161 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5162 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5163 	}
5164 }
5165 
5166 static void
5167 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5168 {
5169 	int i;
5170 	u16 prod, ring_prod;
5171 	u32 cid, rx_cid_addr, val;
5172 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5173 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5174 
5175 	if (ring_num == 0)
5176 		cid = RX_CID;
5177 	else
5178 		cid = RX_RSS_CID + ring_num - 1;
5179 
5180 	rx_cid_addr = GET_CID_ADDR(cid);
5181 
5182 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5183 			     bp->rx_buf_use_size, bp->rx_max_ring);
5184 
5185 	bnx2_init_rx_context(bp, cid);
5186 
5187 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5188 		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5189 		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5190 	}
5191 
5192 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5193 	if (bp->rx_pg_ring_size) {
5194 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5195 				     rxr->rx_pg_desc_mapping,
5196 				     PAGE_SIZE, bp->rx_max_pg_ring);
5197 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5198 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5199 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5200 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5201 
5202 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5203 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5204 
5205 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5206 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5207 
5208 		if (CHIP_NUM(bp) == CHIP_NUM_5709)
5209 			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5210 	}
5211 
5212 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5213 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5214 
5215 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5216 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5217 
5218 	ring_prod = prod = rxr->rx_pg_prod;
5219 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5220 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5221 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5222 				    ring_num, i, bp->rx_pg_ring_size);
5223 			break;
5224 		}
5225 		prod = NEXT_RX_BD(prod);
5226 		ring_prod = RX_PG_RING_IDX(prod);
5227 	}
5228 	rxr->rx_pg_prod = prod;
5229 
5230 	ring_prod = prod = rxr->rx_prod;
5231 	for (i = 0; i < bp->rx_ring_size; i++) {
5232 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5233 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5234 				    ring_num, i, bp->rx_ring_size);
5235 			break;
5236 		}
5237 		prod = NEXT_RX_BD(prod);
5238 		ring_prod = RX_RING_IDX(prod);
5239 	}
5240 	rxr->rx_prod = prod;
5241 
5242 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5243 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5244 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5245 
5246 	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5247 	REG_WR16(bp, rxr->rx_bidx_addr, prod);
5248 
5249 	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5250 }
5251 
5252 static void
5253 bnx2_init_all_rings(struct bnx2 *bp)
5254 {
5255 	int i;
5256 	u32 val;
5257 
5258 	bnx2_clear_ring_states(bp);
5259 
5260 	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5261 	for (i = 0; i < bp->num_tx_rings; i++)
5262 		bnx2_init_tx_ring(bp, i);
5263 
5264 	if (bp->num_tx_rings > 1)
5265 		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5266 		       (TX_TSS_CID << 7));
5267 
5268 	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5269 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5270 
5271 	for (i = 0; i < bp->num_rx_rings; i++)
5272 		bnx2_init_rx_ring(bp, i);
5273 
5274 	if (bp->num_rx_rings > 1) {
5275 		u32 tbl_32 = 0;
5276 
5277 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5278 			int shift = (i % 8) << 2;
5279 
5280 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5281 			if ((i % 8) == 7) {
5282 				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5283 				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5284 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5285 					BNX2_RLUP_RSS_COMMAND_WRITE |
5286 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5287 				tbl_32 = 0;
5288 			}
5289 		}
5290 
5291 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5292 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5293 
5294 		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5295 
5296 	}
5297 }
5298 
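/* Return the number of ring pages, rounded up to a power of 2 and
 * capped at @max_size, needed to hold @ring_size descriptors.
 */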
5299 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5300 {
5301 	u32 max, num_rings = 1;
5302 
5303 	while (ring_size > MAX_RX_DESC_CNT) {
5304 		ring_size -= MAX_RX_DESC_CNT;
5305 		num_rings++;
5306 	}
5307 	/* round to next power of 2 */
5308 	max = max_size;
5309 	while ((max & num_rings) == 0)
5310 		max >>= 1;
5311 
5312 	if (num_rings != max)
5313 		max <<= 1;
5314 
5315 	return max;
5316 }
5317 
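/* Size the rx rings for the current MTU.  If a full frame plus
 * overhead no longer fits in a single page, the page ring is enabled
 * so that large frames can be split across pages.
 */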
5318 static void
5319 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5320 {
5321 	u32 rx_size, rx_space, jumbo_size;
5322 
5323 	/* 8 for CRC and VLAN */
5324 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5325 
5326 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5327 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5328 
5329 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5330 	bp->rx_pg_ring_size = 0;
5331 	bp->rx_max_pg_ring = 0;
5332 	bp->rx_max_pg_ring_idx = 0;
5333 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5334 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5335 
5336 		jumbo_size = size * pages;
5337 		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5338 			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5339 
5340 		bp->rx_pg_ring_size = jumbo_size;
5341 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5342 							MAX_RX_PG_RINGS);
5343 		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5344 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5345 		bp->rx_copy_thresh = 0;
5346 	}
5347 
5348 	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5350 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5351 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5352 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5353 	bp->rx_ring_size = size;
5354 	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5355 	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5356 }
5357 
5358 static void
5359 bnx2_free_tx_skbs(struct bnx2 *bp)
5360 {
5361 	int i;
5362 
5363 	for (i = 0; i < bp->num_tx_rings; i++) {
5364 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5365 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5366 		int j;
5367 
5368 		if (txr->tx_buf_ring == NULL)
5369 			continue;
5370 
5371 		for (j = 0; j < TX_DESC_CNT; ) {
5372 			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5373 			struct sk_buff *skb = tx_buf->skb;
5374 			int k, last;
5375 
5376 			if (skb == NULL) {
5377 				j++;
5378 				continue;
5379 			}
5380 
5381 			dma_unmap_single(&bp->pdev->dev,
5382 					 dma_unmap_addr(tx_buf, mapping),
5383 					 skb_headlen(skb),
5384 					 PCI_DMA_TODEVICE);
5385 
5386 			tx_buf->skb = NULL;
5387 
5388 			last = tx_buf->nr_frags;
5389 			j++;
5390 			for (k = 0; k < last; k++, j++) {
5391 				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5392 				dma_unmap_page(&bp->pdev->dev,
5393 					dma_unmap_addr(tx_buf, mapping),
5394 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5395 					PCI_DMA_TODEVICE);
5396 			}
5397 			dev_kfree_skb(skb);
5398 		}
5399 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5400 	}
5401 }
5402 
5403 static void
5404 bnx2_free_rx_skbs(struct bnx2 *bp)
5405 {
5406 	int i;
5407 
5408 	for (i = 0; i < bp->num_rx_rings; i++) {
5409 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5410 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5411 		int j;
5412 
5413 		if (rxr->rx_buf_ring == NULL)
5414 			return;
5415 
5416 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5417 			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5418 			u8 *data = rx_buf->data;
5419 
5420 			if (data == NULL)
5421 				continue;
5422 
5423 			dma_unmap_single(&bp->pdev->dev,
5424 					 dma_unmap_addr(rx_buf, mapping),
5425 					 bp->rx_buf_use_size,
5426 					 PCI_DMA_FROMDEVICE);
5427 
5428 			rx_buf->data = NULL;
5429 
5430 			kfree(data);
5431 		}
5432 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5433 			bnx2_free_rx_page(bp, rxr, j);
5434 	}
5435 }
5436 
5437 static void
5438 bnx2_free_skbs(struct bnx2 *bp)
5439 {
5440 	bnx2_free_tx_skbs(bp);
5441 	bnx2_free_rx_skbs(bp);
5442 }
5443 
5444 static int
5445 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5446 {
5447 	int rc;
5448 
5449 	rc = bnx2_reset_chip(bp, reset_code);
5450 	bnx2_free_skbs(bp);
5451 	if (rc)
5452 		return rc;
5453 
5454 	if ((rc = bnx2_init_chip(bp)) != 0)
5455 		return rc;
5456 
5457 	bnx2_init_all_rings(bp);
5458 	return 0;
5459 }
5460 
5461 static int
5462 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5463 {
5464 	int rc;
5465 
5466 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5467 		return rc;
5468 
5469 	spin_lock_bh(&bp->phy_lock);
5470 	bnx2_init_phy(bp, reset_phy);
5471 	bnx2_set_link(bp);
5472 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5473 		bnx2_remote_phy_event(bp);
5474 	spin_unlock_bh(&bp->phy_lock);
5475 	return 0;
5476 }
5477 
5478 static int
5479 bnx2_shutdown_chip(struct bnx2 *bp)
5480 {
5481 	u32 reset_code;
5482 
5483 	if (bp->flags & BNX2_FLAG_NO_WOL)
5484 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5485 	else if (bp->wol)
5486 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5487 	else
5488 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5489 
5490 	return bnx2_reset_chip(bp, reset_code);
5491 }
5492 
5493 static int
5494 bnx2_test_registers(struct bnx2 *bp)
5495 {
5496 	int ret;
5497 	int i, is_5709;
5498 	static const struct {
5499 		u16   offset;
5500 		u16   flags;
5501 #define BNX2_FL_NOT_5709	1
5502 		u32   rw_mask;
5503 		u32   ro_mask;
5504 	} reg_tbl[] = {
5505 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5506 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5507 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5508 
5509 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5510 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5511 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5512 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5513 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5514 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5515 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5516 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5517 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5518 
5519 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5520 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5521 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5522 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5523 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5524 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5525 
5526 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5527 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5528 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5529 
5530 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5531 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5532 
5533 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5534 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5535 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5536 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5537 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5538 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5539 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5540 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5541 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5542 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5543 
5544 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5545 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5546 
5547 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5548 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5549 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5550 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5551 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5552 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5553 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5554 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5555 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5556 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5557 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5558 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5559 
5560 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5561 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5562 
5563 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5564 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5565 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5566 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5567 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5568 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5569 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5570 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5571 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5572 
5573 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5574 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5575 
5576 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5577 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5578 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5579 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5580 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5581 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5582 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5583 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5584 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5585 
5586 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5587 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5588 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5589 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5590 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5591 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5592 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5593 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5594 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5595 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5596 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5597 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5598 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5599 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5600 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5601 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5602 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5603 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5604 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5605 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5606 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5607 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5608 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5609 
5610 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5611 	};
5612 
5613 	ret = 0;
5614 	is_5709 = 0;
5615 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5616 		is_5709 = 1;
5617 
5618 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5619 		u32 offset, rw_mask, ro_mask, save_val, val;
5620 		u16 flags = reg_tbl[i].flags;
5621 
5622 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5623 			continue;
5624 
5625 		offset = (u32) reg_tbl[i].offset;
5626 		rw_mask = reg_tbl[i].rw_mask;
5627 		ro_mask = reg_tbl[i].ro_mask;
5628 
5629 		save_val = readl(bp->regview + offset);
5630 
5631 		writel(0, bp->regview + offset);
5632 
5633 		val = readl(bp->regview + offset);
5634 		if ((val & rw_mask) != 0) {
5635 			goto reg_test_err;
5636 		}
5637 
5638 		if ((val & ro_mask) != (save_val & ro_mask)) {
5639 			goto reg_test_err;
5640 		}
5641 
5642 		writel(0xffffffff, bp->regview + offset);
5643 
5644 		val = readl(bp->regview + offset);
5645 		if ((val & rw_mask) != rw_mask) {
5646 			goto reg_test_err;
5647 		}
5648 
5649 		if ((val & ro_mask) != (save_val & ro_mask)) {
5650 			goto reg_test_err;
5651 		}
5652 
5653 		writel(save_val, bp->regview + offset);
5654 		continue;
5655 
5656 reg_test_err:
5657 		writel(save_val, bp->regview + offset);
5658 		ret = -ENODEV;
5659 		break;
5660 	}
5661 	return ret;
5662 }
5663 
5664 static int
5665 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5666 {
5667 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5668 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5669 	int i;
5670 
5671 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5672 		u32 offset;
5673 
5674 		for (offset = 0; offset < size; offset += 4) {
5675 
5676 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5677 
5678 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5679 				test_pattern[i]) {
5680 				return -ENODEV;
5681 			}
5682 		}
5683 	}
5684 	return 0;
5685 }
5686 
5687 static int
5688 bnx2_test_memory(struct bnx2 *bp)
5689 {
5690 	int ret = 0;
5691 	int i;
5692 	static struct mem_entry {
5693 		u32   offset;
5694 		u32   len;
5695 	} mem_tbl_5706[] = {
5696 		{ 0x60000,  0x4000 },
5697 		{ 0xa0000,  0x3000 },
5698 		{ 0xe0000,  0x4000 },
5699 		{ 0x120000, 0x4000 },
5700 		{ 0x1a0000, 0x4000 },
5701 		{ 0x160000, 0x4000 },
5702 		{ 0xffffffff, 0    },
5703 	},
5704 	mem_tbl_5709[] = {
5705 		{ 0x60000,  0x4000 },
5706 		{ 0xa0000,  0x3000 },
5707 		{ 0xe0000,  0x4000 },
5708 		{ 0x120000, 0x4000 },
5709 		{ 0x1a0000, 0x4000 },
5710 		{ 0xffffffff, 0    },
5711 	};
5712 	struct mem_entry *mem_tbl;
5713 	const struct mem_entry *mem_tbl;
5714 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5715 		mem_tbl = mem_tbl_5709;
5716 	else
5717 		mem_tbl = mem_tbl_5706;
5718 
5719 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5720 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5721 			mem_tbl[i].len)) != 0) {
5722 			return ret;
5723 		}
5724 	}
5725 
5726 	return ret;
5727 }
5728 
5729 #define BNX2_MAC_LOOPBACK	0
5730 #define BNX2_PHY_LOOPBACK	1
5731 
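/* Transmit one self-addressed test frame through the requested loopback
 * path (MAC or PHY) and poll the status block using COAL_NOW_WO_INT so
 * that no interrupt is required.  The test passes only if exactly one
 * frame comes back with a clean l2_fhdr status, the expected length
 * (payload plus 4-byte CRC), and an intact data pattern.
 */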
5732 static int
5733 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5734 {
5735 	unsigned int pkt_size, num_pkts, i;
5736 	struct sk_buff *skb;
5737 	u8 *data;
5738 	unsigned char *packet;
5739 	u16 rx_start_idx, rx_idx;
5740 	dma_addr_t map;
5741 	struct tx_bd *txbd;
5742 	struct sw_bd *rx_buf;
5743 	struct l2_fhdr *rx_hdr;
5744 	int ret = -ENODEV;
5745 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi = bnapi;
5746 	struct bnx2_tx_ring_info *txr = &tx_napi->tx_ring;
5747 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5748 
5753 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5754 		bp->loopback = MAC_LOOPBACK;
5755 		bnx2_set_mac_loopback(bp);
5756 	}
5757 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5758 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5759 			return 0;
5760 
5761 		bp->loopback = PHY_LOOPBACK;
5762 		bnx2_set_phy_loopback(bp);
5763 	}
5764 	else
5765 		return -EINVAL;
5766 
5767 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5768 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5769 	if (!skb)
5770 		return -ENOMEM;
5771 	packet = skb_put(skb, pkt_size);
5772 	memcpy(packet, bp->dev->dev_addr, 6);
5773 	memset(packet + 6, 0x0, 8);
5774 	for (i = 14; i < pkt_size; i++)
5775 		packet[i] = (unsigned char) (i & 0xff);
5776 
5777 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5778 			     DMA_TO_DEVICE);
5779 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5780 		dev_kfree_skb(skb);
5781 		return -EIO;
5782 	}
5783 
5784 	REG_WR(bp, BNX2_HC_COMMAND,
5785 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5786 
5787 	REG_RD(bp, BNX2_HC_COMMAND);
5788 
5789 	udelay(5);
5790 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5791 
5792 	num_pkts = 0;
5793 
5794 	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5795 
5796 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5797 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5798 	txbd->tx_bd_mss_nbytes = pkt_size;
5799 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5800 
5801 	num_pkts++;
5802 	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5803 	txr->tx_prod_bseq += pkt_size;
5804 
5805 	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5806 	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5807 
5808 	udelay(100);
5809 
5810 	REG_WR(bp, BNX2_HC_COMMAND,
5811 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5812 
5813 	REG_RD(bp, BNX2_HC_COMMAND);
5814 
5815 	udelay(5);
5816 
5817 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5818 	dev_kfree_skb(skb);
5819 
5820 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5821 		goto loopback_test_done;
5822 
5823 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5824 	if (rx_idx != rx_start_idx + num_pkts) {
5825 		goto loopback_test_done;
5826 	}
5827 
5828 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5829 	data = rx_buf->data;
5830 
5831 	rx_hdr = get_l2_fhdr(data);
5832 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5833 
5834 	dma_sync_single_for_cpu(&bp->pdev->dev,
5835 		dma_unmap_addr(rx_buf, mapping),
5836 		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5837 
5838 	if (rx_hdr->l2_fhdr_status &
5839 		(L2_FHDR_ERRORS_BAD_CRC |
5840 		L2_FHDR_ERRORS_PHY_DECODE |
5841 		L2_FHDR_ERRORS_ALIGNMENT |
5842 		L2_FHDR_ERRORS_TOO_SHORT |
5843 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5844 
5845 		goto loopback_test_done;
5846 	}
5847 
5848 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5849 		goto loopback_test_done;
5850 	}
5851 
5852 	for (i = 14; i < pkt_size; i++) {
5853 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5854 			goto loopback_test_done;
5855 		}
5856 	}
5857 
5858 	ret = 0;
5859 
5860 loopback_test_done:
5861 	bp->loopback = 0;
5862 	return ret;
5863 }
5864 
5865 #define BNX2_MAC_LOOPBACK_FAILED	1
5866 #define BNX2_PHY_LOOPBACK_FAILED	2
5867 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5868 					 BNX2_PHY_LOOPBACK_FAILED)
5869 
5870 static int
5871 bnx2_test_loopback(struct bnx2 *bp)
5872 {
5873 	int rc = 0;
5874 
5875 	if (!netif_running(bp->dev))
5876 		return BNX2_LOOPBACK_FAILED;
5877 
5878 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5879 	spin_lock_bh(&bp->phy_lock);
5880 	bnx2_init_phy(bp, 1);
5881 	spin_unlock_bh(&bp->phy_lock);
5882 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5883 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5884 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5885 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5886 	return rc;
5887 }
5888 
5889 #define NVRAM_SIZE 0x200
5890 #define CRC32_RESIDUAL 0xdebb20e3
5891 
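/* Sanity-check NVRAM contents: verify the 0x669955aa magic at offset 0,
 * then CRC-check the two 256-byte blocks starting at offset 0x100.
 * Each block carries its own CRC, so ether_crc_le() over data plus CRC
 * must produce the standard little-endian CRC-32 residue.
 */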
5892 static int
5893 bnx2_test_nvram(struct bnx2 *bp)
5894 {
5895 	__be32 buf[NVRAM_SIZE / 4];
5896 	u8 *data = (u8 *) buf;
5897 	int rc = 0;
5898 	u32 magic, csum;
5899 
5900 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5901 		goto test_nvram_done;
5902 
5903 	magic = be32_to_cpu(buf[0]);
5904 	if (magic != 0x669955aa) {
5905 		rc = -ENODEV;
5906 		goto test_nvram_done;
5907 	}
5908 
5909 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5910 		goto test_nvram_done;
5911 
5912 	csum = ether_crc_le(0x100, data);
5913 	if (csum != CRC32_RESIDUAL) {
5914 		rc = -ENODEV;
5915 		goto test_nvram_done;
5916 	}
5917 
5918 	csum = ether_crc_le(0x100, data + 0x100);
5919 	if (csum != CRC32_RESIDUAL) {
5920 		rc = -ENODEV;
5921 	}
5922 
5923 test_nvram_done:
5924 	return rc;
5925 }
5926 
5927 static int
5928 bnx2_test_link(struct bnx2 *bp)
5929 {
5930 	u32 bmsr;
5931 
5932 	if (!netif_running(bp->dev))
5933 		return -ENODEV;
5934 
5935 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5936 		if (bp->link_up)
5937 			return 0;
5938 		return -ENODEV;
5939 	}
5940 	spin_lock_bh(&bp->phy_lock);
5941 	bnx2_enable_bmsr1(bp);
5942 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5943 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5944 	bnx2_disable_bmsr1(bp);
5945 	spin_unlock_bh(&bp->phy_lock);
5946 
5947 	if (bmsr & BMSR_LSTATUS) {
5948 		return 0;
5949 	}
5950 	return -ENODEV;
5951 }
5952 
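/* Check that the device can actually raise an interrupt: snapshot the
 * status-block index, force a host-coalescing event with COAL_NOW, and
 * poll for up to ~100 ms for the index to change.
 */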
5953 static int
5954 bnx2_test_intr(struct bnx2 *bp)
5955 {
5956 	int i;
5957 	u16 status_idx;
5958 
5959 	if (!netif_running(bp->dev))
5960 		return -ENODEV;
5961 
5962 	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5963 
5964 	/* This register is not touched during run-time. */
5965 	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5966 	REG_RD(bp, BNX2_HC_COMMAND);
5967 
5968 	for (i = 0; i < 10; i++) {
5969 		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5970 			status_idx) {
5971 
5972 			break;
5973 		}
5974 
5975 		msleep_interruptible(10);
5976 	}
5977 	if (i < 10)
5978 		return 0;
5979 
5980 	return -ENODEV;
5981 }
5982 
5983 /* Determine link state for parallel detection. */
5984 static int
5985 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5986 {
5987 	u32 mode_ctl, an_dbg, exp;
5988 
5989 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5990 		return 0;
5991 
5992 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5993 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5994 
5995 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5996 		return 0;
5997 
5998 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5999 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6000 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6001 
6002 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6003 		return 0;
6004 
6005 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6006 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6007 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6008 
6009 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6010 		return 0;
6011 
6012 	return 1;
6013 }
6014 
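/* Periodic SerDes maintenance for the 5706: when autoneg makes no
 * progress but parallel detection sees a link partner, drop to forced
 * 1 Gbps full duplex; if the partner later shows signs of autoneg
 * (shadow register 0x15 bit 5, presumably the partner advertising
 * autoneg), switch autoneg back on.  Independently, force the link
 * down while the AN debug register reports NOSYNC.
 */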
6015 static void
6016 bnx2_5706_serdes_timer(struct bnx2 *bp)
6017 {
6018 	int check_link = 1;
6019 
6020 	spin_lock(&bp->phy_lock);
6021 	if (bp->serdes_an_pending) {
6022 		bp->serdes_an_pending--;
6023 		check_link = 0;
6024 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6025 		u32 bmcr;
6026 
6027 		bp->current_interval = BNX2_TIMER_INTERVAL;
6028 
6029 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6030 
6031 		if (bmcr & BMCR_ANENABLE) {
6032 			if (bnx2_5706_serdes_has_link(bp)) {
6033 				bmcr &= ~BMCR_ANENABLE;
6034 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6035 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6036 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6037 			}
6038 		}
6039 	}
6040 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6041 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6042 		u32 phy2;
6043 
6044 		bnx2_write_phy(bp, 0x17, 0x0f01);
6045 		bnx2_read_phy(bp, 0x15, &phy2);
6046 		if (phy2 & 0x20) {
6047 			u32 bmcr;
6048 
6049 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6050 			bmcr |= BMCR_ANENABLE;
6051 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6052 
6053 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6054 		}
6055 	} else
6056 		bp->current_interval = BNX2_TIMER_INTERVAL;
6057 
6058 	if (check_link) {
6059 		u32 val;
6060 
6061 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6062 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6063 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6064 
6065 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6066 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6067 				bnx2_5706s_force_link_dn(bp, 1);
6068 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6069 			} else
6070 				bnx2_set_link(bp);
6071 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6072 			bnx2_set_link(bp);
6073 	}
6074 	spin_unlock(&bp->phy_lock);
6075 }
6076 
6077 static void
6078 bnx2_5708_serdes_timer(struct bnx2 *bp)
6079 {
6080 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6081 		return;
6082 
6083 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6084 		bp->serdes_an_pending = 0;
6085 		return;
6086 	}
6087 
6088 	spin_lock(&bp->phy_lock);
6089 	if (bp->serdes_an_pending)
6090 		bp->serdes_an_pending--;
6091 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6092 		u32 bmcr;
6093 
6094 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6095 		if (bmcr & BMCR_ANENABLE) {
6096 			bnx2_enable_forced_2g5(bp);
6097 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6098 		} else {
6099 			bnx2_disable_forced_2g5(bp);
6100 			bp->serdes_an_pending = 2;
6101 			bp->current_interval = BNX2_TIMER_INTERVAL;
6102 		}
6103 
6104 	} else
6105 		bp->current_interval = BNX2_TIMER_INTERVAL;
6106 
6107 	spin_unlock(&bp->phy_lock);
6108 }
6109 
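/* Periodic driver timer (runs every bp->current_interval jiffies):
 * checks for a missed MSI, sends the management-firmware heartbeat,
 * refreshes the firmware RX drop count, kicks the statistics block on
 * chips with the broken-stats erratum, and runs the SerDes link state
 * machines.
 */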
6110 static void
6111 bnx2_timer(unsigned long data)
6112 {
6113 	struct bnx2 *bp = (struct bnx2 *) data;
6114 
6115 	if (!netif_running(bp->dev))
6116 		return;
6117 
6118 	if (atomic_read(&bp->intr_sem) != 0)
6119 		goto bnx2_restart_timer;
6120 
6121 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6122 	     BNX2_FLAG_USING_MSI)
6123 		bnx2_chk_missed_msi(bp);
6124 
6125 	bnx2_send_heart_beat(bp);
6126 
6127 	bp->stats_blk->stat_FwRxDrop =
6128 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6129 
6130 	/* work around occasional corrupted counters */
6131 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6132 		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6133 					    BNX2_HC_COMMAND_STATS_NOW);
6134 
6135 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6136 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
6137 			bnx2_5706_serdes_timer(bp);
6138 		else
6139 			bnx2_5708_serdes_timer(bp);
6140 	}
6141 
6142 bnx2_restart_timer:
6143 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6144 }
6145 
6146 static int
6147 bnx2_request_irq(struct bnx2 *bp)
6148 {
6149 	unsigned long flags;
6150 	struct bnx2_irq *irq;
6151 	int rc = 0, i;
6152 
6153 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6154 		flags = 0;
6155 	else
6156 		flags = IRQF_SHARED;
6157 
6158 	for (i = 0; i < bp->irq_nvecs; i++) {
6159 		irq = &bp->irq_tbl[i];
6160 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6161 				 &bp->bnx2_napi[i]);
6162 		if (rc)
6163 			break;
6164 		irq->requested = 1;
6165 	}
6166 	return rc;
6167 }
6168 
6169 static void
6170 __bnx2_free_irq(struct bnx2 *bp)
6171 {
6172 	struct bnx2_irq *irq;
6173 	int i;
6174 
6175 	for (i = 0; i < bp->irq_nvecs; i++) {
6176 		irq = &bp->irq_tbl[i];
6177 		if (irq->requested)
6178 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6179 		irq->requested = 0;
6180 	}
6181 }
6182 
6183 static void
6184 bnx2_free_irq(struct bnx2 *bp)
6185 {
6186 
6187 	__bnx2_free_irq(bp);
6188 	if (bp->flags & BNX2_FLAG_USING_MSI)
6189 		pci_disable_msi(bp->pdev);
6190 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6191 		pci_disable_msix(bp->pdev);
6192 
6193 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6194 }
6195 
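/* Try to put the device into MSI-X mode.  One extra vector is reserved
 * for cnic when BCM_CNIC is built in.  pci_enable_msix() may return a
 * smaller positive vector count; in that case retry with the returned
 * count until it succeeds or falls below BNX2_MIN_MSIX_VEC.
 */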
6196 static void
6197 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6198 {
6199 	int i, total_vecs, rc;
6200 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6201 	struct net_device *dev = bp->dev;
6202 	const int len = sizeof(bp->irq_tbl[0].name);
6203 
6204 	bnx2_setup_msix_tbl(bp);
6205 	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6206 	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6207 	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6208 
6209 	/* Need to flush the previous three writes to ensure MSI-X
6210 	 * is set up properly */
6211 	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6212 
6213 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6214 		msix_ent[i].entry = i;
6215 		msix_ent[i].vector = 0;
6216 	}
6217 
6218 	total_vecs = msix_vecs;
6219 #ifdef BCM_CNIC
6220 	total_vecs++;
6221 #endif
6222 	rc = -ENOSPC;
6223 	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6224 		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6225 		if (rc <= 0)
6226 			break;
6227 		total_vecs = rc;
6229 	}
6230 
6231 	if (rc != 0)
6232 		return;
6233 
6234 	msix_vecs = total_vecs;
6235 #ifdef BCM_CNIC
6236 	msix_vecs--;
6237 #endif
6238 	bp->irq_nvecs = msix_vecs;
6239 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6240 	for (i = 0; i < total_vecs; i++) {
6241 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6242 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6243 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6244 	}
6245 }
6246 
6247 static int
6248 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6249 {
6250 	int cpus = num_online_cpus();
6251 	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6252 
6253 	bp->irq_tbl[0].handler = bnx2_interrupt;
6254 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6255 	bp->irq_nvecs = 1;
6256 	bp->irq_tbl[0].vector = bp->pdev->irq;
6257 
6258 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6259 		bnx2_enable_msix(bp, msix_vecs);
6260 
6261 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6262 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6263 		if (pci_enable_msi(bp->pdev) == 0) {
6264 			bp->flags |= BNX2_FLAG_USING_MSI;
6265 			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6266 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6267 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6268 			} else
6269 				bp->irq_tbl[0].handler = bnx2_msi;
6270 
6271 			bp->irq_tbl[0].vector = bp->pdev->irq;
6272 		}
6273 	}
6274 
6275 	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6276 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6277 
6278 	bp->num_rx_rings = bp->irq_nvecs;
6279 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6280 }
6281 
6282 /* Called with rtnl_lock */
6283 static int
6284 bnx2_open(struct net_device *dev)
6285 {
6286 	struct bnx2 *bp = netdev_priv(dev);
6287 	int rc;
6288 
6289 	rc = bnx2_request_firmware(bp);
6290 	if (rc < 0)
6291 		goto out;
6292 
6293 	netif_carrier_off(dev);
6294 
6295 	bnx2_set_power_state(bp, PCI_D0);
6296 	bnx2_disable_int(bp);
6297 
6298 	rc = bnx2_setup_int_mode(bp, disable_msi);
6299 	if (rc)
6300 		goto open_err;
6301 	bnx2_init_napi(bp);
6302 	bnx2_napi_enable(bp);
6303 	rc = bnx2_alloc_mem(bp);
6304 	if (rc)
6305 		goto open_err;
6306 
6307 	rc = bnx2_request_irq(bp);
6308 	if (rc)
6309 		goto open_err;
6310 
6311 	rc = bnx2_init_nic(bp, 1);
6312 	if (rc)
6313 		goto open_err;
6314 
6315 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6316 
6317 	atomic_set(&bp->intr_sem, 0);
6318 
6319 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6320 
6321 	bnx2_enable_int(bp);
6322 
6323 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6324 		/* Test MSI to make sure it is working
6325 		 * If MSI test fails, go back to INTx mode
6326 		 */
6327 		if (bnx2_test_intr(bp) != 0) {
6328 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6329 
6330 			bnx2_disable_int(bp);
6331 			bnx2_free_irq(bp);
6332 
6333 			bnx2_setup_int_mode(bp, 1);
6334 
6335 			rc = bnx2_init_nic(bp, 0);
6336 
6337 			if (!rc)
6338 				rc = bnx2_request_irq(bp);
6339 
6340 			if (rc) {
6341 				del_timer_sync(&bp->timer);
6342 				goto open_err;
6343 			}
6344 			bnx2_enable_int(bp);
6345 		}
6346 	}
6347 	if (bp->flags & BNX2_FLAG_USING_MSI)
6348 		netdev_info(dev, "using MSI\n");
6349 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6350 		netdev_info(dev, "using MSIX\n");
6351 
6352 	netif_tx_start_all_queues(dev);
6353 out:
6354 	return rc;
6355 
6356 open_err:
6357 	bnx2_napi_disable(bp);
6358 	bnx2_free_skbs(bp);
6359 	bnx2_free_irq(bp);
6360 	bnx2_free_mem(bp);
6361 	bnx2_del_napi(bp);
6362 	bnx2_release_firmware(bp);
6363 	goto out;
6364 }
6365 
6366 static void
6367 bnx2_reset_task(struct work_struct *work)
6368 {
6369 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6370 	int rc;
6371 
6372 	rtnl_lock();
6373 	if (!netif_running(bp->dev)) {
6374 		rtnl_unlock();
6375 		return;
6376 	}
6377 
6378 	bnx2_netif_stop(bp, true);
6379 
6380 	rc = bnx2_init_nic(bp, 1);
6381 	if (rc) {
6382 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6383 		bnx2_napi_enable(bp);
6384 		dev_close(bp->dev);
6385 		rtnl_unlock();
6386 		return;
6387 	}
6388 
6389 	atomic_set(&bp->intr_sem, 1);
6390 	bnx2_netif_start(bp, true);
6391 	rtnl_unlock();
6392 }
6393 
6394 static void
6395 bnx2_dump_state(struct bnx2 *bp)
6396 {
6397 	struct net_device *dev = bp->dev;
6398 	u32 val1, val2;
6399 
6400 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6401 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6402 		   atomic_read(&bp->intr_sem), val1);
6403 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6404 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6405 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6406 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6407 		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
6408 		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
6409 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6410 		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6411 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6412 		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6413 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6414 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6415 			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6416 }
6417 
6418 static void
6419 bnx2_tx_timeout(struct net_device *dev)
6420 {
6421 	struct bnx2 *bp = netdev_priv(dev);
6422 
6423 	bnx2_dump_state(bp);
6424 	bnx2_dump_mcp_state(bp);
6425 
6426 	/* This allows the netif to be shut down gracefully before resetting */
6427 	schedule_work(&bp->reset_task);
6428 }
6429 
6430 /* Called with netif_tx_lock.
6431  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6432  * netif_wake_queue().
6433  */
6434 static netdev_tx_t
6435 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6436 {
6437 	struct bnx2 *bp = netdev_priv(dev);
6438 	dma_addr_t mapping;
6439 	struct tx_bd *txbd;
6440 	struct sw_tx_bd *tx_buf;
6441 	u32 len, vlan_tag_flags, last_frag, mss;
6442 	u16 prod, ring_prod;
6443 	int i;
6444 	struct bnx2_napi *bnapi;
6445 	struct bnx2_tx_ring_info *txr;
6446 	struct netdev_queue *txq;
6447 
6448 	/* Determine which tx ring this skb will be placed on */
6449 	i = skb_get_queue_mapping(skb);
6450 	bnapi = &bp->bnx2_napi[i];
6451 	txr = &bnapi->tx_ring;
6452 	txq = netdev_get_tx_queue(dev, i);
6453 
6454 	if (unlikely(bnx2_tx_avail(bp, txr) <
6455 	    (skb_shinfo(skb)->nr_frags + 1))) {
6456 		netif_tx_stop_queue(txq);
6457 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6458 
6459 		return NETDEV_TX_BUSY;
6460 	}
6461 	len = skb_headlen(skb);
6462 	prod = txr->tx_prod;
6463 	ring_prod = TX_RING_IDX(prod);
6464 
6465 	vlan_tag_flags = 0;
6466 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6467 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6468 	}
6469 
6470 	if (vlan_tx_tag_present(skb)) {
6471 		vlan_tag_flags |=
6472 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6473 	}
6474 
6475 	if ((mss = skb_shinfo(skb)->gso_size)) {
6476 		u32 tcp_opt_len;
6477 		struct iphdr *iph;
6478 
6479 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6480 
6481 		tcp_opt_len = tcp_optlen(skb);
6482 
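		/* LSO: the chip needs the IP and TCP option header lengths
		 * encoded in the BD flags.  For TCPv6, a nonzero TCP header
		 * offset (tcp_off, in 8-byte units) is additionally spread
		 * across three separate BD flag fields.
		 */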
6483 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6484 			u32 tcp_off = skb_transport_offset(skb) -
6485 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6486 
6487 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6488 					  TX_BD_FLAGS_SW_FLAGS;
6489 			if (likely(tcp_off == 0))
6490 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6491 			else {
6492 				tcp_off >>= 3;
6493 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6494 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6495 						  ((tcp_off & 0x10) <<
6496 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6497 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6498 			}
6499 		} else {
6500 			iph = ip_hdr(skb);
6501 			if (tcp_opt_len || (iph->ihl > 5)) {
6502 				vlan_tag_flags |= ((iph->ihl - 5) +
6503 						   (tcp_opt_len >> 2)) << 8;
6504 			}
6505 		}
6506 	} else
6507 		mss = 0;
6508 
6509 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
6510 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6511 		dev_kfree_skb(skb);
6512 		return NETDEV_TX_OK;
6513 	}
6514 
6515 	tx_buf = &txr->tx_buf_ring[ring_prod];
6516 	tx_buf->skb = skb;
6517 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6518 
6519 	txbd = &txr->tx_desc_ring[ring_prod];
6520 
6521 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6522 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6523 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6524 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6525 
6526 	last_frag = skb_shinfo(skb)->nr_frags;
6527 	tx_buf->nr_frags = last_frag;
6528 	tx_buf->is_gso = skb_is_gso(skb);
6529 
6530 	for (i = 0; i < last_frag; i++) {
6531 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6532 
6533 		prod = NEXT_TX_BD(prod);
6534 		ring_prod = TX_RING_IDX(prod);
6535 		txbd = &txr->tx_desc_ring[ring_prod];
6536 
6537 		len = skb_frag_size(frag);
6538 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6539 					   DMA_TO_DEVICE);
6540 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6541 			goto dma_error;
6542 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6543 				   mapping);
6544 
6545 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6546 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6547 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6548 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6549 
6550 	}
6551 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6552 
6553 	netdev_tx_sent_queue(txq, skb->len);
6554 
6555 	prod = NEXT_TX_BD(prod);
6556 	txr->tx_prod_bseq += skb->len;
6557 
6558 	REG_WR16(bp, txr->tx_bidx_addr, prod);
6559 	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6560 
6561 	mmiowb();
6562 
6563 	txr->tx_prod = prod;
6564 
6565 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6566 		netif_tx_stop_queue(txq);
6567 
6568 		/* netif_tx_stop_queue() must be done before checking
6569 		 * tx index in bnx2_tx_avail() below, because in
6570 		 * bnx2_tx_int(), we update tx index before checking for
6571 		 * netif_tx_queue_stopped().
6572 		 */
6573 		smp_mb();
6574 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6575 			netif_tx_wake_queue(txq);
6576 	}
6577 
6578 	return NETDEV_TX_OK;
6579 dma_error:
6580 	/* save value of frag that failed */
6581 	last_frag = i;
6582 
6583 	/* start back at beginning and unmap skb */
6584 	prod = txr->tx_prod;
6585 	ring_prod = TX_RING_IDX(prod);
6586 	tx_buf = &txr->tx_buf_ring[ring_prod];
6587 	tx_buf->skb = NULL;
6588 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6589 			 skb_headlen(skb), DMA_TO_DEVICE);
6590 
6591 	/* unmap remaining mapped pages */
6592 	for (i = 0; i < last_frag; i++) {
6593 		prod = NEXT_TX_BD(prod);
6594 		ring_prod = TX_RING_IDX(prod);
6595 		tx_buf = &txr->tx_buf_ring[ring_prod];
6596 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6597 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6598 			       DMA_TO_DEVICE);
6599 	}
6600 
6601 	dev_kfree_skb(skb);
6602 	return NETDEV_TX_OK;
6603 }
6604 
6605 /* Called with rtnl_lock */
6606 static int
6607 bnx2_close(struct net_device *dev)
6608 {
6609 	struct bnx2 *bp = netdev_priv(dev);
6610 
6611 	bnx2_disable_int_sync(bp);
6612 	bnx2_napi_disable(bp);
6613 	del_timer_sync(&bp->timer);
6614 	bnx2_shutdown_chip(bp);
6615 	bnx2_free_irq(bp);
6616 	bnx2_free_skbs(bp);
6617 	bnx2_free_mem(bp);
6618 	bnx2_del_napi(bp);
6619 	bp->link_up = 0;
6620 	netif_carrier_off(bp->dev);
6621 	bnx2_set_power_state(bp, PCI_D3hot);
6622 	return 0;
6623 }
6624 
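/* Accumulate the hardware statistics into temp_stats_blk before a chip
 * reset wipes them.  The leading 64-bit counters are stored as hi/lo
 * 32-bit pairs, so the low words are summed with an explicit carry
 * into the high words.
 */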
6625 static void
6626 bnx2_save_stats(struct bnx2 *bp)
6627 {
6628 	u32 *hw_stats = (u32 *) bp->stats_blk;
6629 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6630 	int i;
6631 
6632 	/* The 1st 10 counters are 64-bit counters */
6633 	/* The first 10 counters are 64-bit hi/lo pairs */
6634 		u32 hi;
6635 		u64 lo;
6636 
6637 		hi = temp_stats[i] + hw_stats[i];
6638 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6639 		if (lo > 0xffffffff)
6640 			hi++;
6641 		temp_stats[i] = hi;
6642 		temp_stats[i + 1] = lo & 0xffffffff;
6643 	}
6644 
6645 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6646 		temp_stats[i] += hw_stats[i];
6647 }
6648 
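/* Helpers that fold the live hardware counters together with the copy
 * saved across the last chip reset in temp_stats_blk.
 */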
6649 #define GET_64BIT_NET_STATS64(ctr)		\
6650 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6651 
6652 #define GET_64BIT_NET_STATS(ctr)				\
6653 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6654 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6655 
6656 #define GET_32BIT_NET_STATS(ctr)				\
6657 	(unsigned long) (bp->stats_blk->ctr +			\
6658 			 bp->temp_stats_blk->ctr)
6659 
6660 static struct rtnl_link_stats64 *
6661 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6662 {
6663 	struct bnx2 *bp = netdev_priv(dev);
6664 
6665 	if (bp->stats_blk == NULL)
6666 		return net_stats;
6667 
6668 	net_stats->rx_packets =
6669 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6670 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6671 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6672 
6673 	net_stats->tx_packets =
6674 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6675 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6676 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6677 
6678 	net_stats->rx_bytes =
6679 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6680 
6681 	net_stats->tx_bytes =
6682 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6683 
6684 	net_stats->multicast =
6685 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6686 
6687 	net_stats->collisions =
6688 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6689 
6690 	net_stats->rx_length_errors =
6691 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6692 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6693 
6694 	net_stats->rx_over_errors =
6695 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6696 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6697 
6698 	net_stats->rx_frame_errors =
6699 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6700 
6701 	net_stats->rx_crc_errors =
6702 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6703 
6704 	net_stats->rx_errors = net_stats->rx_length_errors +
6705 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6706 		net_stats->rx_crc_errors;
6707 
6708 	net_stats->tx_aborted_errors =
6709 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6710 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6711 
6712 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6713 	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
6714 		net_stats->tx_carrier_errors = 0;
6715 	else {
6716 		net_stats->tx_carrier_errors =
6717 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6718 	}
6719 
6720 	net_stats->tx_errors =
6721 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6722 		net_stats->tx_aborted_errors +
6723 		net_stats->tx_carrier_errors;
6724 
6725 	net_stats->rx_missed_errors =
6726 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6727 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6728 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6729 
6730 	return net_stats;
6731 }
6732 
6733 /* All ethtool functions called with rtnl_lock */
6734 
6735 static int
6736 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6737 {
6738 	struct bnx2 *bp = netdev_priv(dev);
6739 	int support_serdes = 0, support_copper = 0;
6740 
6741 	cmd->supported = SUPPORTED_Autoneg;
6742 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6743 		support_serdes = 1;
6744 		support_copper = 1;
6745 	} else if (bp->phy_port == PORT_FIBRE)
6746 		support_serdes = 1;
6747 	else
6748 		support_copper = 1;
6749 
6750 	if (support_serdes) {
6751 		cmd->supported |= SUPPORTED_1000baseT_Full |
6752 			SUPPORTED_FIBRE;
6753 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6754 			cmd->supported |= SUPPORTED_2500baseX_Full;
6755 
6756 	}
6757 	if (support_copper) {
6758 		cmd->supported |= SUPPORTED_10baseT_Half |
6759 			SUPPORTED_10baseT_Full |
6760 			SUPPORTED_100baseT_Half |
6761 			SUPPORTED_100baseT_Full |
6762 			SUPPORTED_1000baseT_Full |
6763 			SUPPORTED_TP;
6764 
6765 	}
6766 
6767 	spin_lock_bh(&bp->phy_lock);
6768 	cmd->port = bp->phy_port;
6769 	cmd->advertising = bp->advertising;
6770 
6771 	if (bp->autoneg & AUTONEG_SPEED) {
6772 		cmd->autoneg = AUTONEG_ENABLE;
6773 	} else {
6774 		cmd->autoneg = AUTONEG_DISABLE;
6775 	}
6776 
6777 	if (netif_carrier_ok(dev)) {
6778 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6779 		cmd->duplex = bp->duplex;
6780 	}
6781 	else {
6782 		ethtool_cmd_speed_set(cmd, -1);
6783 		cmd->duplex = -1;
6784 	}
6785 	spin_unlock_bh(&bp->phy_lock);
6786 
6787 	cmd->transceiver = XCVR_INTERNAL;
6788 	cmd->phy_address = bp->phy_addr;
6789 
6790 	return 0;
6791 }
6792 
6793 static int
6794 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6795 {
6796 	struct bnx2 *bp = netdev_priv(dev);
6797 	u8 autoneg = bp->autoneg;
6798 	u8 req_duplex = bp->req_duplex;
6799 	u16 req_line_speed = bp->req_line_speed;
6800 	u32 advertising = bp->advertising;
6801 	int err = -EINVAL;
6802 
6803 	spin_lock_bh(&bp->phy_lock);
6804 
6805 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6806 		goto err_out_unlock;
6807 
6808 	if (cmd->port != bp->phy_port &&
6809 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6810 		goto err_out_unlock;
6811 
6812 	/* If device is down, we can store the settings only if the user
6813 	 * is setting the currently active port.
6814 	 */
6815 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6816 		goto err_out_unlock;
6817 
6818 	if (cmd->autoneg == AUTONEG_ENABLE) {
6819 		autoneg |= AUTONEG_SPEED;
6820 
6821 		advertising = cmd->advertising;
6822 		if (cmd->port == PORT_TP) {
6823 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6824 			if (!advertising)
6825 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6826 		} else {
6827 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6828 			if (!advertising)
6829 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6830 		}
6831 		advertising |= ADVERTISED_Autoneg;
6832 	}
6833 	else {
6834 		u32 speed = ethtool_cmd_speed(cmd);
6835 		if (cmd->port == PORT_FIBRE) {
6836 			if ((speed != SPEED_1000 &&
6837 			     speed != SPEED_2500) ||
6838 			    (cmd->duplex != DUPLEX_FULL))
6839 				goto err_out_unlock;
6840 
6841 			if (speed == SPEED_2500 &&
6842 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6843 				goto err_out_unlock;
6844 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6845 			goto err_out_unlock;
6846 
6847 		autoneg &= ~AUTONEG_SPEED;
6848 		req_line_speed = speed;
6849 		req_duplex = cmd->duplex;
6850 		advertising = 0;
6851 	}
6852 
6853 	bp->autoneg = autoneg;
6854 	bp->advertising = advertising;
6855 	bp->req_line_speed = req_line_speed;
6856 	bp->req_duplex = req_duplex;
6857 
6858 	err = 0;
6859 	/* If device is down, the new settings will be picked up when it is
6860 	 * brought up.
6861 	 */
6862 	if (netif_running(dev))
6863 		err = bnx2_setup_phy(bp, cmd->port);
6864 
6865 err_out_unlock:
6866 	spin_unlock_bh(&bp->phy_lock);
6867 
6868 	return err;
6869 }
6870 
6871 static void
6872 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6873 {
6874 	struct bnx2 *bp = netdev_priv(dev);
6875 
6876 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6877 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6878 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6879 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6880 }
6881 
6882 #define BNX2_REGDUMP_LEN		(32 * 1024)
6883 
6884 static int
6885 bnx2_get_regs_len(struct net_device *dev)
6886 {
6887 	return BNX2_REGDUMP_LEN;
6888 }
6889 
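/* Fill the ethtool register dump.  reg_boundaries[] holds alternating
 * start/end offsets of readable register ranges; the loop copies each
 * range and leaves the gaps between ranges zero-filled rather than
 * reading them.
 */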
6890 static void
6891 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6892 {
6893 	u32 *p = _p, i, offset;
6894 	u8 *orig_p = _p;
6895 	struct bnx2 *bp = netdev_priv(dev);
6896 	static const u32 reg_boundaries[] = {
6897 		0x0000, 0x0098, 0x0400, 0x045c,
6898 		0x0800, 0x0880, 0x0c00, 0x0c10,
6899 		0x0c30, 0x0d08, 0x1000, 0x101c,
6900 		0x1040, 0x1048, 0x1080, 0x10a4,
6901 		0x1400, 0x1490, 0x1498, 0x14f0,
6902 		0x1500, 0x155c, 0x1580, 0x15dc,
6903 		0x1600, 0x1658, 0x1680, 0x16d8,
6904 		0x1800, 0x1820, 0x1840, 0x1854,
6905 		0x1880, 0x1894, 0x1900, 0x1984,
6906 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6907 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
6908 		0x2000, 0x2030, 0x23c0, 0x2400,
6909 		0x2800, 0x2820, 0x2830, 0x2850,
6910 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
6911 		0x3c00, 0x3c94, 0x4000, 0x4010,
6912 		0x4080, 0x4090, 0x43c0, 0x4458,
6913 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
6914 		0x4fc0, 0x5010, 0x53c0, 0x5444,
6915 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
6916 		0x5fc0, 0x6000, 0x6400, 0x6428,
6917 		0x6800, 0x6848, 0x684c, 0x6860,
6918 		0x6888, 0x6910, 0x8000
6919 	};
6920 
6921 	regs->version = 0;
6922 
6923 	memset(p, 0, BNX2_REGDUMP_LEN);
6924 
6925 	if (!netif_running(bp->dev))
6926 		return;
6927 
6928 	i = 0;
6929 	offset = reg_boundaries[0];
6930 	p = (u32 *) (orig_p + offset);
6931 	while (offset < BNX2_REGDUMP_LEN) {
6932 		*p++ = REG_RD(bp, offset);
6933 		offset += 4;
6934 		if (offset == reg_boundaries[i + 1]) {
6935 			offset = reg_boundaries[i + 2];
6936 			p = (u32 *) (orig_p + offset);
6937 			i += 2;
6938 		}
6939 	}
6940 }
6941 
6942 static void
6943 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6944 {
6945 	struct bnx2 *bp = netdev_priv(dev);
6946 
6947 	if (bp->flags & BNX2_FLAG_NO_WOL) {
6948 		wol->supported = 0;
6949 		wol->wolopts = 0;
6950 	}
6951 	else {
6952 		wol->supported = WAKE_MAGIC;
6953 		if (bp->wol)
6954 			wol->wolopts = WAKE_MAGIC;
6955 		else
6956 			wol->wolopts = 0;
6957 	}
6958 	memset(&wol->sopass, 0, sizeof(wol->sopass));
6959 }
6960 
6961 static int
6962 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6963 {
6964 	struct bnx2 *bp = netdev_priv(dev);
6965 
6966 	if (wol->wolopts & ~WAKE_MAGIC)
6967 		return -EINVAL;
6968 
6969 	if (wol->wolopts & WAKE_MAGIC) {
6970 		if (bp->flags & BNX2_FLAG_NO_WOL)
6971 			return -EINVAL;
6972 
6973 		bp->wol = 1;
6974 	}
6975 	else {
6976 		bp->wol = 0;
6977 	}
6978 	return 0;
6979 }
6980 
6981 static int
6982 bnx2_nway_reset(struct net_device *dev)
6983 {
6984 	struct bnx2 *bp = netdev_priv(dev);
6985 	u32 bmcr;
6986 
6987 	if (!netif_running(dev))
6988 		return -EAGAIN;
6989 
6990 	if (!(bp->autoneg & AUTONEG_SPEED)) {
6991 		return -EINVAL;
6992 	}
6993 
6994 	spin_lock_bh(&bp->phy_lock);
6995 
6996 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6997 		int rc;
6998 
6999 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7000 		spin_unlock_bh(&bp->phy_lock);
7001 		return rc;
7002 	}
7003 
7004 	/* Force a link down visible on the other side */
7005 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7006 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7007 		spin_unlock_bh(&bp->phy_lock);
7008 
7009 		msleep(20);
7010 
7011 		spin_lock_bh(&bp->phy_lock);
7012 
7013 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7014 		bp->serdes_an_pending = 1;
7015 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7016 	}
7017 
7018 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7019 	bmcr &= ~BMCR_LOOPBACK;
7020 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7021 
7022 	spin_unlock_bh(&bp->phy_lock);
7023 
7024 	return 0;
7025 }
7026 
7027 static u32
7028 bnx2_get_link(struct net_device *dev)
7029 {
7030 	struct bnx2 *bp = netdev_priv(dev);
7031 
7032 	return bp->link_up;
7033 }
7034 
7035 static int
7036 bnx2_get_eeprom_len(struct net_device *dev)
7037 {
7038 	struct bnx2 *bp = netdev_priv(dev);
7039 
7040 	if (bp->flash_info == NULL)
7041 		return 0;
7042 
7043 	return (int) bp->flash_size;
7044 }
7045 
7046 static int
7047 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7048 		u8 *eebuf)
7049 {
7050 	struct bnx2 *bp = netdev_priv(dev);
7051 	int rc;
7052 
7053 	if (!netif_running(dev))
7054 		return -EAGAIN;
7055 
7056 	/* parameters already validated in ethtool_get_eeprom */
7057 
7058 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7059 
7060 	return rc;
7061 }
7062 
7063 static int
7064 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7065 		u8 *eebuf)
7066 {
7067 	struct bnx2 *bp = netdev_priv(dev);
7068 	int rc;
7069 
7070 	if (!netif_running(dev))
7071 		return -EAGAIN;
7072 
7073 	/* parameters already validated in ethtool_set_eeprom */
7074 
7075 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7076 
7077 	return rc;
7078 }
7079 
7080 static int
7081 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7082 {
7083 	struct bnx2 *bp = netdev_priv(dev);
7084 
7085 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7086 
7087 	coal->rx_coalesce_usecs = bp->rx_ticks;
7088 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7089 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7090 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7091 
7092 	coal->tx_coalesce_usecs = bp->tx_ticks;
7093 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7094 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7095 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7096 
7097 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7098 
7099 	return 0;
7100 }
7101 
7102 static int
7103 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7104 {
7105 	struct bnx2 *bp = netdev_priv(dev);
7106 
7107 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7108 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7109 
7110 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7111 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7112 
7113 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7114 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7115 
7116 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7117 	if (bp->rx_quick_cons_trip_int > 0xff)
7118 		bp->rx_quick_cons_trip_int = 0xff;
7119 
7120 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7121 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7122 
7123 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7124 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7125 
7126 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7127 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7128 
7129 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7130 	if (bp->tx_quick_cons_trip_int > 0xff)
7131 		bp->tx_quick_cons_trip_int = 0xff;
7132 
7133 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7134 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7135 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7136 			bp->stats_ticks = USEC_PER_SEC;
7137 	}
7138 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7139 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7140 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7141 
7142 	if (netif_running(bp->dev)) {
7143 		bnx2_netif_stop(bp, true);
7144 		bnx2_init_nic(bp, 0);
7145 		bnx2_netif_start(bp, true);
7146 	}
7147 
7148 	return 0;
7149 }
7150 
7151 static void
7152 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7153 {
7154 	struct bnx2 *bp = netdev_priv(dev);
7155 
7156 	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7157 	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7158 
7159 	ering->rx_pending = bp->rx_ring_size;
7160 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7161 
7162 	ering->tx_max_pending = MAX_TX_DESC_CNT;
7163 	ering->tx_pending = bp->tx_ring_size;
7164 }
7165 
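/* Resize the RX/TX rings.  If the interface is up, this needs a full
 * reinitialization: the statistics are saved first (the chip reset
 * clears them), all rings, buffers and IRQs are torn down, and the NIC
 * is brought back up with the new ring sizes.
 */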
7166 static int
7167 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7168 {
7169 	if (netif_running(bp->dev)) {
7170 		/* Reset will erase chipset stats; save them */
7171 		bnx2_save_stats(bp);
7172 
7173 		bnx2_netif_stop(bp, true);
7174 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7175 		__bnx2_free_irq(bp);
7176 		bnx2_free_skbs(bp);
7177 		bnx2_free_mem(bp);
7178 	}
7179 
7180 	bnx2_set_rx_ring_size(bp, rx);
7181 	bp->tx_ring_size = tx;
7182 
7183 	if (netif_running(bp->dev)) {
7184 		int rc;
7185 
7186 		rc = bnx2_alloc_mem(bp);
7187 		if (!rc)
7188 			rc = bnx2_request_irq(bp);
7189 
7190 		if (!rc)
7191 			rc = bnx2_init_nic(bp, 0);
7192 
7193 		if (rc) {
7194 			bnx2_napi_enable(bp);
7195 			dev_close(bp->dev);
7196 			return rc;
7197 		}
7198 #ifdef BCM_CNIC
7199 		mutex_lock(&bp->cnic_lock);
7200 		/* Let cnic know about the new status block. */
7201 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7202 			bnx2_setup_cnic_irq_info(bp);
7203 		mutex_unlock(&bp->cnic_lock);
7204 #endif
7205 		bnx2_netif_start(bp, true);
7206 	}
7207 	return 0;
7208 }
7209 
7210 static int
7211 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7212 {
7213 	struct bnx2 *bp = netdev_priv(dev);
7214 	int rc;
7215 
7216 	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7217 		(ering->tx_pending > MAX_TX_DESC_CNT) ||
7218 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7219 
7220 		return -EINVAL;
7221 	}
7222 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7223 	return rc;
7224 }
7225 
7226 static void
7227 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7228 {
7229 	struct bnx2 *bp = netdev_priv(dev);
7230 
7231 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7232 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7233 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7234 }
7235 
7236 static int
7237 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7238 {
7239 	struct bnx2 *bp = netdev_priv(dev);
7240 
7241 	bp->req_flow_ctrl = 0;
7242 	if (epause->rx_pause)
7243 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7244 	if (epause->tx_pause)
7245 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7246 
7247 	if (epause->autoneg) {
7248 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7249 	}
7250 	else {
7251 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7252 	}
7253 
7254 	if (netif_running(dev)) {
7255 		spin_lock_bh(&bp->phy_lock);
7256 		bnx2_setup_phy(bp, bp->phy_port);
7257 		spin_unlock_bh(&bp->phy_lock);
7258 	}
7259 
7260 	return 0;
7261 }
7262 
7263 static struct {
7264 	char string[ETH_GSTRING_LEN];
7265 } bnx2_stats_str_arr[] = {
7266 	{ "rx_bytes" },
7267 	{ "rx_error_bytes" },
7268 	{ "tx_bytes" },
7269 	{ "tx_error_bytes" },
7270 	{ "rx_ucast_packets" },
7271 	{ "rx_mcast_packets" },
7272 	{ "rx_bcast_packets" },
7273 	{ "tx_ucast_packets" },
7274 	{ "tx_mcast_packets" },
7275 	{ "tx_bcast_packets" },
7276 	{ "tx_mac_errors" },
7277 	{ "tx_carrier_errors" },
7278 	{ "rx_crc_errors" },
7279 	{ "rx_align_errors" },
7280 	{ "tx_single_collisions" },
7281 	{ "tx_multi_collisions" },
7282 	{ "tx_deferred" },
7283 	{ "tx_excess_collisions" },
7284 	{ "tx_late_collisions" },
7285 	{ "tx_total_collisions" },
7286 	{ "rx_fragments" },
7287 	{ "rx_jabbers" },
7288 	{ "rx_undersize_packets" },
7289 	{ "rx_oversize_packets" },
7290 	{ "rx_64_byte_packets" },
7291 	{ "rx_65_to_127_byte_packets" },
7292 	{ "rx_128_to_255_byte_packets" },
7293 	{ "rx_256_to_511_byte_packets" },
7294 	{ "rx_512_to_1023_byte_packets" },
7295 	{ "rx_1024_to_1522_byte_packets" },
7296 	{ "rx_1523_to_9022_byte_packets" },
7297 	{ "tx_64_byte_packets" },
7298 	{ "tx_65_to_127_byte_packets" },
7299 	{ "tx_128_to_255_byte_packets" },
7300 	{ "tx_256_to_511_byte_packets" },
7301 	{ "tx_512_to_1023_byte_packets" },
7302 	{ "tx_1024_to_1522_byte_packets" },
7303 	{ "tx_1523_to_9022_byte_packets" },
7304 	{ "rx_xon_frames" },
7305 	{ "rx_xoff_frames" },
7306 	{ "tx_xon_frames" },
7307 	{ "tx_xoff_frames" },
7308 	{ "rx_mac_ctrl_frames" },
7309 	{ "rx_filtered_packets" },
7310 	{ "rx_ftq_discards" },
7311 	{ "rx_discards" },
7312 	{ "rx_fw_discards" },
7313 };
7314 
7315 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7316 			sizeof(bnx2_stats_str_arr[0]))
7317 
7318 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7319 
7320 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7321     STATS_OFFSET32(stat_IfHCInOctets_hi),
7322     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7323     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7324     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7325     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7326     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7327     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7328     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7329     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7330     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7331     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7332     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7333     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7334     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7335     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7336     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7337     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7338     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7339     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7340     STATS_OFFSET32(stat_EtherStatsCollisions),
7341     STATS_OFFSET32(stat_EtherStatsFragments),
7342     STATS_OFFSET32(stat_EtherStatsJabbers),
7343     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7344     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7345     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7346     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7347     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7348     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7349     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7350     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7351     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7352     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7353     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7354     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7355     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7356     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7357     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7358     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7359     STATS_OFFSET32(stat_XonPauseFramesReceived),
7360     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7361     STATS_OFFSET32(stat_OutXonSent),
7362     STATS_OFFSET32(stat_OutXoffSent),
7363     STATS_OFFSET32(stat_MacControlFramesReceived),
7364     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7365     STATS_OFFSET32(stat_IfInFTQDiscards),
7366     STATS_OFFSET32(stat_IfInMBUFDiscards),
7367     STATS_OFFSET32(stat_FwRxDrop),
7368 };
7369 
7370 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7371  * skipped because of errata.
7372  */
7373 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7374 	8,0,8,8,8,8,8,8,8,8,
7375 	4,0,4,4,4,4,4,4,4,4,
7376 	4,4,4,4,4,4,4,4,4,4,
7377 	4,4,4,4,4,4,4,4,4,4,
7378 	4,4,4,4,4,4,4,
7379 };
7380 
7381 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7382 	8,0,8,8,8,8,8,8,8,8,
7383 	4,4,4,4,4,4,4,4,4,4,
7384 	4,4,4,4,4,4,4,4,4,4,
7385 	4,4,4,4,4,4,4,4,4,4,
7386 	4,4,4,4,4,4,4,
7387 };
7388 
7389 #define BNX2_NUM_TESTS 6
7390 
7391 static struct {
7392 	char string[ETH_GSTRING_LEN];
7393 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7394 	{ "register_test (offline)" },
7395 	{ "memory_test (offline)" },
7396 	{ "loopback_test (offline)" },
7397 	{ "nvram_test (online)" },
7398 	{ "interrupt_test (online)" },
7399 	{ "link_test (online)" },
7400 };
7401 
7402 static int
7403 bnx2_get_sset_count(struct net_device *dev, int sset)
7404 {
7405 	switch (sset) {
7406 	case ETH_SS_TEST:
7407 		return BNX2_NUM_TESTS;
7408 	case ETH_SS_STATS:
7409 		return BNX2_NUM_STATS;
7410 	default:
7411 		return -EOPNOTSUPP;
7412 	}
7413 }
7414 
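/* ethtool self-test.  Result slots in buf[] match bnx2_tests_str_arr[]:
 * 0 = registers, 1 = memory, 2 = loopback (offline only), 3 = nvram,
 * 4 = interrupt, 5 = link.  Offline tests reset the chip, so the NIC
 * is reinitialized and we wait up to 7 seconds for link afterwards.
 */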
7415 static void
7416 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7417 {
7418 	struct bnx2 *bp = netdev_priv(dev);
7419 
7420 	bnx2_set_power_state(bp, PCI_D0);
7421 
7422 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7423 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7424 		int i;
7425 
7426 		bnx2_netif_stop(bp, true);
7427 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7428 		bnx2_free_skbs(bp);
7429 
7430 		if (bnx2_test_registers(bp) != 0) {
7431 			buf[0] = 1;
7432 			etest->flags |= ETH_TEST_FL_FAILED;
7433 		}
7434 		if (bnx2_test_memory(bp) != 0) {
7435 			buf[1] = 1;
7436 			etest->flags |= ETH_TEST_FL_FAILED;
7437 		}
7438 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7439 			etest->flags |= ETH_TEST_FL_FAILED;
7440 
7441 		if (!netif_running(bp->dev))
7442 			bnx2_shutdown_chip(bp);
7443 		else {
7444 			bnx2_init_nic(bp, 1);
7445 			bnx2_netif_start(bp, true);
7446 		}
7447 
7448 		/* wait for link up */
7449 		for (i = 0; i < 7; i++) {
7450 			if (bp->link_up)
7451 				break;
7452 			msleep_interruptible(1000);
7453 		}
7454 	}
7455 
7456 	if (bnx2_test_nvram(bp) != 0) {
7457 		buf[3] = 1;
7458 		etest->flags |= ETH_TEST_FL_FAILED;
7459 	}
7460 	if (bnx2_test_intr(bp) != 0) {
7461 		buf[4] = 1;
7462 		etest->flags |= ETH_TEST_FL_FAILED;
7463 	}
7464 
7465 	if (bnx2_test_link(bp) != 0) {
7466 		buf[5] = 1;
7467 		etest->flags |= ETH_TEST_FL_FAILED;
7468 
7469 	}
7470 	if (!netif_running(bp->dev))
7471 		bnx2_set_power_state(bp, PCI_D3hot);
7472 }
7473 
7474 static void
7475 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7476 {
7477 	switch (stringset) {
7478 	case ETH_SS_STATS:
7479 		memcpy(buf, bnx2_stats_str_arr,
7480 			sizeof(bnx2_stats_str_arr));
7481 		break;
7482 	case ETH_SS_TEST:
7483 		memcpy(buf, bnx2_tests_str_arr,
7484 			sizeof(bnx2_tests_str_arr));
7485 		break;
7486 	}
7487 }
7488 
7489 static void
7490 bnx2_get_ethtool_stats(struct net_device *dev,
7491 		struct ethtool_stats *stats, u64 *buf)
7492 {
7493 	struct bnx2 *bp = netdev_priv(dev);
7494 	int i;
7495 	u32 *hw_stats = (u32 *) bp->stats_blk;
7496 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7497 	u8 *stats_len_arr = NULL;
7498 
7499 	if (hw_stats == NULL) {
7500 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7501 		return;
7502 	}
7503 
7504 	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7505 	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7506 	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7507 	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
7508 		stats_len_arr = bnx2_5706_stats_len_arr;
7509 	else
7510 		stats_len_arr = bnx2_5708_stats_len_arr;
7511 
7512 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7513 		unsigned long offset;
7514 
7515 		if (stats_len_arr[i] == 0) {
7516 			/* skip this counter */
7517 			buf[i] = 0;
7518 			continue;
7519 		}
7520 
7521 		offset = bnx2_stats_offset_arr[i];
7522 		if (stats_len_arr[i] == 4) {
7523 			/* 4-byte counter */
7524 			buf[i] = (u64) *(hw_stats + offset) +
7525 				 *(temp_stats + offset);
7526 			continue;
7527 		}
7528 		/* 8-byte counter */
7529 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7530 			 *(hw_stats + offset + 1) +
7531 			 (((u64) *(temp_stats + offset)) << 32) +
7532 			 *(temp_stats + offset + 1);
7533 	}
7534 }
7535 
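/* ethtool LED identify.  Returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to call back once per second; ID_ON/ID_OFF drive the
 * LEDs through the EMAC override bits, and ID_INACTIVE restores the
 * saved LED mode.
 */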
7536 static int
7537 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7538 {
7539 	struct bnx2 *bp = netdev_priv(dev);
7540 
7541 	switch (state) {
7542 	case ETHTOOL_ID_ACTIVE:
7543 		bnx2_set_power_state(bp, PCI_D0);
7544 
7545 		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7546 		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7547 		return 1;	/* cycle on/off once per second */
7548 
7549 	case ETHTOOL_ID_ON:
7550 		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7551 		       BNX2_EMAC_LED_1000MB_OVERRIDE |
7552 		       BNX2_EMAC_LED_100MB_OVERRIDE |
7553 		       BNX2_EMAC_LED_10MB_OVERRIDE |
7554 		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7555 		       BNX2_EMAC_LED_TRAFFIC);
7556 		break;
7557 
7558 	case ETHTOOL_ID_OFF:
7559 		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7560 		break;
7561 
7562 	case ETHTOOL_ID_INACTIVE:
7563 		REG_WR(bp, BNX2_EMAC_LED, 0);
7564 		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7565 
7566 		if (!netif_running(dev))
7567 			bnx2_set_power_state(bp, PCI_D3hot);
7568 		break;
7569 	}
7570 
7571 	return 0;
7572 }
7573 
7574 static netdev_features_t
7575 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7576 {
7577 	struct bnx2 *bp = netdev_priv(dev);
7578 
7579 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7580 		features |= NETIF_F_HW_VLAN_RX;
7581 
7582 	return features;
7583 }
7584 
7585 static int
7586 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7587 {
7588 	struct bnx2 *bp = netdev_priv(dev);
7589 
7590 	/* TSO with VLAN tag won't work with current firmware */
7591 	if (features & NETIF_F_HW_VLAN_TX)
7592 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7593 	else
7594 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7595 
7596 	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7597 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7598 	    netif_running(dev)) {
7599 		bnx2_netif_stop(bp, false);
7600 		dev->features = features;
7601 		bnx2_set_rx_mode(dev);
7602 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7603 		bnx2_netif_start(bp, false);
7604 		return 1;
7605 	}
7606 
7607 	return 0;
7608 }
7609 
7610 static const struct ethtool_ops bnx2_ethtool_ops = {
7611 	.get_settings		= bnx2_get_settings,
7612 	.set_settings		= bnx2_set_settings,
7613 	.get_drvinfo		= bnx2_get_drvinfo,
7614 	.get_regs_len		= bnx2_get_regs_len,
7615 	.get_regs		= bnx2_get_regs,
7616 	.get_wol		= bnx2_get_wol,
7617 	.set_wol		= bnx2_set_wol,
7618 	.nway_reset		= bnx2_nway_reset,
7619 	.get_link		= bnx2_get_link,
7620 	.get_eeprom_len		= bnx2_get_eeprom_len,
7621 	.get_eeprom		= bnx2_get_eeprom,
7622 	.set_eeprom		= bnx2_set_eeprom,
7623 	.get_coalesce		= bnx2_get_coalesce,
7624 	.set_coalesce		= bnx2_set_coalesce,
7625 	.get_ringparam		= bnx2_get_ringparam,
7626 	.set_ringparam		= bnx2_set_ringparam,
7627 	.get_pauseparam		= bnx2_get_pauseparam,
7628 	.set_pauseparam		= bnx2_set_pauseparam,
7629 	.self_test		= bnx2_self_test,
7630 	.get_strings		= bnx2_get_strings,
7631 	.set_phys_id		= bnx2_set_phys_id,
7632 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7633 	.get_sset_count		= bnx2_get_sset_count,
7634 };
7635 
7636 /* Called with rtnl_lock */
7637 static int
7638 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7639 {
7640 	struct mii_ioctl_data *data = if_mii(ifr);
7641 	struct bnx2 *bp = netdev_priv(dev);
7642 	int err;
7643 
7644 	switch(cmd) {
7645 	case SIOCGMIIPHY:
7646 		data->phy_id = bp->phy_addr;
7647 
7648 		/* fallthru */
7649 	case SIOCGMIIREG: {
7650 		u32 mii_regval;
7651 
7652 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7653 			return -EOPNOTSUPP;
7654 
7655 		if (!netif_running(dev))
7656 			return -EAGAIN;
7657 
7658 		spin_lock_bh(&bp->phy_lock);
7659 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7660 		spin_unlock_bh(&bp->phy_lock);
7661 
7662 		data->val_out = mii_regval;
7663 
7664 		return err;
7665 	}
7666 
7667 	case SIOCSMIIREG:
7668 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7669 			return -EOPNOTSUPP;
7670 
7671 		if (!netif_running(dev))
7672 			return -EAGAIN;
7673 
7674 		spin_lock_bh(&bp->phy_lock);
7675 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7676 		spin_unlock_bh(&bp->phy_lock);
7677 
7678 		return err;
7679 
7680 	default:
7681 		/* do nothing */
7682 		break;
7683 	}
7684 	return -EOPNOTSUPP;
7685 }
7686 
7687 /* Called with rtnl_lock */
7688 static int
7689 bnx2_change_mac_addr(struct net_device *dev, void *p)
7690 {
7691 	struct sockaddr *addr = p;
7692 	struct bnx2 *bp = netdev_priv(dev);
7693 
7694 	if (!is_valid_ether_addr(addr->sa_data))
7695 		return -EINVAL;
7696 
7697 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7698 	if (netif_running(dev))
7699 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7700 
7701 	return 0;
7702 }
7703 
7704 /* Called with rtnl_lock */
7705 static int
7706 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7707 {
7708 	struct bnx2 *bp = netdev_priv(dev);
7709 
7710 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7711 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7712 		return -EINVAL;
7713 
7714 	dev->mtu = new_mtu;
7715 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7716 }
7717 
7718 #ifdef CONFIG_NET_POLL_CONTROLLER
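/* netpoll entry point: run each vector's interrupt handler directly,
 * with its IRQ line disabled, so netconsole can make progress even
 * when normal interrupt delivery is unavailable.
 */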
7719 static void
7720 poll_bnx2(struct net_device *dev)
7721 {
7722 	struct bnx2 *bp = netdev_priv(dev);
7723 	int i;
7724 
7725 	for (i = 0; i < bp->irq_nvecs; i++) {
7726 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7727 
7728 		disable_irq(irq->vector);
7729 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7730 		enable_irq(irq->vector);
7731 	}
7732 }
7733 #endif
7734 
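/* The 5709 comes in copper and SerDes variants.  Decide which media
 * this PCI function uses from the bond ID, falling back to the strap
 * pins (or their software override) when the bond ID is inconclusive.
 */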
7735 static void __devinit
7736 bnx2_get_5709_media(struct bnx2 *bp)
7737 {
7738 	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7739 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7740 	u32 strap;
7741 
7742 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7743 		return;
7744 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7745 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7746 		return;
7747 	}
7748 
7749 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7750 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7751 	else
7752 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7753 
7754 	if (PCI_FUNC(bp->pdev->devfn) == 0) {
7755 		switch (strap) {
7756 		case 0x4:
7757 		case 0x5:
7758 		case 0x6:
7759 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7760 			return;
7761 		}
7762 	} else {
7763 		switch (strap) {
7764 		case 0x1:
7765 		case 0x2:
7766 		case 0x4:
7767 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7768 			return;
7769 		}
7770 	}
7771 }
7772 
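/* Work out the PCI/PCI-X bus mode, clock speed and width from the
 * chip's status registers; only called for non-PCIe devices.
 */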
7773 static void __devinit
7774 bnx2_get_pci_speed(struct bnx2 *bp)
7775 {
7776 	u32 reg;
7777 
7778 	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7779 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7780 		u32 clkreg;
7781 
7782 		bp->flags |= BNX2_FLAG_PCIX;
7783 
7784 		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7785 
7786 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7787 		switch (clkreg) {
7788 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7789 			bp->bus_speed_mhz = 133;
7790 			break;
7791 
7792 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7793 			bp->bus_speed_mhz = 100;
7794 			break;
7795 
7796 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7797 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7798 			bp->bus_speed_mhz = 66;
7799 			break;
7800 
7801 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7802 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7803 			bp->bus_speed_mhz = 50;
7804 			break;
7805 
7806 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7807 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7808 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7809 			bp->bus_speed_mhz = 33;
7810 			break;
7811 		}
7812 	}
7813 	else {
7814 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7815 			bp->bus_speed_mhz = 66;
7816 		else
7817 			bp->bus_speed_mhz = 33;
7818 	}
7819 
7820 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7821 		bp->flags |= BNX2_FLAG_PCI_32BIT;
7822 
7823 }
7824 
7825 static void __devinit
7826 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7827 {
7828 	int rc, i, j;
7829 	u8 *data;
7830 	unsigned int block_end, rosize, len;
7831 
7832 #define BNX2_VPD_NVRAM_OFFSET	0x300
7833 #define BNX2_VPD_LEN		128
7834 #define BNX2_MAX_VER_SLEN	30
7835 
7836 	data = kmalloc(256, GFP_KERNEL);
7837 	if (!data)
7838 		return;
7839 
7840 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7841 			     BNX2_VPD_LEN);
7842 	if (rc)
7843 		goto vpd_done;
7844 
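	/* bnx2_nvram_read() returns the VPD area as 32-bit words in the
	 * upper half of the buffer; byte-swap each word into the lower
	 * half so the PCI VPD helpers see the data in plain byte order.
	 */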
7845 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7846 		data[i] = data[i + BNX2_VPD_LEN + 3];
7847 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7848 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7849 		data[i + 3] = data[i + BNX2_VPD_LEN];
7850 	}
7851 
7852 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7853 	if (i < 0)
7854 		goto vpd_done;
7855 
7856 	rosize = pci_vpd_lrdt_size(&data[i]);
7857 	i += PCI_VPD_LRDT_TAG_SIZE;
7858 	block_end = i + rosize;
7859 
7860 	if (block_end > BNX2_VPD_LEN)
7861 		goto vpd_done;
7862 
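	/* Only accept the vendor-specific version keyword when the
	 * manufacturer ID field is the ASCII string "1028" (Dell).
	 */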
7863 	j = pci_vpd_find_info_keyword(data, i, rosize,
7864 				      PCI_VPD_RO_KEYWORD_MFR_ID);
7865 	if (j < 0)
7866 		goto vpd_done;
7867 
7868 	len = pci_vpd_info_field_size(&data[j]);
7869 
7870 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7871 	if (j + len > block_end || len != 4 ||
7872 	    memcmp(&data[j], "1028", 4))
7873 		goto vpd_done;
7874 
7875 	j = pci_vpd_find_info_keyword(data, i, rosize,
7876 				      PCI_VPD_RO_KEYWORD_VENDOR0);
7877 	if (j < 0)
7878 		goto vpd_done;
7879 
7880 	len = pci_vpd_info_field_size(&data[j]);
7881 
7882 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7883 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7884 		goto vpd_done;
7885 
7886 	memcpy(bp->fw_version, &data[j], len);
7887 	bp->fw_version[len] = ' ';
7888 
7889 vpd_done:
7890 	kfree(data);
7891 }
7892 
7893 static int __devinit
7894 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7895 {
7896 	struct bnx2 *bp;
7897 	unsigned long mem_len;
7898 	int rc, i, j;
7899 	u32 reg;
7900 	u64 dma_mask, persist_dma_mask;
7901 	int err;
7902 
7903 	SET_NETDEV_DEV(dev, &pdev->dev);
7904 	bp = netdev_priv(dev);
7905 
7906 	bp->flags = 0;
7907 	bp->phy_flags = 0;
7908 
7909 	bp->temp_stats_blk =
7910 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7911 
7912 	if (bp->temp_stats_blk == NULL) {
7913 		rc = -ENOMEM;
7914 		goto err_out;
7915 	}
7916 
7917 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
7918 	rc = pci_enable_device(pdev);
7919 	if (rc) {
7920 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7921 		goto err_out;
7922 	}
7923 
7924 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7925 		dev_err(&pdev->dev,
7926 			"Cannot find PCI device base address, aborting\n");
7927 		rc = -ENODEV;
7928 		goto err_out_disable;
7929 	}
7930 
7931 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7932 	if (rc) {
7933 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7934 		goto err_out_disable;
7935 	}
7936 
7937 	pci_set_master(pdev);
7938 
7939 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7940 	if (bp->pm_cap == 0) {
7941 		dev_err(&pdev->dev,
7942 			"Cannot find power management capability, aborting\n");
7943 		rc = -EIO;
7944 		goto err_out_release;
7945 	}
7946 
7947 	bp->dev = dev;
7948 	bp->pdev = pdev;
7949 
7950 	spin_lock_init(&bp->phy_lock);
7951 	spin_lock_init(&bp->indirect_lock);
7952 #ifdef BCM_CNIC
7953 	mutex_init(&bp->cnic_lock);
7954 #endif
7955 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
7956 
7957 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7958 	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7959 	dev->mem_end = dev->mem_start + mem_len;
7960 	dev->irq = pdev->irq;
7961 
7962 	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7963 
7964 	if (!bp->regview) {
7965 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7966 		rc = -ENOMEM;
7967 		goto err_out_release;
7968 	}
7969 
7970 	bnx2_set_power_state(bp, PCI_D0);
7971 
7972 	/* Configure byte swap and enable write to the reg_window registers.
7973 	 * Rely on the CPU to do target byte swapping on big endian systems;
7974 	 * the chip's target access swapping will not swap all accesses.
7975 	 */
7976 	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7977 		   BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7978 		   BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7979 
7980 	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7981 
7982 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7983 		if (!pci_is_pcie(pdev)) {
7984 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
7985 			rc = -EIO;
7986 			goto err_out_unmap;
7987 		}
7988 		bp->flags |= BNX2_FLAG_PCIE;
7989 		if (CHIP_REV(bp) == CHIP_REV_Ax)
7990 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7991 
7992 		/* AER (Advanced Error Reporting) hooks */
7993 		err = pci_enable_pcie_error_reporting(pdev);
7994 		if (!err)
7995 			bp->flags |= BNX2_FLAG_AER_ENABLED;
7996 
7997 	} else {
7998 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7999 		if (bp->pcix_cap == 0) {
8000 			dev_err(&pdev->dev,
8001 				"Cannot find PCIX capability, aborting\n");
8002 			rc = -EIO;
8003 			goto err_out_unmap;
8004 		}
8005 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8006 	}
8007 
8008 	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8009 		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8010 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8011 	}
8012 
8013 	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8014 		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8015 			bp->flags |= BNX2_FLAG_MSI_CAP;
8016 	}
8017 
8018 	/* 5708 cannot support DMA addresses > 40-bit.  */
8019 	if (CHIP_NUM(bp) == CHIP_NUM_5708)
8020 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8021 	else
8022 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8023 
8024 	/* Configure DMA attributes. */
8025 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8026 		dev->features |= NETIF_F_HIGHDMA;
8027 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8028 		if (rc) {
8029 			dev_err(&pdev->dev,
8030 				"pci_set_consistent_dma_mask failed, aborting\n");
8031 			goto err_out_unmap;
8032 		}
8033 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8034 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8035 		goto err_out_unmap;
8036 	}
8037 
8038 	if (!(bp->flags & BNX2_FLAG_PCIE))
8039 		bnx2_get_pci_speed(bp);
8040 
8041 	/* 5706A0 may falsely detect SERR and PERR. */
8042 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8043 		reg = REG_RD(bp, PCI_COMMAND);
8044 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8045 		REG_WR(bp, PCI_COMMAND, reg);
8046 	}
8047 	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8048 		!(bp->flags & BNX2_FLAG_PCIX)) {
8049 
8050 		dev_err(&pdev->dev,
8051 			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
8052 		goto err_out_unmap;
8053 	}
8054 
8055 	bnx2_init_nvram(bp);
8056 
8057 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
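	/* If the shared memory header signature is present, the bootcode
	 * publishes a per-function shared memory base; otherwise fall
	 * back to the legacy fixed address.
	 */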
8058 
8059 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8060 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8061 		u32 off = PCI_FUNC(pdev->devfn) << 2;
8062 
8063 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8064 	} else
8065 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8066 
8067 	/* Get the permanent MAC address.  First we need to make sure the
8068 	 * firmware is actually running.
8069 	 */
8070 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8071 
8072 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8073 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8074 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8075 		rc = -ENODEV;
8076 		goto err_out_unmap;
8077 	}
8078 
8079 	bnx2_read_vpd_fw_ver(bp);
8080 
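	/* Append the bootcode revision as "bc x.y.z": each of the top
	 * three bytes of BNX2_DEV_INFO_BC_REV is printed in decimal with
	 * leading zeros skipped.
	 */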
8081 	j = strlen(bp->fw_version);
8082 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8083 	for (i = 0; i < 3 && j < 24; i++) {
8084 		u8 num, k, skip0;
8085 
8086 		if (i == 0) {
8087 			bp->fw_version[j++] = 'b';
8088 			bp->fw_version[j++] = 'c';
8089 			bp->fw_version[j++] = ' ';
8090 		}
8091 		num = (u8) (reg >> (24 - (i * 8)));
8092 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8093 			if (num >= k || !skip0 || k == 1) {
8094 				bp->fw_version[j++] = (num / k) + '0';
8095 				skip0 = 0;
8096 			}
8097 		}
8098 		if (i != 2)
8099 			bp->fw_version[j++] = '.';
8100 	}
8101 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8102 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8103 		bp->wol = 1;
8104 
8105 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8106 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8107 
8108 		for (i = 0; i < 30; i++) {
8109 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8110 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8111 				break;
8112 			msleep(10);
8113 		}
8114 	}
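	/* If management firmware is running, append its version string
	 * (up to three big-endian words read via the shared memory
	 * pointer).
	 */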
8115 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8116 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8117 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8118 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8119 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8120 
8121 		if (j < 32)
8122 			bp->fw_version[j++] = ' ';
8123 		for (i = 0; i < 3 && j < 28; i++) {
8124 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8125 			reg = be32_to_cpu(reg);
8126 			memcpy(&bp->fw_version[j], &reg, 4);
8127 			j += 4;
8128 		}
8129 	}
8130 
8131 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8132 	bp->mac_addr[0] = (u8) (reg >> 8);
8133 	bp->mac_addr[1] = (u8) reg;
8134 
8135 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8136 	bp->mac_addr[2] = (u8) (reg >> 24);
8137 	bp->mac_addr[3] = (u8) (reg >> 16);
8138 	bp->mac_addr[4] = (u8) (reg >> 8);
8139 	bp->mac_addr[5] = (u8) reg;
8140 
8141 	bp->tx_ring_size = MAX_TX_DESC_CNT;
8142 	bnx2_set_rx_ring_size(bp, 255);
8143 
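	/* Default interrupt coalescing thresholds; these can be tuned
	 * later through ethtool -C.
	 */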
8144 	bp->tx_quick_cons_trip_int = 2;
8145 	bp->tx_quick_cons_trip = 20;
8146 	bp->tx_ticks_int = 18;
8147 	bp->tx_ticks = 80;
8148 
8149 	bp->rx_quick_cons_trip_int = 2;
8150 	bp->rx_quick_cons_trip = 12;
8151 	bp->rx_ticks_int = 18;
8152 	bp->rx_ticks = 18;
8153 
8154 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8155 
8156 	bp->current_interval = BNX2_TIMER_INTERVAL;
8157 
8158 	bp->phy_addr = 1;
8159 
8160 	/* Determine the media type (copper or SerDes).  WOL support is
	 * disabled below for SerDes configurations that cannot keep the
	 * link up on VAUX.
	 */
8161 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8162 		bnx2_get_5709_media(bp);
8163 	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8164 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8165 
8166 	bp->phy_port = PORT_TP;
8167 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8168 		bp->phy_port = PORT_FIBRE;
8169 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8170 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8171 			bp->flags |= BNX2_FLAG_NO_WOL;
8172 			bp->wol = 0;
8173 		}
8174 		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8175 			/* Don't do parallel detect on this board because of
8176 			 * some board problems.  The link will not go down
8177 			 * if we do parallel detect.
8178 			 */
8179 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8180 			    pdev->subsystem_device == 0x310c)
8181 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8182 		} else {
8183 			bp->phy_addr = 2;
8184 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8185 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8186 		}
8187 	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8188 		   CHIP_NUM(bp) == CHIP_NUM_5708)
8189 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8190 	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8191 		 (CHIP_REV(bp) == CHIP_REV_Ax ||
8192 		  CHIP_REV(bp) == CHIP_REV_Bx))
8193 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8194 
8195 	bnx2_init_fw_cap(bp);
8196 
8197 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8198 	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8199 	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8200 	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8201 		bp->flags |= BNX2_FLAG_NO_WOL;
8202 		bp->wol = 0;
8203 	}
8204 
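	/* On the 5706 A0, use identical coalescing parameters for the
	 * interrupt-mode ("_int") fields.
	 */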
8205 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8206 		bp->tx_quick_cons_trip_int =
8207 			bp->tx_quick_cons_trip;
8208 		bp->tx_ticks_int = bp->tx_ticks;
8209 		bp->rx_quick_cons_trip_int =
8210 			bp->rx_quick_cons_trip;
8211 		bp->rx_ticks_int = bp->rx_ticks;
8212 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8213 		bp->com_ticks_int = bp->com_ticks;
8214 		bp->cmd_ticks_int = bp->cmd_ticks;
8215 	}
8216 
8217 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8218 	 *
8219 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8220 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8221 	 * but causes problems on the AMD 8132 which will eventually stop
8222 	 * responding after a while.
8223 	 *
8224 	 * AMD believes this incompatibility is unique to the 5706, and
8225 	 * prefers to locally disable MSI rather than globally disabling it.
8226 	 */
8227 	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8228 		struct pci_dev *amd_8132 = NULL;
8229 
8230 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8231 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8232 						  amd_8132))) {
8233 
8234 			if (amd_8132->revision >= 0x10 &&
8235 			    amd_8132->revision <= 0x13) {
8236 				disable_msi = 1;
8237 				pci_dev_put(amd_8132);
8238 				break;
8239 			}
8240 		}
8241 	}
8242 
8243 	bnx2_set_default_link(bp);
8244 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8245 
8246 	init_timer(&bp->timer);
8247 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8248 	bp->timer.data = (unsigned long) bp;
8249 	bp->timer.function = bnx2_timer;
8250 
8251 #ifdef BCM_CNIC
8252 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8253 		bp->cnic_eth_dev.max_iscsi_conn =
8254 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8255 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8256 #endif
8257 	pci_save_state(pdev);
8258 
8259 	return 0;
8260 
8261 err_out_unmap:
8262 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8263 		pci_disable_pcie_error_reporting(pdev);
8264 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8265 	}
8266 
8267 	if (bp->regview) {
8268 		iounmap(bp->regview);
8269 		bp->regview = NULL;
8270 	}
8271 
8272 err_out_release:
8273 	pci_release_regions(pdev);
8274 
8275 err_out_disable:
8276 	pci_disable_device(pdev);
8277 	pci_set_drvdata(pdev, NULL);
8278 
8279 err_out:
8280 	return rc;
8281 }
8282 
8283 static char * __devinit
8284 bnx2_bus_string(struct bnx2 *bp, char *str)
8285 {
8286 	char *s = str;
8287 
8288 	if (bp->flags & BNX2_FLAG_PCIE) {
8289 		s += sprintf(s, "PCI Express");
8290 	} else {
8291 		s += sprintf(s, "PCI");
8292 		if (bp->flags & BNX2_FLAG_PCIX)
8293 			s += sprintf(s, "-X");
8294 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8295 			s += sprintf(s, " 32-bit");
8296 		else
8297 			s += sprintf(s, " 64-bit");
8298 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8299 	}
8300 	return str;
8301 }
8302 
8303 static void
8304 bnx2_del_napi(struct bnx2 *bp)
8305 {
8306 	int i;
8307 
8308 	for (i = 0; i < bp->irq_nvecs; i++)
8309 		netif_napi_del(&bp->bnx2_napi[i].napi);
8310 }
8311 
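/* Vector 0 services the default status block (link events as well as
 * ring 0), so it gets the full bnx2_poll handler; the remaining MSI-X
 * vectors poll only their own rings via bnx2_poll_msix.
 */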
8312 static void
8313 bnx2_init_napi(struct bnx2 *bp)
8314 {
8315 	int i;
8316 
8317 	for (i = 0; i < bp->irq_nvecs; i++) {
8318 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8319 		int (*poll)(struct napi_struct *, int);
8320 
8321 		if (i == 0)
8322 			poll = bnx2_poll;
8323 		else
8324 			poll = bnx2_poll_msix;
8325 
8326 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8327 		bnapi->bp = bp;
8328 	}
8329 }
8330 
8331 static const struct net_device_ops bnx2_netdev_ops = {
8332 	.ndo_open		= bnx2_open,
8333 	.ndo_start_xmit		= bnx2_start_xmit,
8334 	.ndo_stop		= bnx2_close,
8335 	.ndo_get_stats64	= bnx2_get_stats64,
8336 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8337 	.ndo_do_ioctl		= bnx2_ioctl,
8338 	.ndo_validate_addr	= eth_validate_addr,
8339 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8340 	.ndo_change_mtu		= bnx2_change_mtu,
8341 	.ndo_fix_features	= bnx2_fix_features,
8342 	.ndo_set_features	= bnx2_set_features,
8343 	.ndo_tx_timeout		= bnx2_tx_timeout,
8344 #ifdef CONFIG_NET_POLL_CONTROLLER
8345 	.ndo_poll_controller	= poll_bnx2,
8346 #endif
8347 };
8348 
8349 static int __devinit
8350 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8351 {
8352 	static int version_printed = 0;
8353 	struct net_device *dev = NULL;
8354 	struct bnx2 *bp;
8355 	int rc;
8356 	char str[40];
8357 
8358 	if (version_printed++ == 0)
8359 		pr_info("%s", version);
8360 
8361 	/* dev zeroed in alloc_etherdev_mq() */
8362 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8363 
8364 	if (!dev)
8365 		return -ENOMEM;
8366 
8367 	rc = bnx2_init_board(pdev, dev);
8368 	if (rc < 0) {
8369 		free_netdev(dev);
8370 		return rc;
8371 	}
8372 
8373 	dev->netdev_ops = &bnx2_netdev_ops;
8374 	dev->watchdog_timeo = TX_TIMEOUT;
8375 	dev->ethtool_ops = &bnx2_ethtool_ops;
8376 
8377 	bp = netdev_priv(dev);
8378 
8379 	pci_set_drvdata(pdev, dev);
8380 
8381 	memcpy(dev->dev_addr, bp->mac_addr, 6);
8382 	memcpy(dev->perm_addr, bp->mac_addr, 6);
8383 
8384 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8385 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8386 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8387 
8388 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8389 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8390 
8391 	dev->vlan_features = dev->hw_features;
8392 	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8393 	dev->features |= dev->hw_features;
8394 	dev->priv_flags |= IFF_UNICAST_FLT;
8395 
8396 	if ((rc = register_netdev(dev))) {
8397 		dev_err(&pdev->dev, "Cannot register net device\n");
8398 		goto error;
8399 	}
8400 
8401 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8402 		    board_info[ent->driver_data].name,
8403 		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8404 		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
8405 		    bnx2_bus_string(bp, str),
8406 		    dev->base_addr,
8407 		    bp->pdev->irq, dev->dev_addr);
8408 
8409 	return 0;
8410 
8411 error:
8412 	if (bp->regview)
8413 		iounmap(bp->regview);
8414 	pci_release_regions(pdev);
8415 	pci_disable_device(pdev);
8416 	pci_set_drvdata(pdev, NULL);
8417 	free_netdev(dev);
8418 	return rc;
8419 }
8420 
8421 static void __devexit
8422 bnx2_remove_one(struct pci_dev *pdev)
8423 {
8424 	struct net_device *dev = pci_get_drvdata(pdev);
8425 	struct bnx2 *bp = netdev_priv(dev);
8426 
8427 	unregister_netdev(dev);
8428 
8429 	del_timer_sync(&bp->timer);
8430 	cancel_work_sync(&bp->reset_task);
8431 
8432 	if (bp->regview)
8433 		iounmap(bp->regview);
8434 
8435 	kfree(bp->temp_stats_blk);
8436 
8437 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8438 		pci_disable_pcie_error_reporting(pdev);
8439 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8440 	}
8441 
8442 	bnx2_release_firmware(bp);
8443 
8444 	free_netdev(dev);
8445 
8446 	pci_release_regions(pdev);
8447 	pci_disable_device(pdev);
8448 	pci_set_drvdata(pdev, NULL);
8449 }
8450 
8451 static int
8452 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8453 {
8454 	struct net_device *dev = pci_get_drvdata(pdev);
8455 	struct bnx2 *bp = netdev_priv(dev);
8456 
8457 	/* PCI register 4 needs to be saved whether netif_running() or not.
8458 	 * MSI address and data need to be saved if using MSI and
8459 	 * netif_running().
8460 	 */
8461 	pci_save_state(pdev);
8462 	if (!netif_running(dev))
8463 		return 0;
8464 
8465 	cancel_work_sync(&bp->reset_task);
8466 	bnx2_netif_stop(bp, true);
8467 	netif_device_detach(dev);
8468 	del_timer_sync(&bp->timer);
8469 	bnx2_shutdown_chip(bp);
8470 	bnx2_free_skbs(bp);
8471 	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8472 	return 0;
8473 }
8474 
8475 static int
8476 bnx2_resume(struct pci_dev *pdev)
8477 {
8478 	struct net_device *dev = pci_get_drvdata(pdev);
8479 	struct bnx2 *bp = netdev_priv(dev);
8480 
8481 	pci_restore_state(pdev);
8482 	if (!netif_running(dev))
8483 		return 0;
8484 
8485 	bnx2_set_power_state(bp, PCI_D0);
8486 	netif_device_attach(dev);
8487 	bnx2_init_nic(bp, 1);
8488 	bnx2_netif_start(bp, true);
8489 	return 0;
8490 }
8491 
8492 /**
8493  * bnx2_io_error_detected - called when PCI error is detected
8494  * @pdev: Pointer to PCI device
8495  * @state: The current pci connection state
8496  *
8497  * This function is called after a PCI bus error affecting
8498  * this device has been detected.
8499  */
8500 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8501 					       pci_channel_state_t state)
8502 {
8503 	struct net_device *dev = pci_get_drvdata(pdev);
8504 	struct bnx2 *bp = netdev_priv(dev);
8505 
8506 	rtnl_lock();
8507 	netif_device_detach(dev);
8508 
8509 	if (state == pci_channel_io_perm_failure) {
8510 		rtnl_unlock();
8511 		return PCI_ERS_RESULT_DISCONNECT;
8512 	}
8513 
8514 	if (netif_running(dev)) {
8515 		bnx2_netif_stop(bp, true);
8516 		del_timer_sync(&bp->timer);
8517 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8518 	}
8519 
8520 	pci_disable_device(pdev);
8521 	rtnl_unlock();
8522 
8523 	/* Request a slot reset. */
8524 	return PCI_ERS_RESULT_NEED_RESET;
8525 }
8526 
8527 /**
8528  * bnx2_io_slot_reset - called after the pci bus has been reset.
8529  * @pdev: Pointer to PCI device
8530  *
8531  * Restart the card from scratch, as if from a cold-boot.
8532  */
8533 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8534 {
8535 	struct net_device *dev = pci_get_drvdata(pdev);
8536 	struct bnx2 *bp = netdev_priv(dev);
8537 	pci_ers_result_t result;
8538 	int err;
8539 
8540 	rtnl_lock();
8541 	if (pci_enable_device(pdev)) {
8542 		dev_err(&pdev->dev,
8543 			"Cannot re-enable PCI device after reset\n");
8544 		result = PCI_ERS_RESULT_DISCONNECT;
8545 	} else {
8546 		pci_set_master(pdev);
8547 		pci_restore_state(pdev);
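		/* pci_restore_state() consumes the saved state; save it
		 * again so a later restore still has valid data.
		 */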
8548 		pci_save_state(pdev);
8549 
8550 		if (netif_running(dev)) {
8551 			bnx2_set_power_state(bp, PCI_D0);
8552 			bnx2_init_nic(bp, 1);
8553 		}
8554 		result = PCI_ERS_RESULT_RECOVERED;
8555 	}
8556 	rtnl_unlock();
8557 
8558 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8559 		return result;
8560 
8561 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8562 	if (err) {
8563 		dev_err(&pdev->dev,
8564 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8565 			 err); /* non-fatal, continue */
8566 	}
8567 
8568 	return result;
8569 }
8570 
8571 /**
8572  * bnx2_io_resume - called when traffic can start flowing again.
8573  * @pdev: Pointer to PCI device
8574  *
8575  * This callback is called when the error recovery driver tells us that
8576  * it's OK to resume normal operation.
8577  */
8578 static void bnx2_io_resume(struct pci_dev *pdev)
8579 {
8580 	struct net_device *dev = pci_get_drvdata(pdev);
8581 	struct bnx2 *bp = netdev_priv(dev);
8582 
8583 	rtnl_lock();
8584 	if (netif_running(dev))
8585 		bnx2_netif_start(bp, true);
8586 
8587 	netif_device_attach(dev);
8588 	rtnl_unlock();
8589 }
8590 
8591 static struct pci_error_handlers bnx2_err_handler = {
8592 	.error_detected	= bnx2_io_error_detected,
8593 	.slot_reset	= bnx2_io_slot_reset,
8594 	.resume		= bnx2_io_resume,
8595 };
8596 
8597 static struct pci_driver bnx2_pci_driver = {
8598 	.name		= DRV_MODULE_NAME,
8599 	.id_table	= bnx2_pci_tbl,
8600 	.probe		= bnx2_init_one,
8601 	.remove		= __devexit_p(bnx2_remove_one),
8602 	.suspend	= bnx2_suspend,
8603 	.resume		= bnx2_resume,
8604 	.err_handler	= &bnx2_err_handler,
8605 };
8606 
8607 static int __init bnx2_init(void)
8608 {
8609 	return pci_register_driver(&bnx2_pci_driver);
8610 }
8611 
8612 static void __exit bnx2_cleanup(void)
8613 {
8614 	pci_unregister_driver(&bnx2_pci_driver);
8615 }
8616 
8617 module_init(bnx2_init);
8618 module_exit(bnx2_cleanup);