/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.4"
#define DRV_MODULE_RELDATE	"Aug 05, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
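	/* Illustrative arithmetic (not in the original source): with 16-bit
	 * indices, tx_prod = 3 and tx_cons = 0xfffe give a u32 difference of
	 * 0xffff0005; masking with 0xffff recovers the 5 descriptors
	 * outstanding across the wrap.
	 */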
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
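	/* Two write paths: the 5709 uses a request/ack handshake through
	 * BNX2_CTX_CTX_CTRL and polls for the WRITE_REQ bit to clear, while
	 * older chips expose context memory through a plain address/data
	 * register pair.
	 */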
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

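	/* The status blocks are carved out of one DMA allocation at
	 * BNX2_SBLK_MSIX_ALIGN_SIZE strides, so vector sb_id's block sits at
	 * msi base + sb_id * BNX2_SBLK_MSIX_ALIGN_SIZE (see the address
	 * arithmetic below).
	 */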
	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

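	/* MDIO command word layout used below: PHY address in bits 25:21,
	 * register in bits 20:16, command/start bits OR'ed in. As an
	 * illustrative example, reading MII_BMSR (reg 1) on PHY 1 would set
	 * (1 << 21) | (1 << 16) plus the READ and START_BUSY flags.
	 */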
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

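		/* Two-step ack: the first write acknowledges the current
		 * status index while keeping the vector masked, the second
		 * (without MASK_INT) unmasks it.
		 */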
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

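	/* Layout sketch of the combined allocation: one status block per HW
	 * vector when MSI-X is supported, each at a
	 * BNX2_SBLK_MSIX_ALIGN_SIZE stride, followed by the statistics block
	 * at offset status_blk_size.
	 */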
	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
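	/* Summary of the resolution implemented below: both sides PAUSE ->
	 * symmetric TX+RX; local PAUSE+ASYM vs. remote ASYM-only -> local RX
	 * only; local ASYM-only vs. remote PAUSE+ASYM -> local TX only.
	 */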
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

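		/* MII_STAT1000 reports the partner's 1000BASE-T abilities two
		 * bits above where MII_CTRL1000 advertises ours, so shifting
		 * right by 2 lines the fields up (LPA_1000FULL >> 2 ==
		 * ADVERTISE_1000FULL).
		 */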
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

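	/* Mapping implemented below: RX+TX requested -> symmetric pause;
	 * TX only -> asymmetric pause; RX only -> symmetric plus asymmetric,
	 * using the 1000X variants of the bits on SerDes PHYs.
	 */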
	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

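	/* bnx2_fw_sync() waits for the bootcode to acknowledge the command,
	 * so phy_lock (a BH spinlock) is dropped around the call; hence the
	 * __releases/__acquires annotations on this function.
	 */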
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
1830 		 * to minimize link disruptions. Autoneg. involves
1831 		 * exchanging base pages plus 3 next pages and
1832 		 * normally completes in about 120 msec.
1833 		 */
1834 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1835 		bp->serdes_an_pending = 1;
1836 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1837 	} else {
1838 		bnx2_resolve_flow_ctrl(bp);
1839 		bnx2_set_mac_link(bp);
1840 	}
1841 
1842 	return 0;
1843 }
1844 
1845 #define ETHTOOL_ALL_FIBRE_SPEED						\
1846 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1847 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1848 		(ADVERTISED_1000baseT_Full)
1849 
1850 #define ETHTOOL_ALL_COPPER_SPEED					\
1851 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1852 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1853 	ADVERTISED_1000baseT_Full)
1854 
1855 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1856 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1857 
1858 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1859 
1860 static void
1861 bnx2_set_default_remote_link(struct bnx2 *bp)
1862 {
1863 	u32 link;
1864 
1865 	if (bp->phy_port == PORT_TP)
1866 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1867 	else
1868 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1869 
1870 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1871 		bp->req_line_speed = 0;
1872 		bp->autoneg |= AUTONEG_SPEED;
1873 		bp->advertising = ADVERTISED_Autoneg;
1874 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1875 			bp->advertising |= ADVERTISED_10baseT_Half;
1876 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1877 			bp->advertising |= ADVERTISED_10baseT_Full;
1878 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1879 			bp->advertising |= ADVERTISED_100baseT_Half;
1880 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1881 			bp->advertising |= ADVERTISED_100baseT_Full;
1882 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1883 			bp->advertising |= ADVERTISED_1000baseT_Full;
1884 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1885 			bp->advertising |= ADVERTISED_2500baseX_Full;
1886 	} else {
1887 		bp->autoneg = 0;
1888 		bp->advertising = 0;
1889 		bp->req_duplex = DUPLEX_FULL;
1890 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1891 			bp->req_line_speed = SPEED_10;
1892 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1893 				bp->req_duplex = DUPLEX_HALF;
1894 		}
1895 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1896 			bp->req_line_speed = SPEED_100;
1897 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1898 				bp->req_duplex = DUPLEX_HALF;
1899 		}
1900 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1901 			bp->req_line_speed = SPEED_1000;
1902 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1903 			bp->req_line_speed = SPEED_2500;
1904 	}
1905 }
1906 
1907 static void
1908 bnx2_set_default_link(struct bnx2 *bp)
1909 {
1910 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1911 		bnx2_set_default_remote_link(bp);
1912 		return;
1913 	}
1914 
1915 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1916 	bp->req_line_speed = 0;
1917 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1918 		u32 reg;
1919 
1920 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1921 
1922 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1923 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1924 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1925 			bp->autoneg = 0;
1926 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1927 			bp->req_duplex = DUPLEX_FULL;
1928 		}
1929 	} else
1930 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1931 }
1932 
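/* Driver pulse: post an incrementing sequence number to the DRV_PULSE
 * mailbox so the bootcode knows the driver is still alive.  The
 * register window used for the indirect write is shared, hence
 * indirect_lock.
 */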
1933 static void
1934 bnx2_send_heart_beat(struct bnx2 *bp)
1935 {
1936 	u32 msg;
1937 	u32 addr;
1938 
1939 	spin_lock(&bp->indirect_lock);
1940 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1941 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1942 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1943 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1944 	spin_unlock(&bp->indirect_lock);
1945 }
1946 
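/* Decode a link event posted by the management firmware in shared
 * memory: recover speed and duplex (each speed's half-duplex case
 * falls through to the full-duplex case below it), flow control,
 * and the active port type.
 */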
1947 static void
1948 bnx2_remote_phy_event(struct bnx2 *bp)
1949 {
1950 	u32 msg;
1951 	u8 link_up = bp->link_up;
1952 	u8 old_port;
1953 
1954 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1955 
1956 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1957 		bnx2_send_heart_beat(bp);
1958 
1959 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1960 
1961 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1962 		bp->link_up = 0;
1963 	else {
1964 		u32 speed;
1965 
1966 		bp->link_up = 1;
1967 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1968 		bp->duplex = DUPLEX_FULL;
1969 		switch (speed) {
1970 			case BNX2_LINK_STATUS_10HALF:
1971 				bp->duplex = DUPLEX_HALF;
1972 				/* fall through */
1973 			case BNX2_LINK_STATUS_10FULL:
1974 				bp->line_speed = SPEED_10;
1975 				break;
1976 			case BNX2_LINK_STATUS_100HALF:
1977 				bp->duplex = DUPLEX_HALF;
1978 				/* fall through */
1979 			case BNX2_LINK_STATUS_100BASE_T4:
1980 			case BNX2_LINK_STATUS_100FULL:
1981 				bp->line_speed = SPEED_100;
1982 				break;
1983 			case BNX2_LINK_STATUS_1000HALF:
1984 				bp->duplex = DUPLEX_HALF;
1985 				/* fall through */
1986 			case BNX2_LINK_STATUS_1000FULL:
1987 				bp->line_speed = SPEED_1000;
1988 				break;
1989 			case BNX2_LINK_STATUS_2500HALF:
1990 				bp->duplex = DUPLEX_HALF;
1991 				/* fall through */
1992 			case BNX2_LINK_STATUS_2500FULL:
1993 				bp->line_speed = SPEED_2500;
1994 				break;
1995 			default:
1996 				bp->line_speed = 0;
1997 				break;
1998 		}
1999 
2000 		bp->flow_ctrl = 0;
2001 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2002 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2003 			if (bp->duplex == DUPLEX_FULL)
2004 				bp->flow_ctrl = bp->req_flow_ctrl;
2005 		} else {
2006 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2007 				bp->flow_ctrl |= FLOW_CTRL_TX;
2008 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2009 				bp->flow_ctrl |= FLOW_CTRL_RX;
2010 		}
2011 
2012 		old_port = bp->phy_port;
2013 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2014 			bp->phy_port = PORT_FIBRE;
2015 		else
2016 			bp->phy_port = PORT_TP;
2017 
2018 		if (old_port != bp->phy_port)
2019 			bnx2_set_default_link(bp);
2020 
2021 	}
2022 	if (bp->link_up != link_up)
2023 		bnx2_report_link(bp);
2024 
2025 	bnx2_set_mac_link(bp);
2026 }
2027 
2028 static int
2029 bnx2_set_remote_link(struct bnx2 *bp)
2030 {
2031 	u32 evt_code;
2032 
2033 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2034 	switch (evt_code) {
2035 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2036 			bnx2_remote_phy_event(bp);
2037 			break;
2038 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2039 		default:
2040 			bnx2_send_heart_beat(bp);
2041 			break;
2042 	}
2043 	return 0;
2044 }
2045 
2046 static int
2047 bnx2_setup_copper_phy(struct bnx2 *bp)
2048 __releases(&bp->phy_lock)
2049 __acquires(&bp->phy_lock)
2050 {
2051 	u32 bmcr;
2052 	u32 new_bmcr;
2053 
2054 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2055 
2056 	if (bp->autoneg & AUTONEG_SPEED) {
2057 		u32 adv_reg, adv1000_reg;
2058 		u32 new_adv = 0;
2059 		u32 new_adv1000 = 0;
2060 
2061 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2062 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2063 			ADVERTISE_PAUSE_ASYM);
2064 
2065 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2066 		adv1000_reg &= PHY_ALL_1000_SPEED;
2067 
2068 		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2069 		new_adv |= ADVERTISE_CSMA;
2070 		new_adv |= bnx2_phy_get_pause_adv(bp);
2071 
2072 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2073 
2074 		if ((adv1000_reg != new_adv1000) ||
2075 			(adv_reg != new_adv) ||
2076 			((bmcr & BMCR_ANENABLE) == 0)) {
2077 
2078 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2079 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2080 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2081 				BMCR_ANENABLE);
2082 		}
2083 		else if (bp->link_up) {
2084 			/* Flow ctrl may have changed from auto to forced
2085 			 * or vice-versa. */
2086 
2087 			bnx2_resolve_flow_ctrl(bp);
2088 			bnx2_set_mac_link(bp);
2089 		}
2090 		return 0;
2091 	}
2092 
2093 	new_bmcr = 0;
2094 	if (bp->req_line_speed == SPEED_100) {
2095 		new_bmcr |= BMCR_SPEED100;
2096 	}
2097 	if (bp->req_duplex == DUPLEX_FULL) {
2098 		new_bmcr |= BMCR_FULLDPLX;
2099 	}
2100 	if (new_bmcr != bmcr) {
2101 		u32 bmsr;
2102 
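		/* The BMSR link-status bit is latched low; read the
		 * register twice to get the current state.
		 */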
2103 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2104 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2105 
2106 		if (bmsr & BMSR_LSTATUS) {
2107 			/* Force link down */
2108 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2109 			spin_unlock_bh(&bp->phy_lock);
2110 			msleep(50);
2111 			spin_lock_bh(&bp->phy_lock);
2112 
2113 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2114 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2115 		}
2116 
2117 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2118 
2119 		/* Normally, the new speed is set up after the link has
2120 		 * gone down and up again. In some cases, link will not go
2121 		 * down so we need to set up the new speed here.
2122 		 */
2123 		if (bmsr & BMSR_LSTATUS) {
2124 			bp->line_speed = bp->req_line_speed;
2125 			bp->duplex = bp->req_duplex;
2126 			bnx2_resolve_flow_ctrl(bp);
2127 			bnx2_set_mac_link(bp);
2128 		}
2129 	} else {
2130 		bnx2_resolve_flow_ctrl(bp);
2131 		bnx2_set_mac_link(bp);
2132 	}
2133 	return 0;
2134 }
2135 
2136 static int
2137 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2138 __releases(&bp->phy_lock)
2139 __acquires(&bp->phy_lock)
2140 {
2141 	if (bp->loopback == MAC_LOOPBACK)
2142 		return 0;
2143 
2144 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2145 		return bnx2_setup_serdes_phy(bp, port);
2146 	}
2147 	else {
2148 		return bnx2_setup_copper_phy(bp);
2149 	}
2150 }
2151 
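/* The 5709 SerDes PHY exposes its registers in banks selected through
 * MII_BNX2_BLK_ADDR; the standard MII registers sit at an offset of
 * 0x10 within the combo IEEE block, hence the remapped mii_* addresses
 * below.
 */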
2152 static int
2153 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2154 {
2155 	u32 val;
2156 
2157 	bp->mii_bmcr = MII_BMCR + 0x10;
2158 	bp->mii_bmsr = MII_BMSR + 0x10;
2159 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2160 	bp->mii_adv = MII_ADVERTISE + 0x10;
2161 	bp->mii_lpa = MII_LPA + 0x10;
2162 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2163 
2164 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2165 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2166 
2167 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2168 	if (reset_phy)
2169 		bnx2_reset_phy(bp);
2170 
2171 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2172 
2173 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2174 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2175 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2176 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2177 
2178 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2179 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2180 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2181 		val |= BCM5708S_UP1_2G5;
2182 	else
2183 		val &= ~BCM5708S_UP1_2G5;
2184 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2185 
2186 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2187 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2188 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2189 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2190 
2191 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2192 
2193 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2194 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2195 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2196 
2197 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2198 
2199 	return 0;
2200 }
2201 
2202 static int
2203 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2204 {
2205 	u32 val;
2206 
2207 	if (reset_phy)
2208 		bnx2_reset_phy(bp);
2209 
2210 	bp->mii_up1 = BCM5708S_UP1;
2211 
2212 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2213 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2214 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2215 
2216 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2217 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2218 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2219 
2220 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2221 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2222 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2223 
2224 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2225 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2226 		val |= BCM5708S_UP1_2G5;
2227 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2228 	}
2229 
2230 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2231 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2232 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2233 		/* increase tx signal amplitude */
2234 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2235 			       BCM5708S_BLK_ADDR_TX_MISC);
2236 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2237 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2238 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2239 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2240 	}
2241 
2242 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2243 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2244 
2245 	if (val) {
2246 		u32 is_backplane;
2247 
2248 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2249 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2250 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2251 				       BCM5708S_BLK_ADDR_TX_MISC);
2252 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2253 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2254 				       BCM5708S_BLK_ADDR_DIG);
2255 		}
2256 	}
2257 	return 0;
2258 }
2259 
2260 static int
2261 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2262 {
2263 	if (reset_phy)
2264 		bnx2_reset_phy(bp);
2265 
2266 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2267 
2268 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2269 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2270 
2271 	if (bp->dev->mtu > 1500) {
2272 		u32 val;
2273 
2274 		/* Set extended packet length bit */
2275 		bnx2_write_phy(bp, 0x18, 0x7);
2276 		bnx2_read_phy(bp, 0x18, &val);
2277 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2278 
2279 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2280 		bnx2_read_phy(bp, 0x1c, &val);
2281 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2282 	}
2283 	else {
2284 		u32 val;
2285 
2286 		bnx2_write_phy(bp, 0x18, 0x7);
2287 		bnx2_read_phy(bp, 0x18, &val);
2288 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2289 
2290 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2291 		bnx2_read_phy(bp, 0x1c, &val);
2292 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2293 	}
2294 
2295 	return 0;
2296 }
2297 
2298 static int
2299 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2300 {
2301 	u32 val;
2302 
2303 	if (reset_phy)
2304 		bnx2_reset_phy(bp);
2305 
2306 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2307 		bnx2_write_phy(bp, 0x18, 0x0c00);
2308 		bnx2_write_phy(bp, 0x17, 0x000a);
2309 		bnx2_write_phy(bp, 0x15, 0x310b);
2310 		bnx2_write_phy(bp, 0x17, 0x201f);
2311 		bnx2_write_phy(bp, 0x15, 0x9506);
2312 		bnx2_write_phy(bp, 0x17, 0x401f);
2313 		bnx2_write_phy(bp, 0x15, 0x14e2);
2314 		bnx2_write_phy(bp, 0x18, 0x0400);
2315 	}
2316 
2317 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2318 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2319 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2320 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2321 		val &= ~(1 << 8);
2322 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2323 	}
2324 
2325 	if (bp->dev->mtu > 1500) {
2326 		/* Set extended packet length bit */
2327 		bnx2_write_phy(bp, 0x18, 0x7);
2328 		bnx2_read_phy(bp, 0x18, &val);
2329 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2330 
2331 		bnx2_read_phy(bp, 0x10, &val);
2332 		bnx2_write_phy(bp, 0x10, val | 0x1);
2333 	}
2334 	else {
2335 		bnx2_write_phy(bp, 0x18, 0x7);
2336 		bnx2_read_phy(bp, 0x18, &val);
2337 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2338 
2339 		bnx2_read_phy(bp, 0x10, &val);
2340 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2341 	}
2342 
2343 	/* Enable Ethernet@Wirespeed (link at reduced speed on marginal cabling) */
2344 	bnx2_write_phy(bp, 0x18, 0x7007);
2345 	bnx2_read_phy(bp, 0x18, &val);
2346 	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2347 	return 0;
2348 }
2349 
2350 
2351 static int
2352 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2353 __releases(&bp->phy_lock)
2354 __acquires(&bp->phy_lock)
2355 {
2356 	u32 val;
2357 	int rc = 0;
2358 
2359 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2360 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2361 
2362 	bp->mii_bmcr = MII_BMCR;
2363 	bp->mii_bmsr = MII_BMSR;
2364 	bp->mii_bmsr1 = MII_BMSR;
2365 	bp->mii_adv = MII_ADVERTISE;
2366 	bp->mii_lpa = MII_LPA;
2367 
2368 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2369 
2370 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2371 		goto setup_phy;
2372 
2373 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2374 	bp->phy_id = val << 16;
2375 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2376 	bp->phy_id |= val & 0xffff;
2377 
2378 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2379 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2380 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2381 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2382 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2383 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2384 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2385 	}
2386 	else {
2387 		rc = bnx2_init_copper_phy(bp, reset_phy);
2388 	}
2389 
2390 setup_phy:
2391 	if (!rc)
2392 		rc = bnx2_setup_phy(bp, bp->phy_port);
2393 
2394 	return rc;
2395 }
2396 
2397 static int
2398 bnx2_set_mac_loopback(struct bnx2 *bp)
2399 {
2400 	u32 mac_mode;
2401 
2402 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2403 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2404 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2405 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2406 	bp->link_up = 1;
2407 	return 0;
2408 }
2409 
2410 static int bnx2_test_link(struct bnx2 *);
2411 
2412 static int
2413 bnx2_set_phy_loopback(struct bnx2 *bp)
2414 {
2415 	u32 mac_mode;
2416 	int rc, i;
2417 
2418 	spin_lock_bh(&bp->phy_lock);
2419 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2420 			    BMCR_SPEED1000);
2421 	spin_unlock_bh(&bp->phy_lock);
2422 	if (rc)
2423 		return rc;
2424 
2425 	for (i = 0; i < 10; i++) {
2426 		if (bnx2_test_link(bp) == 0)
2427 			break;
2428 		msleep(100);
2429 	}
2430 
2431 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2432 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2433 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2434 		      BNX2_EMAC_MODE_25G_MODE);
2435 
2436 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2437 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2438 	bp->link_up = 1;
2439 	return 0;
2440 }
2441 
2442 static void
2443 bnx2_dump_mcp_state(struct bnx2 *bp)
2444 {
2445 	struct net_device *dev = bp->dev;
2446 	u32 mcp_p0, mcp_p1;
2447 
2448 	netdev_err(dev, "<--- start MCP states dump --->\n");
2449 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2450 		mcp_p0 = BNX2_MCP_STATE_P0;
2451 		mcp_p1 = BNX2_MCP_STATE_P1;
2452 	} else {
2453 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2454 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2455 	}
2456 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2457 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2458 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2459 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2460 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2461 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
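	/* The program counter is likely read twice so that two differing
	 * values show the MCP is still executing rather than wedged.
	 */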
2462 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2463 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2464 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2465 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2466 	netdev_err(dev, "DEBUG: shmem states:\n");
2467 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2468 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2469 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2470 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2471 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2472 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2473 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2474 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2475 	pr_cont(" condition[%08x]\n",
2476 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2477 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2478 	DP_SHMEM_LINE(bp, 0x3cc);
2479 	DP_SHMEM_LINE(bp, 0x3dc);
2480 	DP_SHMEM_LINE(bp, 0x3ec);
2481 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2482 	netdev_err(dev, "<--- end MCP states dump --->\n");
2483 }
2484 
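/* Synchronous driver-to-firmware mailbox: tag the message with a
 * rolling sequence number, post it to DRV_MB, and optionally poll
 * FW_MB until the firmware echoes the sequence back as an ACK.  On
 * timeout, the firmware is notified via BNX2_DRV_MSG_CODE_FW_TIMEOUT.
 */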
2485 static int
2486 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2487 {
2488 	int i;
2489 	u32 val;
2490 
2491 	bp->fw_wr_seq++;
2492 	msg_data |= bp->fw_wr_seq;
2493 
2494 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2495 
2496 	if (!ack)
2497 		return 0;
2498 
2499 	/* wait for an acknowledgement. */
2500 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2501 		msleep(10);
2502 
2503 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2504 
2505 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2506 			break;
2507 	}
2508 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2509 		return 0;
2510 
2511 	/* If we timed out, inform the firmware that this is the case. */
2512 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2513 		msg_data &= ~BNX2_DRV_MSG_CODE;
2514 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2515 
2516 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2517 		if (!silent) {
2518 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2519 			bnx2_dump_mcp_state(bp);
2520 		}
2521 
2522 		return -EBUSY;
2523 	}
2524 
2525 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2526 		return -EIO;
2527 
2528 	return 0;
2529 }
2530 
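/* The 5709 keeps connection context in host memory.  Zero each context
 * page and program its DMA address into the on-chip host page table,
 * polling until the chip acknowledges each page-table write.
 */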
2531 static int
2532 bnx2_init_5709_context(struct bnx2 *bp)
2533 {
2534 	int i, ret = 0;
2535 	u32 val;
2536 
2537 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2538 	val |= (BNX2_PAGE_BITS - 8) << 16;
2539 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2540 	for (i = 0; i < 10; i++) {
2541 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2542 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2543 			break;
2544 		udelay(2);
2545 	}
2546 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2547 		return -EBUSY;
2548 
2549 	for (i = 0; i < bp->ctx_pages; i++) {
2550 		int j;
2551 
2552 		if (bp->ctx_blk[i])
2553 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2554 		else
2555 			return -ENOMEM;
2556 
2557 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2558 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2559 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2560 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2561 			(u64) bp->ctx_blk_mapping[i] >> 32);
2562 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2563 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2564 		for (j = 0; j < 10; j++) {
2565 
2566 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2567 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2568 				break;
2569 			udelay(5);
2570 		}
2571 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2572 			ret = -EBUSY;
2573 			break;
2574 		}
2575 	}
2576 	return ret;
2577 }
2578 
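/* Older chips keep connection context on-chip: map each of the 96
 * contexts through the CTX_VIRT_ADDR/CTX_PAGE_TBL window and zero it.
 * 5706 A0 additionally remaps some virtual CIDs to different physical
 * CIDs.
 */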
2579 static void
2580 bnx2_init_context(struct bnx2 *bp)
2581 {
2582 	u32 vcid;
2583 
2584 	vcid = 96;
2585 	while (vcid) {
2586 		u32 vcid_addr, pcid_addr, offset;
2587 		int i;
2588 
2589 		vcid--;
2590 
2591 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2592 			u32 new_vcid;
2593 
2594 			vcid_addr = GET_PCID_ADDR(vcid);
2595 			if (vcid & 0x8) {
2596 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2597 			}
2598 			else {
2599 				new_vcid = vcid;
2600 			}
2601 			pcid_addr = GET_PCID_ADDR(new_vcid);
2602 		}
2603 		else {
2604 			vcid_addr = GET_CID_ADDR(vcid);
2605 			pcid_addr = vcid_addr;
2606 		}
2607 
2608 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2609 			vcid_addr += (i << PHY_CTX_SHIFT);
2610 			pcid_addr += (i << PHY_CTX_SHIFT);
2611 
2612 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2613 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2614 
2615 			/* Zero out the context. */
2616 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2617 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2618 		}
2619 	}
2620 }
2621 
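/* Hardware workaround: allocate every buffer in the RX mbuf pool,
 * remember the usable ones (bit 9 clear), and free only those back.
 * The bad memory blocks stay allocated so the chip never hands them
 * out again.
 */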
2622 static int
2623 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2624 {
2625 	u16 *good_mbuf;
2626 	u32 good_mbuf_cnt;
2627 	u32 val;
2628 	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2629 	if (!good_mbuf)
2630 		return -ENOMEM;
2631 		return -ENOMEM;
2632 
2633 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2634 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2635 
2636 	good_mbuf_cnt = 0;
2637 
2638 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2639 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2640 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2641 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2642 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2643 
2644 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2645 
2646 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2647 
2648 		/* The addresses with Bit 9 set are bad memory blocks. */
2649 		if (!(val & (1 << 9))) {
2650 			good_mbuf[good_mbuf_cnt] = (u16) val;
2651 			good_mbuf_cnt++;
2652 		}
2653 
2654 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2655 	}
2656 
2657 	/* Free the good ones back to the mbuf pool, thus discarding
2658 	 * all the bad ones. */
2659 	while (good_mbuf_cnt) {
2660 		good_mbuf_cnt--;
2661 
2662 		val = good_mbuf[good_mbuf_cnt];
2663 		val = (val << 9) | val | 1;
2664 
2665 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2666 	}
2667 	kfree(good_mbuf);
2668 	return 0;
2669 }
2670 
2671 static void
2672 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2673 {
2674 	u32 val;
2675 
2676 	val = (mac_addr[0] << 8) | mac_addr[1];
2677 
2678 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2679 
2680 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2681 		(mac_addr[4] << 8) | mac_addr[5];
2682 
2683 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2684 }
2685 
2686 static inline int
2687 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2688 {
2689 	dma_addr_t mapping;
2690 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2691 	struct bnx2_rx_bd *rxbd =
2692 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2693 	struct page *page = alloc_page(gfp);
2694 
2695 	if (!page)
2696 		return -ENOMEM;
2697 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2698 			       PCI_DMA_FROMDEVICE);
2699 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2700 		__free_page(page);
2701 		return -EIO;
2702 	}
2703 
2704 	rx_pg->page = page;
2705 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2706 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2707 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2708 	return 0;
2709 }
2710 
2711 static void
2712 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2713 {
2714 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2715 	struct page *page = rx_pg->page;
2716 
2717 	if (!page)
2718 		return;
2719 
2720 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2721 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2722 
2723 	__free_page(page);
2724 	rx_pg->page = NULL;
2725 }
2726 
2727 static inline int
2728 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2729 {
2730 	u8 *data;
2731 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2732 	dma_addr_t mapping;
2733 	struct bnx2_rx_bd *rxbd =
2734 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2735 
2736 	data = kmalloc(bp->rx_buf_size, gfp);
2737 	if (!data)
2738 		return -ENOMEM;
2739 
2740 	mapping = dma_map_single(&bp->pdev->dev,
2741 				 get_l2_fhdr(data),
2742 				 bp->rx_buf_use_size,
2743 				 PCI_DMA_FROMDEVICE);
2744 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2745 		kfree(data);
2746 		return -EIO;
2747 	}
2748 
2749 	rx_buf->data = data;
2750 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2751 
2752 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2753 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2754 
2755 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2756 
2757 	return 0;
2758 }
2759 
2760 static int
2761 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2762 {
2763 	struct status_block *sblk = bnapi->status_blk.msi;
2764 	u32 new_link_state, old_link_state;
2765 	int is_set = 1;
2766 
2767 	new_link_state = sblk->status_attn_bits & event;
2768 	old_link_state = sblk->status_attn_bits_ack & event;
2769 	if (new_link_state != old_link_state) {
2770 		if (new_link_state)
2771 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2772 		else
2773 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2774 	} else
2775 		is_set = 0;
2776 
2777 	return is_set;
2778 }
2779 
2780 static void
2781 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2782 {
2783 	spin_lock(&bp->phy_lock);
2784 
2785 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2786 		bnx2_set_link(bp);
2787 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2788 		bnx2_set_remote_link(bp);
2789 
2790 	spin_unlock(&bp->phy_lock);
2791 
2792 }
2793 
2794 static inline u16
2795 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2796 {
2797 	u16 cons;
2798 
2799 	/* Tell compiler that status block fields can change. */
2800 	barrier();
2801 	cons = *bnapi->hw_tx_cons_ptr;
2802 	barrier();
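	/* The last entry of each ring page is a link BD pointing to the
	 * next page, so step past that index when the hardware consumer
	 * lands on it.
	 */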
2803 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2804 		cons++;
2805 	return cons;
2806 }
2807 
2808 static int
2809 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2810 {
2811 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2812 	u16 hw_cons, sw_cons, sw_ring_cons;
2813 	int tx_pkt = 0, index;
2814 	unsigned int tx_bytes = 0;
2815 	struct netdev_queue *txq;
2816 
2817 	index = (bnapi - bp->bnx2_napi);
2818 	txq = netdev_get_tx_queue(bp->dev, index);
2819 
2820 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2821 	sw_cons = txr->tx_cons;
2822 
2823 	while (sw_cons != hw_cons) {
2824 		struct bnx2_sw_tx_bd *tx_buf;
2825 		struct sk_buff *skb;
2826 		int i, last;
2827 
2828 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2829 
2830 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2831 		skb = tx_buf->skb;
2832 
2833 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2834 		prefetch(&skb->end);
2835 
2836 		/* partial BD completions possible with TSO packets */
2837 		if (tx_buf->is_gso) {
2838 			u16 last_idx, last_ring_idx;
2839 
2840 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2841 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2842 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2843 				last_idx++;
2844 			}
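			/* Signed 16-bit arithmetic handles index wraparound:
			 * a positive difference means the last BD of this
			 * packet is still ahead of hw_cons, i.e. the packet
			 * has not fully completed yet.
			 */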
2845 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2846 				break;
2847 			}
2848 		}
2849 
2850 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2851 			skb_headlen(skb), PCI_DMA_TODEVICE);
2852 
2853 		tx_buf->skb = NULL;
2854 		last = tx_buf->nr_frags;
2855 
2856 		for (i = 0; i < last; i++) {
2857 			struct bnx2_sw_tx_bd *tx_buf;
2858 
2859 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2860 
2861 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2862 			dma_unmap_page(&bp->pdev->dev,
2863 				dma_unmap_addr(tx_buf, mapping),
2864 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2865 				PCI_DMA_TODEVICE);
2866 		}
2867 
2868 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2869 
2870 		tx_bytes += skb->len;
2871 		dev_kfree_skb(skb);
2872 		tx_pkt++;
2873 		if (tx_pkt == budget)
2874 			break;
2875 
2876 		if (hw_cons == sw_cons)
2877 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2878 	}
2879 
2880 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2881 	txr->hw_tx_cons = hw_cons;
2882 	txr->tx_cons = sw_cons;
2883 
2884 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2885 	 * before checking for netif_tx_queue_stopped().  Without the
2886 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2887 	 * will miss it and cause the queue to be stopped forever.
2888 	 */
2889 	smp_mb();
2890 
2891 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2892 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2893 		__netif_tx_lock(txq, smp_processor_id());
2894 		if ((netif_tx_queue_stopped(txq)) &&
2895 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2896 			netif_tx_wake_queue(txq);
2897 		__netif_tx_unlock(txq);
2898 	}
2899 
2900 	return tx_pkt;
2901 }
2902 
2903 static void
2904 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2905 			struct sk_buff *skb, int count)
2906 {
2907 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2908 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2909 	int i;
2910 	u16 hw_prod, prod;
2911 	u16 cons = rxr->rx_pg_cons;
2912 
2913 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2914 
2915 	/* The caller was unable to allocate a new page to replace the
2916 	 * last one in the frags array, so we need to recycle that page
2917 	 * and then free the skb.
2918 	 */
2919 	if (skb) {
2920 		struct page *page;
2921 		struct skb_shared_info *shinfo;
2922 
2923 		shinfo = skb_shinfo(skb);
2924 		shinfo->nr_frags--;
2925 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2926 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2927 
2928 		cons_rx_pg->page = page;
2929 		dev_kfree_skb(skb);
2930 	}
2931 
2932 	hw_prod = rxr->rx_pg_prod;
2933 
2934 	for (i = 0; i < count; i++) {
2935 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2936 
2937 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2938 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2939 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2940 						[BNX2_RX_IDX(cons)];
2941 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2942 						[BNX2_RX_IDX(prod)];
2943 
2944 		if (prod != cons) {
2945 			prod_rx_pg->page = cons_rx_pg->page;
2946 			cons_rx_pg->page = NULL;
2947 			dma_unmap_addr_set(prod_rx_pg, mapping,
2948 				dma_unmap_addr(cons_rx_pg, mapping));
2949 
2950 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2951 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2952 
2953 		}
2954 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2955 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2956 	}
2957 	rxr->rx_pg_prod = hw_prod;
2958 	rxr->rx_pg_cons = cons;
2959 }
2960 
2961 static inline void
2962 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2963 		   u8 *data, u16 cons, u16 prod)
2964 {
2965 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2966 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2967 
2968 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2969 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2970 
2971 	dma_sync_single_for_device(&bp->pdev->dev,
2972 		dma_unmap_addr(cons_rx_buf, mapping),
2973 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2974 
2975 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2976 
2977 	prod_rx_buf->data = data;
2978 
2979 	if (cons == prod)
2980 		return;
2981 
2982 	dma_unmap_addr_set(prod_rx_buf, mapping,
2983 			dma_unmap_addr(cons_rx_buf, mapping));
2984 
2985 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2986 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2987 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2988 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2989 }
2990 
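/* Build an skb around the received data buffer.  For split-header or
 * jumbo frames, the rest of the frame is attached as page frags taken
 * from the rx page ring, and the trailing 4-byte FCS is trimmed from
 * the last fragment.
 */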
2991 static struct sk_buff *
2992 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2993 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2994 	    u32 ring_idx)
2995 {
2996 	int err;
2997 	u16 prod = ring_idx & 0xffff;
2998 	struct sk_buff *skb;
2999 
3000 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3001 	if (unlikely(err)) {
3002 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3003 error:
3004 		if (hdr_len) {
3005 			unsigned int raw_len = len + 4;
3006 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3007 
3008 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3009 		}
3010 		return NULL;
3011 	}
3012 
3013 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3014 			 PCI_DMA_FROMDEVICE);
3015 	skb = build_skb(data, 0);
3016 	if (!skb) {
3017 		kfree(data);
3018 		goto error;
3019 	}
3020 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3021 	if (hdr_len == 0) {
3022 		skb_put(skb, len);
3023 		return skb;
3024 	} else {
3025 		unsigned int i, frag_len, frag_size, pages;
3026 		struct bnx2_sw_pg *rx_pg;
3027 		u16 pg_cons = rxr->rx_pg_cons;
3028 		u16 pg_prod = rxr->rx_pg_prod;
3029 
3030 		frag_size = len + 4 - hdr_len;
3031 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3032 		skb_put(skb, hdr_len);
3033 
3034 		for (i = 0; i < pages; i++) {
3035 			dma_addr_t mapping_old;
3036 
3037 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3038 			if (unlikely(frag_len <= 4)) {
3039 				unsigned int tail = 4 - frag_len;
3040 
3041 				rxr->rx_pg_cons = pg_cons;
3042 				rxr->rx_pg_prod = pg_prod;
3043 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3044 							pages - i);
3045 				skb->len -= tail;
3046 				if (i == 0) {
3047 					skb->tail -= tail;
3048 				} else {
3049 					skb_frag_t *frag =
3050 						&skb_shinfo(skb)->frags[i - 1];
3051 					skb_frag_size_sub(frag, tail);
3052 					skb->data_len -= tail;
3053 				}
3054 				return skb;
3055 			}
3056 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3057 
3058 			/* Don't unmap yet.  If we're unable to allocate a new
3059 			 * page, we need to recycle the page and the DMA addr.
3060 			 */
3061 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3062 			if (i == pages - 1)
3063 				frag_len -= 4;
3064 
3065 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3066 			rx_pg->page = NULL;
3067 
3068 			err = bnx2_alloc_rx_page(bp, rxr,
3069 						 BNX2_RX_PG_RING_IDX(pg_prod),
3070 						 GFP_ATOMIC);
3071 			if (unlikely(err)) {
3072 				rxr->rx_pg_cons = pg_cons;
3073 				rxr->rx_pg_prod = pg_prod;
3074 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3075 							pages - i);
3076 				return NULL;
3077 			}
3078 
3079 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3080 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3081 
3082 			frag_size -= frag_len;
3083 			skb->data_len += frag_len;
3084 			skb->truesize += PAGE_SIZE;
3085 			skb->len += frag_len;
3086 
3087 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3088 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3089 		}
3090 		rxr->rx_pg_prod = pg_prod;
3091 		rxr->rx_pg_cons = pg_cons;
3092 	}
3093 	return skb;
3094 }
3095 
3096 static inline u16
3097 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3098 {
3099 	u16 cons;
3100 
3101 	/* Tell compiler that status block fields can change. */
3102 	barrier();
3103 	cons = *bnapi->hw_rx_cons_ptr;
3104 	barrier();
3105 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3106 		cons++;
3107 	return cons;
3108 }
3109 
3110 static int
3111 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3112 {
3113 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3114 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3115 	struct l2_fhdr *rx_hdr;
3116 	int rx_pkt = 0, pg_ring_used = 0;
3117 
3118 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3119 	sw_cons = rxr->rx_cons;
3120 	sw_prod = rxr->rx_prod;
3121 
3122 	/* Memory barrier necessary as speculative reads of the rx
3123 	 * buffer can be ahead of the index in the status block
3124 	 */
3125 	rmb();
3126 	while (sw_cons != hw_cons) {
3127 		unsigned int len, hdr_len;
3128 		u32 status;
3129 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3130 		struct sk_buff *skb;
3131 		dma_addr_t dma_addr;
3132 		u8 *data;
3133 		u16 next_ring_idx;
3134 
3135 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3136 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3137 
3138 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3139 		data = rx_buf->data;
3140 		rx_buf->data = NULL;
3141 
3142 		rx_hdr = get_l2_fhdr(data);
3143 		prefetch(rx_hdr);
3144 
3145 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3146 
3147 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3148 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3149 			PCI_DMA_FROMDEVICE);
3150 
3151 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3152 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3153 		prefetch(get_l2_fhdr(next_rx_buf->data));
3154 
3155 		len = rx_hdr->l2_fhdr_pkt_len;
3156 		status = rx_hdr->l2_fhdr_status;
3157 
3158 		hdr_len = 0;
3159 		if (status & L2_FHDR_STATUS_SPLIT) {
3160 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3161 			pg_ring_used = 1;
3162 		} else if (len > bp->rx_jumbo_thresh) {
3163 			hdr_len = bp->rx_jumbo_thresh;
3164 			pg_ring_used = 1;
3165 		}
3166 
3167 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3168 				       L2_FHDR_ERRORS_PHY_DECODE |
3169 				       L2_FHDR_ERRORS_ALIGNMENT |
3170 				       L2_FHDR_ERRORS_TOO_SHORT |
3171 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3172 
3173 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3174 					  sw_ring_prod);
3175 			if (pg_ring_used) {
3176 				int pages;
3177 
3178 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3179 
3180 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3181 			}
3182 			goto next_rx;
3183 		}
3184 
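		/* The hardware frame length includes the 4-byte FCS; trim it. */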
3185 		len -= 4;
3186 
3187 		if (len <= bp->rx_copy_thresh) {
3188 			skb = netdev_alloc_skb(bp->dev, len + 6);
3189 			if (skb == NULL) {
3190 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3191 						  sw_ring_prod);
3192 				goto next_rx;
3193 			}
3194 
3195 			/* aligned copy */
3196 			memcpy(skb->data,
3197 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3198 			       len + 6);
3199 			skb_reserve(skb, 6);
3200 			skb_put(skb, len);
3201 
3202 			bnx2_reuse_rx_data(bp, rxr, data,
3203 				sw_ring_cons, sw_ring_prod);
3204 
3205 		} else {
3206 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3207 					  (sw_ring_cons << 16) | sw_ring_prod);
3208 			if (!skb)
3209 				goto next_rx;
3210 		}
3211 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3212 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3213 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3214 
3215 		skb->protocol = eth_type_trans(skb, bp->dev);
3216 
3217 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3218 			(ntohs(skb->protocol) != ETH_P_8021Q)) {
3219 
3220 			dev_kfree_skb(skb);
3221 			goto next_rx;
3222 
3223 		}
3224 
3225 		skb_checksum_none_assert(skb);
3226 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3227 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3228 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3229 
3230 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3231 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3232 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3233 		}
3234 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3235 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3236 		     L2_FHDR_STATUS_USE_RXHASH))
3237 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3238 				     PKT_HASH_TYPE_L3);
3239 
3240 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3241 		napi_gro_receive(&bnapi->napi, skb);
3242 		rx_pkt++;
3243 
3244 next_rx:
3245 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3246 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3247 
3248 		if (rx_pkt == budget)
3249 			break;
3250 
3251 		/* Refresh hw_cons to see if there is new work */
3252 		if (sw_cons == hw_cons) {
3253 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3254 			rmb();
3255 		}
3256 	}
3257 	rxr->rx_cons = sw_cons;
3258 	rxr->rx_prod = sw_prod;
3259 
3260 	if (pg_ring_used)
3261 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3262 
3263 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3264 
3265 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3266 
3267 	mmiowb();
3268 
3269 	return rx_pkt;
3270 
3271 }
3272 
3273 /* MSI ISR - The only difference between this and the INTx ISR
3274  * is that the MSI interrupt is always serviced.
3275  */
3276 static irqreturn_t
3277 bnx2_msi(int irq, void *dev_instance)
3278 {
3279 	struct bnx2_napi *bnapi = dev_instance;
3280 	struct bnx2 *bp = bnapi->bp;
3281 
3282 	prefetch(bnapi->status_blk.msi);
3283 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3284 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3285 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3286 
3287 	/* Return here if interrupt is disabled. */
3288 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3289 		return IRQ_HANDLED;
3290 
3291 	napi_schedule(&bnapi->napi);
3292 
3293 	return IRQ_HANDLED;
3294 }
3295 
3296 static irqreturn_t
3297 bnx2_msi_1shot(int irq, void *dev_instance)
3298 {
3299 	struct bnx2_napi *bnapi = dev_instance;
3300 	struct bnx2 *bp = bnapi->bp;
3301 
3302 	prefetch(bnapi->status_blk.msi);
3303 
3304 	/* Return here if interrupt is disabled. */
3305 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3306 		return IRQ_HANDLED;
3307 
3308 	napi_schedule(&bnapi->napi);
3309 
3310 	return IRQ_HANDLED;
3311 }
3312 
3313 static irqreturn_t
3314 bnx2_interrupt(int irq, void *dev_instance)
3315 {
3316 	struct bnx2_napi *bnapi = dev_instance;
3317 	struct bnx2 *bp = bnapi->bp;
3318 	struct status_block *sblk = bnapi->status_blk.msi;
3319 
3320 	/* When using INTx, it is possible for the interrupt to arrive
3321 	 * at the CPU before the status block write that preceded it
3322 	 * has posted. Reading a register will flush the status block.
3323 	 * When using MSI, the MSI message will always complete after
3324 	 * the status block write.
3325 	 */
3326 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3327 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3328 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3329 		return IRQ_NONE;
3330 
3331 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3332 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3333 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3334 
3335 	/* Read back to deassert IRQ immediately to avoid too many
3336 	 * spurious interrupts.
3337 	 */
3338 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3339 
3340 	/* Return here if interrupt is shared and is disabled. */
3341 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3342 		return IRQ_HANDLED;
3343 
3344 	if (napi_schedule_prep(&bnapi->napi)) {
3345 		bnapi->last_status_idx = sblk->status_idx;
3346 		__napi_schedule(&bnapi->napi);
3347 	}
3348 
3349 	return IRQ_HANDLED;
3350 }
3351 
3352 static inline int
3353 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3354 {
3355 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3356 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3357 
3358 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3359 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3360 		return 1;
3361 	return 0;
3362 }
3363 
3364 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3365 				 STATUS_ATTN_BITS_TIMER_ABORT)
3366 
3367 static inline int
3368 bnx2_has_work(struct bnx2_napi *bnapi)
3369 {
3370 	struct status_block *sblk = bnapi->status_blk.msi;
3371 
3372 	if (bnx2_has_fast_work(bnapi))
3373 		return 1;
3374 
3375 #ifdef BCM_CNIC
3376 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3377 		return 1;
3378 #endif
3379 
3380 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3381 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3382 		return 1;
3383 
3384 	return 0;
3385 }
3386 
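/* MSI workaround: if work is pending but the status index has not
 * advanced since the previous timer check, assume an MSI was lost and
 * toggle the MSI enable bit, then invoke the handler directly.
 */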
3387 static void
3388 bnx2_chk_missed_msi(struct bnx2 *bp)
3389 {
3390 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3391 	u32 msi_ctrl;
3392 
3393 	if (bnx2_has_work(bnapi)) {
3394 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3395 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3396 			return;
3397 
3398 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3399 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3400 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3401 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3402 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3403 		}
3404 	}
3405 
3406 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3407 }
3408 
3409 #ifdef BCM_CNIC
3410 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3411 {
3412 	struct cnic_ops *c_ops;
3413 
3414 	if (!bnapi->cnic_present)
3415 		return;
3416 
3417 	rcu_read_lock();
3418 	c_ops = rcu_dereference(bp->cnic_ops);
3419 	if (c_ops)
3420 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3421 						      bnapi->status_blk.msi);
3422 	rcu_read_unlock();
3423 }
3424 #endif
3425 
3426 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3427 {
3428 	struct status_block *sblk = bnapi->status_blk.msi;
3429 	u32 status_attn_bits = sblk->status_attn_bits;
3430 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3431 
3432 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3433 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3434 
3435 		bnx2_phy_int(bp, bnapi);
3436 
3437 		/* This is needed to take care of transient status
3438 		 * during link changes.
3439 		 */
3440 		BNX2_WR(bp, BNX2_HC_COMMAND,
3441 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3442 		BNX2_RD(bp, BNX2_HC_COMMAND);
3443 	}
3444 }
3445 
3446 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3447 			  int work_done, int budget)
3448 {
3449 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3450 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3451 
3452 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3453 		bnx2_tx_int(bp, bnapi, 0);
3454 
3455 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3456 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3457 
3458 	return work_done;
3459 }
3460 
3461 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3462 {
3463 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3464 	struct bnx2 *bp = bnapi->bp;
3465 	int work_done = 0;
3466 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3467 
3468 	while (1) {
3469 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3470 		if (unlikely(work_done >= budget))
3471 			break;
3472 
3473 		bnapi->last_status_idx = sblk->status_idx;
3474 		/* status idx must be read before checking for more work. */
3475 		rmb();
3476 		if (likely(!bnx2_has_fast_work(bnapi))) {
3477 
3478 			napi_complete(napi);
3479 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3480 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3481 				bnapi->last_status_idx);
3482 			break;
3483 		}
3484 	}
3485 	return work_done;
3486 }
3487 
3488 static int bnx2_poll(struct napi_struct *napi, int budget)
3489 {
3490 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3491 	struct bnx2 *bp = bnapi->bp;
3492 	int work_done = 0;
3493 	struct status_block *sblk = bnapi->status_blk.msi;
3494 
3495 	while (1) {
3496 		bnx2_poll_link(bp, bnapi);
3497 
3498 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3499 
3500 #ifdef BCM_CNIC
3501 		bnx2_poll_cnic(bp, bnapi);
3502 #endif
3503 
3504 		/* bnapi->last_status_idx is used below to tell the hw how
3505 		 * much work has been processed, so we must read it before
3506 		 * checking for more work.
3507 		 */
3508 		bnapi->last_status_idx = sblk->status_idx;
3509 
3510 		if (unlikely(work_done >= budget))
3511 			break;
3512 
3513 		rmb();
3514 		if (likely(!bnx2_has_work(bnapi))) {
3515 			napi_complete(napi);
3516 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3517 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3518 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3519 					bnapi->last_status_idx);
3520 				break;
3521 			}
3522 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3523 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3524 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3525 				bnapi->last_status_idx);
3526 
3527 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3528 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3529 				bnapi->last_status_idx);
3530 			break;
3531 		}
3532 	}
3533 
3534 	return work_done;
3535 }
3536 
3537 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3538  * from set_multicast.
3539  */
3540 static void
3541 bnx2_set_rx_mode(struct net_device *dev)
3542 {
3543 	struct bnx2 *bp = netdev_priv(dev);
3544 	u32 rx_mode, sort_mode;
3545 	struct netdev_hw_addr *ha;
3546 	int i;
3547 
3548 	if (!netif_running(dev))
3549 		return;
3550 
3551 	spin_lock_bh(&bp->phy_lock);
3552 
3553 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3554 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3555 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3556 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3557 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3558 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3559 	if (dev->flags & IFF_PROMISC) {
3560 		/* Promiscuous mode. */
3561 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3562 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3563 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3564 	}
3565 	else if (dev->flags & IFF_ALLMULTI) {
3566 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3567 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3568 				0xffffffff);
3569 		}
3570 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3571 	}
3572 	else {
3573 		/* Accept one or more multicast(s). */
3574 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3575 		u32 regidx;
3576 		u32 bit;
3577 		u32 crc;
3578 
3579 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3580 
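		/* Hash each multicast address with CRC32-LE; the low 8 bits
		 * of the CRC pick one bit in the 256-bit hash filter held in
		 * eight 32-bit registers.
		 */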
3581 		netdev_for_each_mc_addr(ha, dev) {
3582 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3583 			bit = crc & 0xff;
3584 			regidx = (bit & 0xe0) >> 5;
3585 			bit &= 0x1f;
3586 			mc_filter[regidx] |= (1 << bit);
3587 		}
3588 
3589 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3590 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3591 				mc_filter[i]);
3592 		}
3593 
3594 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3595 	}
3596 
3597 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3598 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3599 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3600 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3601 	} else if (!(dev->flags & IFF_PROMISC)) {
3602 		/* Add all entries to the match filter list */
3603 		i = 0;
3604 		netdev_for_each_uc_addr(ha, dev) {
3605 			bnx2_set_mac_addr(bp, ha->addr,
3606 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3607 			sort_mode |= (1 <<
3608 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3609 			i++;
3610 		}
3611 
3612 	}
3613 
3614 	if (rx_mode != bp->rx_mode) {
3615 		bp->rx_mode = rx_mode;
3616 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3617 	}
3618 
3619 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3620 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3621 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3622 
3623 	spin_unlock_bh(&bp->phy_lock);
3624 }
3625 
3626 static int
3627 check_fw_section(const struct firmware *fw,
3628 		 const struct bnx2_fw_file_section *section,
3629 		 u32 alignment, bool non_empty)
3630 {
3631 	u32 offset = be32_to_cpu(section->offset);
3632 	u32 len = be32_to_cpu(section->len);
3633 
3634 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3635 		return -EINVAL;
3636 	if ((non_empty && len == 0) || len > fw->size - offset ||
3637 	    len & (alignment - 1))
3638 		return -EINVAL;
3639 	return 0;
3640 }
3641 
3642 static int
3643 check_mips_fw_entry(const struct firmware *fw,
3644 		    const struct bnx2_mips_fw_file_entry *entry)
3645 {
3646 	if (check_fw_section(fw, &entry->text, 4, true) ||
3647 	    check_fw_section(fw, &entry->data, 4, false) ||
3648 	    check_fw_section(fw, &entry->rodata, 4, false))
3649 		return -EINVAL;
3650 	return 0;
3651 }
3652 
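/* The MIPS and RV2P images are requested together, so a non-NULL
 * rv2p_firmware implies both are held.
 */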
3653 static void bnx2_release_firmware(struct bnx2 *bp)
3654 {
3655 	if (bp->rv2p_firmware) {
3656 		release_firmware(bp->mips_firmware);
3657 		release_firmware(bp->rv2p_firmware);
3658 		bp->rv2p_firmware = NULL;
3659 	}
3660 }
3661 
3662 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3663 {
3664 	const char *mips_fw_file, *rv2p_fw_file;
3665 	const struct bnx2_mips_fw_file *mips_fw;
3666 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3667 	int rc;
3668 
3669 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3670 		mips_fw_file = FW_MIPS_FILE_09;
3671 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3672 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3673 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3674 		else
3675 			rv2p_fw_file = FW_RV2P_FILE_09;
3676 	} else {
3677 		mips_fw_file = FW_MIPS_FILE_06;
3678 		rv2p_fw_file = FW_RV2P_FILE_06;
3679 	}
3680 
3681 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3682 	if (rc) {
3683 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3684 		goto out;
3685 	}
3686 
3687 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3688 	if (rc) {
3689 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3690 		goto err_release_mips_firmware;
3691 	}
3692 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3693 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3694 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3695 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3696 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3697 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3698 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3699 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3700 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3701 		rc = -EINVAL;
3702 		goto err_release_firmware;
3703 	}
3704 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3705 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3706 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3707 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3708 		rc = -EINVAL;
3709 		goto err_release_firmware;
3710 	}
3711 out:
3712 	return rc;
3713 
3714 err_release_firmware:
3715 	release_firmware(bp->rv2p_firmware);
3716 	bp->rv2p_firmware = NULL;
3717 err_release_mips_firmware:
3718 	release_firmware(bp->mips_firmware);
3719 	goto out;
3720 }
3721 
3722 static int bnx2_request_firmware(struct bnx2 *bp)
3723 {
3724 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3725 }
3726 
3727 static u32
3728 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3729 {
3730 	switch (idx) {
3731 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3732 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3733 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3734 		break;
3735 	}
3736 	return rv2p_code;
3737 }
3738 
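/* Load an RV2P processor image: instructions are written two 32-bit
 * words at a time through INSTR_HIGH/INSTR_LOW and committed with an
 * address/command write.  Fixup locations listed in the firmware file
 * are then re-patched, e.g. to encode the host BD page size.
 */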
3739 static int
3740 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3741 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3742 {
3743 	u32 rv2p_code_len, file_offset;
3744 	__be32 *rv2p_code;
3745 	int i;
3746 	u32 val, cmd, addr;
3747 
3748 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3749 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3750 
3751 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3752 
3753 	if (rv2p_proc == RV2P_PROC1) {
3754 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3755 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3756 	} else {
3757 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3758 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3759 	}
3760 
3761 	for (i = 0; i < rv2p_code_len; i += 8) {
3762 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3763 		rv2p_code++;
3764 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3765 		rv2p_code++;
3766 
3767 		val = (i / 8) | cmd;
3768 		BNX2_WR(bp, addr, val);
3769 	}
3770 
3771 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3772 	for (i = 0; i < 8; i++) {
3773 		u32 loc, code;
3774 
3775 		loc = be32_to_cpu(fw_entry->fixup[i]);
3776 		if (loc && ((loc * 4) < rv2p_code_len)) {
3777 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3778 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3779 			code = be32_to_cpu(*(rv2p_code + loc));
3780 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3781 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3782 
3783 			val = (loc / 2) | cmd;
3784 			BNX2_WR(bp, addr, val);
3785 		}
3786 	}
3787 
3788 	/* Reset the processor; un-stall is done later. */
3789 	if (rv2p_proc == RV2P_PROC1) {
3790 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3791 	}
3792 	else {
3793 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3794 	}
3795 
3796 	return 0;
3797 }
3798 
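/* Load firmware into one on-chip MIPS processor: halt the CPU, copy
 * the text, data and read-only sections into its scratchpad memory,
 * set the program counter to the entry point, and restart the CPU.
 */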
3799 static int
3800 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3801 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3802 {
3803 	u32 addr, len, file_offset;
3804 	__be32 *data;
3805 	u32 offset;
3806 	u32 val;
3807 
3808 	/* Halt the CPU. */
3809 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3810 	val |= cpu_reg->mode_value_halt;
3811 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3812 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813 
3814 	/* Load the Text area. */
3815 	addr = be32_to_cpu(fw_entry->text.addr);
3816 	len = be32_to_cpu(fw_entry->text.len);
3817 	file_offset = be32_to_cpu(fw_entry->text.offset);
3818 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3819 
3820 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3821 	if (len) {
3822 		int j;
3823 
3824 		for (j = 0; j < (len / 4); j++, offset += 4)
3825 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826 	}
3827 
3828 	/* Load the Data area. */
3829 	addr = be32_to_cpu(fw_entry->data.addr);
3830 	len = be32_to_cpu(fw_entry->data.len);
3831 	file_offset = be32_to_cpu(fw_entry->data.offset);
3832 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833 
3834 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3835 	if (len) {
3836 		int j;
3837 
3838 		for (j = 0; j < (len / 4); j++, offset += 4)
3839 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3840 	}
3841 
3842 	/* Load the Read-Only area. */
3843 	addr = be32_to_cpu(fw_entry->rodata.addr);
3844 	len = be32_to_cpu(fw_entry->rodata.len);
3845 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3846 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3847 
3848 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3849 	if (len) {
3850 		int j;
3851 
3852 		for (j = 0; j < (len / 4); j++, offset += 4)
3853 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3854 	}
3855 
3856 	/* Clear the pre-fetch instruction. */
3857 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3858 
3859 	val = be32_to_cpu(fw_entry->start_addr);
3860 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861 
3862 	/* Start the CPU. */
3863 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3864 	val &= ~cpu_reg->mode_value_halt;
3865 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3866 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3867 
3868 	return 0;
3869 }
3870 
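/* Download firmware to both RV2P processors and to the five MIPS
 * processors (RX, TX, TX patch-up, completion and command).
 */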
3871 static int
3872 bnx2_init_cpus(struct bnx2 *bp)
3873 {
3874 	const struct bnx2_mips_fw_file *mips_fw =
3875 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3876 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3877 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3878 	int rc;
3879 
3880 	/* Initialize the RV2P processor. */
3881 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3882 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3883 
3884 	/* Initialize the RX Processor. */
3885 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3886 	if (rc)
3887 		goto init_cpu_err;
3888 
3889 	/* Initialize the TX Processor. */
3890 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3891 	if (rc)
3892 		goto init_cpu_err;
3893 
3894 	/* Initialize the TX Patch-up Processor. */
3895 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3896 	if (rc)
3897 		goto init_cpu_err;
3898 
3899 	/* Initialize the Completion Processor. */
3900 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3901 	if (rc)
3902 		goto init_cpu_err;
3903 
3904 	/* Initialize the Command Processor. */
3905 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3906 
3907 init_cpu_err:
3908 	return rc;
3909 }
3910 
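/* Prepare the device for Wake-on-LAN before suspend: renegotiate a
 * 10/100 link on copper ports, program the MAC to receive magic and
 * ACPI packets plus all multicast frames, and report the chosen
 * suspend mode to the firmware.
 */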
3911 static void
3912 bnx2_setup_wol(struct bnx2 *bp)
3913 {
3914 	int i;
3915 	u32 val, wol_msg;
3916 
3917 	if (bp->wol) {
3918 		u32 advertising;
3919 		u8 autoneg;
3920 
3921 		autoneg = bp->autoneg;
3922 		advertising = bp->advertising;
3923 
3924 		if (bp->phy_port == PORT_TP) {
3925 			bp->autoneg = AUTONEG_SPEED;
3926 			bp->advertising = ADVERTISED_10baseT_Half |
3927 				ADVERTISED_10baseT_Full |
3928 				ADVERTISED_100baseT_Half |
3929 				ADVERTISED_100baseT_Full |
3930 				ADVERTISED_Autoneg;
3931 		}
3932 
3933 		spin_lock_bh(&bp->phy_lock);
3934 		bnx2_setup_phy(bp, bp->phy_port);
3935 		spin_unlock_bh(&bp->phy_lock);
3936 
3937 		bp->autoneg = autoneg;
3938 		bp->advertising = advertising;
3939 
3940 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3941 
3942 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3943 
3944 		/* Enable port mode. */
3945 		val &= ~BNX2_EMAC_MODE_PORT;
3946 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3947 		       BNX2_EMAC_MODE_ACPI_RCVD |
3948 		       BNX2_EMAC_MODE_MPKT;
3949 		if (bp->phy_port == PORT_TP) {
3950 			val |= BNX2_EMAC_MODE_PORT_MII;
3951 		} else {
3952 			val |= BNX2_EMAC_MODE_PORT_GMII;
3953 			if (bp->line_speed == SPEED_2500)
3954 				val |= BNX2_EMAC_MODE_25G_MODE;
3955 		}
3956 
3957 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3958 
3959 		/* receive all multicast */
3960 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3961 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3962 				0xffffffff);
3963 		}
3964 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3965 
3966 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3967 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3968 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3969 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3970 
3971 		/* Need to enable EMAC and RPM for WOL. */
3972 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3973 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3974 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3975 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3976 
3977 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3978 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3979 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3980 
3981 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3982 	} else {
3983 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3984 	}
3985 
3986 	if (!(bp->flags & BNX2_FLAG_NO_WOL))
3987 		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
3989 }
3990 
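/* Move the device between power states.  Only D0 and D3hot are
 * supported.  Entering D3hot sets up WOL first; on 5706 A0/A1 the
 * chip is left in D0 unless WOL is enabled.
 */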
3991 static int
3992 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3993 {
3994 	switch (state) {
3995 	case PCI_D0: {
3996 		u32 val;
3997 
3998 		pci_enable_wake(bp->pdev, PCI_D0, false);
3999 		pci_set_power_state(bp->pdev, PCI_D0);
4000 
4001 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4002 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4003 		val &= ~BNX2_EMAC_MODE_MPKT;
4004 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4005 
4006 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4007 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4008 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4009 		break;
4010 	}
4011 	case PCI_D3hot: {
4012 		bnx2_setup_wol(bp);
4013 		pci_wake_from_d3(bp->pdev, bp->wol);
4014 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4015 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4016 
4017 			if (bp->wol)
4018 				pci_set_power_state(bp->pdev, PCI_D3hot);
4019 		} else {
4020 			pci_set_power_state(bp->pdev, PCI_D3hot);
4021 		}
4022 
4023 		/* No more memory access after this point until
4024 		 * device is brought back to D0.
4025 		 */
4026 		break;
4027 	}
4028 	default:
4029 		return -EINVAL;
4030 	}
4031 	return 0;
4032 }
4033 
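/* Arbitrate for the shared NVRAM interface by setting our SW_ARB
 * request bit and polling for the grant, giving up after
 * NVRAM_TIMEOUT_COUNT polls.
 */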
4034 static int
4035 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4036 {
4037 	u32 val;
4038 	int j;
4039 
4040 	/* Request access to the flash interface. */
4041 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4042 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4043 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4044 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4045 			break;
4046 
4047 		udelay(5);
4048 	}
4049 
4050 	if (j >= NVRAM_TIMEOUT_COUNT)
4051 		return -EBUSY;
4052 
4053 	return 0;
4054 }
4055 
4056 static int
4057 bnx2_release_nvram_lock(struct bnx2 *bp)
4058 {
4059 	int j;
4060 	u32 val;
4061 
4062 	/* Relinquish nvram interface. */
4063 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4064 
4065 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4066 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4067 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4068 			break;
4069 
4070 		udelay(5);
4071 	}
4072 
4073 	if (j >= NVRAM_TIMEOUT_COUNT)
4074 		return -EBUSY;
4075 
4076 	return 0;
4077 }
4078 
4079 
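/* Lift NVRAM write protection.  Flash parts flagged BNX2_NV_WREN also
 * need an explicit WREN command, which must complete before writing.
 */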
4080 static int
4081 bnx2_enable_nvram_write(struct bnx2 *bp)
4082 {
4083 	u32 val;
4084 
4085 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4086 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4087 
4088 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4089 		int j;
4090 
4091 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4092 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4093 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4094 
4095 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4096 			udelay(5);
4097 
4098 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4099 			if (val & BNX2_NVM_COMMAND_DONE)
4100 				break;
4101 		}
4102 
4103 		if (j >= NVRAM_TIMEOUT_COUNT)
4104 			return -EBUSY;
4105 	}
4106 	return 0;
4107 }
4108 
4109 static void
4110 bnx2_disable_nvram_write(struct bnx2 *bp)
4111 {
4112 	u32 val;
4113 
4114 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4115 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4116 }
4117 
4118 
4119 static void
4120 bnx2_enable_nvram_access(struct bnx2 *bp)
4121 {
4122 	u32 val;
4123 
4124 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4125 	/* Enable both bits, even on read. */
4126 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4127 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4128 }
4129 
4130 static void
4131 bnx2_disable_nvram_access(struct bnx2 *bp)
4132 {
4133 	u32 val;
4134 
4135 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4136 	/* Disable both bits, even after read. */
4137 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4138 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4139 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4140 }
4141 
4142 static int
4143 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4144 {
4145 	u32 cmd;
4146 	int j;
4147 
4148 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4149 		/* Buffered flash, no erase needed */
4150 		return 0;
4151 
4152 	/* Build an erase command */
4153 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4154 	      BNX2_NVM_COMMAND_DOIT;
4155 
4156 	/* Need to clear DONE bit separately. */
4157 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4158 
4159 	/* Address of the NVRAM page to erase. */
4160 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4161 
4162 	/* Issue an erase command. */
4163 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4164 
4165 	/* Wait for completion. */
4166 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4167 		u32 val;
4168 
4169 		udelay(5);
4170 
4171 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4172 		if (val & BNX2_NVM_COMMAND_DONE)
4173 			break;
4174 	}
4175 
4176 	if (j >= NVRAM_TIMEOUT_COUNT)
4177 		return -EBUSY;
4178 
4179 	return 0;
4180 }
4181 
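/* Read one 32-bit word of NVRAM at @offset into @ret_val.  The value
 * is stored in big-endian (flash) byte order.
 */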
4182 static int
4183 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4184 {
4185 	u32 cmd;
4186 	int j;
4187 
4188 	/* Build the command word. */
4189 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4190 
4191 	/* Calculate the offset within a buffered flash; not needed for the 5709. */
4192 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4193 		offset = ((offset / bp->flash_info->page_size) <<
4194 			   bp->flash_info->page_bits) +
4195 			  (offset % bp->flash_info->page_size);
4196 	}
4197 
4198 	/* Need to clear DONE bit separately. */
4199 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4200 
4201 	/* Address of the NVRAM to read from. */
4202 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4203 
4204 	/* Issue a read command. */
4205 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4206 
4207 	/* Wait for completion. */
4208 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4209 		u32 val;
4210 
4211 		udelay(5);
4212 
4213 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4214 		if (val & BNX2_NVM_COMMAND_DONE) {
4215 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4216 			memcpy(ret_val, &v, 4);
4217 			break;
4218 		}
4219 	}
4220 	if (j >= NVRAM_TIMEOUT_COUNT)
4221 		return -EBUSY;
4222 
4223 	return 0;
4224 }
4225 
4226 
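/* Write one 32-bit word, given in big-endian byte order in @val, to
 * NVRAM at @offset and poll for command completion.
 */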
4227 static int
4228 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4229 {
4230 	u32 cmd;
4231 	__be32 val32;
4232 	int j;
4233 
4234 	/* Build the command word. */
4235 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4236 
4237 	/* Calculate the offset within a buffered flash; not needed for the 5709. */
4238 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4239 		offset = ((offset / bp->flash_info->page_size) <<
4240 			  bp->flash_info->page_bits) +
4241 			 (offset % bp->flash_info->page_size);
4242 	}
4243 
4244 	/* Need to clear DONE bit separately. */
4245 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4246 
4247 	memcpy(&val32, val, 4);
4248 
4249 	/* Write the data. */
4250 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4251 
4252 	/* Address of the NVRAM to write to. */
4253 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4254 
4255 	/* Issue the write command. */
4256 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4257 
4258 	/* Wait for completion. */
4259 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4260 		udelay(5);
4261 
4262 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4263 			break;
4264 	}
4265 	if (j >= NVRAM_TIMEOUT_COUNT)
4266 		return -EBUSY;
4267 
4268 	return 0;
4269 }
4270 
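/* Identify the attached flash or EEPROM.  The 5709 uses a fixed
 * entry; other chips are matched against flash_table by the strapping
 * bits in NVM_CFG1, reprogramming the interface if it has not been
 * reconfigured yet.  The usable size is then read from shared memory.
 */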
4271 static int
4272 bnx2_init_nvram(struct bnx2 *bp)
4273 {
4274 	u32 val;
4275 	int j, entry_count, rc = 0;
4276 	const struct flash_spec *flash;
4277 
4278 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4279 		bp->flash_info = &flash_5709;
4280 		goto get_flash_size;
4281 	}
4282 
4283 	/* Determine the selected interface. */
4284 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4285 
4286 	entry_count = ARRAY_SIZE(flash_table);
4287 
4288 	if (val & 0x40000000) {
4289 
4290 		/* Flash interface has been reconfigured */
4291 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4292 		     j++, flash++) {
4293 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4294 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4295 				bp->flash_info = flash;
4296 				break;
4297 			}
4298 		}
4299 	} else {
4301 		u32 mask;
4302 		/* Not yet been reconfigured */
4303 
4304 		if (val & (1 << 23))
4305 			mask = FLASH_BACKUP_STRAP_MASK;
4306 		else
4307 			mask = FLASH_STRAP_MASK;
4308 
4309 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4310 			j++, flash++) {
4311 
4312 			if ((val & mask) == (flash->strapping & mask)) {
4313 				bp->flash_info = flash;
4314 
4315 				/* Request access to the flash interface. */
4316 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4317 					return rc;
4318 
4319 				/* Enable access to flash interface */
4320 				bnx2_enable_nvram_access(bp);
4321 
4322 				/* Reconfigure the flash interface */
4323 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4324 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4325 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4326 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4327 
4328 				/* Disable access to flash interface */
4329 				bnx2_disable_nvram_access(bp);
4330 				bnx2_release_nvram_lock(bp);
4331 
4332 				break;
4333 			}
4334 		}
4335 	} /* if (val & 0x40000000) */
4336 
4337 	if (j == entry_count) {
4338 		bp->flash_info = NULL;
4339 		pr_alert("Unknown flash/EEPROM type\n");
4340 		return -ENODEV;
4341 	}
4342 
4343 get_flash_size:
4344 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4345 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4346 	if (val)
4347 		bp->flash_size = val;
4348 	else
4349 		bp->flash_size = bp->flash_info->total_size;
4350 
4351 	return rc;
4352 }
4353 
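/* Read an arbitrary byte range from NVRAM.  Misaligned head and tail
 * bytes are read as whole dwords into a bounce buffer and only the
 * requested bytes are copied out.
 */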
4354 static int
4355 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4356 		int buf_size)
4357 {
4358 	int rc = 0;
4359 	u32 cmd_flags, offset32, len32, extra;
4360 
4361 	if (buf_size == 0)
4362 		return 0;
4363 
4364 	/* Request access to the flash interface. */
4365 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4366 		return rc;
4367 
4368 	/* Enable access to flash interface */
4369 	bnx2_enable_nvram_access(bp);
4370 
4371 	len32 = buf_size;
4372 	offset32 = offset;
4373 	extra = 0;
4374 
4375 	cmd_flags = 0;
4376 
4377 	if (offset32 & 3) {
4378 		u8 buf[4];
4379 		u32 pre_len;
4380 
4381 		offset32 &= ~3;
4382 		pre_len = 4 - (offset & 3);
4383 
4384 		if (pre_len >= len32) {
4385 			pre_len = len32;
4386 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4387 				    BNX2_NVM_COMMAND_LAST;
4388 		}
4389 		} else {
4391 		}
4392 
4393 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4394 
4395 		if (rc)
4396 			return rc;
4397 
4398 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4399 
4400 		offset32 += 4;
4401 		ret_buf += pre_len;
4402 		len32 -= pre_len;
4403 	}
4404 	if (len32 & 3) {
4405 		extra = 4 - (len32 & 3);
4406 		len32 = (len32 + 4) & ~3;
4407 	}
4408 
4409 	if (len32 == 4) {
4410 		u8 buf[4];
4411 
4412 		if (cmd_flags)
4413 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4414 		else
4415 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4416 				    BNX2_NVM_COMMAND_LAST;
4417 
4418 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4419 
4420 		memcpy(ret_buf, buf, 4 - extra);
4421 	} else if (len32 > 0) {
4423 		u8 buf[4];
4424 
4425 		/* Read the first word. */
4426 		if (cmd_flags)
4427 			cmd_flags = 0;
4428 		else
4429 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4430 
4431 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4432 
4433 		/* Advance to the next dword. */
4434 		offset32 += 4;
4435 		ret_buf += 4;
4436 		len32 -= 4;
4437 
4438 		while (len32 > 4 && rc == 0) {
4439 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4440 
4441 			/* Advance to the next dword. */
4442 			offset32 += 4;
4443 			ret_buf += 4;
4444 			len32 -= 4;
4445 		}
4446 
4447 		if (rc)
4448 			return rc;
4449 
4450 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4451 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4452 
4453 		memcpy(ret_buf, buf, 4 - extra);
4454 	}
4455 
4456 	/* Disable access to flash interface */
4457 	bnx2_disable_nvram_access(bp);
4458 
4459 	bnx2_release_nvram_lock(bp);
4460 
4461 	return rc;
4462 }
4463 
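/* Write an arbitrary byte range to NVRAM using page-wise
 * read-modify-write.  Non-buffered flash additionally requires
 * erasing each page and rewriting the unmodified data around the
 * new bytes.
 */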
4464 static int
4465 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4466 		int buf_size)
4467 {
4468 	u32 written, offset32, len32;
4469 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4470 	int rc = 0;
4471 	int align_start, align_end;
4472 
4473 	buf = data_buf;
4474 	offset32 = offset;
4475 	len32 = buf_size;
4476 	align_start = align_end = 0;
4477 
4478 	if ((align_start = (offset32 & 3))) {
4479 		offset32 &= ~3;
4480 		len32 += align_start;
4481 		if (len32 < 4)
4482 			len32 = 4;
4483 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4484 			return rc;
4485 	}
4486 
4487 	if (len32 & 3) {
4488 		align_end = 4 - (len32 & 3);
4489 		len32 += align_end;
4490 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4491 			return rc;
4492 	}
4493 
4494 	if (align_start || align_end) {
4495 		align_buf = kmalloc(len32, GFP_KERNEL);
4496 		if (align_buf == NULL)
4497 			return -ENOMEM;
4498 		if (align_start) {
4499 			memcpy(align_buf, start, 4);
4500 		}
4501 		if (align_end) {
4502 			memcpy(align_buf + len32 - 4, end, 4);
4503 		}
4504 		memcpy(align_buf + align_start, data_buf, buf_size);
4505 		buf = align_buf;
4506 	}
4507 
4508 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4509 		flash_buffer = kmalloc(264, GFP_KERNEL);
4510 		if (flash_buffer == NULL) {
4511 			rc = -ENOMEM;
4512 			goto nvram_write_end;
4513 		}
4514 	}
4515 
4516 	written = 0;
4517 	while ((written < len32) && (rc == 0)) {
4518 		u32 page_start, page_end, data_start, data_end;
4519 		u32 addr, cmd_flags;
4520 		int i;
4521 
4522 		/* Find the page_start addr */
4523 		page_start = offset32 + written;
4524 		page_start -= (page_start % bp->flash_info->page_size);
4525 		/* Find the page_end addr */
4526 		page_end = page_start + bp->flash_info->page_size;
4527 		/* Find the data_start addr */
4528 		data_start = (written == 0) ? offset32 : page_start;
4529 		/* Find the data_end addr */
4530 		data_end = (page_end > offset32 + len32) ?
4531 			(offset32 + len32) : page_end;
4532 
4533 		/* Request access to the flash interface. */
4534 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4535 			goto nvram_write_end;
4536 
4537 		/* Enable access to flash interface */
4538 		bnx2_enable_nvram_access(bp);
4539 
4540 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4541 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4542 			int j;
4543 
4544 			/* Read the whole page into the buffer
4545 			 * (non-buffered flash only). */
4546 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4547 				if (j == (bp->flash_info->page_size - 4)) {
4548 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4549 				}
4550 				rc = bnx2_nvram_read_dword(bp,
4551 					page_start + j,
4552 					&flash_buffer[j],
4553 					cmd_flags);
4554 
4555 				if (rc)
4556 					goto nvram_write_end;
4557 
4558 				cmd_flags = 0;
4559 			}
4560 		}
4561 
4562 		/* Enable writes to flash interface (unlock write-protect) */
4563 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4564 			goto nvram_write_end;
4565 
4566 		/* Loop to write back the buffer data from page_start to
4567 		 * data_start */
4568 		i = 0;
4569 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4570 			/* Erase the page */
4571 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4572 				goto nvram_write_end;
4573 
4574 			/* Re-enable write access for the actual data write */
4575 			bnx2_enable_nvram_write(bp);
4576 
4577 			for (addr = page_start; addr < data_start;
4578 				addr += 4, i += 4) {
4579 
4580 				rc = bnx2_nvram_write_dword(bp, addr,
4581 					&flash_buffer[i], cmd_flags);
4582 
4583 				if (rc != 0)
4584 					goto nvram_write_end;
4585 
4586 				cmd_flags = 0;
4587 			}
4588 		}
4589 
4590 		/* Loop to write the new data from data_start to data_end */
4591 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4592 			if ((addr == page_end - 4) ||
4593 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4594 				 (addr == data_end - 4))) {
4595 
4596 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4597 			}
4598 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4599 				cmd_flags);
4600 
4601 			if (rc != 0)
4602 				goto nvram_write_end;
4603 
4604 			cmd_flags = 0;
4605 			buf += 4;
4606 		}
4607 
4608 		/* Loop to write back the buffer data from data_end
4609 		 * to page_end */
4610 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4611 			for (addr = data_end; addr < page_end;
4612 				addr += 4, i += 4) {
4613 
4614 				if (addr == page_end - 4) {
4615 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4616 				}
4617 				rc = bnx2_nvram_write_dword(bp, addr,
4618 					&flash_buffer[i], cmd_flags);
4619 
4620 				if (rc != 0)
4621 					goto nvram_write_end;
4622 
4623 				cmd_flags = 0;
4624 			}
4625 		}
4626 
4627 		/* Disable writes to flash interface (lock write-protect) */
4628 		bnx2_disable_nvram_write(bp);
4629 
4630 		/* Disable access to flash interface */
4631 		bnx2_disable_nvram_access(bp);
4632 		bnx2_release_nvram_lock(bp);
4633 
4634 		/* Increment written */
4635 		written += data_end - data_start;
4636 	}
4637 
4638 nvram_write_end:
4639 	kfree(flash_buffer);
4640 	kfree(align_buf);
4641 	return rc;
4642 }
4643 
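/* Exchange capabilities with the firmware: discover whether VLAN tags
 * can be kept and whether a remote PHY is in use, and acknowledge the
 * capabilities this driver supports.
 */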
4644 static void
4645 bnx2_init_fw_cap(struct bnx2 *bp)
4646 {
4647 	u32 val, sig = 0;
4648 
4649 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4650 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4651 
4652 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4653 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4654 
4655 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4656 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4657 		return;
4658 
4659 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4660 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4661 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4662 	}
4663 
4664 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4665 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4666 		u32 link;
4667 
4668 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4669 
4670 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4671 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4672 			bp->phy_port = PORT_FIBRE;
4673 		else
4674 			bp->phy_port = PORT_TP;
4675 
4676 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4677 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4678 	}
4679 
4680 	if (netif_running(bp->dev) && sig)
4681 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4682 }
4683 
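/* Point separate GRC windows at the MSI-X vector table and PBA. */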
4684 static void
4685 bnx2_setup_msix_tbl(struct bnx2 *bp)
4686 {
4687 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4688 
4689 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4690 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4691 }
4692 
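/* Reset the chip core.  DMA is quiesced and the firmware is notified
 * before the reset is issued; afterwards we wait for the firmware to
 * finish initializing and re-apply chip-specific workarounds.
 */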
4693 static int
4694 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4695 {
4696 	u32 val;
4697 	int i, rc = 0;
4698 	u8 old_port;
4699 
4700 	/* Wait for the current PCI transaction to complete before
4701 	 * issuing a reset. */
4702 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4703 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4704 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4705 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4706 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4707 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4708 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4709 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4710 		udelay(5);
4711 	} else {  /* 5709 */
4712 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4713 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4714 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4715 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4716 
4717 		for (i = 0; i < 100; i++) {
4718 			msleep(1);
4719 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4720 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4721 				break;
4722 		}
4723 	}
4724 
4725 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4726 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4727 
4728 	/* Deposit a driver reset signature so the firmware knows that
4729 	 * this is a soft reset. */
4730 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4731 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4732 
4733 	/* Do a dummy read to force the chip to complete all current
4734 	 * transactions before we issue the reset. */
4735 	val = BNX2_RD(bp, BNX2_MISC_ID);
4736 
4737 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4738 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4739 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4740 		udelay(5);
4741 
4742 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4743 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4744 
4745 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4746 
4747 	} else {
4748 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4749 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4750 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4751 
4752 		/* Chip reset. */
4753 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4754 
4755 		/* Reading back any register after chip reset will hang the
4756 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4757 		 * of margin for write posting.
4758 		 */
4759 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4760 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4761 			msleep(20);
4762 
4763 		/* Reset takes approximately 30 usec */
4764 		for (i = 0; i < 10; i++) {
4765 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4766 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4767 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4768 				break;
4769 			udelay(10);
4770 		}
4771 
4772 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4773 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4774 			pr_err("Chip reset did not complete\n");
4775 			return -EBUSY;
4776 		}
4777 	}
4778 
4779 	/* Make sure byte swapping is properly configured. */
4780 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4781 	if (val != 0x01020304) {
4782 		pr_err("Chip not in correct endian mode\n");
4783 		return -ENODEV;
4784 	}
4785 
4786 	/* Wait for the firmware to finish its initialization. */
4787 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4788 	if (rc)
4789 		return rc;
4790 
4791 	spin_lock_bh(&bp->phy_lock);
4792 	old_port = bp->phy_port;
4793 	bnx2_init_fw_cap(bp);
4794 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4795 	    old_port != bp->phy_port)
4796 		bnx2_set_default_remote_link(bp);
4797 	spin_unlock_bh(&bp->phy_lock);
4798 
4799 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4800 		/* Adjust the voltage regulator two steps lower.  The default
4801 		 * value of this register is 0x0000000e. */
4802 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4803 
4804 		/* Remove bad rbuf memory from the free pool. */
4805 		rc = bnx2_alloc_bad_rbuf(bp);
4806 	}
4807 
4808 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4809 		bnx2_setup_msix_tbl(bp);
4810 		/* Prevent MSIX table reads and writes from timing out */
4811 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4812 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4813 	}
4814 
4815 	return rc;
4816 }
4817 
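/* Bring the freshly reset chip to an operational state: configure DMA
 * byte swapping, load the CPU firmware, program the MAC address, MTU
 * and host coalescing parameters, and enable the hardware blocks.
 */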
4818 static int
4819 bnx2_init_chip(struct bnx2 *bp)
4820 {
4821 	u32 val, mtu;
4822 	int rc, i;
4823 
4824 	/* Make sure the interrupt is not active. */
4825 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4826 
4827 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4828 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4829 #ifdef __BIG_ENDIAN
4830 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4831 #endif
4832 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4833 	      DMA_READ_CHANS << 12 |
4834 	      DMA_WRITE_CHANS << 16;
4835 
4836 	val |= (0x2 << 20) | (1 << 11);
4837 
4838 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4839 		val |= (1 << 23);
4840 
4841 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4842 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4843 	    !(bp->flags & BNX2_FLAG_PCIX))
4844 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4845 
4846 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4847 
4848 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4849 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4850 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4851 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4852 	}
4853 
4854 	if (bp->flags & BNX2_FLAG_PCIX) {
4855 		u16 val16;
4856 
4857 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4858 				     &val16);
4859 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4860 				      val16 & ~PCI_X_CMD_ERO);
4861 	}
4862 
4863 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4864 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4865 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4866 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4867 
4868 	/* Initialize context mapping and zero out the quick contexts.  The
4869 	 * context block must have already been enabled. */
4870 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4871 		rc = bnx2_init_5709_context(bp);
4872 		if (rc)
4873 			return rc;
4874 	} else
4875 		bnx2_init_context(bp);
4876 
4877 	if ((rc = bnx2_init_cpus(bp)) != 0)
4878 		return rc;
4879 
4880 	bnx2_init_nvram(bp);
4881 
4882 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4883 
4884 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4885 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4886 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4887 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4888 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4889 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4890 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4891 	}
4892 
4893 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4894 
4895 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4896 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4897 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4898 
4899 	val = (BNX2_PAGE_BITS - 8) << 24;
4900 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4901 
4902 	/* Configure page size. */
4903 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4904 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4905 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4906 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4907 
4908 	val = bp->mac_addr[0] +
4909 	      (bp->mac_addr[1] << 8) +
4910 	      (bp->mac_addr[2] << 16) +
4911 	      bp->mac_addr[3] +
4912 	      (bp->mac_addr[4] << 8) +
4913 	      (bp->mac_addr[5] << 16);
4914 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4915 
4916 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4917 	mtu = bp->dev->mtu;
4918 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4919 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4920 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4921 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4922 
4923 	if (mtu < 1500)
4924 		mtu = 1500;
4925 
4926 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4927 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4928 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4929 
4930 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4931 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4932 		bp->bnx2_napi[i].last_status_idx = 0;
4933 
4934 	bp->idle_chk_status_idx = 0xffff;
4935 
4936 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4937 
4938 	/* Set up how to generate a link change interrupt. */
4939 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4940 
4941 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4942 		(u64) bp->status_blk_mapping & 0xffffffff);
4943 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4944 
4945 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4946 		(u64) bp->stats_blk_mapping & 0xffffffff);
4947 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4948 		(u64) bp->stats_blk_mapping >> 32);
4949 
4950 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4951 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4952 
4953 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4954 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4955 
4956 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4957 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4958 
4959 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4960 
4961 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4962 
4963 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
4964 		(bp->com_ticks_int << 16) | bp->com_ticks);
4965 
4966 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4967 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4968 
4969 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4970 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4971 	else
4972 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4973 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4974 
4975 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4976 		val = BNX2_HC_CONFIG_COLLECT_STATS;
4977 	else {
4978 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4979 		      BNX2_HC_CONFIG_COLLECT_STATS;
4980 	}
4981 
4982 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4983 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4984 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
4985 
4986 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4987 	}
4988 
4989 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4990 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4991 
4992 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
4993 
4994 	if (bp->rx_ticks < 25)
4995 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4996 	else
4997 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4998 
4999 	for (i = 1; i < bp->irq_nvecs; i++) {
5000 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5001 			   BNX2_HC_SB_CONFIG_1;
5002 
5003 		BNX2_WR(bp, base,
5004 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5005 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5006 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5007 
5008 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5009 			(bp->tx_quick_cons_trip_int << 16) |
5010 			 bp->tx_quick_cons_trip);
5011 
5012 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5013 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5014 
5015 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5016 			(bp->rx_quick_cons_trip_int << 16) |
5017 			bp->rx_quick_cons_trip);
5018 
5019 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5020 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5021 	}
5022 
5023 	/* Clear internal stats counters. */
5024 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5025 
5026 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5027 
5028 	/* Initialize the receive filter. */
5029 	bnx2_set_rx_mode(bp->dev);
5030 
5031 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5032 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5033 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5034 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5035 	}
5036 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5037 			  1, 0);
5038 
5039 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5040 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5041 
5042 	udelay(20);
5043 
5044 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5045 
5046 	return rc;
5047 }
5048 
5049 static void
5050 bnx2_clear_ring_states(struct bnx2 *bp)
5051 {
5052 	struct bnx2_napi *bnapi;
5053 	struct bnx2_tx_ring_info *txr;
5054 	struct bnx2_rx_ring_info *rxr;
5055 	int i;
5056 
5057 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5058 		bnapi = &bp->bnx2_napi[i];
5059 		txr = &bnapi->tx_ring;
5060 		rxr = &bnapi->rx_ring;
5061 
5062 		txr->tx_cons = 0;
5063 		txr->hw_tx_cons = 0;
5064 		rxr->rx_prod_bseq = 0;
5065 		rxr->rx_prod = 0;
5066 		rxr->rx_cons = 0;
5067 		rxr->rx_pg_prod = 0;
5068 		rxr->rx_pg_cons = 0;
5069 	}
5070 }
5071 
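/* Write a TX ring's context entry: ring type, command type and the
 * host address of the descriptor ring.  The 5709 uses different
 * context offsets than the earlier chips.
 */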
5072 static void
5073 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5074 {
5075 	u32 val, offset0, offset1, offset2, offset3;
5076 	u32 cid_addr = GET_CID_ADDR(cid);
5077 
5078 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5079 		offset0 = BNX2_L2CTX_TYPE_XI;
5080 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5081 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5082 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5083 	} else {
5084 		offset0 = BNX2_L2CTX_TYPE;
5085 		offset1 = BNX2_L2CTX_CMD_TYPE;
5086 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5087 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5088 	}
5089 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5090 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5091 
5092 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5093 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5094 
5095 	val = (u64) txr->tx_desc_mapping >> 32;
5096 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5097 
5098 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5099 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5100 }
5101 
5102 static void
5103 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5104 {
5105 	struct bnx2_tx_bd *txbd;
5106 	u32 cid;
5107 	struct bnx2_napi *bnapi;
5108 	struct bnx2_tx_ring_info *txr;
5109 
5110 	bnapi = &bp->bnx2_napi[ring_num];
5111 	txr = &bnapi->tx_ring;
5112 
5113 	if (ring_num == 0)
5114 		cid = TX_CID;
5115 	else
5116 		cid = TX_TSS_CID + ring_num - 1;
5117 
5118 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5119 
5120 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5121 
5122 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5123 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5124 
5125 	txr->tx_prod = 0;
5126 	txr->tx_prod_bseq = 0;
5127 
5128 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5129 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5130 
5131 	bnx2_init_tx_context(bp, cid, txr);
5132 }
5133 
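/* Initialize every page of an RX descriptor ring and chain the last
 * BD of each page to the next page; the final page points back to
 * the first, closing the ring.
 */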
5134 static void
5135 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5136 		     u32 buf_size, int num_rings)
5137 {
5138 	int i;
5139 	struct bnx2_rx_bd *rxbd;
5140 
5141 	for (i = 0; i < num_rings; i++) {
5142 		int j;
5143 
5144 		rxbd = &rx_ring[i][0];
5145 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5146 			rxbd->rx_bd_len = buf_size;
5147 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5148 		}
5149 		if (i == (num_rings - 1))
5150 			j = 0;
5151 		else
5152 			j = i + 1;
5153 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5154 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5155 	}
5156 }
5157 
5158 static void
5159 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5160 {
5161 	int i;
5162 	u16 prod, ring_prod;
5163 	u32 cid, rx_cid_addr, val;
5164 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5165 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5166 
5167 	if (ring_num == 0)
5168 		cid = RX_CID;
5169 	else
5170 		cid = RX_RSS_CID + ring_num - 1;
5171 
5172 	rx_cid_addr = GET_CID_ADDR(cid);
5173 
5174 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5175 			     bp->rx_buf_use_size, bp->rx_max_ring);
5176 
5177 	bnx2_init_rx_context(bp, cid);
5178 
5179 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5180 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5181 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5182 	}
5183 
5184 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5185 	if (bp->rx_pg_ring_size) {
5186 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5187 				     rxr->rx_pg_desc_mapping,
5188 				     PAGE_SIZE, bp->rx_max_pg_ring);
5189 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5190 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5191 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5192 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5193 
5194 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5195 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5196 
5197 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5198 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5199 
5200 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5201 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5202 	}
5203 
5204 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5205 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5206 
5207 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5208 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5209 
5210 	ring_prod = prod = rxr->rx_pg_prod;
5211 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5212 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5213 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5214 				    ring_num, i, bp->rx_pg_ring_size);
5215 			break;
5216 		}
5217 		prod = BNX2_NEXT_RX_BD(prod);
5218 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5219 	}
5220 	rxr->rx_pg_prod = prod;
5221 
5222 	ring_prod = prod = rxr->rx_prod;
5223 	for (i = 0; i < bp->rx_ring_size; i++) {
5224 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5225 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5226 				    ring_num, i, bp->rx_ring_size);
5227 			break;
5228 		}
5229 		prod = BNX2_NEXT_RX_BD(prod);
5230 		ring_prod = BNX2_RX_RING_IDX(prod);
5231 	}
5232 	rxr->rx_prod = prod;
5233 
5234 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5235 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5236 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5237 
5238 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5239 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5240 
5241 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5242 }
5243 
5244 static void
5245 bnx2_init_all_rings(struct bnx2 *bp)
5246 {
5247 	int i;
5248 	u32 val;
5249 
5250 	bnx2_clear_ring_states(bp);
5251 
5252 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5253 	for (i = 0; i < bp->num_tx_rings; i++)
5254 		bnx2_init_tx_ring(bp, i);
5255 
5256 	if (bp->num_tx_rings > 1)
5257 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5258 			(TX_TSS_CID << 7));
5259 
5260 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5261 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5262 
5263 	for (i = 0; i < bp->num_rx_rings; i++)
5264 		bnx2_init_rx_ring(bp, i);
5265 
5266 	if (bp->num_rx_rings > 1) {
5267 		u32 tbl_32 = 0;
5268 
5269 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5270 			int shift = (i % 8) << 2;
5271 
5272 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5273 			if ((i % 8) == 7) {
5274 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5275 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5276 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5277 					BNX2_RLUP_RSS_COMMAND_WRITE |
5278 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5279 				tbl_32 = 0;
5280 			}
5281 		}
5282 
5283 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5284 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5285 
5286 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5287 
5288 	}
5289 }
5290 
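/* Compute how many descriptor pages are needed to hold @ring_size
 * entries, rounded up to a power of two; @max_size is the largest
 * page count supported by the caller.
 */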
5291 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5292 {
5293 	u32 max, num_rings = 1;
5294 
5295 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5296 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5297 		num_rings++;
5298 	}
5299 	/* round to next power of 2 */
5300 	max = max_size;
5301 	while ((max & num_rings) == 0)
5302 		max >>= 1;
5303 
5304 	if (num_rings != max)
5305 		max <<= 1;
5306 
5307 	return max;
5308 }
5309 
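/* Size the RX rings for the current MTU.  When the required buffer no
 * longer fits in one page (and jumbo placement works on this chip),
 * frame data beyond the header is carried in the page ring and only
 * the first bytes stay in the normal RX ring.
 */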
5310 static void
5311 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5312 {
5313 	u32 rx_size, rx_space, jumbo_size;
5314 
5315 	/* 8 for CRC and VLAN */
5316 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5317 
5318 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5319 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5320 
5321 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5322 	bp->rx_pg_ring_size = 0;
5323 	bp->rx_max_pg_ring = 0;
5324 	bp->rx_max_pg_ring_idx = 0;
5325 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5326 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5327 
5328 		jumbo_size = size * pages;
5329 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5330 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5331 
5332 		bp->rx_pg_ring_size = jumbo_size;
5333 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5334 							BNX2_MAX_RX_PG_RINGS);
5335 		bp->rx_max_pg_ring_idx =
5336 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5337 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5338 		bp->rx_copy_thresh = 0;
5339 	}
5340 
5341 	bp->rx_buf_use_size = rx_size;
5342 	/* hw alignment + build_skb() overhead */
5343 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5344 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5345 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5346 	bp->rx_ring_size = size;
5347 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5348 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5349 }
5350 
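/* Unmap and free every skb still held by the TX rings, including the
 * DMA mappings of all fragment BDs belonging to each skb.
 */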
5351 static void
5352 bnx2_free_tx_skbs(struct bnx2 *bp)
5353 {
5354 	int i;
5355 
5356 	for (i = 0; i < bp->num_tx_rings; i++) {
5357 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5358 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5359 		int j;
5360 
5361 		if (txr->tx_buf_ring == NULL)
5362 			continue;
5363 
5364 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5365 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5366 			struct sk_buff *skb = tx_buf->skb;
5367 			int k, last;
5368 
5369 			if (skb == NULL) {
5370 				j = BNX2_NEXT_TX_BD(j);
5371 				continue;
5372 			}
5373 
5374 			dma_unmap_single(&bp->pdev->dev,
5375 					 dma_unmap_addr(tx_buf, mapping),
5376 					 skb_headlen(skb),
5377 					 PCI_DMA_TODEVICE);
5378 
5379 			tx_buf->skb = NULL;
5380 
5381 			last = tx_buf->nr_frags;
5382 			j = BNX2_NEXT_TX_BD(j);
5383 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5384 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5385 				dma_unmap_page(&bp->pdev->dev,
5386 					dma_unmap_addr(tx_buf, mapping),
5387 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5388 					PCI_DMA_TODEVICE);
5389 			}
5390 			dev_kfree_skb(skb);
5391 		}
5392 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5393 	}
5394 }
5395 
5396 static void
5397 bnx2_free_rx_skbs(struct bnx2 *bp)
5398 {
5399 	int i;
5400 
5401 	for (i = 0; i < bp->num_rx_rings; i++) {
5402 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5403 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5404 		int j;
5405 
5406 		if (rxr->rx_buf_ring == NULL)
5407 			return;
5408 
5409 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5410 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5411 			u8 *data = rx_buf->data;
5412 
5413 			if (data == NULL)
5414 				continue;
5415 
5416 			dma_unmap_single(&bp->pdev->dev,
5417 					 dma_unmap_addr(rx_buf, mapping),
5418 					 bp->rx_buf_use_size,
5419 					 PCI_DMA_FROMDEVICE);
5420 
5421 			rx_buf->data = NULL;
5422 
5423 			kfree(data);
5424 		}
5425 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5426 			bnx2_free_rx_page(bp, rxr, j);
5427 	}
5428 }
5429 
5430 static void
5431 bnx2_free_skbs(struct bnx2 *bp)
5432 {
5433 	bnx2_free_tx_skbs(bp);
5434 	bnx2_free_rx_skbs(bp);
5435 }
5436 
5437 static int
5438 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5439 {
5440 	int rc;
5441 
5442 	rc = bnx2_reset_chip(bp, reset_code);
5443 	bnx2_free_skbs(bp);
5444 	if (rc)
5445 		return rc;
5446 
5447 	if ((rc = bnx2_init_chip(bp)) != 0)
5448 		return rc;
5449 
5450 	bnx2_init_all_rings(bp);
5451 	return 0;
5452 }
5453 
5454 static int
5455 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5456 {
5457 	int rc;
5458 
5459 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5460 		return rc;
5461 
5462 	spin_lock_bh(&bp->phy_lock);
5463 	bnx2_init_phy(bp, reset_phy);
5464 	bnx2_set_link(bp);
5465 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5466 		bnx2_remote_phy_event(bp);
5467 	spin_unlock_bh(&bp->phy_lock);
5468 	return 0;
5469 }
5470 
5471 static int
5472 bnx2_shutdown_chip(struct bnx2 *bp)
5473 {
5474 	u32 reset_code;
5475 
5476 	if (bp->flags & BNX2_FLAG_NO_WOL)
5477 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5478 	else if (bp->wol)
5479 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5480 	else
5481 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5482 
5483 	return bnx2_reset_chip(bp, reset_code);
5484 }
5485 
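/* Register self-test: for each table entry, write zero and all-ones
 * and check that the read/write bits respond while the read-only bits
 * keep their saved value.  The original value is always restored.
 */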
5486 static int
5487 bnx2_test_registers(struct bnx2 *bp)
5488 {
5489 	int ret;
5490 	int i, is_5709;
5491 	static const struct {
5492 		u16   offset;
5493 		u16   flags;
5494 #define BNX2_FL_NOT_5709	1
5495 		u32   rw_mask;
5496 		u32   ro_mask;
5497 	} reg_tbl[] = {
5498 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5499 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5500 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5501 
5502 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5503 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5504 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5505 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5506 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5507 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5508 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5509 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5510 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5511 
5512 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5513 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5514 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5515 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5516 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5517 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5518 
5519 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5520 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5521 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5522 
5523 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5524 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5525 
5526 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5527 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5528 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5529 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5530 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5531 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5532 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5533 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5534 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5535 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5536 
5537 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5538 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5539 
5540 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5541 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5542 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5543 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5544 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5545 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5546 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5547 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5548 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5549 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5550 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5551 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5552 
5553 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5554 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5555 
5556 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5557 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5558 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5559 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5560 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5561 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5562 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5563 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5564 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5565 
5566 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5567 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5568 
5569 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5570 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5571 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5572 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5573 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5574 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5575 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5576 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5577 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5578 
5579 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5580 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5581 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5582 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5583 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5584 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5585 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5586 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5587 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5588 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5589 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5590 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5591 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5592 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5593 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5594 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5595 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5596 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5597 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5598 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5599 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5600 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5601 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5602 
5603 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5604 	};
5605 
5606 	ret = 0;
5607 	is_5709 = 0;
5608 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5609 		is_5709 = 1;
5610 
5611 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5612 		u32 offset, rw_mask, ro_mask, save_val, val;
5613 		u16 flags = reg_tbl[i].flags;
5614 
5615 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5616 			continue;
5617 
5618 		offset = (u32) reg_tbl[i].offset;
5619 		rw_mask = reg_tbl[i].rw_mask;
5620 		ro_mask = reg_tbl[i].ro_mask;
5621 
5622 		save_val = readl(bp->regview + offset);
5623 
5624 		writel(0, bp->regview + offset);
5625 
5626 		val = readl(bp->regview + offset);
5627 		if ((val & rw_mask) != 0) {
5628 			goto reg_test_err;
5629 		}
5630 
5631 		if ((val & ro_mask) != (save_val & ro_mask)) {
5632 			goto reg_test_err;
5633 		}
5634 
5635 		writel(0xffffffff, bp->regview + offset);
5636 
5637 		val = readl(bp->regview + offset);
5638 		if ((val & rw_mask) != rw_mask) {
5639 			goto reg_test_err;
5640 		}
5641 
5642 		if ((val & ro_mask) != (save_val & ro_mask)) {
5643 			goto reg_test_err;
5644 		}
5645 
5646 		writel(save_val, bp->regview + offset);
5647 		continue;
5648 
5649 reg_test_err:
5650 		writel(save_val, bp->regview + offset);
5651 		ret = -ENODEV;
5652 		break;
5653 	}
5654 	return ret;
5655 }
5656 
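/* Verify on-chip memory at [start, start + size) by writing a set of
 * test patterns through the indirect register interface and reading
 * them back.
 */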
5657 static int
5658 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5659 {
5660 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5661 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5662 	int i;
5663 
5664 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5665 		u32 offset;
5666 
5667 		for (offset = 0; offset < size; offset += 4) {
5668 
5669 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5670 
5671 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5672 				test_pattern[i]) {
5673 				return -ENODEV;
5674 			}
5675 		}
5676 	}
5677 	return 0;
5678 }
5679 
5680 static int
5681 bnx2_test_memory(struct bnx2 *bp)
5682 {
5683 	int ret = 0;
5684 	int i;
5685 	static struct mem_entry {
5686 		u32   offset;
5687 		u32   len;
5688 	} mem_tbl_5706[] = {
5689 		{ 0x60000,  0x4000 },
5690 		{ 0xa0000,  0x3000 },
5691 		{ 0xe0000,  0x4000 },
5692 		{ 0x120000, 0x4000 },
5693 		{ 0x1a0000, 0x4000 },
5694 		{ 0x160000, 0x4000 },
5695 		{ 0xffffffff, 0    },
5696 	},
5697 	mem_tbl_5709[] = {
5698 		{ 0x60000,  0x4000 },
5699 		{ 0xa0000,  0x3000 },
5700 		{ 0xe0000,  0x4000 },
5701 		{ 0x120000, 0x4000 },
5702 		{ 0x1a0000, 0x4000 },
5703 		{ 0xffffffff, 0    },
5704 	};
5705 	struct mem_entry *mem_tbl;
5706 
5707 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5708 		mem_tbl = mem_tbl_5709;
5709 	else
5710 		mem_tbl = mem_tbl_5706;
5711 
5712 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5713 		ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5714 				       mem_tbl[i].len);
5715 		if (ret != 0)
5716 			return ret;
5717 	}
5718 
5719 	return ret;
5720 }
5721 
5722 #define BNX2_MAC_LOOPBACK	0
5723 #define BNX2_PHY_LOOPBACK	1
5724 
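/* Self-contained loopback test on ring 0: build a frame addressed to
 * ourselves, post a single tx BD, ring the doorbell, and force the
 * host coalescing block to update the status block so the tx and rx
 * consumer indices can be polled without interrupts.  The received
 * frame is then validated against the transmitted pattern.
 */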
5725 static int
5726 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5727 {
5728 	unsigned int pkt_size, num_pkts, i;
5729 	struct sk_buff *skb;
5730 	u8 *data;
5731 	unsigned char *packet;
5732 	u16 rx_start_idx, rx_idx;
5733 	dma_addr_t map;
5734 	struct bnx2_tx_bd *txbd;
5735 	struct bnx2_sw_bd *rx_buf;
5736 	struct l2_fhdr *rx_hdr;
5737 	int ret = -ENODEV;
5738 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5739 	struct bnx2_tx_ring_info *txr;
5740 	struct bnx2_rx_ring_info *rxr;
5741 
5742 	tx_napi = bnapi;
5743 
5744 	txr = &tx_napi->tx_ring;
5745 	rxr = &bnapi->rx_ring;
5746 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5747 		bp->loopback = MAC_LOOPBACK;
5748 		bnx2_set_mac_loopback(bp);
5749 	}
5750 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5751 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5752 			return 0;
5753 
5754 		bp->loopback = PHY_LOOPBACK;
5755 		bnx2_set_phy_loopback(bp);
5756 	}
5757 	else
5758 		return -EINVAL;
5759 
5760 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5761 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5762 	if (!skb)
5763 		return -ENOMEM;
5764 	packet = skb_put(skb, pkt_size);
5765 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5766 	memset(packet + ETH_ALEN, 0x0, 8);
5767 	for (i = ETH_HLEN; i < pkt_size; i++)
5768 		packet[i] = (unsigned char) (i & 0xff);
5769 
5770 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5771 			     DMA_TO_DEVICE);
5772 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5773 		dev_kfree_skb(skb);
5774 		return -EIO;
5775 	}
5776 
5777 	BNX2_WR(bp, BNX2_HC_COMMAND,
5778 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5779 
5780 	BNX2_RD(bp, BNX2_HC_COMMAND);
5781 
5782 	udelay(5);
5783 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5784 
5785 	num_pkts = 0;
5786 
5787 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5788 
5789 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5790 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5791 	txbd->tx_bd_mss_nbytes = pkt_size;
5792 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5793 
5794 	num_pkts++;
5795 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5796 	txr->tx_prod_bseq += pkt_size;
5797 
5798 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5799 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5800 
5801 	udelay(100);
5802 
5803 	BNX2_WR(bp, BNX2_HC_COMMAND,
5804 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5805 
5806 	BNX2_RD(bp, BNX2_HC_COMMAND);
5807 
5808 	udelay(5);
5809 
5810 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5811 	dev_kfree_skb(skb);
5812 
5813 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5814 		goto loopback_test_done;
5815 
5816 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5817 	if (rx_idx != rx_start_idx + num_pkts) {
5818 		goto loopback_test_done;
5819 	}
5820 
5821 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5822 	data = rx_buf->data;
5823 
5824 	rx_hdr = get_l2_fhdr(data);
5825 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5826 
5827 	dma_sync_single_for_cpu(&bp->pdev->dev,
5828 		dma_unmap_addr(rx_buf, mapping),
5829 		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5830 
5831 	if (rx_hdr->l2_fhdr_status &
5832 		(L2_FHDR_ERRORS_BAD_CRC |
5833 		L2_FHDR_ERRORS_PHY_DECODE |
5834 		L2_FHDR_ERRORS_ALIGNMENT |
5835 		L2_FHDR_ERRORS_TOO_SHORT |
5836 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5837 
5838 		goto loopback_test_done;
5839 	}
5840 
5841 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5842 		goto loopback_test_done;
5843 	}
5844 
5845 	for (i = ETH_HLEN; i < pkt_size; i++) {
5846 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5847 			goto loopback_test_done;
5848 		}
5849 	}
5850 
5851 	ret = 0;
5852 
5853 loopback_test_done:
5854 	bp->loopback = 0;
5855 	return ret;
5856 }
5857 
5858 #define BNX2_MAC_LOOPBACK_FAILED	1
5859 #define BNX2_PHY_LOOPBACK_FAILED	2
5860 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5861 					 BNX2_PHY_LOOPBACK_FAILED)
5862 
5863 static int
5864 bnx2_test_loopback(struct bnx2 *bp)
5865 {
5866 	int rc = 0;
5867 
5868 	if (!netif_running(bp->dev))
5869 		return BNX2_LOOPBACK_FAILED;
5870 
5871 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5872 	spin_lock_bh(&bp->phy_lock);
5873 	bnx2_init_phy(bp, 1);
5874 	spin_unlock_bh(&bp->phy_lock);
5875 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5876 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5877 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5878 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5879 	return rc;
5880 }
5881 
5882 #define NVRAM_SIZE 0x200
5883 #define CRC32_RESIDUAL 0xdebb20e3
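/* 0xdebb20e3 is the standard CRC-32 residual: running the little-endian
 * CRC over a block that ends with its own (inverted) CRC always yields
 * this constant, so each 0x100-byte region can be verified without
 * knowing where its CRC word is stored.
 */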
5884 
5885 static int
5886 bnx2_test_nvram(struct bnx2 *bp)
5887 {
5888 	__be32 buf[NVRAM_SIZE / 4];
5889 	u8 *data = (u8 *) buf;
5890 	int rc = 0;
5891 	u32 magic, csum;
5892 
5893 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5894 		goto test_nvram_done;
5895 
5896 	magic = be32_to_cpu(buf[0]);
5897 	if (magic != 0x669955aa) {
5898 		rc = -ENODEV;
5899 		goto test_nvram_done;
5900 	}
5901 
5902 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5903 		goto test_nvram_done;
5904 
5905 	csum = ether_crc_le(0x100, data);
5906 	if (csum != CRC32_RESIDUAL) {
5907 		rc = -ENODEV;
5908 		goto test_nvram_done;
5909 	}
5910 
5911 	csum = ether_crc_le(0x100, data + 0x100);
5912 	if (csum != CRC32_RESIDUAL) {
5913 		rc = -ENODEV;
5914 	}
5915 
5916 test_nvram_done:
5917 	return rc;
5918 }
5919 
5920 static int
5921 bnx2_test_link(struct bnx2 *bp)
5922 {
5923 	u32 bmsr;
5924 
5925 	if (!netif_running(bp->dev))
5926 		return -ENODEV;
5927 
5928 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5929 		if (bp->link_up)
5930 			return 0;
5931 		return -ENODEV;
5932 	}
5933 	spin_lock_bh(&bp->phy_lock);
5934 	bnx2_enable_bmsr1(bp);
5935 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5936 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5937 	bnx2_disable_bmsr1(bp);
5938 	spin_unlock_bh(&bp->phy_lock);
5939 
5940 	if (bmsr & BMSR_LSTATUS) {
5941 		return 0;
5942 	}
5943 	return -ENODEV;
5944 }
5945 
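/* Verify interrupt delivery by forcing the host coalescing block to
 * generate an interrupt, then polling for up to ~100 ms for a change
 * in the status block index in BNX2_PCICFG_INT_ACK_CMD.
 */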
5946 static int
5947 bnx2_test_intr(struct bnx2 *bp)
5948 {
5949 	int i;
5950 	u16 status_idx;
5951 
5952 	if (!netif_running(bp->dev))
5953 		return -ENODEV;
5954 
5955 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5956 
5957 	/* This register is not touched during run-time. */
5958 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5959 	BNX2_RD(bp, BNX2_HC_COMMAND);
5960 
5961 	for (i = 0; i < 10; i++) {
5962 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5963 			status_idx) {
5964 
5965 			break;
5966 		}
5967 
5968 		msleep_interruptible(10);
5969 	}
5970 	if (i < 10)
5971 		return 0;
5972 
5973 	return -ENODEV;
5974 }
5975 
5976 /* Determine link state using parallel detection. */
5977 static int
5978 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5979 {
5980 	u32 mode_ctl, an_dbg, exp;
5981 
5982 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5983 		return 0;
5984 
5985 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5986 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5987 
5988 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5989 		return 0;
5990 
5991 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5992 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5993 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5994 
5995 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5996 		return 0;
5997 
5998 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5999 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6000 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6001 
6002 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6003 		return 0;
6004 
6005 	return 1;
6006 }
6007 
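/* Periodic SerDes link state machine for the 5706.  While autoneg is
 * enabled but the link is down, fall back to a forced 1Gb full-duplex
 * link once parallel detection sees a good signal.  If the forced link
 * later sees the partner autonegotiating again (expansion register bit
 * read via PHY registers 0x17/0x15), autonegotiation is re-enabled.
 */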
6008 static void
6009 bnx2_5706_serdes_timer(struct bnx2 *bp)
6010 {
6011 	int check_link = 1;
6012 
6013 	spin_lock(&bp->phy_lock);
6014 	if (bp->serdes_an_pending) {
6015 		bp->serdes_an_pending--;
6016 		check_link = 0;
6017 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6018 		u32 bmcr;
6019 
6020 		bp->current_interval = BNX2_TIMER_INTERVAL;
6021 
6022 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6023 
6024 		if (bmcr & BMCR_ANENABLE) {
6025 			if (bnx2_5706_serdes_has_link(bp)) {
6026 				bmcr &= ~BMCR_ANENABLE;
6027 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6028 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6029 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6030 			}
6031 		}
6032 	}
6033 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6034 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6035 		u32 phy2;
6036 
6037 		bnx2_write_phy(bp, 0x17, 0x0f01);
6038 		bnx2_read_phy(bp, 0x15, &phy2);
6039 		if (phy2 & 0x20) {
6040 			u32 bmcr;
6041 
6042 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6043 			bmcr |= BMCR_ANENABLE;
6044 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6045 
6046 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6047 		}
6048 	} else
6049 		bp->current_interval = BNX2_TIMER_INTERVAL;
6050 
6051 	if (check_link) {
6052 		u32 val;
6053 
6054 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6055 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6056 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6057 
6058 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6059 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6060 				bnx2_5706s_force_link_dn(bp, 1);
6061 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6062 			} else
6063 				bnx2_set_link(bp);
6064 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6065 			bnx2_set_link(bp);
6066 	}
6067 	spin_unlock(&bp->phy_lock);
6068 }
6069 
6070 static void
6071 bnx2_5708_serdes_timer(struct bnx2 *bp)
6072 {
6073 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6074 		return;
6075 
6076 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6077 		bp->serdes_an_pending = 0;
6078 		return;
6079 	}
6080 
6081 	spin_lock(&bp->phy_lock);
6082 	if (bp->serdes_an_pending)
6083 		bp->serdes_an_pending--;
6084 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6085 		u32 bmcr;
6086 
6087 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6088 		if (bmcr & BMCR_ANENABLE) {
6089 			bnx2_enable_forced_2g5(bp);
6090 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6091 		} else {
6092 			bnx2_disable_forced_2g5(bp);
6093 			bp->serdes_an_pending = 2;
6094 			bp->current_interval = BNX2_TIMER_INTERVAL;
6095 		}
6096 
6097 	} else
6098 		bp->current_interval = BNX2_TIMER_INTERVAL;
6099 
6100 	spin_unlock(&bp->phy_lock);
6101 }
6102 
6103 static void
6104 bnx2_timer(unsigned long data)
6105 {
6106 	struct bnx2 *bp = (struct bnx2 *) data;
6107 
6108 	if (!netif_running(bp->dev))
6109 		return;
6110 
6111 	if (atomic_read(&bp->intr_sem) != 0)
6112 		goto bnx2_restart_timer;
6113 
6114 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6115 	     BNX2_FLAG_USING_MSI)
6116 		bnx2_chk_missed_msi(bp);
6117 
6118 	bnx2_send_heart_beat(bp);
6119 
6120 	bp->stats_blk->stat_FwRxDrop =
6121 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6122 
6123 	/* Work around occasionally corrupted counters. */
6124 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6125 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6126 			BNX2_HC_COMMAND_STATS_NOW);
6127 
6128 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6129 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6130 			bnx2_5706_serdes_timer(bp);
6131 		else
6132 			bnx2_5708_serdes_timer(bp);
6133 	}
6134 
6135 bnx2_restart_timer:
6136 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6137 }
6138 
6139 static int
6140 bnx2_request_irq(struct bnx2 *bp)
6141 {
6142 	unsigned long flags;
6143 	struct bnx2_irq *irq;
6144 	int rc = 0, i;
6145 
6146 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6147 		flags = 0;
6148 	else
6149 		flags = IRQF_SHARED;
6150 
6151 	for (i = 0; i < bp->irq_nvecs; i++) {
6152 		irq = &bp->irq_tbl[i];
6153 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6154 				 &bp->bnx2_napi[i]);
6155 		if (rc)
6156 			break;
6157 		irq->requested = 1;
6158 	}
6159 	return rc;
6160 }
6161 
6162 static void
6163 __bnx2_free_irq(struct bnx2 *bp)
6164 {
6165 	struct bnx2_irq *irq;
6166 	int i;
6167 
6168 	for (i = 0; i < bp->irq_nvecs; i++) {
6169 		irq = &bp->irq_tbl[i];
6170 		if (irq->requested)
6171 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6172 		irq->requested = 0;
6173 	}
6174 }
6175 
6176 static void
6177 bnx2_free_irq(struct bnx2 *bp)
6178 {
6179 
6180 	__bnx2_free_irq(bp);
6181 	if (bp->flags & BNX2_FLAG_USING_MSI)
6182 		pci_disable_msi(bp->pdev);
6183 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6184 		pci_disable_msix(bp->pdev);
6185 
6186 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6187 }
6188 
6189 static void
6190 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6191 {
6192 	int i, total_vecs, rc;
6193 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6194 	struct net_device *dev = bp->dev;
6195 	const int len = sizeof(bp->irq_tbl[0].name);
6196 
6197 	bnx2_setup_msix_tbl(bp);
6198 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6199 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6200 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6201 
6202 	/* Need to flush the previous three writes to ensure MSI-X
6203 	 * is set up properly. */
6204 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6205 
6206 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6207 		msix_ent[i].entry = i;
6208 		msix_ent[i].vector = 0;
6209 	}
6210 
6211 	total_vecs = msix_vecs;
6212 #ifdef BCM_CNIC
6213 	total_vecs++;
6214 #endif
6215 	rc = -ENOSPC;
6216 	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6217 		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6218 		if (rc <= 0)
6219 			break;
6220 		if (rc > 0)
6221 			total_vecs = rc;
6222 	}
6223 
6224 	if (rc != 0)
6225 		return;
6226 
6227 	msix_vecs = total_vecs;
6228 #ifdef BCM_CNIC
6229 	msix_vecs--;
6230 #endif
6231 	bp->irq_nvecs = msix_vecs;
6232 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6233 	for (i = 0; i < total_vecs; i++) {
6234 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6235 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6236 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6237 	}
6238 }
6239 
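/* Select the interrupt mode in order of preference: MSI-X (multiple
 * one-shot vectors), then plain MSI, then legacy INTx.  The number of
 * MSI-X vectors requested is derived from the default RSS queue count
 * unless the user has asked for specific rx/tx ring counts.
 */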
6240 static int
6241 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6242 {
6243 	int cpus = netif_get_num_default_rss_queues();
6244 	int msix_vecs;
6245 
6246 	if (!bp->num_req_rx_rings)
6247 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6248 	else if (!bp->num_req_tx_rings)
6249 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6250 	else
6251 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6252 
6253 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6254 
6255 	bp->irq_tbl[0].handler = bnx2_interrupt;
6256 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6257 	bp->irq_nvecs = 1;
6258 	bp->irq_tbl[0].vector = bp->pdev->irq;
6259 
6260 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6261 		bnx2_enable_msix(bp, msix_vecs);
6262 
6263 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6264 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6265 		if (pci_enable_msi(bp->pdev) == 0) {
6266 			bp->flags |= BNX2_FLAG_USING_MSI;
6267 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6268 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6269 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6270 			} else
6271 				bp->irq_tbl[0].handler = bnx2_msi;
6272 
6273 			bp->irq_tbl[0].vector = bp->pdev->irq;
6274 		}
6275 	}
6276 
6277 	if (!bp->num_req_tx_rings)
6278 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6279 	else
6280 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6281 
6282 	if (!bp->num_req_rx_rings)
6283 		bp->num_rx_rings = bp->irq_nvecs;
6284 	else
6285 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6286 
6287 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6288 
6289 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6290 }
6291 
6292 /* Called with rtnl_lock */
6293 static int
6294 bnx2_open(struct net_device *dev)
6295 {
6296 	struct bnx2 *bp = netdev_priv(dev);
6297 	int rc;
6298 
6299 	rc = bnx2_request_firmware(bp);
6300 	if (rc < 0)
6301 		goto out;
6302 
6303 	netif_carrier_off(dev);
6304 
6305 	bnx2_disable_int(bp);
6306 
6307 	rc = bnx2_setup_int_mode(bp, disable_msi);
6308 	if (rc)
6309 		goto open_err;
6310 	bnx2_init_napi(bp);
6311 	bnx2_napi_enable(bp);
6312 	rc = bnx2_alloc_mem(bp);
6313 	if (rc)
6314 		goto open_err;
6315 
6316 	rc = bnx2_request_irq(bp);
6317 	if (rc)
6318 		goto open_err;
6319 
6320 	rc = bnx2_init_nic(bp, 1);
6321 	if (rc)
6322 		goto open_err;
6323 
6324 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6325 
6326 	atomic_set(&bp->intr_sem, 0);
6327 
6328 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6329 
6330 	bnx2_enable_int(bp);
6331 
6332 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6333 		/* Test MSI to make sure it is working.
6334 		 * If the MSI test fails, go back to INTx mode.
6335 		 */
6336 		if (bnx2_test_intr(bp) != 0) {
6337 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6338 
6339 			bnx2_disable_int(bp);
6340 			bnx2_free_irq(bp);
6341 
6342 			bnx2_setup_int_mode(bp, 1);
6343 
6344 			rc = bnx2_init_nic(bp, 0);
6345 
6346 			if (!rc)
6347 				rc = bnx2_request_irq(bp);
6348 
6349 			if (rc) {
6350 				del_timer_sync(&bp->timer);
6351 				goto open_err;
6352 			}
6353 			bnx2_enable_int(bp);
6354 		}
6355 	}
6356 	if (bp->flags & BNX2_FLAG_USING_MSI)
6357 		netdev_info(dev, "using MSI\n");
6358 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6359 		netdev_info(dev, "using MSIX\n");
6360 
6361 	netif_tx_start_all_queues(dev);
6362 out:
6363 	return rc;
6364 
6365 open_err:
6366 	bnx2_napi_disable(bp);
6367 	bnx2_free_skbs(bp);
6368 	bnx2_free_irq(bp);
6369 	bnx2_free_mem(bp);
6370 	bnx2_del_napi(bp);
6371 	bnx2_release_firmware(bp);
6372 	goto out;
6373 }
6374 
6375 static void
6376 bnx2_reset_task(struct work_struct *work)
6377 {
6378 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6379 	int rc;
6380 	u16 pcicmd;
6381 
6382 	rtnl_lock();
6383 	if (!netif_running(bp->dev)) {
6384 		rtnl_unlock();
6385 		return;
6386 	}
6387 
6388 	bnx2_netif_stop(bp, true);
6389 
6390 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6391 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6392 		/* in case the PCI block has been reset */
6393 		pci_restore_state(bp->pdev);
6394 		pci_save_state(bp->pdev);
6395 	}
6396 	rc = bnx2_init_nic(bp, 1);
6397 	if (rc) {
6398 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6399 		bnx2_napi_enable(bp);
6400 		dev_close(bp->dev);
6401 		rtnl_unlock();
6402 		return;
6403 	}
6404 
6405 	atomic_set(&bp->intr_sem, 1);
6406 	bnx2_netif_start(bp, true);
6407 	rtnl_unlock();
6408 }
6409 
6410 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6411 
6412 static void
6413 bnx2_dump_ftq(struct bnx2 *bp)
6414 {
6415 	int i;
6416 	u32 reg, bdidx, cid, valid;
6417 	struct net_device *dev = bp->dev;
6418 	static const struct ftq_reg {
6419 		char *name;
6420 		u32 off;
6421 	} ftq_arr[] = {
6422 		BNX2_FTQ_ENTRY(RV2P_P),
6423 		BNX2_FTQ_ENTRY(RV2P_T),
6424 		BNX2_FTQ_ENTRY(RV2P_M),
6425 		BNX2_FTQ_ENTRY(TBDR_),
6426 		BNX2_FTQ_ENTRY(TDMA_),
6427 		BNX2_FTQ_ENTRY(TXP_),
6428 		BNX2_FTQ_ENTRY(TXP_),
6429 		BNX2_FTQ_ENTRY(TPAT_),
6430 		BNX2_FTQ_ENTRY(RXP_C),
6431 		BNX2_FTQ_ENTRY(RXP_),
6432 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6433 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6434 		BNX2_FTQ_ENTRY(COM_COMQ_),
6435 		BNX2_FTQ_ENTRY(CP_CPQ_),
6436 	};
6437 
6438 	netdev_err(dev, "<--- start FTQ dump --->\n");
6439 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6440 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6441 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6442 
6443 	netdev_err(dev, "CPU states:\n");
6444 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6445 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6446 			   reg, bnx2_reg_rd_ind(bp, reg),
6447 			   bnx2_reg_rd_ind(bp, reg + 4),
6448 			   bnx2_reg_rd_ind(bp, reg + 8),
6449 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6450 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6451 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6452 
6453 	netdev_err(dev, "<--- end FTQ dump --->\n");
6454 	netdev_err(dev, "<--- start TBDC dump --->\n");
6455 	netdev_err(dev, "TBDC free cnt: %ld\n",
6456 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6457 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6458 	for (i = 0; i < 0x20; i++) {
6459 		int j = 0;
6460 
6461 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6462 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6463 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6464 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6465 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6466 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6467 			j++;
6468 
6469 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6470 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6471 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6472 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6473 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6474 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6475 	}
6476 	netdev_err(dev, "<--- end TBDC dump --->\n");
6477 }
6478 
6479 static void
6480 bnx2_dump_state(struct bnx2 *bp)
6481 {
6482 	struct net_device *dev = bp->dev;
6483 	u32 val1, val2;
6484 
6485 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6486 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6487 		   atomic_read(&bp->intr_sem), val1);
6488 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6489 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6490 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6491 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6492 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6493 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6494 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6495 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6496 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6497 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6498 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6499 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6500 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6501 }
6502 
6503 static void
6504 bnx2_tx_timeout(struct net_device *dev)
6505 {
6506 	struct bnx2 *bp = netdev_priv(dev);
6507 
6508 	bnx2_dump_ftq(bp);
6509 	bnx2_dump_state(bp);
6510 	bnx2_dump_mcp_state(bp);
6511 
6512 	/* This allows the netif to be shut down gracefully before resetting */
6513 	schedule_work(&bp->reset_task);
6514 }
6515 
6516 /* Called with netif_tx_lock.
6517  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6518  * netif_wake_queue().
6519  */
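/* One tx BD is used for the linear part of the skb and one per page
 * fragment.  The first BD is marked TX_BD_FLAGS_START and the last
 * TX_BD_FLAGS_END; the producer index is published to the chip through
 * the tx_bidx/tx_bseq mailboxes only after all BDs have been written
 * and a write barrier has been issued.
 */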
6520 static netdev_tx_t
6521 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6522 {
6523 	struct bnx2 *bp = netdev_priv(dev);
6524 	dma_addr_t mapping;
6525 	struct bnx2_tx_bd *txbd;
6526 	struct bnx2_sw_tx_bd *tx_buf;
6527 	u32 len, vlan_tag_flags, last_frag, mss;
6528 	u16 prod, ring_prod;
6529 	int i;
6530 	struct bnx2_napi *bnapi;
6531 	struct bnx2_tx_ring_info *txr;
6532 	struct netdev_queue *txq;
6533 
6534 	/* Determine which tx ring this skb will be placed on */
6535 	i = skb_get_queue_mapping(skb);
6536 	bnapi = &bp->bnx2_napi[i];
6537 	txr = &bnapi->tx_ring;
6538 	txq = netdev_get_tx_queue(dev, i);
6539 
6540 	if (unlikely(bnx2_tx_avail(bp, txr) <
6541 	    (skb_shinfo(skb)->nr_frags + 1))) {
6542 		netif_tx_stop_queue(txq);
6543 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6544 
6545 		return NETDEV_TX_BUSY;
6546 	}
6547 	len = skb_headlen(skb);
6548 	prod = txr->tx_prod;
6549 	ring_prod = BNX2_TX_RING_IDX(prod);
6550 
6551 	vlan_tag_flags = 0;
6552 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6553 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6554 	}
6555 
6556 	if (vlan_tx_tag_present(skb)) {
6557 		vlan_tag_flags |=
6558 			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6559 	}
6560 
6561 	if ((mss = skb_shinfo(skb)->gso_size)) {
6562 		u32 tcp_opt_len;
6563 		struct iphdr *iph;
6564 
6565 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6566 
6567 		tcp_opt_len = tcp_optlen(skb);
6568 
6569 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6570 			u32 tcp_off = skb_transport_offset(skb) -
6571 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6572 
6573 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6574 					  TX_BD_FLAGS_SW_FLAGS;
6575 			if (likely(tcp_off == 0))
6576 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6577 			else {
6578 				tcp_off >>= 3;
6579 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6580 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6581 						  ((tcp_off & 0x10) <<
6582 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6583 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6584 			}
6585 		} else {
6586 			iph = ip_hdr(skb);
6587 			if (tcp_opt_len || (iph->ihl > 5)) {
6588 				vlan_tag_flags |= ((iph->ihl - 5) +
6589 						   (tcp_opt_len >> 2)) << 8;
6590 			}
6591 		}
6592 	} else
6593 		mss = 0;
6594 
6595 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
6596 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6597 		dev_kfree_skb(skb);
6598 		return NETDEV_TX_OK;
6599 	}
6600 
6601 	tx_buf = &txr->tx_buf_ring[ring_prod];
6602 	tx_buf->skb = skb;
6603 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6604 
6605 	txbd = &txr->tx_desc_ring[ring_prod];
6606 
6607 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6608 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6609 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6610 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6611 
6612 	last_frag = skb_shinfo(skb)->nr_frags;
6613 	tx_buf->nr_frags = last_frag;
6614 	tx_buf->is_gso = skb_is_gso(skb);
6615 
6616 	for (i = 0; i < last_frag; i++) {
6617 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6618 
6619 		prod = BNX2_NEXT_TX_BD(prod);
6620 		ring_prod = BNX2_TX_RING_IDX(prod);
6621 		txbd = &txr->tx_desc_ring[ring_prod];
6622 
6623 		len = skb_frag_size(frag);
6624 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6625 					   DMA_TO_DEVICE);
6626 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6627 			goto dma_error;
6628 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6629 				   mapping);
6630 
6631 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6632 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6633 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6634 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6635 
6636 	}
6637 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6638 
6639 	/* Sync BD data before updating TX mailbox */
6640 	wmb();
6641 
6642 	netdev_tx_sent_queue(txq, skb->len);
6643 
6644 	prod = BNX2_NEXT_TX_BD(prod);
6645 	txr->tx_prod_bseq += skb->len;
6646 
6647 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6648 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6649 
6650 	mmiowb();
6651 
6652 	txr->tx_prod = prod;
6653 
6654 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6655 		netif_tx_stop_queue(txq);
6656 
6657 		/* netif_tx_stop_queue() must be done before checking
6658 		 * tx index in bnx2_tx_avail() below, because in
6659 		 * bnx2_tx_int(), we update tx index before checking for
6660 		 * netif_tx_queue_stopped().
6661 		 */
6662 		smp_mb();
6663 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6664 			netif_tx_wake_queue(txq);
6665 	}
6666 
6667 	return NETDEV_TX_OK;
6668 dma_error:
6669 	/* save value of frag that failed */
6670 	last_frag = i;
6671 
6672 	/* start back at beginning and unmap skb */
6673 	prod = txr->tx_prod;
6674 	ring_prod = BNX2_TX_RING_IDX(prod);
6675 	tx_buf = &txr->tx_buf_ring[ring_prod];
6676 	tx_buf->skb = NULL;
6677 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6678 			 skb_headlen(skb), DMA_TO_DEVICE);
6679 
6680 	/* unmap remaining mapped pages */
6681 	for (i = 0; i < last_frag; i++) {
6682 		prod = BNX2_NEXT_TX_BD(prod);
6683 		ring_prod = BNX2_TX_RING_IDX(prod);
6684 		tx_buf = &txr->tx_buf_ring[ring_prod];
6685 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6686 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6687 			       DMA_TO_DEVICE);
6688 	}
6689 
6690 	dev_kfree_skb(skb);
6691 	return NETDEV_TX_OK;
6692 }
6693 
6694 /* Called with rtnl_lock */
6695 static int
6696 bnx2_close(struct net_device *dev)
6697 {
6698 	struct bnx2 *bp = netdev_priv(dev);
6699 
6700 	bnx2_disable_int_sync(bp);
6701 	bnx2_napi_disable(bp);
6702 	netif_tx_disable(dev);
6703 	del_timer_sync(&bp->timer);
6704 	bnx2_shutdown_chip(bp);
6705 	bnx2_free_irq(bp);
6706 	bnx2_free_skbs(bp);
6707 	bnx2_free_mem(bp);
6708 	bnx2_del_napi(bp);
6709 	bp->link_up = 0;
6710 	netif_carrier_off(bp->dev);
6711 	return 0;
6712 }
6713 
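/* Fold the chip's counters into temp_stats_blk before a reset clears
 * them.  The 64-bit counters are stored as {hi, lo} u32 pairs, so the
 * low halves are summed in 64 bits and the carry is propagated by
 * hand: e.g. lo = 0xffffffff + 2 = 0x100000001 increments hi and
 * leaves lo = 1.
 */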
6714 static void
6715 bnx2_save_stats(struct bnx2 *bp)
6716 {
6717 	u32 *hw_stats = (u32 *) bp->stats_blk;
6718 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6719 	int i;
6720 
6721 	/* The first 10 counters are 64-bit counters */
6722 	for (i = 0; i < 20; i += 2) {
6723 		u32 hi;
6724 		u64 lo;
6725 
6726 		hi = temp_stats[i] + hw_stats[i];
6727 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6728 		if (lo > 0xffffffff)
6729 			hi++;
6730 		temp_stats[i] = hi;
6731 		temp_stats[i + 1] = lo & 0xffffffff;
6732 	}
6733 
6734 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6735 		temp_stats[i] += hw_stats[i];
6736 }
6737 
6738 #define GET_64BIT_NET_STATS64(ctr)		\
6739 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6740 
6741 #define GET_64BIT_NET_STATS(ctr)				\
6742 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6743 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6744 
6745 #define GET_32BIT_NET_STATS(ctr)				\
6746 	(unsigned long) (bp->stats_blk->ctr +			\
6747 			 bp->temp_stats_blk->ctr)
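/* Example expansion: GET_64BIT_NET_STATS(stat_IfHCInOctets) adds the
 * {hi, lo} pair from the live stats block to the pair saved in
 * temp_stats_blk, i.e.
 *   ((u64) bp->stats_blk->stat_IfHCInOctets_hi << 32) +
 *   (u64) bp->stats_blk->stat_IfHCInOctets_lo + <same for temp_stats_blk>
 */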
6748 
6749 static struct rtnl_link_stats64 *
6750 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6751 {
6752 	struct bnx2 *bp = netdev_priv(dev);
6753 
6754 	if (bp->stats_blk == NULL)
6755 		return net_stats;
6756 
6757 	net_stats->rx_packets =
6758 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6759 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6760 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6761 
6762 	net_stats->tx_packets =
6763 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6764 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6765 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6766 
6767 	net_stats->rx_bytes =
6768 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6769 
6770 	net_stats->tx_bytes =
6771 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6772 
6773 	net_stats->multicast =
6774 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6775 
6776 	net_stats->collisions =
6777 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6778 
6779 	net_stats->rx_length_errors =
6780 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6781 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6782 
6783 	net_stats->rx_over_errors =
6784 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6785 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6786 
6787 	net_stats->rx_frame_errors =
6788 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6789 
6790 	net_stats->rx_crc_errors =
6791 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6792 
6793 	net_stats->rx_errors = net_stats->rx_length_errors +
6794 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6795 		net_stats->rx_crc_errors;
6796 
6797 	net_stats->tx_aborted_errors =
6798 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6799 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6800 
6801 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6802 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6803 		net_stats->tx_carrier_errors = 0;
6804 	else {
6805 		net_stats->tx_carrier_errors =
6806 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6807 	}
6808 
6809 	net_stats->tx_errors =
6810 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6811 		net_stats->tx_aborted_errors +
6812 		net_stats->tx_carrier_errors;
6813 
6814 	net_stats->rx_missed_errors =
6815 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6816 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6817 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6818 
6819 	return net_stats;
6820 }
6821 
6822 /* All ethtool functions called with rtnl_lock */
6823 
6824 static int
6825 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6826 {
6827 	struct bnx2 *bp = netdev_priv(dev);
6828 	int support_serdes = 0, support_copper = 0;
6829 
6830 	cmd->supported = SUPPORTED_Autoneg;
6831 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6832 		support_serdes = 1;
6833 		support_copper = 1;
6834 	} else if (bp->phy_port == PORT_FIBRE)
6835 		support_serdes = 1;
6836 	else
6837 		support_copper = 1;
6838 
6839 	if (support_serdes) {
6840 		cmd->supported |= SUPPORTED_1000baseT_Full |
6841 			SUPPORTED_FIBRE;
6842 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6843 			cmd->supported |= SUPPORTED_2500baseX_Full;
6844 
6845 	}
6846 	if (support_copper) {
6847 		cmd->supported |= SUPPORTED_10baseT_Half |
6848 			SUPPORTED_10baseT_Full |
6849 			SUPPORTED_100baseT_Half |
6850 			SUPPORTED_100baseT_Full |
6851 			SUPPORTED_1000baseT_Full |
6852 			SUPPORTED_TP;
6853 
6854 	}
6855 
6856 	spin_lock_bh(&bp->phy_lock);
6857 	cmd->port = bp->phy_port;
6858 	cmd->advertising = bp->advertising;
6859 
6860 	if (bp->autoneg & AUTONEG_SPEED) {
6861 		cmd->autoneg = AUTONEG_ENABLE;
6862 	} else {
6863 		cmd->autoneg = AUTONEG_DISABLE;
6864 	}
6865 
6866 	if (netif_carrier_ok(dev)) {
6867 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6868 		cmd->duplex = bp->duplex;
6869 	}
6870 	else {
6871 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6872 		cmd->duplex = DUPLEX_UNKNOWN;
6873 	}
6874 	spin_unlock_bh(&bp->phy_lock);
6875 
6876 	cmd->transceiver = XCVR_INTERNAL;
6877 	cmd->phy_address = bp->phy_addr;
6878 
6879 	return 0;
6880 }
6881 
6882 static int
6883 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6884 {
6885 	struct bnx2 *bp = netdev_priv(dev);
6886 	u8 autoneg = bp->autoneg;
6887 	u8 req_duplex = bp->req_duplex;
6888 	u16 req_line_speed = bp->req_line_speed;
6889 	u32 advertising = bp->advertising;
6890 	int err = -EINVAL;
6891 
6892 	spin_lock_bh(&bp->phy_lock);
6893 
6894 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6895 		goto err_out_unlock;
6896 
6897 	if (cmd->port != bp->phy_port &&
6898 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6899 		goto err_out_unlock;
6900 
6901 	/* If device is down, we can store the settings only if the user
6902 	 * is setting the currently active port.
6903 	 */
6904 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6905 		goto err_out_unlock;
6906 
6907 	if (cmd->autoneg == AUTONEG_ENABLE) {
6908 		autoneg |= AUTONEG_SPEED;
6909 
6910 		advertising = cmd->advertising;
6911 		if (cmd->port == PORT_TP) {
6912 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6913 			if (!advertising)
6914 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6915 		} else {
6916 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6917 			if (!advertising)
6918 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6919 		}
6920 		advertising |= ADVERTISED_Autoneg;
6921 	}
6922 	else {
6923 		u32 speed = ethtool_cmd_speed(cmd);
6924 		if (cmd->port == PORT_FIBRE) {
6925 			if ((speed != SPEED_1000 &&
6926 			     speed != SPEED_2500) ||
6927 			    (cmd->duplex != DUPLEX_FULL))
6928 				goto err_out_unlock;
6929 
6930 			if (speed == SPEED_2500 &&
6931 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6932 				goto err_out_unlock;
6933 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6934 			goto err_out_unlock;
6935 
6936 		autoneg &= ~AUTONEG_SPEED;
6937 		req_line_speed = speed;
6938 		req_duplex = cmd->duplex;
6939 		advertising = 0;
6940 	}
6941 
6942 	bp->autoneg = autoneg;
6943 	bp->advertising = advertising;
6944 	bp->req_line_speed = req_line_speed;
6945 	bp->req_duplex = req_duplex;
6946 
6947 	err = 0;
6948 	/* If device is down, the new settings will be picked up when it is
6949 	 * brought up.
6950 	 */
6951 	if (netif_running(dev))
6952 		err = bnx2_setup_phy(bp, cmd->port);
6953 
6954 err_out_unlock:
6955 	spin_unlock_bh(&bp->phy_lock);
6956 
6957 	return err;
6958 }
6959 
6960 static void
6961 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6962 {
6963 	struct bnx2 *bp = netdev_priv(dev);
6964 
6965 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6966 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6967 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6968 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6969 }
6970 
6971 #define BNX2_REGDUMP_LEN		(32 * 1024)
6972 
6973 static int
6974 bnx2_get_regs_len(struct net_device *dev)
6975 {
6976 	return BNX2_REGDUMP_LEN;
6977 }
6978 
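/* reg_boundaries[] holds {start, end} pairs of readable register
 * windows; the dump walks each window with 32-bit reads and leaves the
 * unreadable gaps in the 32KB buffer zero-filled.
 */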
6979 static void
6980 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6981 {
6982 	u32 *p = _p, i, offset;
6983 	u8 *orig_p = _p;
6984 	struct bnx2 *bp = netdev_priv(dev);
6985 	static const u32 reg_boundaries[] = {
6986 		0x0000, 0x0098, 0x0400, 0x045c,
6987 		0x0800, 0x0880, 0x0c00, 0x0c10,
6988 		0x0c30, 0x0d08, 0x1000, 0x101c,
6989 		0x1040, 0x1048, 0x1080, 0x10a4,
6990 		0x1400, 0x1490, 0x1498, 0x14f0,
6991 		0x1500, 0x155c, 0x1580, 0x15dc,
6992 		0x1600, 0x1658, 0x1680, 0x16d8,
6993 		0x1800, 0x1820, 0x1840, 0x1854,
6994 		0x1880, 0x1894, 0x1900, 0x1984,
6995 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6996 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
6997 		0x2000, 0x2030, 0x23c0, 0x2400,
6998 		0x2800, 0x2820, 0x2830, 0x2850,
6999 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7000 		0x3c00, 0x3c94, 0x4000, 0x4010,
7001 		0x4080, 0x4090, 0x43c0, 0x4458,
7002 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7003 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7004 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7005 		0x5fc0, 0x6000, 0x6400, 0x6428,
7006 		0x6800, 0x6848, 0x684c, 0x6860,
7007 		0x6888, 0x6910, 0x8000
7008 	};
7009 
7010 	regs->version = 0;
7011 
7012 	memset(p, 0, BNX2_REGDUMP_LEN);
7013 
7014 	if (!netif_running(bp->dev))
7015 		return;
7016 
7017 	i = 0;
7018 	offset = reg_boundaries[0];
7019 	p = (u32 *) (orig_p + offset);
7020 	while (offset < BNX2_REGDUMP_LEN) {
7021 		*p++ = BNX2_RD(bp, offset);
7022 		offset += 4;
7023 		if (offset == reg_boundaries[i + 1]) {
7024 			offset = reg_boundaries[i + 2];
7025 			p = (u32 *) (orig_p + offset);
7026 			i += 2;
7027 		}
7028 	}
7029 }
7030 
7031 static void
7032 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7033 {
7034 	struct bnx2 *bp = netdev_priv(dev);
7035 
7036 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7037 		wol->supported = 0;
7038 		wol->wolopts = 0;
7039 	}
7040 	else {
7041 		wol->supported = WAKE_MAGIC;
7042 		if (bp->wol)
7043 			wol->wolopts = WAKE_MAGIC;
7044 		else
7045 			wol->wolopts = 0;
7046 	}
7047 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7048 }
7049 
7050 static int
7051 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7052 {
7053 	struct bnx2 *bp = netdev_priv(dev);
7054 
7055 	if (wol->wolopts & ~WAKE_MAGIC)
7056 		return -EINVAL;
7057 
7058 	if (wol->wolopts & WAKE_MAGIC) {
7059 		if (bp->flags & BNX2_FLAG_NO_WOL)
7060 			return -EINVAL;
7061 
7062 		bp->wol = 1;
7063 	}
7064 	else {
7065 		bp->wol = 0;
7066 	}
7067 
7068 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7069 
7070 	return 0;
7071 }
7072 
7073 static int
7074 bnx2_nway_reset(struct net_device *dev)
7075 {
7076 	struct bnx2 *bp = netdev_priv(dev);
7077 	u32 bmcr;
7078 
7079 	if (!netif_running(dev))
7080 		return -EAGAIN;
7081 
7082 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7083 		return -EINVAL;
7084 	}
7085 
7086 	spin_lock_bh(&bp->phy_lock);
7087 
7088 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7089 		int rc;
7090 
7091 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7092 		spin_unlock_bh(&bp->phy_lock);
7093 		return rc;
7094 	}
7095 
7096 	/* Force a link down visible on the other side */
7097 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7098 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7099 		spin_unlock_bh(&bp->phy_lock);
7100 
7101 		msleep(20);
7102 
7103 		spin_lock_bh(&bp->phy_lock);
7104 
7105 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7106 		bp->serdes_an_pending = 1;
7107 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7108 	}
7109 
7110 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7111 	bmcr &= ~BMCR_LOOPBACK;
7112 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7113 
7114 	spin_unlock_bh(&bp->phy_lock);
7115 
7116 	return 0;
7117 }
7118 
7119 static u32
7120 bnx2_get_link(struct net_device *dev)
7121 {
7122 	struct bnx2 *bp = netdev_priv(dev);
7123 
7124 	return bp->link_up;
7125 }
7126 
7127 static int
7128 bnx2_get_eeprom_len(struct net_device *dev)
7129 {
7130 	struct bnx2 *bp = netdev_priv(dev);
7131 
7132 	if (bp->flash_info == NULL)
7133 		return 0;
7134 
7135 	return (int) bp->flash_size;
7136 }
7137 
7138 static int
7139 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7140 		u8 *eebuf)
7141 {
7142 	struct bnx2 *bp = netdev_priv(dev);
7143 	int rc;
7144 
7145 	/* parameters already validated in ethtool_get_eeprom */
7146 
7147 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7148 
7149 	return rc;
7150 }
7151 
7152 static int
7153 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7154 		u8 *eebuf)
7155 {
7156 	struct bnx2 *bp = netdev_priv(dev);
7157 	int rc;
7158 
7159 	/* parameters already validated in ethtool_set_eeprom */
7160 
7161 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7162 
7163 	return rc;
7164 }
7165 
7166 static int
7167 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7168 {
7169 	struct bnx2 *bp = netdev_priv(dev);
7170 
7171 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7172 
7173 	coal->rx_coalesce_usecs = bp->rx_ticks;
7174 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7175 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7176 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7177 
7178 	coal->tx_coalesce_usecs = bp->tx_ticks;
7179 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7180 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7181 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7182 
7183 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7184 
7185 	return 0;
7186 }
7187 
7188 static int
7189 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7190 {
7191 	struct bnx2 *bp = netdev_priv(dev);
7192 
7193 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7194 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7195 
7196 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7197 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7198 
7199 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7200 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7201 
7202 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7203 	if (bp->rx_quick_cons_trip_int > 0xff)
7204 		bp->rx_quick_cons_trip_int = 0xff;
7205 
7206 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7207 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7208 
7209 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7210 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7211 
7212 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7213 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7214 
7215 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7216 	if (bp->tx_quick_cons_trip_int > 0xff)
7217 		bp->tx_quick_cons_trip_int = 0xff;
7218 
7219 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7220 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7221 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7222 			bp->stats_ticks = USEC_PER_SEC;
7223 	}
7224 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7225 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7226 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7227 
7228 	if (netif_running(bp->dev)) {
7229 		bnx2_netif_stop(bp, true);
7230 		bnx2_init_nic(bp, 0);
7231 		bnx2_netif_start(bp, true);
7232 	}
7233 
7234 	return 0;
7235 }
7236 
7237 static void
7238 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7239 {
7240 	struct bnx2 *bp = netdev_priv(dev);
7241 
7242 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7243 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7244 
7245 	ering->rx_pending = bp->rx_ring_size;
7246 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7247 
7248 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7249 	ering->tx_pending = bp->tx_ring_size;
7250 }
7251 
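/* Resizing the rings requires a full chip reset, which also clears the
 * hardware statistics, so the counters are folded into temp_stats_blk
 * first.  With reset_irq set, the IRQ vectors and NAPI contexts are
 * torn down and re-created as well, for callers that also change the
 * number of rings.
 */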
7252 static int
7253 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7254 {
7255 	if (netif_running(bp->dev)) {
7256 		/* Reset will erase chipset stats; save them */
7257 		bnx2_save_stats(bp);
7258 
7259 		bnx2_netif_stop(bp, true);
7260 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7261 		if (reset_irq) {
7262 			bnx2_free_irq(bp);
7263 			bnx2_del_napi(bp);
7264 		} else {
7265 			__bnx2_free_irq(bp);
7266 		}
7267 		bnx2_free_skbs(bp);
7268 		bnx2_free_mem(bp);
7269 	}
7270 
7271 	bnx2_set_rx_ring_size(bp, rx);
7272 	bp->tx_ring_size = tx;
7273 
7274 	if (netif_running(bp->dev)) {
7275 		int rc = 0;
7276 
7277 		if (reset_irq) {
7278 			rc = bnx2_setup_int_mode(bp, disable_msi);
7279 			bnx2_init_napi(bp);
7280 		}
7281 
7282 		if (!rc)
7283 			rc = bnx2_alloc_mem(bp);
7284 
7285 		if (!rc)
7286 			rc = bnx2_request_irq(bp);
7287 
7288 		if (!rc)
7289 			rc = bnx2_init_nic(bp, 0);
7290 
7291 		if (rc) {
7292 			bnx2_napi_enable(bp);
7293 			dev_close(bp->dev);
7294 			return rc;
7295 		}
7296 #ifdef BCM_CNIC
7297 		mutex_lock(&bp->cnic_lock);
7298 		/* Let cnic know about the new status block. */
7299 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7300 			bnx2_setup_cnic_irq_info(bp);
7301 		mutex_unlock(&bp->cnic_lock);
7302 #endif
7303 		bnx2_netif_start(bp, true);
7304 	}
7305 	return 0;
7306 }
7307 
7308 static int
7309 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7310 {
7311 	struct bnx2 *bp = netdev_priv(dev);
7312 	int rc;
7313 
7314 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7315 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7316 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7317 
7318 		return -EINVAL;
7319 	}
7320 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7321 				   false);
7322 	return rc;
7323 }
7324 
7325 static void
7326 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7327 {
7328 	struct bnx2 *bp = netdev_priv(dev);
7329 
7330 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7331 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7332 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7333 }
7334 
7335 static int
7336 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7337 {
7338 	struct bnx2 *bp = netdev_priv(dev);
7339 
7340 	bp->req_flow_ctrl = 0;
7341 	if (epause->rx_pause)
7342 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7343 	if (epause->tx_pause)
7344 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7345 
7346 	if (epause->autoneg) {
7347 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7348 	}
7349 	else {
7350 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7351 	}
7352 
7353 	if (netif_running(dev)) {
7354 		spin_lock_bh(&bp->phy_lock);
7355 		bnx2_setup_phy(bp, bp->phy_port);
7356 		spin_unlock_bh(&bp->phy_lock);
7357 	}
7358 
7359 	return 0;
7360 }
7361 
7362 static struct {
7363 	char string[ETH_GSTRING_LEN];
7364 } bnx2_stats_str_arr[] = {
7365 	{ "rx_bytes" },
7366 	{ "rx_error_bytes" },
7367 	{ "tx_bytes" },
7368 	{ "tx_error_bytes" },
7369 	{ "rx_ucast_packets" },
7370 	{ "rx_mcast_packets" },
7371 	{ "rx_bcast_packets" },
7372 	{ "tx_ucast_packets" },
7373 	{ "tx_mcast_packets" },
7374 	{ "tx_bcast_packets" },
7375 	{ "tx_mac_errors" },
7376 	{ "tx_carrier_errors" },
7377 	{ "rx_crc_errors" },
7378 	{ "rx_align_errors" },
7379 	{ "tx_single_collisions" },
7380 	{ "tx_multi_collisions" },
7381 	{ "tx_deferred" },
7382 	{ "tx_excess_collisions" },
7383 	{ "tx_late_collisions" },
7384 	{ "tx_total_collisions" },
7385 	{ "rx_fragments" },
7386 	{ "rx_jabbers" },
7387 	{ "rx_undersize_packets" },
7388 	{ "rx_oversize_packets" },
7389 	{ "rx_64_byte_packets" },
7390 	{ "rx_65_to_127_byte_packets" },
7391 	{ "rx_128_to_255_byte_packets" },
7392 	{ "rx_256_to_511_byte_packets" },
7393 	{ "rx_512_to_1023_byte_packets" },
7394 	{ "rx_1024_to_1522_byte_packets" },
7395 	{ "rx_1523_to_9022_byte_packets" },
7396 	{ "tx_64_byte_packets" },
7397 	{ "tx_65_to_127_byte_packets" },
7398 	{ "tx_128_to_255_byte_packets" },
7399 	{ "tx_256_to_511_byte_packets" },
7400 	{ "tx_512_to_1023_byte_packets" },
7401 	{ "tx_1024_to_1522_byte_packets" },
7402 	{ "tx_1523_to_9022_byte_packets" },
7403 	{ "rx_xon_frames" },
7404 	{ "rx_xoff_frames" },
7405 	{ "tx_xon_frames" },
7406 	{ "tx_xoff_frames" },
7407 	{ "rx_mac_ctrl_frames" },
7408 	{ "rx_filtered_packets" },
7409 	{ "rx_ftq_discards" },
7410 	{ "rx_discards" },
7411 	{ "rx_fw_discards" },
7412 };
7413 
7414 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7415 
7416 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7417 
7418 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7419     STATS_OFFSET32(stat_IfHCInOctets_hi),
7420     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7421     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7422     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7423     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7424     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7425     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7426     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7427     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7428     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7429     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7430     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7431     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7432     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7433     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7434     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7435     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7436     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7437     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7438     STATS_OFFSET32(stat_EtherStatsCollisions),
7439     STATS_OFFSET32(stat_EtherStatsFragments),
7440     STATS_OFFSET32(stat_EtherStatsJabbers),
7441     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7442     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7443     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7444     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7445     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7446     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7447     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7448     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7449     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7450     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7451     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7452     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7453     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7454     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7455     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7456     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7457     STATS_OFFSET32(stat_XonPauseFramesReceived),
7458     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7459     STATS_OFFSET32(stat_OutXonSent),
7460     STATS_OFFSET32(stat_OutXoffSent),
7461     STATS_OFFSET32(stat_MacControlFramesReceived),
7462     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7463     STATS_OFFSET32(stat_IfInFTQDiscards),
7464     STATS_OFFSET32(stat_IfInMBUFDiscards),
7465     STATS_OFFSET32(stat_FwRxDrop),
7466 };
7467 
7468 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7469  * skipped because of errata.
7470  */
7471 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7472 	8,0,8,8,8,8,8,8,8,8,
7473 	4,0,4,4,4,4,4,4,4,4,
7474 	4,4,4,4,4,4,4,4,4,4,
7475 	4,4,4,4,4,4,4,4,4,4,
7476 	4,4,4,4,4,4,4,
7477 };
7478 
7479 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7480 	8,0,8,8,8,8,8,8,8,8,
7481 	4,4,4,4,4,4,4,4,4,4,
7482 	4,4,4,4,4,4,4,4,4,4,
7483 	4,4,4,4,4,4,4,4,4,4,
7484 	4,4,4,4,4,4,4,
7485 };
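/* Per-counter widths for the two chip families: 8 = 64-bit counter
 * ({hi, lo} pair), 4 = 32-bit counter, 0 = counter unusable on that
 * chip and reported as zero (see bnx2_get_ethtool_stats()).
 */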
7486 
7487 #define BNX2_NUM_TESTS 6
7488 
7489 static struct {
7490 	char string[ETH_GSTRING_LEN];
7491 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7492 	{ "register_test (offline)" },
7493 	{ "memory_test (offline)" },
7494 	{ "loopback_test (offline)" },
7495 	{ "nvram_test (online)" },
7496 	{ "interrupt_test (online)" },
7497 	{ "link_test (online)" },
7498 };
7499 
7500 static int
7501 bnx2_get_sset_count(struct net_device *dev, int sset)
7502 {
7503 	switch (sset) {
7504 	case ETH_SS_TEST:
7505 		return BNX2_NUM_TESTS;
7506 	case ETH_SS_STATS:
7507 		return BNX2_NUM_STATS;
7508 	default:
7509 		return -EOPNOTSUPP;
7510 	}
7511 }
7512 
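/* buf[] indices follow bnx2_tests_str_arr: 0 = registers, 1 = memory,
 * 2 = loopback (offline only), 3 = nvram, 4 = interrupt, 5 = link.
 * A nonzero entry marks that test as failed.
 */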
7513 static void
7514 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7515 {
7516 	struct bnx2 *bp = netdev_priv(dev);
7517 
7518 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7519 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7520 		int i;
7521 
7522 		bnx2_netif_stop(bp, true);
7523 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7524 		bnx2_free_skbs(bp);
7525 
7526 		if (bnx2_test_registers(bp) != 0) {
7527 			buf[0] = 1;
7528 			etest->flags |= ETH_TEST_FL_FAILED;
7529 		}
7530 		if (bnx2_test_memory(bp) != 0) {
7531 			buf[1] = 1;
7532 			etest->flags |= ETH_TEST_FL_FAILED;
7533 		}
7534 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7535 			etest->flags |= ETH_TEST_FL_FAILED;
7536 
7537 		if (!netif_running(bp->dev))
7538 			bnx2_shutdown_chip(bp);
7539 		else {
7540 			bnx2_init_nic(bp, 1);
7541 			bnx2_netif_start(bp, true);
7542 		}
7543 
7544 		/* wait for link up */
7545 		for (i = 0; i < 7; i++) {
7546 			if (bp->link_up)
7547 				break;
7548 			msleep_interruptible(1000);
7549 		}
7550 	}
7551 
7552 	if (bnx2_test_nvram(bp) != 0) {
7553 		buf[3] = 1;
7554 		etest->flags |= ETH_TEST_FL_FAILED;
7555 	}
7556 	if (bnx2_test_intr(bp) != 0) {
7557 		buf[4] = 1;
7558 		etest->flags |= ETH_TEST_FL_FAILED;
7559 	}
7560 
7561 	if (bnx2_test_link(bp) != 0) {
7562 		buf[5] = 1;
7563 		etest->flags |= ETH_TEST_FL_FAILED;
7564 
7565 	}
7566 }
7567 
7568 static void
7569 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7570 {
7571 	switch (stringset) {
7572 	case ETH_SS_STATS:
7573 		memcpy(buf, bnx2_stats_str_arr,
7574 			sizeof(bnx2_stats_str_arr));
7575 		break;
7576 	case ETH_SS_TEST:
7577 		memcpy(buf, bnx2_tests_str_arr,
7578 			sizeof(bnx2_tests_str_arr));
7579 		break;
7580 	}
7581 }
7582 
7583 static void
7584 bnx2_get_ethtool_stats(struct net_device *dev,
7585 		struct ethtool_stats *stats, u64 *buf)
7586 {
7587 	struct bnx2 *bp = netdev_priv(dev);
7588 	int i;
7589 	u32 *hw_stats = (u32 *) bp->stats_blk;
7590 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7591 	u8 *stats_len_arr = NULL;
7592 
7593 	if (hw_stats == NULL) {
7594 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7595 		return;
7596 	}
7597 
7598 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7599 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7600 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7601 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7602 		stats_len_arr = bnx2_5706_stats_len_arr;
7603 	else
7604 		stats_len_arr = bnx2_5708_stats_len_arr;
7605 
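	/* Report each counter as the sum of the live hardware statistics
	 * block and temp_stats_blk, which accumulates the counters saved
	 * across chip resets.
	 */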
7606 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7607 		unsigned long offset;
7608 
7609 		if (stats_len_arr[i] == 0) {
7610 			/* skip this counter */
7611 			buf[i] = 0;
7612 			continue;
7613 		}
7614 
7615 		offset = bnx2_stats_offset_arr[i];
7616 		if (stats_len_arr[i] == 4) {
7617 			/* 4-byte counter */
7618 			buf[i] = (u64) *(hw_stats + offset) +
7619 				 *(temp_stats + offset);
7620 			continue;
7621 		}
7622 		/* 8-byte counter */
7623 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7624 			 *(hw_stats + offset + 1) +
7625 			 (((u64) *(temp_stats + offset)) << 32) +
7626 			 *(temp_stats + offset + 1);
7627 	}
7628 }
7629 
7630 static int
7631 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7632 {
7633 	struct bnx2 *bp = netdev_priv(dev);
7634 
7635 	switch (state) {
7636 	case ETHTOOL_ID_ACTIVE:
7637 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7638 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7639 		return 1;	/* cycle on/off once per second */
7640 
7641 	case ETHTOOL_ID_ON:
7642 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7643 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7644 			BNX2_EMAC_LED_100MB_OVERRIDE |
7645 			BNX2_EMAC_LED_10MB_OVERRIDE |
7646 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7647 			BNX2_EMAC_LED_TRAFFIC);
7648 		break;
7649 
7650 	case ETHTOOL_ID_OFF:
7651 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7652 		break;
7653 
7654 	case ETHTOOL_ID_INACTIVE:
7655 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7656 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7657 		break;
7658 	}
7659 
7660 	return 0;
7661 }
7662 
7663 static netdev_features_t
7664 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7665 {
7666 	struct bnx2 *bp = netdev_priv(dev);
7667 
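	/* When the chip cannot leave VLAN tags intact on receive (e.g. when
	 * management (ASF) firmware is active), RX VLAN tag stripping must
	 * stay enabled.
	 */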
7668 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7669 		features |= NETIF_F_HW_VLAN_CTAG_RX;
7670 
7671 	return features;
7672 }
7673 
7674 static int
7675 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7676 {
7677 	struct bnx2 *bp = netdev_priv(dev);
7678 
7679 	/* TSO with VLAN tag won't work with current firmware */
7680 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7681 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7682 	else
7683 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7684 
7685 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7686 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7687 	    netif_running(dev)) {
7688 		bnx2_netif_stop(bp, false);
7689 		dev->features = features;
7690 		bnx2_set_rx_mode(dev);
7691 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7692 		bnx2_netif_start(bp, false);
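		/* A positive return tells the core that dev->features has
		 * already been updated here.
		 */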
7693 		return 1;
7694 	}
7695 
7696 	return 0;
7697 }
7698 
7699 static void bnx2_get_channels(struct net_device *dev,
7700 			      struct ethtool_channels *channels)
7701 {
7702 	struct bnx2 *bp = netdev_priv(dev);
7703 	u32 max_rx_rings = 1;
7704 	u32 max_tx_rings = 1;
7705 
7706 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7707 		max_rx_rings = RX_MAX_RINGS;
7708 		max_tx_rings = TX_MAX_RINGS;
7709 	}
7710 
7711 	channels->max_rx = max_rx_rings;
7712 	channels->max_tx = max_tx_rings;
7713 	channels->max_other = 0;
7714 	channels->max_combined = 0;
7715 	channels->rx_count = bp->num_rx_rings;
7716 	channels->tx_count = bp->num_tx_rings;
7717 	channels->other_count = 0;
7718 	channels->combined_count = 0;
7719 }
7720 
7721 static int bnx2_set_channels(struct net_device *dev,
7722 			      struct ethtool_channels *channels)
7723 {
7724 	struct bnx2 *bp = netdev_priv(dev);
7725 	u32 max_rx_rings = 1;
7726 	u32 max_tx_rings = 1;
7727 	int rc = 0;
7728 
7729 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7730 		max_rx_rings = RX_MAX_RINGS;
7731 		max_tx_rings = TX_MAX_RINGS;
7732 	}
7733 	if (channels->rx_count > max_rx_rings ||
7734 	    channels->tx_count > max_tx_rings)
7735 		return -EINVAL;
7736 
7737 	bp->num_req_rx_rings = channels->rx_count;
7738 	bp->num_req_tx_rings = channels->tx_count;
7739 
7740 	if (netif_running(dev))
7741 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7742 					   bp->tx_ring_size, true);
7743 
7744 	return rc;
7745 }
7746 
7747 static const struct ethtool_ops bnx2_ethtool_ops = {
7748 	.get_settings		= bnx2_get_settings,
7749 	.set_settings		= bnx2_set_settings,
7750 	.get_drvinfo		= bnx2_get_drvinfo,
7751 	.get_regs_len		= bnx2_get_regs_len,
7752 	.get_regs		= bnx2_get_regs,
7753 	.get_wol		= bnx2_get_wol,
7754 	.set_wol		= bnx2_set_wol,
7755 	.nway_reset		= bnx2_nway_reset,
7756 	.get_link		= bnx2_get_link,
7757 	.get_eeprom_len		= bnx2_get_eeprom_len,
7758 	.get_eeprom		= bnx2_get_eeprom,
7759 	.set_eeprom		= bnx2_set_eeprom,
7760 	.get_coalesce		= bnx2_get_coalesce,
7761 	.set_coalesce		= bnx2_set_coalesce,
7762 	.get_ringparam		= bnx2_get_ringparam,
7763 	.set_ringparam		= bnx2_set_ringparam,
7764 	.get_pauseparam		= bnx2_get_pauseparam,
7765 	.set_pauseparam		= bnx2_set_pauseparam,
7766 	.self_test		= bnx2_self_test,
7767 	.get_strings		= bnx2_get_strings,
7768 	.set_phys_id		= bnx2_set_phys_id,
7769 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7770 	.get_sset_count		= bnx2_get_sset_count,
7771 	.get_channels		= bnx2_get_channels,
7772 	.set_channels		= bnx2_set_channels,
7773 };
7774 
7775 /* Called with rtnl_lock */
7776 static int
7777 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7778 {
7779 	struct mii_ioctl_data *data = if_mii(ifr);
7780 	struct bnx2 *bp = netdev_priv(dev);
7781 	int err;
7782 
7783 	switch (cmd) {
7784 	case SIOCGMIIPHY:
7785 		data->phy_id = bp->phy_addr;
7786 
7787 		/* fallthru */
7788 	case SIOCGMIIREG: {
7789 		u32 mii_regval;
7790 
7791 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7792 			return -EOPNOTSUPP;
7793 
7794 		if (!netif_running(dev))
7795 			return -EAGAIN;
7796 
7797 		spin_lock_bh(&bp->phy_lock);
7798 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7799 		spin_unlock_bh(&bp->phy_lock);
7800 
7801 		data->val_out = mii_regval;
7802 
7803 		return err;
7804 	}
7805 
7806 	case SIOCSMIIREG:
7807 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7808 			return -EOPNOTSUPP;
7809 
7810 		if (!netif_running(dev))
7811 			return -EAGAIN;
7812 
7813 		spin_lock_bh(&bp->phy_lock);
7814 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7815 		spin_unlock_bh(&bp->phy_lock);
7816 
7817 		return err;
7818 
7819 	default:
7820 		/* do nothing */
7821 		break;
7822 	}
7823 	return -EOPNOTSUPP;
7824 }
7825 
7826 /* Called with rtnl_lock */
7827 static int
7828 bnx2_change_mac_addr(struct net_device *dev, void *p)
7829 {
7830 	struct sockaddr *addr = p;
7831 	struct bnx2 *bp = netdev_priv(dev);
7832 
7833 	if (!is_valid_ether_addr(addr->sa_data))
7834 		return -EADDRNOTAVAIL;
7835 
7836 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7837 	if (netif_running(dev))
7838 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7839 
7840 	return 0;
7841 }
7842 
7843 /* Called with rtnl_lock */
7844 static int
7845 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7846 {
7847 	struct bnx2 *bp = netdev_priv(dev);
7848 
7849 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7850 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7851 		return -EINVAL;
7852 
7853 	dev->mtu = new_mtu;
7854 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7855 				     false);
7856 }
7857 
7858 #ifdef CONFIG_NET_POLL_CONTROLLER
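/* Netpoll entry point: service every IRQ vector by calling its handler
 * directly with the interrupt line disabled, so netconsole can make
 * progress without relying on normal interrupt delivery.
 */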
7859 static void
7860 poll_bnx2(struct net_device *dev)
7861 {
7862 	struct bnx2 *bp = netdev_priv(dev);
7863 	int i;
7864 
7865 	for (i = 0; i < bp->irq_nvecs; i++) {
7866 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7867 
7868 		disable_irq(irq->vector);
7869 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7870 		enable_irq(irq->vector);
7871 	}
7872 }
7873 #endif
7874 
7875 static void
7876 bnx2_get_5709_media(struct bnx2 *bp)
7877 {
7878 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7879 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7880 	u32 strap;
7881 
7882 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7883 		return;
7884 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7885 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7886 		return;
7887 	}
7888 
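	/* Dual-media package: the media type is determined per function
	 * from the PHY strap (or its override) decoded below.
	 */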
7889 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7890 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7891 	else
7892 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7893 
7894 	if (bp->func == 0) {
7895 		switch (strap) {
7896 		case 0x4:
7897 		case 0x5:
7898 		case 0x6:
7899 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7900 			return;
7901 		}
7902 	} else {
7903 		switch (strap) {
7904 		case 0x1:
7905 		case 0x2:
7906 		case 0x4:
7907 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7908 			return;
7909 		}
7910 	}
7911 }
7912 
7913 static void
7914 bnx2_get_pci_speed(struct bnx2 *bp)
7915 {
7916 	u32 reg;
7917 
7918 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7919 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7920 		u32 clkreg;
7921 
7922 		bp->flags |= BNX2_FLAG_PCIX;
7923 
7924 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7925 
7926 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7927 		switch (clkreg) {
7928 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7929 			bp->bus_speed_mhz = 133;
7930 			break;
7931 
7932 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7933 			bp->bus_speed_mhz = 100;
7934 			break;
7935 
7936 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7937 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7938 			bp->bus_speed_mhz = 66;
7939 			break;
7940 
7941 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7942 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7943 			bp->bus_speed_mhz = 50;
7944 			break;
7945 
7946 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7947 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7948 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7949 			bp->bus_speed_mhz = 33;
7950 			break;
7951 		}
7952 	} else {
7954 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7955 			bp->bus_speed_mhz = 66;
7956 		else
7957 			bp->bus_speed_mhz = 33;
7958 	}
7959 
7960 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7961 		bp->flags |= BNX2_FLAG_PCI_32BIT;
7963 }
7964 
7965 static void
7966 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7967 {
7968 	int rc, i, j;
7969 	u8 *data;
7970 	unsigned int block_end, rosize, len;
7971 
7972 #define BNX2_VPD_NVRAM_OFFSET	0x300
7973 #define BNX2_VPD_LEN		128
7974 #define BNX2_MAX_VER_SLEN	30
7975 
7976 	data = kmalloc(256, GFP_KERNEL);
7977 	if (!data)
7978 		return;
7979 
7980 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7981 			     BNX2_VPD_LEN);
7982 	if (rc)
7983 		goto vpd_done;
7984 
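	/* The VPD image was read into the upper half of the buffer; NVRAM
	 * stores it byte-swapped within each 32-bit word, so swap each
	 * 4-byte group into the lower half.
	 */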
7985 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7986 		data[i] = data[i + BNX2_VPD_LEN + 3];
7987 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7988 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7989 		data[i + 3] = data[i + BNX2_VPD_LEN];
7990 	}
7991 
7992 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7993 	if (i < 0)
7994 		goto vpd_done;
7995 
7996 	rosize = pci_vpd_lrdt_size(&data[i]);
7997 	i += PCI_VPD_LRDT_TAG_SIZE;
7998 	block_end = i + rosize;
7999 
8000 	if (block_end > BNX2_VPD_LEN)
8001 		goto vpd_done;
8002 
8003 	j = pci_vpd_find_info_keyword(data, i, rosize,
8004 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8005 	if (j < 0)
8006 		goto vpd_done;
8007 
8008 	len = pci_vpd_info_field_size(&data[j]);
8009 
8010 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
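	/* "1028" is Dell's PCI vendor ID in ASCII; the VENDOR0 firmware
	 * version keyword below is only used on such boards.
	 */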
8011 	if (j + len > block_end || len != 4 ||
8012 	    memcmp(&data[j], "1028", 4))
8013 		goto vpd_done;
8014 
8015 	j = pci_vpd_find_info_keyword(data, i, rosize,
8016 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8017 	if (j < 0)
8018 		goto vpd_done;
8019 
8020 	len = pci_vpd_info_field_size(&data[j]);
8021 
8022 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8023 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8024 		goto vpd_done;
8025 
8026 	memcpy(bp->fw_version, &data[j], len);
8027 	bp->fw_version[len] = ' ';
8028 
8029 vpd_done:
8030 	kfree(data);
8031 }
8032 
8033 static int
8034 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8035 {
8036 	struct bnx2 *bp;
8037 	int rc, i, j;
8038 	u32 reg;
8039 	u64 dma_mask, persist_dma_mask;
8040 	int err;
8041 
8042 	SET_NETDEV_DEV(dev, &pdev->dev);
8043 	bp = netdev_priv(dev);
8044 
8045 	bp->flags = 0;
8046 	bp->phy_flags = 0;
8047 
8048 	bp->temp_stats_blk =
8049 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8050 
8051 	if (bp->temp_stats_blk == NULL) {
8052 		rc = -ENOMEM;
8053 		goto err_out;
8054 	}
8055 
8056 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8057 	rc = pci_enable_device(pdev);
8058 	if (rc) {
8059 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8060 		goto err_out;
8061 	}
8062 
8063 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8064 		dev_err(&pdev->dev,
8065 			"Cannot find PCI device base address, aborting\n");
8066 		rc = -ENODEV;
8067 		goto err_out_disable;
8068 	}
8069 
8070 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8071 	if (rc) {
8072 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8073 		goto err_out_disable;
8074 	}
8075 
8076 	pci_set_master(pdev);
8077 
8078 	bp->pm_cap = pdev->pm_cap;
8079 	if (bp->pm_cap == 0) {
8080 		dev_err(&pdev->dev,
8081 			"Cannot find power management capability, aborting\n");
8082 		rc = -EIO;
8083 		goto err_out_release;
8084 	}
8085 
8086 	bp->dev = dev;
8087 	bp->pdev = pdev;
8088 
8089 	spin_lock_init(&bp->phy_lock);
8090 	spin_lock_init(&bp->indirect_lock);
8091 #ifdef BCM_CNIC
8092 	mutex_init(&bp->cnic_lock);
8093 #endif
8094 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8095 
8096 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8097 							 TX_MAX_TSS_RINGS + 1));
8098 	if (!bp->regview) {
8099 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8100 		rc = -ENOMEM;
8101 		goto err_out_release;
8102 	}
8103 
8104 	/* Configure byte swap and enable write to the reg_window registers.
8105 	 * Rely on the CPU to do target byte swapping on big-endian systems;
8106 	 * the chip's target access swapping will not swap all accesses.
8107 	 */
8108 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8109 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8110 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8111 
8112 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8113 
8114 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8115 		if (!pci_is_pcie(pdev)) {
8116 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8117 			rc = -EIO;
8118 			goto err_out_unmap;
8119 		}
8120 		bp->flags |= BNX2_FLAG_PCIE;
8121 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8122 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8123 
8124 		/* AER (Advanced Error Reporting) hooks */
8125 		err = pci_enable_pcie_error_reporting(pdev);
8126 		if (!err)
8127 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8128 
8129 	} else {
8130 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8131 		if (bp->pcix_cap == 0) {
8132 			dev_err(&pdev->dev,
8133 				"Cannot find PCIX capability, aborting\n");
8134 			rc = -EIO;
8135 			goto err_out_unmap;
8136 		}
8137 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8138 	}
8139 
8140 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8141 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8142 		if (pdev->msix_cap)
8143 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8144 	}
8145 
8146 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8147 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8148 		if (pdev->msi_cap)
8149 			bp->flags |= BNX2_FLAG_MSI_CAP;
8150 	}
8151 
8152 	/* 5708 cannot support DMA addresses > 40-bit.  */
8153 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8154 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8155 	else
8156 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8157 
8158 	/* Configure DMA attributes. */
8159 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8160 		dev->features |= NETIF_F_HIGHDMA;
8161 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8162 		if (rc) {
8163 			dev_err(&pdev->dev,
8164 				"pci_set_consistent_dma_mask failed, aborting\n");
8165 			goto err_out_unmap;
8166 		}
8167 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8168 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8169 		goto err_out_unmap;
8170 	}
8171 
8172 	if (!(bp->flags & BNX2_FLAG_PCIE))
8173 		bnx2_get_pci_speed(bp);
8174 
8175 	/* 5706A0 may falsely detect SERR and PERR. */
8176 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8177 		reg = BNX2_RD(bp, PCI_COMMAND);
8178 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8179 		BNX2_WR(bp, PCI_COMMAND, reg);
8180 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8181 		!(bp->flags & BNX2_FLAG_PCIX)) {
8182 		dev_err(&pdev->dev,
8183 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8184 		rc = -EPERM;
8185 		goto err_out_unmap;
8186 	}
8187 
8188 	bnx2_init_nvram(bp);
8189 
8190 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8191 
8192 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8193 		bp->func = 1;
8194 
8195 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8196 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8197 		u32 off = bp->func << 2;
8198 
8199 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8200 	} else
8201 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8202 
8203 	/* Get the permanent MAC address.  First we need to make sure the
8204 	 * firmware is actually running.
8205 	 */
8206 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8207 
8208 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8209 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8210 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8211 		rc = -ENODEV;
8212 		goto err_out_unmap;
8213 	}
8214 
8215 	bnx2_read_vpd_fw_ver(bp);
8216 
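	/* Append the bootcode version as "bc x.y.z", pulling one byte at a
	 * time out of the packed BNX2_DEV_INFO_BC_REV word and printing it
	 * in decimal without leading zeros.
	 */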
8217 	j = strlen(bp->fw_version);
8218 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8219 	for (i = 0; i < 3 && j < 24; i++) {
8220 		u8 num, k, skip0;
8221 
8222 		if (i == 0) {
8223 			bp->fw_version[j++] = 'b';
8224 			bp->fw_version[j++] = 'c';
8225 			bp->fw_version[j++] = ' ';
8226 		}
8227 		num = (u8) (reg >> (24 - (i * 8)));
8228 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8229 			if (num >= k || !skip0 || k == 1) {
8230 				bp->fw_version[j++] = (num / k) + '0';
8231 				skip0 = 0;
8232 			}
8233 		}
8234 		if (i != 2)
8235 			bp->fw_version[j++] = '.';
8236 	}
8237 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8238 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8239 		bp->wol = 1;
8240 
8241 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8242 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8243 
8244 		for (i = 0; i < 30; i++) {
8245 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8246 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8247 				break;
8248 			msleep(10);
8249 		}
8250 	}
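	/* If management firmware is running, append its version string,
	 * stored as three big-endian words at BNX2_MFW_VER_PTR.
	 */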
8251 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8252 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8253 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8254 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8255 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8256 
8257 		if (j < 32)
8258 			bp->fw_version[j++] = ' ';
8259 		for (i = 0; i < 3 && j < 28; i++) {
8260 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8261 			reg = be32_to_cpu(reg);
8262 			memcpy(&bp->fw_version[j], &reg, 4);
8263 			j += 4;
8264 		}
8265 	}
8266 
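	/* The permanent MAC address is kept in shared memory as two words:
	 * the upper 2 bytes and the lower 4 bytes.
	 */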
8267 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8268 	bp->mac_addr[0] = (u8) (reg >> 8);
8269 	bp->mac_addr[1] = (u8) reg;
8270 
8271 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8272 	bp->mac_addr[2] = (u8) (reg >> 24);
8273 	bp->mac_addr[3] = (u8) (reg >> 16);
8274 	bp->mac_addr[4] = (u8) (reg >> 8);
8275 	bp->mac_addr[5] = (u8) reg;
8276 
8277 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8278 	bnx2_set_rx_ring_size(bp, 255);
8279 
8280 	bp->tx_quick_cons_trip_int = 2;
8281 	bp->tx_quick_cons_trip = 20;
8282 	bp->tx_ticks_int = 18;
8283 	bp->tx_ticks = 80;
8284 
8285 	bp->rx_quick_cons_trip_int = 2;
8286 	bp->rx_quick_cons_trip = 12;
8287 	bp->rx_ticks_int = 18;
8288 	bp->rx_ticks = 18;
8289 
8290 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8291 
8292 	bp->current_interval = BNX2_TIMER_INTERVAL;
8293 
8294 	bp->phy_addr = 1;
8295 
8296 	/* Disable WOL support if we are running on a SERDES chip. */
8297 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8298 		bnx2_get_5709_media(bp);
8299 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8300 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8301 
8302 	bp->phy_port = PORT_TP;
8303 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8304 		bp->phy_port = PORT_FIBRE;
8305 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8306 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8307 			bp->flags |= BNX2_FLAG_NO_WOL;
8308 			bp->wol = 0;
8309 		}
8310 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8311 			/* Don't do parallel detect on this board because of
8312 			 * some board problems.  The link will not go down
8313 			 * if we do parallel detect.
8314 			 */
8315 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8316 			    pdev->subsystem_device == 0x310c)
8317 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8318 		} else {
8319 			bp->phy_addr = 2;
8320 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8321 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8322 		}
8323 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8324 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8325 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8326 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8327 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8328 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8329 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8330 
8331 	bnx2_init_fw_cap(bp);
8332 
8333 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8334 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8335 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8336 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8337 		bp->flags |= BNX2_FLAG_NO_WOL;
8338 		bp->wol = 0;
8339 	}
8340 
8341 	if (bp->flags & BNX2_FLAG_NO_WOL)
8342 		device_set_wakeup_capable(&bp->pdev->dev, false);
8343 	else
8344 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8345 
8346 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8347 		bp->tx_quick_cons_trip_int =
8348 			bp->tx_quick_cons_trip;
8349 		bp->tx_ticks_int = bp->tx_ticks;
8350 		bp->rx_quick_cons_trip_int =
8351 			bp->rx_quick_cons_trip;
8352 		bp->rx_ticks_int = bp->rx_ticks;
8353 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8354 		bp->com_ticks_int = bp->com_ticks;
8355 		bp->cmd_ticks_int = bp->cmd_ticks;
8356 	}
8357 
8358 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8359 	 *
8360 	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8361 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8362 	 * but causes problems on the AMD 8132 which will eventually stop
8363 	 * responding after a while.
8364 	 *
8365 	 * AMD believes this incompatibility is unique to the 5706, and
8366 	 * prefers to locally disable MSI rather than globally disabling it.
8367 	 */
8368 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8369 		struct pci_dev *amd_8132 = NULL;
8370 
8371 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8372 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8373 						  amd_8132))) {
8374 
8375 			if (amd_8132->revision >= 0x10 &&
8376 			    amd_8132->revision <= 0x13) {
8377 				disable_msi = 1;
8378 				pci_dev_put(amd_8132);
8379 				break;
8380 			}
8381 		}
8382 	}
8383 
8384 	bnx2_set_default_link(bp);
8385 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8386 
8387 	init_timer(&bp->timer);
8388 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8389 	bp->timer.data = (unsigned long) bp;
8390 	bp->timer.function = bnx2_timer;
8391 
8392 #ifdef BCM_CNIC
8393 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8394 		bp->cnic_eth_dev.max_iscsi_conn =
8395 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8396 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8397 	bp->cnic_probe = bnx2_cnic_probe;
8398 #endif
8399 	pci_save_state(pdev);
8400 
8401 	return 0;
8402 
8403 err_out_unmap:
8404 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8405 		pci_disable_pcie_error_reporting(pdev);
8406 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8407 	}
8408 
8409 	pci_iounmap(pdev, bp->regview);
8410 	bp->regview = NULL;
8411 
8412 err_out_release:
8413 	pci_release_regions(pdev);
8414 
8415 err_out_disable:
8416 	pci_disable_device(pdev);
8417 
8418 err_out:
8419 	return rc;
8420 }
8421 
8422 static char *
8423 bnx2_bus_string(struct bnx2 *bp, char *str)
8424 {
8425 	char *s = str;
8426 
8427 	if (bp->flags & BNX2_FLAG_PCIE) {
8428 		s += sprintf(s, "PCI Express");
8429 	} else {
8430 		s += sprintf(s, "PCI");
8431 		if (bp->flags & BNX2_FLAG_PCIX)
8432 			s += sprintf(s, "-X");
8433 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8434 			s += sprintf(s, " 32-bit");
8435 		else
8436 			s += sprintf(s, " 64-bit");
8437 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8438 	}
8439 	return str;
8440 }
8441 
8442 static void
8443 bnx2_del_napi(struct bnx2 *bp)
8444 {
8445 	int i;
8446 
8447 	for (i = 0; i < bp->irq_nvecs; i++)
8448 		netif_napi_del(&bp->bnx2_napi[i].napi);
8449 }
8450 
8451 static void
8452 bnx2_init_napi(struct bnx2 *bp)
8453 {
8454 	int i;
8455 
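	/* Vector 0 uses bnx2_poll, which also handles link and other
	 * status-block events; the remaining MSI-X vectors service only
	 * their own rings via bnx2_poll_msix.
	 */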
8456 	for (i = 0; i < bp->irq_nvecs; i++) {
8457 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8458 		int (*poll)(struct napi_struct *, int);
8459 
8460 		if (i == 0)
8461 			poll = bnx2_poll;
8462 		else
8463 			poll = bnx2_poll_msix;
8464 
8465 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8466 		bnapi->bp = bp;
8467 	}
8468 }
8469 
8470 static const struct net_device_ops bnx2_netdev_ops = {
8471 	.ndo_open		= bnx2_open,
8472 	.ndo_start_xmit		= bnx2_start_xmit,
8473 	.ndo_stop		= bnx2_close,
8474 	.ndo_get_stats64	= bnx2_get_stats64,
8475 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8476 	.ndo_do_ioctl		= bnx2_ioctl,
8477 	.ndo_validate_addr	= eth_validate_addr,
8478 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8479 	.ndo_change_mtu		= bnx2_change_mtu,
8480 	.ndo_fix_features	= bnx2_fix_features,
8481 	.ndo_set_features	= bnx2_set_features,
8482 	.ndo_tx_timeout		= bnx2_tx_timeout,
8483 #ifdef CONFIG_NET_POLL_CONTROLLER
8484 	.ndo_poll_controller	= poll_bnx2,
8485 #endif
8486 };
8487 
8488 static int
8489 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8490 {
8491 	static int version_printed = 0;
8492 	struct net_device *dev;
8493 	struct bnx2 *bp;
8494 	int rc;
8495 	char str[40];
8496 
8497 	if (version_printed++ == 0)
8498 		pr_info("%s", version);
8499 
8500 	/* dev is zeroed by alloc_etherdev_mq() */
8501 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8502 	if (!dev)
8503 		return -ENOMEM;
8504 
8505 	rc = bnx2_init_board(pdev, dev);
8506 	if (rc < 0)
8507 		goto err_free;
8508 
8509 	dev->netdev_ops = &bnx2_netdev_ops;
8510 	dev->watchdog_timeo = TX_TIMEOUT;
8511 	dev->ethtool_ops = &bnx2_ethtool_ops;
8512 
8513 	bp = netdev_priv(dev);
8514 
8515 	pci_set_drvdata(pdev, dev);
8516 
8517 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8518 
8519 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8520 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8521 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8522 
8523 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8524 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8525 
8526 	dev->vlan_features = dev->hw_features;
8527 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8528 	dev->features |= dev->hw_features;
8529 	dev->priv_flags |= IFF_UNICAST_FLT;
8530 
8531 	if ((rc = register_netdev(dev))) {
8532 		dev_err(&pdev->dev, "Cannot register net device\n");
8533 		goto error;
8534 	}
8535 
8536 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8537 		    "node addr %pM\n", board_info[ent->driver_data].name,
8538 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8539 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8540 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8541 		    pdev->irq, dev->dev_addr);
8542 
8543 	return 0;
8544 
8545 error:
8546 	pci_iounmap(pdev, bp->regview);
8547 	pci_release_regions(pdev);
8548 	pci_disable_device(pdev);
8549 err_free:
8550 	free_netdev(dev);
8551 	return rc;
8552 }
8553 
8554 static void
8555 bnx2_remove_one(struct pci_dev *pdev)
8556 {
8557 	struct net_device *dev = pci_get_drvdata(pdev);
8558 	struct bnx2 *bp = netdev_priv(dev);
8559 
8560 	unregister_netdev(dev);
8561 
8562 	del_timer_sync(&bp->timer);
8563 	cancel_work_sync(&bp->reset_task);
8564 
8565 	pci_iounmap(bp->pdev, bp->regview);
8566 
8567 	kfree(bp->temp_stats_blk);
8568 
8569 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8570 		pci_disable_pcie_error_reporting(pdev);
8571 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8572 	}
8573 
8574 	bnx2_release_firmware(bp);
8575 
8576 	free_netdev(dev);
8577 
8578 	pci_release_regions(pdev);
8579 	pci_disable_device(pdev);
8580 }
8581 
8582 static int
8583 bnx2_suspend(struct device *device)
8584 {
8585 	struct pci_dev *pdev = to_pci_dev(device);
8586 	struct net_device *dev = pci_get_drvdata(pdev);
8587 	struct bnx2 *bp = netdev_priv(dev);
8588 
8589 	if (netif_running(dev)) {
8590 		cancel_work_sync(&bp->reset_task);
8591 		bnx2_netif_stop(bp, true);
8592 		netif_device_detach(dev);
8593 		del_timer_sync(&bp->timer);
8594 		bnx2_shutdown_chip(bp);
8595 		__bnx2_free_irq(bp);
8596 		bnx2_free_skbs(bp);
8597 	}
8598 	bnx2_setup_wol(bp);
8599 	return 0;
8600 }
8601 
8602 static int
8603 bnx2_resume(struct device *device)
8604 {
8605 	struct pci_dev *pdev = to_pci_dev(device);
8606 	struct net_device *dev = pci_get_drvdata(pdev);
8607 	struct bnx2 *bp = netdev_priv(dev);
8608 
8609 	if (!netif_running(dev))
8610 		return 0;
8611 
8612 	bnx2_set_power_state(bp, PCI_D0);
8613 	netif_device_attach(dev);
8614 	bnx2_request_irq(bp);
8615 	bnx2_init_nic(bp, 1);
8616 	bnx2_netif_start(bp, true);
8617 	return 0;
8618 }
8619 
8620 #ifdef CONFIG_PM_SLEEP
8621 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8622 #define BNX2_PM_OPS (&bnx2_pm_ops)
8623 
8624 #else
8625 
8626 #define BNX2_PM_OPS NULL
8627 
8628 #endif /* CONFIG_PM_SLEEP */
8629 /**
8630  * bnx2_io_error_detected - called when PCI error is detected
8631  * @pdev: Pointer to PCI device
8632  * @state: The current pci connection state
8633  *
8634  * This function is called after a PCI bus error affecting
8635  * this device has been detected.
8636  */
8637 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8638 					       pci_channel_state_t state)
8639 {
8640 	struct net_device *dev = pci_get_drvdata(pdev);
8641 	struct bnx2 *bp = netdev_priv(dev);
8642 
8643 	rtnl_lock();
8644 	netif_device_detach(dev);
8645 
8646 	if (state == pci_channel_io_perm_failure) {
8647 		rtnl_unlock();
8648 		return PCI_ERS_RESULT_DISCONNECT;
8649 	}
8650 
8651 	if (netif_running(dev)) {
8652 		bnx2_netif_stop(bp, true);
8653 		del_timer_sync(&bp->timer);
8654 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8655 	}
8656 
8657 	pci_disable_device(pdev);
8658 	rtnl_unlock();
8659 
8660 	/* Request a slot reset. */
8661 	return PCI_ERS_RESULT_NEED_RESET;
8662 }
8663 
8664 /**
8665  * bnx2_io_slot_reset - called after the pci bus has been reset.
8666  * @pdev: Pointer to PCI device
8667  *
8668  * Restart the card from scratch, as if from a cold-boot.
8669  */
8670 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8671 {
8672 	struct net_device *dev = pci_get_drvdata(pdev);
8673 	struct bnx2 *bp = netdev_priv(dev);
8674 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8675 	int err = 0;
8676 
8677 	rtnl_lock();
8678 	if (pci_enable_device(pdev)) {
8679 		dev_err(&pdev->dev,
8680 			"Cannot re-enable PCI device after reset\n");
8681 	} else {
8682 		pci_set_master(pdev);
8683 		pci_restore_state(pdev);
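		/* Restoring config space invalidates the saved copy, so save
		 * it again for any later recovery attempt.
		 */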
8684 		pci_save_state(pdev);
8685 
8686 		if (netif_running(dev))
8687 			err = bnx2_init_nic(bp, 1);
8688 
8689 		if (!err)
8690 			result = PCI_ERS_RESULT_RECOVERED;
8691 	}
8692 
8693 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8694 		bnx2_napi_enable(bp);
8695 		dev_close(dev);
8696 	}
8697 	rtnl_unlock();
8698 
8699 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8700 		return result;
8701 
8702 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8703 	if (err) {
8704 		dev_err(&pdev->dev,
8705 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8706 			 err); /* non-fatal, continue */
8707 	}
8708 
8709 	return result;
8710 }
8711 
8712 /**
8713  * bnx2_io_resume - called when traffic can start flowing again.
8714  * @pdev: Pointer to PCI device
8715  *
8716  * This callback is called when the error recovery driver tells us that
8717  * it's OK to resume normal operation.
8718  */
8719 static void bnx2_io_resume(struct pci_dev *pdev)
8720 {
8721 	struct net_device *dev = pci_get_drvdata(pdev);
8722 	struct bnx2 *bp = netdev_priv(dev);
8723 
8724 	rtnl_lock();
8725 	if (netif_running(dev))
8726 		bnx2_netif_start(bp, true);
8727 
8728 	netif_device_attach(dev);
8729 	rtnl_unlock();
8730 }
8731 
8732 static void bnx2_shutdown(struct pci_dev *pdev)
8733 {
8734 	struct net_device *dev = pci_get_drvdata(pdev);
8735 	struct bnx2 *bp;
8736 
8737 	if (!dev)
8738 		return;
8739 
8740 	bp = netdev_priv(dev);
8741 	if (!bp)
8742 		return;
8743 
8744 	rtnl_lock();
8745 	if (netif_running(dev))
8746 		dev_close(bp->dev);
8747 
8748 	if (system_state == SYSTEM_POWER_OFF)
8749 		bnx2_set_power_state(bp, PCI_D3hot);
8750 
8751 	rtnl_unlock();
8752 }
8753 
8754 static const struct pci_error_handlers bnx2_err_handler = {
8755 	.error_detected	= bnx2_io_error_detected,
8756 	.slot_reset	= bnx2_io_slot_reset,
8757 	.resume		= bnx2_io_resume,
8758 };
8759 
8760 static struct pci_driver bnx2_pci_driver = {
8761 	.name		= DRV_MODULE_NAME,
8762 	.id_table	= bnx2_pci_tbl,
8763 	.probe		= bnx2_init_one,
8764 	.remove		= bnx2_remove_one,
8765 	.driver.pm	= BNX2_PM_OPS,
8766 	.err_handler	= &bnx2_err_handler,
8767 	.shutdown	= bnx2_shutdown,
8768 };
8769 
8770 module_pci_driver(bnx2_pci_driver);
8771