/* bnx2.c: QLogic NX2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.5"
#define DRV_MODULE_RELDATE	"December 20, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
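	/* Worked example (illustrative): tx_prod == 1 after a wrap and
	 * tx_cons == 0xffff yields a u32 diff of 0xffff0002; the 0xffff
	 * mask recovers the true distance of 2.  A masked distance of
	 * exactly BNX2_TX_DESC_CNT accounts for the skipped index and
	 * is clamped to BNX2_MAX_TX_DESC_CNT.
	 */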
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
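		/* The write is posted to the context engine; poll up to
		 * ~25 us for the hardware to clear the WRITE_REQ bit.
		 */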
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

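	/* With MSI-X, cnic owns the status block and vector just past
	 * the ones used for ethernet (sb_id == bp->irq_nvecs); in the
	 * INTx/MSI case it shares vector 0 and the default status block.
	 */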
	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

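	/* MDIO_COMM layout as composed below: PHY address in bits 25:21,
	 * register number in bits 20:16, data/result in bits 15:0.
	 */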
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

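		/* Two ACK writes per vector: the first publishes the last
		 * seen status index with the interrupt still masked, the
		 * second unmasks it.  The COAL_NOW kick below then picks
		 * up any events that arrived in between.
		 */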
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

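	/* Layout of the combined allocation:
	 *
	 *   status_blk_mapping --> one status block per vector, spaced
	 *                          BNX2_SBLK_MSIX_ALIGN_SIZE apart
	 *   + status_blk_size  --> statistics block
	 */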
	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
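	/* Resolution below, condensed:
	 *   local PAUSE         vs remote PAUSE          -> TX | RX
	 *   local PAUSE + ASYM  vs remote ASYM only      -> RX
	 *   local ASYM only     vs remote PAUSE + ASYM   -> TX
	 *   anything else                                -> no pause
	 */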
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

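		/* The partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two bits to the left of the local MII_CTRL1000
		 * advertisement bits; shift to line them up.
		 */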
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autonegotiation
		 * involves exchanging base pages plus 3 next pages
		 * and normally completes in about 120 msec.
		 */
1844 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1845 		bp->serdes_an_pending = 1;
1846 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1847 	} else {
1848 		bnx2_resolve_flow_ctrl(bp);
1849 		bnx2_set_mac_link(bp);
1850 	}
1851 
1852 	return 0;
1853 }
1854 
1855 #define ETHTOOL_ALL_FIBRE_SPEED						\
1856 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1857 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1858 		(ADVERTISED_1000baseT_Full)
1859 
1860 #define ETHTOOL_ALL_COPPER_SPEED					\
1861 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1862 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1863 	ADVERTISED_1000baseT_Full)
1864 
1865 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1866 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1867 
1868 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1869 
1870 static void
1871 bnx2_set_default_remote_link(struct bnx2 *bp)
1872 {
1873 	u32 link;
1874 
1875 	if (bp->phy_port == PORT_TP)
1876 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1877 	else
1878 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1879 
1880 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1881 		bp->req_line_speed = 0;
1882 		bp->autoneg |= AUTONEG_SPEED;
1883 		bp->advertising = ADVERTISED_Autoneg;
1884 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1885 			bp->advertising |= ADVERTISED_10baseT_Half;
1886 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1887 			bp->advertising |= ADVERTISED_10baseT_Full;
1888 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1889 			bp->advertising |= ADVERTISED_100baseT_Half;
1890 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1891 			bp->advertising |= ADVERTISED_100baseT_Full;
1892 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1893 			bp->advertising |= ADVERTISED_1000baseT_Full;
1894 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1895 			bp->advertising |= ADVERTISED_2500baseX_Full;
1896 	} else {
1897 		bp->autoneg = 0;
1898 		bp->advertising = 0;
1899 		bp->req_duplex = DUPLEX_FULL;
1900 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1901 			bp->req_line_speed = SPEED_10;
1902 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1903 				bp->req_duplex = DUPLEX_HALF;
1904 		}
1905 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1906 			bp->req_line_speed = SPEED_100;
1907 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1908 				bp->req_duplex = DUPLEX_HALF;
1909 		}
1910 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1911 			bp->req_line_speed = SPEED_1000;
1912 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1913 			bp->req_line_speed = SPEED_2500;
1914 	}
1915 }
1916 
1917 static void
1918 bnx2_set_default_link(struct bnx2 *bp)
1919 {
1920 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1921 		bnx2_set_default_remote_link(bp);
1922 		return;
1923 	}
1924 
1925 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1926 	bp->req_line_speed = 0;
1927 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1928 		u32 reg;
1929 
1930 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1931 
1932 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1933 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1934 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1935 			bp->autoneg = 0;
1936 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1937 			bp->req_duplex = DUPLEX_FULL;
1938 		}
1939 	} else
1940 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1941 }
1942 
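/* Write the next driver-pulse sequence number to the BNX2_DRV_PULSE_MB
 * shared-memory mailbox (through the register window, under
 * indirect_lock).  The firmware samples this mailbox as a keepalive;
 * bnx2_remote_phy_event() below calls this when the firmware reports
 * that the heart beat has expired.
 */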
1943 static void
1944 bnx2_send_heart_beat(struct bnx2 *bp)
1945 {
1946 	u32 msg;
1947 	u32 addr;
1948 
1949 	spin_lock(&bp->indirect_lock);
1950 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1951 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1952 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1953 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1954 	spin_unlock(&bp->indirect_lock);
1955 }
1956 
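/* Handle a link event reported by the remote PHY firmware: decode the
 * BNX2_LINK_STATUS shared-memory word into link state, speed, duplex
 * and flow control, then reprogram the MAC and report the link if its
 * state changed.
 */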
1957 static void
1958 bnx2_remote_phy_event(struct bnx2 *bp)
1959 {
1960 	u32 msg;
1961 	u8 link_up = bp->link_up;
1962 	u8 old_port;
1963 
1964 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1965 
1966 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1967 		bnx2_send_heart_beat(bp);
1968 
1969 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1970 
1971 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1972 		bp->link_up = 0;
1973 	else {
1974 		u32 speed;
1975 
1976 		bp->link_up = 1;
1977 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1978 		bp->duplex = DUPLEX_FULL;
1979 		switch (speed) {
1980 			case BNX2_LINK_STATUS_10HALF:
1981 				bp->duplex = DUPLEX_HALF;
1982 				/* fall through */
1983 			case BNX2_LINK_STATUS_10FULL:
1984 				bp->line_speed = SPEED_10;
1985 				break;
1986 			case BNX2_LINK_STATUS_100HALF:
1987 				bp->duplex = DUPLEX_HALF;
1988 				/* fall through */
1989 			case BNX2_LINK_STATUS_100BASE_T4:
1990 			case BNX2_LINK_STATUS_100FULL:
1991 				bp->line_speed = SPEED_100;
1992 				break;
1993 			case BNX2_LINK_STATUS_1000HALF:
1994 				bp->duplex = DUPLEX_HALF;
1995 				/* fall through */
1996 			case BNX2_LINK_STATUS_1000FULL:
1997 				bp->line_speed = SPEED_1000;
1998 				break;
1999 			case BNX2_LINK_STATUS_2500HALF:
2000 				bp->duplex = DUPLEX_HALF;
2001 				/* fall through */
2002 			case BNX2_LINK_STATUS_2500FULL:
2003 				bp->line_speed = SPEED_2500;
2004 				break;
2005 			default:
2006 				bp->line_speed = 0;
2007 				break;
2008 		}
2009 
2010 		bp->flow_ctrl = 0;
2011 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2012 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2013 			if (bp->duplex == DUPLEX_FULL)
2014 				bp->flow_ctrl = bp->req_flow_ctrl;
2015 		} else {
2016 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2017 				bp->flow_ctrl |= FLOW_CTRL_TX;
2018 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2019 				bp->flow_ctrl |= FLOW_CTRL_RX;
2020 		}
2021 
2022 		old_port = bp->phy_port;
2023 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2024 			bp->phy_port = PORT_FIBRE;
2025 		else
2026 			bp->phy_port = PORT_TP;
2027 
2028 		if (old_port != bp->phy_port)
2029 			bnx2_set_default_link(bp);
2030 
2031 	}
2032 	if (bp->link_up != link_up)
2033 		bnx2_report_link(bp);
2034 
2035 	bnx2_set_mac_link(bp);
2036 }
2037 
2038 static int
2039 bnx2_set_remote_link(struct bnx2 *bp)
2040 {
2041 	u32 evt_code;
2042 
2043 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2044 	switch (evt_code) {
2045 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2046 			bnx2_remote_phy_event(bp);
2047 			break;
2048 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2049 		default:
2050 			bnx2_send_heart_beat(bp);
2051 			break;
2052 	}
2053 	return 0;
2054 }
2055 
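/* Program the copper PHY.  With autoneg enabled, rewrite the
 * advertisement registers and restart autonegotiation only if the
 * advertised values actually changed; otherwise force speed/duplex
 * through BMCR, bouncing the link first if it is currently up.
 */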
2056 static int
2057 bnx2_setup_copper_phy(struct bnx2 *bp)
2058 __releases(&bp->phy_lock)
2059 __acquires(&bp->phy_lock)
2060 {
2061 	u32 bmcr, adv_reg, new_adv = 0;
2062 	u32 new_bmcr;
2063 
2064 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2065 
2066 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2067 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2068 		    ADVERTISE_PAUSE_ASYM);
2069 
2070 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2071 
2072 	if (bp->autoneg & AUTONEG_SPEED) {
2073 		u32 adv1000_reg;
2074 		u32 new_adv1000 = 0;
2075 
2076 		new_adv |= bnx2_phy_get_pause_adv(bp);
2077 
2078 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2079 		adv1000_reg &= PHY_ALL_1000_SPEED;
2080 
2081 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2082 		if ((adv1000_reg != new_adv1000) ||
2083 			(adv_reg != new_adv) ||
2084 			((bmcr & BMCR_ANENABLE) == 0)) {
2085 
2086 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2087 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2088 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2089 				BMCR_ANENABLE);
2090 		}
2091 		else if (bp->link_up) {
2092 			/* Flow ctrl may have changed from auto to forced
2093 			 * or vice-versa. */
2094 
2095 			bnx2_resolve_flow_ctrl(bp);
2096 			bnx2_set_mac_link(bp);
2097 		}
2098 		return 0;
2099 	}
2100 
2101 	/* advertise nothing when forcing speed */
2102 	if (adv_reg != new_adv)
2103 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2104 
2105 	new_bmcr = 0;
2106 	if (bp->req_line_speed == SPEED_100) {
2107 		new_bmcr |= BMCR_SPEED100;
2108 	}
2109 	if (bp->req_duplex == DUPLEX_FULL) {
2110 		new_bmcr |= BMCR_FULLDPLX;
2111 	}
2112 	if (new_bmcr != bmcr) {
2113 		u32 bmsr;
2114 
2115 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117 
2118 		if (bmsr & BMSR_LSTATUS) {
2119 			/* Force link down */
2120 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2121 			spin_unlock_bh(&bp->phy_lock);
2122 			msleep(50);
2123 			spin_lock_bh(&bp->phy_lock);
2124 
2125 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2127 		}
2128 
2129 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2130 
2131 		/* Normally, the new speed is set up after the link has
2132 		 * gone down and up again. In some cases, link will not go
2133 		 * down so we need to set up the new speed here.
2134 		 */
2135 		if (bmsr & BMSR_LSTATUS) {
2136 			bp->line_speed = bp->req_line_speed;
2137 			bp->duplex = bp->req_duplex;
2138 			bnx2_resolve_flow_ctrl(bp);
2139 			bnx2_set_mac_link(bp);
2140 		}
2141 	} else {
2142 		bnx2_resolve_flow_ctrl(bp);
2143 		bnx2_set_mac_link(bp);
2144 	}
2145 	return 0;
2146 }
2147 
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153 	if (bp->loopback == MAC_LOOPBACK)
2154 		return 0;
2155 
2156 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157 		return bnx2_setup_serdes_phy(bp, port);
2158 	}
2159 	else {
2160 		return bnx2_setup_copper_phy(bp);
2161 	}
2162 }
2163 
2164 static int
2165 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2166 {
2167 	u32 val;
2168 
2169 	bp->mii_bmcr = MII_BMCR + 0x10;
2170 	bp->mii_bmsr = MII_BMSR + 0x10;
2171 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2172 	bp->mii_adv = MII_ADVERTISE + 0x10;
2173 	bp->mii_lpa = MII_LPA + 0x10;
2174 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2175 
2176 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2177 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2178 
2179 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2180 	if (reset_phy)
2181 		bnx2_reset_phy(bp);
2182 
2183 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2184 
2185 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2186 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2187 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2188 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2189 
2190 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2191 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2192 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2193 		val |= BCM5708S_UP1_2G5;
2194 	else
2195 		val &= ~BCM5708S_UP1_2G5;
2196 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2197 
2198 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2199 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2200 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2201 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2202 
2203 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2204 
2205 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2206 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2207 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2208 
2209 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2210 
2211 	return 0;
2212 }
2213 
2214 static int
2215 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2216 {
2217 	u32 val;
2218 
2219 	if (reset_phy)
2220 		bnx2_reset_phy(bp);
2221 
2222 	bp->mii_up1 = BCM5708S_UP1;
2223 
2224 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2225 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2226 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2227 
2228 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2229 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2230 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2231 
2232 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2233 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2234 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2235 
2236 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2237 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2238 		val |= BCM5708S_UP1_2G5;
2239 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2240 	}
2241 
2242 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2243 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2244 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2245 		/* increase tx signal amplitude */
2246 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2247 			       BCM5708S_BLK_ADDR_TX_MISC);
2248 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2249 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2250 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2251 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2252 	}
2253 
2254 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2255 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2256 
2257 	if (val) {
2258 		u32 is_backplane;
2259 
2260 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2261 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2262 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 				       BCM5708S_BLK_ADDR_TX_MISC);
2264 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2265 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2266 				       BCM5708S_BLK_ADDR_DIG);
2267 		}
2268 	}
2269 	return 0;
2270 }
2271 
2272 static int
2273 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2274 {
2275 	if (reset_phy)
2276 		bnx2_reset_phy(bp);
2277 
2278 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2279 
2280 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2281 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2282 
2283 	if (bp->dev->mtu > 1500) {
2284 		u32 val;
2285 
2286 		/* Set extended packet length bit */
2287 		bnx2_write_phy(bp, 0x18, 0x7);
2288 		bnx2_read_phy(bp, 0x18, &val);
2289 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2290 
2291 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2292 		bnx2_read_phy(bp, 0x1c, &val);
2293 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2294 	}
2295 	else {
2296 		u32 val;
2297 
2298 		bnx2_write_phy(bp, 0x18, 0x7);
2299 		bnx2_read_phy(bp, 0x18, &val);
2300 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2301 
2302 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2303 		bnx2_read_phy(bp, 0x1c, &val);
2304 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 static int
2311 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2312 {
2313 	u32 val;
2314 
2315 	if (reset_phy)
2316 		bnx2_reset_phy(bp);
2317 
2318 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2319 		bnx2_write_phy(bp, 0x18, 0x0c00);
2320 		bnx2_write_phy(bp, 0x17, 0x000a);
2321 		bnx2_write_phy(bp, 0x15, 0x310b);
2322 		bnx2_write_phy(bp, 0x17, 0x201f);
2323 		bnx2_write_phy(bp, 0x15, 0x9506);
2324 		bnx2_write_phy(bp, 0x17, 0x401f);
2325 		bnx2_write_phy(bp, 0x15, 0x14e2);
2326 		bnx2_write_phy(bp, 0x18, 0x0400);
2327 	}
2328 
2329 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2330 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2331 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2332 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2333 		val &= ~(1 << 8);
2334 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2335 	}
2336 
2337 	if (bp->dev->mtu > 1500) {
2338 		/* Set extended packet length bit */
2339 		bnx2_write_phy(bp, 0x18, 0x7);
2340 		bnx2_read_phy(bp, 0x18, &val);
2341 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2342 
2343 		bnx2_read_phy(bp, 0x10, &val);
2344 		bnx2_write_phy(bp, 0x10, val | 0x1);
2345 	}
2346 	else {
2347 		bnx2_write_phy(bp, 0x18, 0x7);
2348 		bnx2_read_phy(bp, 0x18, &val);
2349 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2350 
2351 		bnx2_read_phy(bp, 0x10, &val);
2352 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2353 	}
2354 
2355 	/* ethernet@wirespeed */
2356 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2357 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2358 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2359 
2360 	/* auto-mdix */
2361 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2362 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2363 
2364 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2365 	return 0;
2366 }
2367 
2368 
2369 static int
2370 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2371 __releases(&bp->phy_lock)
2372 __acquires(&bp->phy_lock)
2373 {
2374 	u32 val;
2375 	int rc = 0;
2376 
2377 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2378 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2379 
2380 	bp->mii_bmcr = MII_BMCR;
2381 	bp->mii_bmsr = MII_BMSR;
2382 	bp->mii_bmsr1 = MII_BMSR;
2383 	bp->mii_adv = MII_ADVERTISE;
2384 	bp->mii_lpa = MII_LPA;
2385 
2386 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2387 
2388 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2389 		goto setup_phy;
2390 
2391 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2392 	bp->phy_id = val << 16;
2393 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2394 	bp->phy_id |= val & 0xffff;
2395 
2396 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2397 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2398 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2399 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2400 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2401 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2402 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2403 	}
2404 	else {
2405 		rc = bnx2_init_copper_phy(bp, reset_phy);
2406 	}
2407 
2408 setup_phy:
2409 	if (!rc)
2410 		rc = bnx2_setup_phy(bp, bp->phy_port);
2411 
2412 	return rc;
2413 }
2414 
2415 static int
2416 bnx2_set_mac_loopback(struct bnx2 *bp)
2417 {
2418 	u32 mac_mode;
2419 
2420 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2421 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2422 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2423 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2424 	bp->link_up = 1;
2425 	return 0;
2426 }
2427 
2428 static int bnx2_test_link(struct bnx2 *);
2429 
2430 static int
2431 bnx2_set_phy_loopback(struct bnx2 *bp)
2432 {
2433 	u32 mac_mode;
2434 	int rc, i;
2435 
2436 	spin_lock_bh(&bp->phy_lock);
2437 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2438 			    BMCR_SPEED1000);
2439 	spin_unlock_bh(&bp->phy_lock);
2440 	if (rc)
2441 		return rc;
2442 
2443 	for (i = 0; i < 10; i++) {
2444 		if (bnx2_test_link(bp) == 0)
2445 			break;
2446 		msleep(100);
2447 	}
2448 
2449 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2450 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2451 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2452 		      BNX2_EMAC_MODE_25G_MODE);
2453 
2454 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2455 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2456 	bp->link_up = 1;
2457 	return 0;
2458 }
2459 
2460 static void
2461 bnx2_dump_mcp_state(struct bnx2 *bp)
2462 {
2463 	struct net_device *dev = bp->dev;
2464 	u32 mcp_p0, mcp_p1;
2465 
2466 	netdev_err(dev, "<--- start MCP states dump --->\n");
2467 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2468 		mcp_p0 = BNX2_MCP_STATE_P0;
2469 		mcp_p1 = BNX2_MCP_STATE_P1;
2470 	} else {
2471 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2472 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2473 	}
2474 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2475 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2476 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2477 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2478 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2479 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2480 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2481 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2482 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2483 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2484 	netdev_err(dev, "DEBUG: shmem states:\n");
2485 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2486 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2487 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2488 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2489 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2490 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2491 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2492 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2493 	pr_cont(" condition[%08x]\n",
2494 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2495 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2496 	DP_SHMEM_LINE(bp, 0x3cc);
2497 	DP_SHMEM_LINE(bp, 0x3dc);
2498 	DP_SHMEM_LINE(bp, 0x3ec);
2499 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2500 	netdev_err(dev, "<--- end MCP states dump --->\n");
2501 }
2502 
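/* Post a message to the firmware through the BNX2_DRV_MB mailbox and,
 * if requested, poll BNX2_FW_MB for a matching acknowledgement.  A
 * sequence number embedded in the message pairs each request with its
 * ack.  On timeout the firmware is informed with a FW_TIMEOUT code and
 * -EBUSY is returned; a non-OK firmware status yields -EIO.
 */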
2503 static int
2504 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2505 {
2506 	int i;
2507 	u32 val;
2508 
2509 	bp->fw_wr_seq++;
2510 	msg_data |= bp->fw_wr_seq;
2511 	bp->fw_last_msg = msg_data;
2512 
2513 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2514 
2515 	if (!ack)
2516 		return 0;
2517 
2518 	/* wait for an acknowledgement. */
2519 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2520 		msleep(10);
2521 
2522 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2523 
2524 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2525 			break;
2526 	}
2527 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2528 		return 0;
2529 
2530 	/* If we timed out, inform the firmware that this is the case. */
2531 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2532 		msg_data &= ~BNX2_DRV_MSG_CODE;
2533 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2534 
2535 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2536 		if (!silent) {
2537 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2538 			bnx2_dump_mcp_state(bp);
2539 		}
2540 
2541 		return -EBUSY;
2542 	}
2543 
2544 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2545 		return -EIO;
2546 
2547 	return 0;
2548 }
2549 
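/* The 5709 keeps its connection context in host memory.  Initialize
 * the context memory, then register each host page with the chip's
 * page table, waiting for the WRITE_REQ bit to clear after each entry
 * is posted.
 */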
2550 static int
2551 bnx2_init_5709_context(struct bnx2 *bp)
2552 {
2553 	int i, ret = 0;
2554 	u32 val;
2555 
2556 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2557 	val |= (BNX2_PAGE_BITS - 8) << 16;
2558 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2559 	for (i = 0; i < 10; i++) {
2560 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2561 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2562 			break;
2563 		udelay(2);
2564 	}
2565 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2566 		return -EBUSY;
2567 
2568 	for (i = 0; i < bp->ctx_pages; i++) {
2569 		int j;
2570 
2571 		if (bp->ctx_blk[i])
2572 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2573 		else
2574 			return -ENOMEM;
2575 
2576 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2577 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2578 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2579 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2580 			(u64) bp->ctx_blk_mapping[i] >> 32);
2581 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2582 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2583 		for (j = 0; j < 10; j++) {
2584 
2585 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2586 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2587 				break;
2588 			udelay(5);
2589 		}
2590 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2591 			ret = -EBUSY;
2592 			break;
2593 		}
2594 	}
2595 	return ret;
2596 }
2597 
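/* On earlier chips the context is on-chip.  Zero out all 96 contexts
 * through the context window; 5706 A0 maps certain virtual CIDs to
 * different physical CIDs.
 */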
2598 static void
2599 bnx2_init_context(struct bnx2 *bp)
2600 {
2601 	u32 vcid;
2602 
2603 	vcid = 96;
2604 	while (vcid) {
2605 		u32 vcid_addr, pcid_addr, offset;
2606 		int i;
2607 
2608 		vcid--;
2609 
2610 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2611 			u32 new_vcid;
2612 
2613 			vcid_addr = GET_PCID_ADDR(vcid);
2614 			if (vcid & 0x8) {
2615 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2616 			}
2617 			else {
2618 				new_vcid = vcid;
2619 			}
2620 			pcid_addr = GET_PCID_ADDR(new_vcid);
2621 		}
2622 		else {
2623 			vcid_addr = GET_CID_ADDR(vcid);
2624 			pcid_addr = vcid_addr;
2625 		}
2626 
2627 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2628 			vcid_addr += (i << PHY_CTX_SHIFT);
2629 			pcid_addr += (i << PHY_CTX_SHIFT);
2630 
2631 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2632 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2633 
2634 			/* Zero out the context. */
2635 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2636 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2637 		}
2638 	}
2639 }
2640 
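/* Workaround for bad on-chip RX buffer memory: drain the chip's free
 * mbuf pool, recording the good buffers (bit 9 clear), then free only
 * the good ones back.  The bad blocks stay allocated forever so the
 * hardware can never hand them out again.
 */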
2641 static int
2642 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2643 {
2644 	u16 *good_mbuf;
2645 	u32 good_mbuf_cnt;
2646 	u32 val;
2647 
2648 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2649 	if (good_mbuf == NULL)
2650 		return -ENOMEM;
2651 
2652 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2653 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2654 
2655 	good_mbuf_cnt = 0;
2656 
2657 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2658 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2659 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2660 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2661 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2662 
2663 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2664 
2665 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2666 
2667 		/* The addresses with Bit 9 set are bad memory blocks. */
2668 		if (!(val & (1 << 9))) {
2669 			good_mbuf[good_mbuf_cnt] = (u16) val;
2670 			good_mbuf_cnt++;
2671 		}
2672 
2673 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2674 	}
2675 
2676 	/* Free the good ones back to the mbuf pool, thus discarding
2677 	 * all the bad ones. */
2678 	while (good_mbuf_cnt) {
2679 		good_mbuf_cnt--;
2680 
2681 		val = good_mbuf[good_mbuf_cnt];
2682 		val = (val << 9) | val | 1;
2683 
2684 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2685 	}
2686 	kfree(good_mbuf);
2687 	return 0;
2688 }
2689 
2690 static void
2691 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2692 {
2693 	u32 val;
2694 
2695 	val = (mac_addr[0] << 8) | mac_addr[1];
2696 
2697 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2698 
2699 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2700 		(mac_addr[4] << 8) | mac_addr[5];
2701 
2702 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2703 }
2704 
2705 static inline int
2706 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2707 {
2708 	dma_addr_t mapping;
2709 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2710 	struct bnx2_rx_bd *rxbd =
2711 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2712 	struct page *page = alloc_page(gfp);
2713 
2714 	if (!page)
2715 		return -ENOMEM;
2716 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2717 			       PCI_DMA_FROMDEVICE);
2718 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2719 		__free_page(page);
2720 		return -EIO;
2721 	}
2722 
2723 	rx_pg->page = page;
2724 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2725 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2726 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2727 	return 0;
2728 }
2729 
2730 static void
2731 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2732 {
2733 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2734 	struct page *page = rx_pg->page;
2735 
2736 	if (!page)
2737 		return;
2738 
2739 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2740 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2741 
2742 	__free_page(page);
2743 	rx_pg->page = NULL;
2744 }
2745 
2746 static inline int
2747 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2748 {
2749 	u8 *data;
2750 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2751 	dma_addr_t mapping;
2752 	struct bnx2_rx_bd *rxbd =
2753 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2754 
2755 	data = kmalloc(bp->rx_buf_size, gfp);
2756 	if (!data)
2757 		return -ENOMEM;
2758 
2759 	mapping = dma_map_single(&bp->pdev->dev,
2760 				 get_l2_fhdr(data),
2761 				 bp->rx_buf_use_size,
2762 				 PCI_DMA_FROMDEVICE);
2763 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2764 		kfree(data);
2765 		return -EIO;
2766 	}
2767 
2768 	rx_buf->data = data;
2769 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2770 
2771 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2772 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2773 
2774 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2775 
2776 	return 0;
2777 }
2778 
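/* An attention event is pending when the event bit in status_attn_bits
 * differs from its copy in status_attn_bits_ack.  Acknowledge it by
 * latching the new state through the SET/CLEAR command registers.
 */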
2779 static int
2780 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2781 {
2782 	struct status_block *sblk = bnapi->status_blk.msi;
2783 	u32 new_link_state, old_link_state;
2784 	int is_set = 1;
2785 
2786 	new_link_state = sblk->status_attn_bits & event;
2787 	old_link_state = sblk->status_attn_bits_ack & event;
2788 	if (new_link_state != old_link_state) {
2789 		if (new_link_state)
2790 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2791 		else
2792 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2793 	} else
2794 		is_set = 0;
2795 
2796 	return is_set;
2797 }
2798 
2799 static void
2800 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2801 {
2802 	spin_lock(&bp->phy_lock);
2803 
2804 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2805 		bnx2_set_link(bp);
2806 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2807 		bnx2_set_remote_link(bp);
2808 
2809 	spin_unlock(&bp->phy_lock);
2810 
2811 }
2812 
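
/* The last descriptor slot in each ring page is reserved for the chain
 * to the next page, so when the hardware consumer index lands on that
 * slot, skip over it.
 */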
2813 static inline u16
2814 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2815 {
2816 	u16 cons;
2817 
2818 	/* Tell compiler that status block fields can change. */
2819 	barrier();
2820 	cons = *bnapi->hw_tx_cons_ptr;
2821 	barrier();
2822 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2823 		cons++;
2824 	return cons;
2825 }
2826 
2827 static int
2828 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2829 {
2830 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2831 	u16 hw_cons, sw_cons, sw_ring_cons;
2832 	int tx_pkt = 0, index;
2833 	unsigned int tx_bytes = 0;
2834 	struct netdev_queue *txq;
2835 
2836 	index = (bnapi - bp->bnx2_napi);
2837 	txq = netdev_get_tx_queue(bp->dev, index);
2838 
2839 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2840 	sw_cons = txr->tx_cons;
2841 
2842 	while (sw_cons != hw_cons) {
2843 		struct bnx2_sw_tx_bd *tx_buf;
2844 		struct sk_buff *skb;
2845 		int i, last;
2846 
2847 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2848 
2849 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2850 		skb = tx_buf->skb;
2851 
2852 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2853 		prefetch(&skb->end);
2854 
2855 		/* partial BD completions possible with TSO packets */
2856 		if (tx_buf->is_gso) {
2857 			u16 last_idx, last_ring_idx;
2858 
2859 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2860 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2861 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2862 				last_idx++;
2863 			}
2864 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2865 				break;
2866 			}
2867 		}
2868 
2869 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2870 			skb_headlen(skb), PCI_DMA_TODEVICE);
2871 
2872 		tx_buf->skb = NULL;
2873 		last = tx_buf->nr_frags;
2874 
2875 		for (i = 0; i < last; i++) {
2876 			struct bnx2_sw_tx_bd *tx_buf;
2877 
2878 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2879 
2880 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2881 			dma_unmap_page(&bp->pdev->dev,
2882 				dma_unmap_addr(tx_buf, mapping),
2883 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2884 				PCI_DMA_TODEVICE);
2885 		}
2886 
2887 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2888 
2889 		tx_bytes += skb->len;
2890 		dev_kfree_skb_any(skb);
2891 		tx_pkt++;
2892 		if (tx_pkt == budget)
2893 			break;
2894 
2895 		if (hw_cons == sw_cons)
2896 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2897 	}
2898 
2899 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2900 	txr->hw_tx_cons = hw_cons;
2901 	txr->tx_cons = sw_cons;
2902 
2903 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2904 	 * before checking for netif_tx_queue_stopped().  Without the
2905 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2906 	 * will miss it and cause the queue to be stopped forever.
2907 	 */
2908 	smp_mb();
2909 
2910 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2911 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2912 		__netif_tx_lock(txq, smp_processor_id());
2913 		if ((netif_tx_queue_stopped(txq)) &&
2914 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2915 			netif_tx_wake_queue(txq);
2916 		__netif_tx_unlock(txq);
2917 	}
2918 
2919 	return tx_pkt;
2920 }
2921 
2922 static void
2923 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2924 			struct sk_buff *skb, int count)
2925 {
2926 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2927 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2928 	int i;
2929 	u16 hw_prod, prod;
2930 	u16 cons = rxr->rx_pg_cons;
2931 
2932 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2933 
2934 	/* The caller was unable to allocate a new page to replace the
2935 	 * last one in the frags array, so we need to recycle that page
2936 	 * and then free the skb.
2937 	 */
2938 	if (skb) {
2939 		struct page *page;
2940 		struct skb_shared_info *shinfo;
2941 
2942 		shinfo = skb_shinfo(skb);
2943 		shinfo->nr_frags--;
2944 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2945 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2946 
2947 		cons_rx_pg->page = page;
2948 		dev_kfree_skb(skb);
2949 	}
2950 
2951 	hw_prod = rxr->rx_pg_prod;
2952 
2953 	for (i = 0; i < count; i++) {
2954 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2955 
2956 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2957 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2958 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2959 						[BNX2_RX_IDX(cons)];
2960 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2961 						[BNX2_RX_IDX(prod)];
2962 
2963 		if (prod != cons) {
2964 			prod_rx_pg->page = cons_rx_pg->page;
2965 			cons_rx_pg->page = NULL;
2966 			dma_unmap_addr_set(prod_rx_pg, mapping,
2967 				dma_unmap_addr(cons_rx_pg, mapping));
2968 
2969 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2970 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2971 
2972 		}
2973 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2974 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2975 	}
2976 	rxr->rx_pg_prod = hw_prod;
2977 	rxr->rx_pg_cons = cons;
2978 }
2979 
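/* Recycle an RX buffer whose replacement could not be allocated: move
 * the data pointer, DMA mapping and buffer-descriptor address from the
 * consumer slot back to the producer slot so the hardware can reuse
 * the same buffer.
 */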
2980 static inline void
2981 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2982 		   u8 *data, u16 cons, u16 prod)
2983 {
2984 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2985 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2986 
2987 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2988 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2989 
2990 	dma_sync_single_for_device(&bp->pdev->dev,
2991 		dma_unmap_addr(cons_rx_buf, mapping),
2992 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2993 
2994 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2995 
2996 	prod_rx_buf->data = data;
2997 
2998 	if (cons == prod)
2999 		return;
3000 
3001 	dma_unmap_addr_set(prod_rx_buf, mapping,
3002 			dma_unmap_addr(cons_rx_buf, mapping));
3003 
3004 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3005 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3006 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3007 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3008 }
3009 
3010 static struct sk_buff *
3011 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3012 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3013 	    u32 ring_idx)
3014 {
3015 	int err;
3016 	u16 prod = ring_idx & 0xffff;
3017 	struct sk_buff *skb;
3018 
3019 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3020 	if (unlikely(err)) {
3021 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3022 error:
3023 		if (hdr_len) {
3024 			unsigned int raw_len = len + 4;
3025 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3026 
3027 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3028 		}
3029 		return NULL;
3030 	}
3031 
3032 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3033 			 PCI_DMA_FROMDEVICE);
3034 	skb = build_skb(data, 0);
3035 	if (!skb) {
3036 		kfree(data);
3037 		goto error;
3038 	}
3039 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3040 	if (hdr_len == 0) {
3041 		skb_put(skb, len);
3042 		return skb;
3043 	} else {
3044 		unsigned int i, frag_len, frag_size, pages;
3045 		struct bnx2_sw_pg *rx_pg;
3046 		u16 pg_cons = rxr->rx_pg_cons;
3047 		u16 pg_prod = rxr->rx_pg_prod;
3048 
3049 		frag_size = len + 4 - hdr_len;
3050 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3051 		skb_put(skb, hdr_len);
3052 
3053 		for (i = 0; i < pages; i++) {
3054 			dma_addr_t mapping_old;
3055 
3056 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3057 			if (unlikely(frag_len <= 4)) {
3058 				unsigned int tail = 4 - frag_len;
3059 
3060 				rxr->rx_pg_cons = pg_cons;
3061 				rxr->rx_pg_prod = pg_prod;
3062 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3063 							pages - i);
3064 				skb->len -= tail;
3065 				if (i == 0) {
3066 					skb->tail -= tail;
3067 				} else {
3068 					skb_frag_t *frag =
3069 						&skb_shinfo(skb)->frags[i - 1];
3070 					skb_frag_size_sub(frag, tail);
3071 					skb->data_len -= tail;
3072 				}
3073 				return skb;
3074 			}
3075 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3076 
3077 			/* Don't unmap yet.  If we're unable to allocate a new
3078 			 * page, we need to recycle the page and the DMA addr.
3079 			 */
3080 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3081 			if (i == pages - 1)
3082 				frag_len -= 4;
3083 
3084 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3085 			rx_pg->page = NULL;
3086 
3087 			err = bnx2_alloc_rx_page(bp, rxr,
3088 						 BNX2_RX_PG_RING_IDX(pg_prod),
3089 						 GFP_ATOMIC);
3090 			if (unlikely(err)) {
3091 				rxr->rx_pg_cons = pg_cons;
3092 				rxr->rx_pg_prod = pg_prod;
3093 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3094 							pages - i);
3095 				return NULL;
3096 			}
3097 
3098 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3099 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3100 
3101 			frag_size -= frag_len;
3102 			skb->data_len += frag_len;
3103 			skb->truesize += PAGE_SIZE;
3104 			skb->len += frag_len;
3105 
3106 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3107 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3108 		}
3109 		rxr->rx_pg_prod = pg_prod;
3110 		rxr->rx_pg_cons = pg_cons;
3111 	}
3112 	return skb;
3113 }
3114 
3115 static inline u16
3116 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3117 {
3118 	u16 cons;
3119 
3120 	/* Tell compiler that status block fields can change. */
3121 	barrier();
3122 	cons = *bnapi->hw_rx_cons_ptr;
3123 	barrier();
3124 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3125 		cons++;
3126 	return cons;
3127 }
3128 
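/* Main RX completion loop.  Packets at or below rx_copy_thresh are
 * copied into a fresh skb and the original buffer is recycled; larger
 * packets are handed to bnx2_rx_skb(), which may also pull the tail of
 * a jumbo frame from the page ring.
 */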
3129 static int
3130 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3131 {
3132 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3133 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3134 	struct l2_fhdr *rx_hdr;
3135 	int rx_pkt = 0, pg_ring_used = 0;
3136 
3137 	if (budget <= 0)
3138 		return rx_pkt;
3139 
3140 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3141 	sw_cons = rxr->rx_cons;
3142 	sw_prod = rxr->rx_prod;
3143 
3144 	/* Memory barrier necessary as speculative reads of the rx
3145 	 * buffer can be ahead of the index in the status block
3146 	 */
3147 	rmb();
3148 	while (sw_cons != hw_cons) {
3149 		unsigned int len, hdr_len;
3150 		u32 status;
3151 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3152 		struct sk_buff *skb;
3153 		dma_addr_t dma_addr;
3154 		u8 *data;
3155 		u16 next_ring_idx;
3156 
3157 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3158 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3159 
3160 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3161 		data = rx_buf->data;
3162 		rx_buf->data = NULL;
3163 
3164 		rx_hdr = get_l2_fhdr(data);
3165 		prefetch(rx_hdr);
3166 
3167 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3168 
3169 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3170 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3171 			PCI_DMA_FROMDEVICE);
3172 
3173 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3174 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3175 		prefetch(get_l2_fhdr(next_rx_buf->data));
3176 
3177 		len = rx_hdr->l2_fhdr_pkt_len;
3178 		status = rx_hdr->l2_fhdr_status;
3179 
3180 		hdr_len = 0;
3181 		if (status & L2_FHDR_STATUS_SPLIT) {
3182 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3183 			pg_ring_used = 1;
3184 		} else if (len > bp->rx_jumbo_thresh) {
3185 			hdr_len = bp->rx_jumbo_thresh;
3186 			pg_ring_used = 1;
3187 		}
3188 
3189 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3190 				       L2_FHDR_ERRORS_PHY_DECODE |
3191 				       L2_FHDR_ERRORS_ALIGNMENT |
3192 				       L2_FHDR_ERRORS_TOO_SHORT |
3193 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3194 
3195 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3196 					  sw_ring_prod);
3197 			if (pg_ring_used) {
3198 				int pages;
3199 
3200 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3201 
3202 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3203 			}
3204 			goto next_rx;
3205 		}
3206 
3207 		len -= 4;
3208 
3209 		if (len <= bp->rx_copy_thresh) {
3210 			skb = netdev_alloc_skb(bp->dev, len + 6);
3211 			if (skb == NULL) {
3212 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3213 						  sw_ring_prod);
3214 				goto next_rx;
3215 			}
3216 
3217 			/* aligned copy */
3218 			memcpy(skb->data,
3219 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3220 			       len + 6);
3221 			skb_reserve(skb, 6);
3222 			skb_put(skb, len);
3223 
3224 			bnx2_reuse_rx_data(bp, rxr, data,
3225 				sw_ring_cons, sw_ring_prod);
3226 
3227 		} else {
3228 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3229 					  (sw_ring_cons << 16) | sw_ring_prod);
3230 			if (!skb)
3231 				goto next_rx;
3232 		}
3233 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3234 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3235 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3236 
3237 		skb->protocol = eth_type_trans(skb, bp->dev);
3238 
3239 		if (len > (bp->dev->mtu + ETH_HLEN) &&
3240 		    skb->protocol != htons(ETH_P_8021Q) &&
3241 		    skb->protocol != htons(ETH_P_8021AD)) {
3242 
3243 			dev_kfree_skb(skb);
3244 			goto next_rx;
3245 
3246 		}
3247 
3248 		skb_checksum_none_assert(skb);
3249 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3250 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3251 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3252 
3253 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3254 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3255 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3256 		}
3257 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3258 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3259 		     L2_FHDR_STATUS_USE_RXHASH))
3260 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3261 				     PKT_HASH_TYPE_L3);
3262 
3263 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3264 		napi_gro_receive(&bnapi->napi, skb);
3265 		rx_pkt++;
3266 
3267 next_rx:
3268 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3269 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3270 
3271 		if (rx_pkt == budget)
3272 			break;
3273 
3274 		/* Refresh hw_cons to see if there is new work */
3275 		if (sw_cons == hw_cons) {
3276 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3277 			rmb();
3278 		}
3279 	}
3280 	rxr->rx_cons = sw_cons;
3281 	rxr->rx_prod = sw_prod;
3282 
3283 	if (pg_ring_used)
3284 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3285 
3286 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3287 
3288 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3289 
3290 	mmiowb();
3291 
3292 	return rx_pkt;
3293 
3294 }
3295 
3296 /* MSI ISR - The only difference between this and the INTx ISR
3297  * is that the MSI interrupt is always serviced.
3298  */
3299 static irqreturn_t
3300 bnx2_msi(int irq, void *dev_instance)
3301 {
3302 	struct bnx2_napi *bnapi = dev_instance;
3303 	struct bnx2 *bp = bnapi->bp;
3304 
3305 	prefetch(bnapi->status_blk.msi);
3306 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3307 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3308 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3309 
3310 	/* Return here if interrupt is disabled. */
3311 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3312 		return IRQ_HANDLED;
3313 
3314 	napi_schedule(&bnapi->napi);
3315 
3316 	return IRQ_HANDLED;
3317 }
3318 
3319 static irqreturn_t
3320 bnx2_msi_1shot(int irq, void *dev_instance)
3321 {
3322 	struct bnx2_napi *bnapi = dev_instance;
3323 	struct bnx2 *bp = bnapi->bp;
3324 
3325 	prefetch(bnapi->status_blk.msi);
3326 
3327 	/* Return here if interrupt is disabled. */
3328 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3329 		return IRQ_HANDLED;
3330 
3331 	napi_schedule(&bnapi->napi);
3332 
3333 	return IRQ_HANDLED;
3334 }
3335 
3336 static irqreturn_t
3337 bnx2_interrupt(int irq, void *dev_instance)
3338 {
3339 	struct bnx2_napi *bnapi = dev_instance;
3340 	struct bnx2 *bp = bnapi->bp;
3341 	struct status_block *sblk = bnapi->status_blk.msi;
3342 
3343 	/* When using INTx, it is possible for the interrupt to arrive
3344 	 * at the CPU before the status block posted prior to the
3345 	 * interrupt. Reading a register will flush the status block.
3346 	 * When using MSI, the MSI message will always complete after
3347 	 * the status block write.
3348 	 */
3349 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3350 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3351 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3352 		return IRQ_NONE;
3353 
3354 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3355 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3356 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3357 
3358 	/* Read back to deassert IRQ immediately to avoid too many
3359 	 * spurious interrupts.
3360 	 */
3361 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3362 
3363 	/* Return here if interrupt is shared and is disabled. */
3364 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3365 		return IRQ_HANDLED;
3366 
3367 	if (napi_schedule_prep(&bnapi->napi)) {
3368 		bnapi->last_status_idx = sblk->status_idx;
3369 		__napi_schedule(&bnapi->napi);
3370 	}
3371 
3372 	return IRQ_HANDLED;
3373 }
3374 
3375 static inline int
3376 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3377 {
3378 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3379 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3380 
3381 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3382 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3383 		return 1;
3384 	return 0;
3385 }
3386 
3387 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3388 				 STATUS_ATTN_BITS_TIMER_ABORT)
3389 
3390 static inline int
3391 bnx2_has_work(struct bnx2_napi *bnapi)
3392 {
3393 	struct status_block *sblk = bnapi->status_blk.msi;
3394 
3395 	if (bnx2_has_fast_work(bnapi))
3396 		return 1;
3397 
3398 #ifdef BCM_CNIC
3399 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3400 		return 1;
3401 #endif
3402 
3403 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3404 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3405 		return 1;
3406 
3407 	return 0;
3408 }
3409 
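/* Workaround for a missed MSI: if work is pending but the status index
 * has not moved since the last idle check, the interrupt was likely
 * lost.  Toggle the MSI enable bit and invoke the handler directly.
 */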
3410 static void
3411 bnx2_chk_missed_msi(struct bnx2 *bp)
3412 {
3413 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3414 	u32 msi_ctrl;
3415 
3416 	if (bnx2_has_work(bnapi)) {
3417 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3418 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3419 			return;
3420 
3421 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3422 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3423 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3424 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3425 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3426 		}
3427 	}
3428 
3429 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3430 }
3431 
3432 #ifdef BCM_CNIC
3433 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3434 {
3435 	struct cnic_ops *c_ops;
3436 
3437 	if (!bnapi->cnic_present)
3438 		return;
3439 
3440 	rcu_read_lock();
3441 	c_ops = rcu_dereference(bp->cnic_ops);
3442 	if (c_ops)
3443 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3444 						      bnapi->status_blk.msi);
3445 	rcu_read_unlock();
3446 }
3447 #endif
3448 
3449 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3450 {
3451 	struct status_block *sblk = bnapi->status_blk.msi;
3452 	u32 status_attn_bits = sblk->status_attn_bits;
3453 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3454 
3455 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3456 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3457 
3458 		bnx2_phy_int(bp, bnapi);
3459 
3460 		/* This is needed to take care of transient status
3461 		 * during link changes.
3462 		 */
3463 		BNX2_WR(bp, BNX2_HC_COMMAND,
3464 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3465 		BNX2_RD(bp, BNX2_HC_COMMAND);
3466 	}
3467 }
3468 
3469 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3470 			  int work_done, int budget)
3471 {
3472 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3473 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3474 
3475 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3476 		bnx2_tx_int(bp, bnapi, 0);
3477 
3478 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3479 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3480 
3481 	return work_done;
3482 }
3483 
3484 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3485 {
3486 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3487 	struct bnx2 *bp = bnapi->bp;
3488 	int work_done = 0;
3489 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3490 
3491 	while (1) {
3492 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3493 		if (unlikely(work_done >= budget))
3494 			break;
3495 
3496 		bnapi->last_status_idx = sblk->status_idx;
3497 		/* status idx must be read before checking for more work. */
3498 		rmb();
3499 		if (likely(!bnx2_has_fast_work(bnapi))) {
3500 
3501 			napi_complete(napi);
3502 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3503 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3504 				bnapi->last_status_idx);
3505 			break;
3506 		}
3507 	}
3508 	return work_done;
3509 }
3510 
3511 static int bnx2_poll(struct napi_struct *napi, int budget)
3512 {
3513 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3514 	struct bnx2 *bp = bnapi->bp;
3515 	int work_done = 0;
3516 	struct status_block *sblk = bnapi->status_blk.msi;
3517 
3518 	while (1) {
3519 		bnx2_poll_link(bp, bnapi);
3520 
3521 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3522 
3523 #ifdef BCM_CNIC
3524 		bnx2_poll_cnic(bp, bnapi);
3525 #endif
3526 
3527 		/* bnapi->last_status_idx is used below to tell the hw how
3528 		 * much work has been processed, so we must read it before
3529 		 * checking for more work.
3530 		 */
3531 		bnapi->last_status_idx = sblk->status_idx;
3532 
3533 		if (unlikely(work_done >= budget))
3534 			break;
3535 
3536 		rmb();
3537 		if (likely(!bnx2_has_work(bnapi))) {
3538 			napi_complete(napi);
3539 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3540 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3541 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3542 					bnapi->last_status_idx);
3543 				break;
3544 			}
3545 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3546 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3547 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3548 				bnapi->last_status_idx);
3549 
3550 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3551 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3552 				bnapi->last_status_idx);
3553 			break;
3554 		}
3555 	}
3556 
3557 	return work_done;
3558 }
3559 
3560 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3561  * from set_multicast.
3562  */
3563 static void
3564 bnx2_set_rx_mode(struct net_device *dev)
3565 {
3566 	struct bnx2 *bp = netdev_priv(dev);
3567 	u32 rx_mode, sort_mode;
3568 	struct netdev_hw_addr *ha;
3569 	int i;
3570 
3571 	if (!netif_running(dev))
3572 		return;
3573 
3574 	spin_lock_bh(&bp->phy_lock);
3575 
3576 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3577 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3578 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3579 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3580 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3581 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3582 	if (dev->flags & IFF_PROMISC) {
3583 		/* Promiscuous mode. */
3584 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3585 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3586 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3587 	}
3588 	else if (dev->flags & IFF_ALLMULTI) {
3589 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3590 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3591 				0xffffffff);
3592 		}
3593 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3594 	}
3595 	else {
3596 		/* Accept one or more multicast(s). */
3597 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3598 		u32 regidx;
3599 		u32 bit;
3600 		u32 crc;
3601 
3602 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3603 
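		/* Hash each address into one of 256 filter bits: the low
		 * 8 bits of the little-endian CRC select the bit, with the
		 * top 3 of those bits choosing one of the 8 hash registers
		 * and the low 5 bits the position within it.
		 */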
3604 		netdev_for_each_mc_addr(ha, dev) {
3605 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3606 			bit = crc & 0xff;
3607 			regidx = (bit & 0xe0) >> 5;
3608 			bit &= 0x1f;
3609 			mc_filter[regidx] |= (1 << bit);
3610 		}
3611 
3612 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3613 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3614 				mc_filter[i]);
3615 		}
3616 
3617 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3618 	}
3619 
3620 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3621 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3622 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3623 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3624 	} else if (!(dev->flags & IFF_PROMISC)) {
3625 		/* Add all entries into the match filter list */
3626 		i = 0;
3627 		netdev_for_each_uc_addr(ha, dev) {
3628 			bnx2_set_mac_addr(bp, ha->addr,
3629 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3630 			sort_mode |= (1 <<
3631 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3632 			i++;
3633 		}
3634 
3635 	}
3636 
3637 	if (rx_mode != bp->rx_mode) {
3638 		bp->rx_mode = rx_mode;
3639 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3640 	}
3641 
3642 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3643 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3644 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3645 
3646 	spin_unlock_bh(&bp->phy_lock);
3647 }
3648 
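/* Validate one section header of a firmware file: the section must lie
 * entirely within the blob, start on a 4-byte boundary, and have a
 * length that is a multiple of the required alignment (and non-zero
 * when non_empty is set).
 */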
3649 static int
3650 check_fw_section(const struct firmware *fw,
3651 		 const struct bnx2_fw_file_section *section,
3652 		 u32 alignment, bool non_empty)
3653 {
3654 	u32 offset = be32_to_cpu(section->offset);
3655 	u32 len = be32_to_cpu(section->len);
3656 
3657 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3658 		return -EINVAL;
3659 	if ((non_empty && len == 0) || len > fw->size - offset ||
3660 	    len & (alignment - 1))
3661 		return -EINVAL;
3662 	return 0;
3663 }
3664 
3665 static int
3666 check_mips_fw_entry(const struct firmware *fw,
3667 		    const struct bnx2_mips_fw_file_entry *entry)
3668 {
3669 	if (check_fw_section(fw, &entry->text, 4, true) ||
3670 	    check_fw_section(fw, &entry->data, 4, false) ||
3671 	    check_fw_section(fw, &entry->rodata, 4, false))
3672 		return -EINVAL;
3673 	return 0;
3674 }
3675 
3676 static void bnx2_release_firmware(struct bnx2 *bp)
3677 {
3678 	if (bp->rv2p_firmware) {
3679 		release_firmware(bp->mips_firmware);
3680 		release_firmware(bp->rv2p_firmware);
3681 		bp->rv2p_firmware = NULL;
3682 	}
3683 }
3684 
3685 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3686 {
3687 	const char *mips_fw_file, *rv2p_fw_file;
3688 	const struct bnx2_mips_fw_file *mips_fw;
3689 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3690 	int rc;
3691 
3692 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3693 		mips_fw_file = FW_MIPS_FILE_09;
3694 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3695 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3696 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3697 		else
3698 			rv2p_fw_file = FW_RV2P_FILE_09;
3699 	} else {
3700 		mips_fw_file = FW_MIPS_FILE_06;
3701 		rv2p_fw_file = FW_RV2P_FILE_06;
3702 	}
3703 
3704 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3705 	if (rc) {
3706 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3707 		goto out;
3708 	}
3709 
3710 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3711 	if (rc) {
3712 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3713 		goto err_release_mips_firmware;
3714 	}
3715 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3716 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3717 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3718 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3719 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3720 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3721 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3722 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3723 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3724 		rc = -EINVAL;
3725 		goto err_release_firmware;
3726 	}
3727 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3728 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3729 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3730 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3731 		rc = -EINVAL;
3732 		goto err_release_firmware;
3733 	}
3734 out:
3735 	return rc;
3736 
3737 err_release_firmware:
3738 	release_firmware(bp->rv2p_firmware);
3739 	bp->rv2p_firmware = NULL;
3740 err_release_mips_firmware:
3741 	release_firmware(bp->mips_firmware);
3742 	goto out;
3743 }
3744 
3745 static int bnx2_request_firmware(struct bnx2 *bp)
3746 {
3747 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3748 }
3749 
3750 static u32
3751 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3752 {
3753 	switch (idx) {
3754 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3755 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3756 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3757 		break;
3758 	}
3759 	return rv2p_code;
3760 }
3761 
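/* Download one RV2P processor image.  Instructions are 64 bits wide
 * and are written as high/low word pairs, each committed by writing
 * its index with the RDWR command.  Up to 8 fixup locations are then
 * patched (page-size fields, via rv2p_fw_fixup()) before the processor
 * is reset.
 */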
3762 static int
3763 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3764 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3765 {
3766 	u32 rv2p_code_len, file_offset;
3767 	__be32 *rv2p_code;
3768 	int i;
3769 	u32 val, cmd, addr;
3770 
3771 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3772 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3773 
3774 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3775 
3776 	if (rv2p_proc == RV2P_PROC1) {
3777 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3778 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3779 	} else {
3780 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3781 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3782 	}
3783 
3784 	for (i = 0; i < rv2p_code_len; i += 8) {
3785 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3786 		rv2p_code++;
3787 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3788 		rv2p_code++;
3789 
3790 		val = (i / 8) | cmd;
3791 		BNX2_WR(bp, addr, val);
3792 	}
3793 
3794 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3795 	for (i = 0; i < 8; i++) {
3796 		u32 loc, code;
3797 
3798 		loc = be32_to_cpu(fw_entry->fixup[i]);
3799 		if (loc && ((loc * 4) < rv2p_code_len)) {
3800 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3801 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3802 			code = be32_to_cpu(*(rv2p_code + loc));
3803 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3804 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3805 
3806 			val = (loc / 2) | cmd;
3807 			BNX2_WR(bp, addr, val);
3808 		}
3809 	}
3810 
3811 	/* Reset the processor, un-stall is done later. */
3812 	if (rv2p_proc == RV2P_PROC1) {
3813 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3814 	} else {
3816 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3817 	}
3818 
3819 	return 0;
3820 }
3821 
3822 static int
3823 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3824 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3825 {
3826 	u32 addr, len, file_offset;
3827 	__be32 *data;
3828 	u32 offset;
3829 	u32 val;
3830 
3831 	/* Halt the CPU. */
3832 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3833 	val |= cpu_reg->mode_value_halt;
3834 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3835 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3836 
3837 	/* Load the Text area. */
3838 	addr = be32_to_cpu(fw_entry->text.addr);
3839 	len = be32_to_cpu(fw_entry->text.len);
3840 	file_offset = be32_to_cpu(fw_entry->text.offset);
3841 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3842 
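	/* Translate from the CPU's MIPS view of the address to the
	 * scratchpad offset reachable via indirect register writes.
	 */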
3843 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3844 	if (len) {
3845 		int j;
3846 
3847 		for (j = 0; j < (len / 4); j++, offset += 4)
3848 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3849 	}
3850 
3851 	/* Load the Data area. */
3852 	addr = be32_to_cpu(fw_entry->data.addr);
3853 	len = be32_to_cpu(fw_entry->data.len);
3854 	file_offset = be32_to_cpu(fw_entry->data.offset);
3855 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3856 
3857 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3858 	if (len) {
3859 		int j;
3860 
3861 		for (j = 0; j < (len / 4); j++, offset += 4)
3862 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3863 	}
3864 
3865 	/* Load the Read-Only area. */
3866 	addr = be32_to_cpu(fw_entry->rodata.addr);
3867 	len = be32_to_cpu(fw_entry->rodata.len);
3868 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3869 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3870 
3871 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3872 	if (len) {
3873 		int j;
3874 
3875 		for (j = 0; j < (len / 4); j++, offset += 4)
3876 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3877 	}
3878 
3879 	/* Clear the pre-fetch instruction. */
3880 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3881 
3882 	val = be32_to_cpu(fw_entry->start_addr);
3883 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3884 
3885 	/* Start the CPU. */
3886 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3887 	val &= ~cpu_reg->mode_value_halt;
3888 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3889 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3890 
3891 	return 0;
3892 }
3893 
3894 static int
3895 bnx2_init_cpus(struct bnx2 *bp)
3896 {
3897 	const struct bnx2_mips_fw_file *mips_fw =
3898 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3899 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3900 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3901 	int rc;
3902 
3903 	/* Initialize the RV2P processor. */
3904 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3905 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3906 
3907 	/* Initialize the RX Processor. */
3908 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3909 	if (rc)
3910 		goto init_cpu_err;
3911 
3912 	/* Initialize the TX Processor. */
3913 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3914 	if (rc)
3915 		goto init_cpu_err;
3916 
3917 	/* Initialize the TX Patch-up Processor. */
3918 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3919 	if (rc)
3920 		goto init_cpu_err;
3921 
3922 	/* Initialize the Completion Processor. */
3923 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3924 	if (rc)
3925 		goto init_cpu_err;
3926 
3927 	/* Initialize the Command Processor. */
3928 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3929 
3930 init_cpu_err:
3931 	return rc;
3932 }
3933 
3934 static void
3935 bnx2_setup_wol(struct bnx2 *bp)
3936 {
3937 	int i;
3938 	u32 val, wol_msg;
3939 
3940 	if (bp->wol) {
3941 		u32 advertising;
3942 		u8 autoneg;
3943 
3944 		autoneg = bp->autoneg;
3945 		advertising = bp->advertising;
3946 
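		/* On copper ports, renegotiate down to a 10/100 link for
		 * wake-up; gigabit operation is presumably unnecessary in
		 * the low-power wake state.  The saved autoneg settings
		 * are restored below.
		 */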
3947 		if (bp->phy_port == PORT_TP) {
3948 			bp->autoneg = AUTONEG_SPEED;
3949 			bp->advertising = ADVERTISED_10baseT_Half |
3950 				ADVERTISED_10baseT_Full |
3951 				ADVERTISED_100baseT_Half |
3952 				ADVERTISED_100baseT_Full |
3953 				ADVERTISED_Autoneg;
3954 		}
3955 
3956 		spin_lock_bh(&bp->phy_lock);
3957 		bnx2_setup_phy(bp, bp->phy_port);
3958 		spin_unlock_bh(&bp->phy_lock);
3959 
3960 		bp->autoneg = autoneg;
3961 		bp->advertising = advertising;
3962 
3963 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3964 
3965 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3966 
3967 		/* Enable port mode. */
3968 		val &= ~BNX2_EMAC_MODE_PORT;
3969 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3970 		       BNX2_EMAC_MODE_ACPI_RCVD |
3971 		       BNX2_EMAC_MODE_MPKT;
3972 		if (bp->phy_port == PORT_TP) {
3973 			val |= BNX2_EMAC_MODE_PORT_MII;
3974 		} else {
3975 			val |= BNX2_EMAC_MODE_PORT_GMII;
3976 			if (bp->line_speed == SPEED_2500)
3977 				val |= BNX2_EMAC_MODE_25G_MODE;
3978 		}
3979 
3980 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3981 
3982 		/* receive all multicast */
3983 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3984 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3985 				0xffffffff);
3986 		}
3987 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3988 
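		/* Program sort-user rule 0: the low bit presumably enables
		 * perfect matching on MAC address entry 0, with broadcast
		 * and multicast matching added.  The rule is written
		 * disabled first, then armed with the ENA bit.
		 */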
3989 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3990 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3991 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3992 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3993 
3994 		/* Need to enable EMAC and RPM for WOL. */
3995 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3996 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3997 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3998 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3999 
4000 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4001 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4002 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4003 
4004 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4005 	} else {
4006 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4007 	}
4008 
4009 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4010 		u32 val;
4011 
4012 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4013 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4014 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4015 			return;
4016 		}
4017 		/* Tell firmware not to power down the PHY yet, otherwise
4018 		 * the chip will take a long time to respond to MMIO reads.
4019 		 */
4020 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4021 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4022 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4023 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4024 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4025 	}
4027 }
4028 
4029 static int
4030 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4031 {
4032 	switch (state) {
4033 	case PCI_D0: {
4034 		u32 val;
4035 
4036 		pci_enable_wake(bp->pdev, PCI_D0, false);
4037 		pci_set_power_state(bp->pdev, PCI_D0);
4038 
4039 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4040 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4041 		val &= ~BNX2_EMAC_MODE_MPKT;
4042 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4043 
4044 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4045 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4046 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4047 		break;
4048 	}
4049 	case PCI_D3hot: {
4050 		bnx2_setup_wol(bp);
4051 		pci_wake_from_d3(bp->pdev, bp->wol);
4052 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4053 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4054 
4055 			if (bp->wol)
4056 				pci_set_power_state(bp->pdev, PCI_D3hot);
4057 			break;
4058 
4059 		}
4060 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4061 			u32 val;
4062 
4063 			/* Tell firmware not to power down the PHY yet,
4064 			 * otherwise the other port may not respond to
4065 			 * MMIO reads.
4066 			 */
4067 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4068 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4069 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4070 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4071 		}
4072 		pci_set_power_state(bp->pdev, PCI_D3hot);
4073 
4074 		/* No more memory access after this point until
4075 		 * device is brought back to D0.
4076 		 */
4077 		break;
4078 	}
4079 	default:
4080 		return -EINVAL;
4081 	}
4082 	return 0;
4083 }
4084 
4085 static int
4086 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4087 {
4088 	u32 val;
4089 	int j;
4090 
4091 	/* Request access to the flash interface. */
4092 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4093 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4094 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4095 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4096 			break;
4097 
4098 		udelay(5);
4099 	}
4100 
4101 	if (j >= NVRAM_TIMEOUT_COUNT)
4102 		return -EBUSY;
4103 
4104 	return 0;
4105 }
4106 
4107 static int
4108 bnx2_release_nvram_lock(struct bnx2 *bp)
4109 {
4110 	int j;
4111 	u32 val;
4112 
4113 	/* Relinquish nvram interface. */
4114 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4115 
4116 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4117 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4118 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4119 			break;
4120 
4121 		udelay(5);
4122 	}
4123 
4124 	if (j >= NVRAM_TIMEOUT_COUNT)
4125 		return -EBUSY;
4126 
4127 	return 0;
4128 }
4129 
4131 static int
4132 bnx2_enable_nvram_write(struct bnx2 *bp)
4133 {
4134 	u32 val;
4135 
4136 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4137 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4138 
4139 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4140 		int j;
4141 
4142 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4143 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4144 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4145 
4146 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4147 			udelay(5);
4148 
4149 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4150 			if (val & BNX2_NVM_COMMAND_DONE)
4151 				break;
4152 		}
4153 
4154 		if (j >= NVRAM_TIMEOUT_COUNT)
4155 			return -EBUSY;
4156 	}
4157 	return 0;
4158 }
4159 
4160 static void
4161 bnx2_disable_nvram_write(struct bnx2 *bp)
4162 {
4163 	u32 val;
4164 
4165 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4166 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4167 }
4168 
4169 
4170 static void
4171 bnx2_enable_nvram_access(struct bnx2 *bp)
4172 {
4173 	u32 val;
4174 
4175 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4176 	/* Enable both bits, even on read. */
4177 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4178 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4179 }
4180 
4181 static void
4182 bnx2_disable_nvram_access(struct bnx2 *bp)
4183 {
4184 	u32 val;
4185 
4186 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4187 	/* Disable both bits, even after read. */
4188 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4189 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4190 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4191 }
4192 
4193 static int
4194 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4195 {
4196 	u32 cmd;
4197 	int j;
4198 
4199 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4200 		/* Buffered flash, no erase needed */
4201 		return 0;
4202 
4203 	/* Build an erase command */
4204 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4205 	      BNX2_NVM_COMMAND_DOIT;
4206 
4207 	/* Need to clear DONE bit separately. */
4208 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4209 
4210 	/* Address of the NVRAM to erase. */
4211 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4212 
4213 	/* Issue an erase command. */
4214 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4215 
4216 	/* Wait for completion. */
4217 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4218 		u32 val;
4219 
4220 		udelay(5);
4221 
4222 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4223 		if (val & BNX2_NVM_COMMAND_DONE)
4224 			break;
4225 	}
4226 
4227 	if (j >= NVRAM_TIMEOUT_COUNT)
4228 		return -EBUSY;
4229 
4230 	return 0;
4231 }
4232 
4233 static int
4234 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4235 {
4236 	u32 cmd;
4237 	int j;
4238 
4239 	/* Build the command word. */
4240 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4241 
4242 	/* Calculate the offset within a buffered flash; not needed for the 5709. */
4243 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4244 		offset = ((offset / bp->flash_info->page_size) <<
4245 			   bp->flash_info->page_bits) +
4246 			  (offset % bp->flash_info->page_size);
4247 	}
4248 
4249 	/* Need to clear DONE bit separately. */
4250 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4251 
4252 	/* Address of the NVRAM to read from. */
4253 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4254 
4255 	/* Issue a read command. */
4256 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4257 
4258 	/* Wait for completion. */
4259 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4260 		u32 val;
4261 
4262 		udelay(5);
4263 
4264 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4265 		if (val & BNX2_NVM_COMMAND_DONE) {
4266 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4267 			memcpy(ret_val, &v, 4);
4268 			break;
4269 		}
4270 	}
4271 	if (j >= NVRAM_TIMEOUT_COUNT)
4272 		return -EBUSY;
4273 
4274 	return 0;
4275 }
4276 
4278 static int
4279 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4280 {
4281 	u32 cmd;
4282 	__be32 val32;
4283 	int j;
4284 
4285 	/* Build the command word. */
4286 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4287 
4288 	/* Calculate the offset within a buffered flash; not needed for the 5709. */
4289 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4290 		offset = ((offset / bp->flash_info->page_size) <<
4291 			  bp->flash_info->page_bits) +
4292 			 (offset % bp->flash_info->page_size);
4293 	}
4294 
4295 	/* Need to clear DONE bit separately. */
4296 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4297 
4298 	memcpy(&val32, val, 4);
4299 
4300 	/* Write the data. */
4301 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4302 
4303 	/* Address of the NVRAM to write to. */
4304 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4305 
4306 	/* Issue the write command. */
4307 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4308 
4309 	/* Wait for completion. */
4310 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4311 		udelay(5);
4312 
4313 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4314 			break;
4315 	}
4316 	if (j >= NVRAM_TIMEOUT_COUNT)
4317 		return -EBUSY;
4318 
4319 	return 0;
4320 }
4321 
4322 static int
4323 bnx2_init_nvram(struct bnx2 *bp)
4324 {
4325 	u32 val;
4326 	int j, entry_count, rc = 0;
4327 	const struct flash_spec *flash;
4328 
4329 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4330 		bp->flash_info = &flash_5709;
4331 		goto get_flash_size;
4332 	}
4333 
4334 	/* Determine the selected interface. */
4335 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4336 
4337 	entry_count = ARRAY_SIZE(flash_table);
4338 
4339 	if (val & 0x40000000) {
4340 
4341 		/* Flash interface has been reconfigured */
4342 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4343 		     j++, flash++) {
4344 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4345 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4346 				bp->flash_info = flash;
4347 				break;
4348 			}
4349 		}
4350 	} else {
4352 		u32 mask;
4353 		/* Not yet reconfigured */
4354 
4355 		if (val & (1 << 23))
4356 			mask = FLASH_BACKUP_STRAP_MASK;
4357 		else
4358 			mask = FLASH_STRAP_MASK;
4359 
4360 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4361 			j++, flash++) {
4362 
4363 			if ((val & mask) == (flash->strapping & mask)) {
4364 				bp->flash_info = flash;
4365 
4366 				/* Request access to the flash interface. */
4367 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4368 					return rc;
4369 
4370 				/* Enable access to flash interface */
4371 				bnx2_enable_nvram_access(bp);
4372 
4373 				/* Reconfigure the flash interface */
4374 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4375 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4376 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4377 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4378 
4379 				/* Disable access to flash interface */
4380 				bnx2_disable_nvram_access(bp);
4381 				bnx2_release_nvram_lock(bp);
4382 
4383 				break;
4384 			}
4385 		}
4386 	} /* if (val & 0x40000000) */
4387 
4388 	if (j == entry_count) {
4389 		bp->flash_info = NULL;
4390 		pr_alert("Unknown flash/EEPROM type\n");
4391 		return -ENODEV;
4392 	}
4393 
4394 get_flash_size:
4395 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4396 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4397 	if (val)
4398 		bp->flash_size = val;
4399 	else
4400 		bp->flash_size = bp->flash_info->total_size;
4401 
4402 	return rc;
4403 }
4404 
4405 static int
4406 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4407 		int buf_size)
4408 {
4409 	int rc = 0;
4410 	u32 cmd_flags, offset32, len32, extra;
4411 
4412 	if (buf_size == 0)
4413 		return 0;
4414 
4415 	/* Request access to the flash interface. */
4416 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4417 		return rc;
4418 
4419 	/* Enable access to flash interface */
4420 	bnx2_enable_nvram_access(bp);
4421 
4422 	len32 = buf_size;
4423 	offset32 = offset;
4424 	extra = 0;
4425 
4426 	cmd_flags = 0;
4427 
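	/* NVRAM is read a dword at a time, framed by the FIRST and LAST
	 * command flags.  Split the request into an unaligned head, a run
	 * of whole dwords, and a tail padded out by "extra" bytes.
	 */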
4428 	if (offset32 & 3) {
4429 		u8 buf[4];
4430 		u32 pre_len;
4431 
4432 		offset32 &= ~3;
4433 		pre_len = 4 - (offset & 3);
4434 
4435 		if (pre_len >= len32) {
4436 			pre_len = len32;
4437 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4438 				    BNX2_NVM_COMMAND_LAST;
4439 		} else {
4441 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4442 		}
4443 
4444 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4445 
4446 		if (rc)
4447 			return rc;
4448 
4449 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4450 
4451 		offset32 += 4;
4452 		ret_buf += pre_len;
4453 		len32 -= pre_len;
4454 	}
4455 	if (len32 & 3) {
4456 		extra = 4 - (len32 & 3);
4457 		len32 = (len32 + 4) & ~3;
4458 	}
4459 
4460 	if (len32 == 4) {
4461 		u8 buf[4];
4462 
4463 		if (cmd_flags)
4464 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4465 		else
4466 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4467 				    BNX2_NVM_COMMAND_LAST;
4468 
4469 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4470 
4471 		memcpy(ret_buf, buf, 4 - extra);
4472 	} else if (len32 > 0) {
4474 		u8 buf[4];
4475 
4476 		/* Read the first word. */
4477 		if (cmd_flags)
4478 			cmd_flags = 0;
4479 		else
4480 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4481 
4482 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4483 
4484 		/* Advance to the next dword. */
4485 		offset32 += 4;
4486 		ret_buf += 4;
4487 		len32 -= 4;
4488 
4489 		while (len32 > 4 && rc == 0) {
4490 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4491 
4492 			/* Advance to the next dword. */
4493 			offset32 += 4;
4494 			ret_buf += 4;
4495 			len32 -= 4;
4496 		}
4497 
4498 		if (rc)
4499 			return rc;
4500 
4501 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4502 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4503 
4504 		memcpy(ret_buf, buf, 4 - extra);
4505 	}
4506 
4507 	/* Disable access to flash interface */
4508 	bnx2_disable_nvram_access(bp);
4509 
4510 	bnx2_release_nvram_lock(bp);
4511 
4512 	return rc;
4513 }
4514 
4515 static int
4516 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4517 		int buf_size)
4518 {
4519 	u32 written, offset32, len32;
4520 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4521 	int rc = 0;
4522 	int align_start, align_end;
4523 
4524 	buf = data_buf;
4525 	offset32 = offset;
4526 	len32 = buf_size;
4527 	align_start = align_end = 0;
4528 
4529 	if ((align_start = (offset32 & 3))) {
4530 		offset32 &= ~3;
4531 		len32 += align_start;
4532 		if (len32 < 4)
4533 			len32 = 4;
4534 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4535 			return rc;
4536 	}
4537 
4538 	if (len32 & 3) {
4539 		align_end = 4 - (len32 & 3);
4540 		len32 += align_end;
4541 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4542 			return rc;
4543 	}
4544 
4545 	if (align_start || align_end) {
4546 		align_buf = kmalloc(len32, GFP_KERNEL);
4547 		if (align_buf == NULL)
4548 			return -ENOMEM;
4549 		if (align_start) {
4550 			memcpy(align_buf, start, 4);
4551 		}
4552 		if (align_end) {
4553 			memcpy(align_buf + len32 - 4, end, 4);
4554 		}
4555 		memcpy(align_buf + align_start, data_buf, buf_size);
4556 		buf = align_buf;
4557 	}
4558 
4559 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4560 		flash_buffer = kmalloc(264, GFP_KERNEL);
4561 		if (flash_buffer == NULL) {
4562 			rc = -ENOMEM;
4563 			goto nvram_write_end;
4564 		}
4565 	}
4566 
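	/* Write one flash page per iteration.  For non-buffered flash this
	 * is a read-modify-write: the whole page is read into flash_buffer,
	 * the page is erased, then the bytes before data_start, the new
	 * data, and the bytes after data_end are written back in order.
	 */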
4567 	written = 0;
4568 	while ((written < len32) && (rc == 0)) {
4569 		u32 page_start, page_end, data_start, data_end;
4570 		u32 addr, cmd_flags;
4571 		int i;
4572 
4573 		/* Find the page_start addr */
4574 		page_start = offset32 + written;
4575 		page_start -= (page_start % bp->flash_info->page_size);
4576 		/* Find the page_end addr */
4577 		page_end = page_start + bp->flash_info->page_size;
4578 		/* Find the data_start addr */
4579 		data_start = (written == 0) ? offset32 : page_start;
4580 		/* Find the data_end addr */
4581 		data_end = (page_end > offset32 + len32) ?
4582 			(offset32 + len32) : page_end;
4583 
4584 		/* Request access to the flash interface. */
4585 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4586 			goto nvram_write_end;
4587 
4588 		/* Enable access to flash interface */
4589 		bnx2_enable_nvram_access(bp);
4590 
4591 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4592 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4593 			int j;
4594 
4595 			/* Read the whole page into the buffer
4596 			 * (non-buffered flash only) */
4597 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4598 				if (j == (bp->flash_info->page_size - 4)) {
4599 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4600 				}
4601 				rc = bnx2_nvram_read_dword(bp,
4602 					page_start + j,
4603 					&flash_buffer[j],
4604 					cmd_flags);
4605 
4606 				if (rc)
4607 					goto nvram_write_end;
4608 
4609 				cmd_flags = 0;
4610 			}
4611 		}
4612 
4613 		/* Enable writes to flash interface (unlock write-protect) */
4614 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4615 			goto nvram_write_end;
4616 
4617 		/* Loop to write back the buffer data from page_start to
4618 		 * data_start */
4619 		i = 0;
4620 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4621 			/* Erase the page */
4622 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4623 				goto nvram_write_end;
4624 
4625 			/* Re-enable the write again for the actual write */
4626 			bnx2_enable_nvram_write(bp);
4627 
4628 			for (addr = page_start; addr < data_start;
4629 				addr += 4, i += 4) {
4630 
4631 				rc = bnx2_nvram_write_dword(bp, addr,
4632 					&flash_buffer[i], cmd_flags);
4633 
4634 				if (rc != 0)
4635 					goto nvram_write_end;
4636 
4637 				cmd_flags = 0;
4638 			}
4639 		}
4640 
4641 		/* Loop to write the new data from data_start to data_end */
4642 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4643 			if ((addr == page_end - 4) ||
4644 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4645 				 (addr == data_end - 4))) {
4646 
4647 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4648 			}
4649 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4650 				cmd_flags);
4651 
4652 			if (rc != 0)
4653 				goto nvram_write_end;
4654 
4655 			cmd_flags = 0;
4656 			buf += 4;
4657 		}
4658 
4659 		/* Loop to write back the buffer data from data_end
4660 		 * to page_end */
4661 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4662 			for (addr = data_end; addr < page_end;
4663 				addr += 4, i += 4) {
4664 
4665 				if (addr == page_end - 4) {
4666 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4667 				}
4668 				rc = bnx2_nvram_write_dword(bp, addr,
4669 					&flash_buffer[i], cmd_flags);
4670 
4671 				if (rc != 0)
4672 					goto nvram_write_end;
4673 
4674 				cmd_flags = 0;
4675 			}
4676 		}
4677 
4678 		/* Disable writes to flash interface (lock write-protect) */
4679 		bnx2_disable_nvram_write(bp);
4680 
4681 		/* Disable access to flash interface */
4682 		bnx2_disable_nvram_access(bp);
4683 		bnx2_release_nvram_lock(bp);
4684 
4685 		/* Increment written */
4686 		written += data_end - data_start;
4687 	}
4688 
4689 nvram_write_end:
4690 	kfree(flash_buffer);
4691 	kfree(align_buf);
4692 	return rc;
4693 }
4694 
4695 static void
4696 bnx2_init_fw_cap(struct bnx2 *bp)
4697 {
4698 	u32 val, sig = 0;
4699 
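	/* Read the firmware capability mailbox and, while the device is
	 * up, acknowledge the capabilities this driver supports by writing
	 * the ack signature back.
	 */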
4700 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4701 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4702 
4703 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4704 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4705 
4706 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4707 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4708 		return;
4709 
4710 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4711 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4712 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4713 	}
4714 
4715 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4716 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4717 		u32 link;
4718 
4719 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4720 
4721 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4722 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4723 			bp->phy_port = PORT_FIBRE;
4724 		else
4725 			bp->phy_port = PORT_TP;
4726 
4727 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4728 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4729 	}
4730 
4731 	if (netif_running(bp->dev) && sig)
4732 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4733 }
4734 
4735 static void
4736 bnx2_setup_msix_tbl(struct bnx2 *bp)
4737 {
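	/* Switch the GRC to separate-window mode and map windows 2 and 3
	 * over the MSI-X vector table and PBA respectively.
	 */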
4738 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4739 
4740 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4741 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4742 }
4743 
4744 static int
4745 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4746 {
4747 	u32 val;
4748 	int i, rc = 0;
4749 	u8 old_port;
4750 
4751 	/* Wait for the current PCI transaction to complete before
4752 	 * issuing a reset. */
4753 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4754 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4755 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4756 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4757 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4758 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4759 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4760 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4761 		udelay(5);
4762 	} else {  /* 5709 */
4763 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4764 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4765 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4766 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4767 
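		/* Poll up to ~100 ms for pending PCIe transactions to
		 * drain before the reset is issued.
		 */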
4768 		for (i = 0; i < 100; i++) {
4769 			msleep(1);
4770 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4771 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4772 				break;
4773 		}
4774 	}
4775 
4776 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4777 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4778 
4779 	/* Deposit a driver reset signature so the firmware knows that
4780 	 * this is a soft reset. */
4781 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4782 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4783 
4784 	/* Do a dummy read to force the chip to complete all current
4785 	 * transactions before we issue a reset. */
4786 	val = BNX2_RD(bp, BNX2_MISC_ID);
4787 
4788 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4789 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4790 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4791 		udelay(5);
4792 
4793 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4794 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4795 
4796 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4797 
4798 	} else {
4799 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4800 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4801 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4802 
4803 		/* Chip reset. */
4804 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4805 
4806 		/* Reading back any register after chip reset will hang the
4807 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4808 		 * of margin for write posting.
4809 		 */
4810 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4811 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4812 			msleep(20);
4813 
4814 		/* Reset takes approximately 30 usec */
4815 		for (i = 0; i < 10; i++) {
4816 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4817 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4818 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4819 				break;
4820 			udelay(10);
4821 		}
4822 
4823 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4824 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4825 			pr_err("Chip reset did not complete\n");
4826 			return -EBUSY;
4827 		}
4828 	}
4829 
4830 	/* Make sure byte swapping is properly configured. */
4831 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4832 	if (val != 0x01020304) {
4833 		pr_err("Chip not in correct endian mode\n");
4834 		return -ENODEV;
4835 	}
4836 
4837 	/* Wait for the firmware to finish its initialization. */
4838 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4839 	if (rc)
4840 		return rc;
4841 
4842 	spin_lock_bh(&bp->phy_lock);
4843 	old_port = bp->phy_port;
4844 	bnx2_init_fw_cap(bp);
4845 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4846 	    old_port != bp->phy_port)
4847 		bnx2_set_default_remote_link(bp);
4848 	spin_unlock_bh(&bp->phy_lock);
4849 
4850 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4851 		/* Adjust the voltage regulator two steps lower.  The default
4852 		 * value of this register is 0x0000000e. */
4853 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4854 
4855 		/* Remove bad rbuf memory from the free pool. */
4856 		rc = bnx2_alloc_bad_rbuf(bp);
4857 	}
4858 
4859 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4860 		bnx2_setup_msix_tbl(bp);
4861 		/* Prevent MSIX table reads and write from timing out */
4862 		/* Prevent MSIX table reads and writes from timing out */
4863 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4864 	}
4865 
4866 	return rc;
4867 }
4868 
4869 static int
4870 bnx2_init_chip(struct bnx2 *bp)
4871 {
4872 	u32 val, mtu;
4873 	int rc, i;
4874 
4875 	/* Make sure the interrupt is not active. */
4876 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4877 
4878 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4879 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4880 #ifdef __BIG_ENDIAN
4881 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4882 #endif
4883 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4884 	      DMA_READ_CHANS << 12 |
4885 	      DMA_WRITE_CHANS << 16;
4886 
4887 	val |= (0x2 << 20) | (1 << 11);
4888 
4889 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4890 		val |= (1 << 23);
4891 
4892 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4893 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4894 	    !(bp->flags & BNX2_FLAG_PCIX))
4895 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4896 
4897 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4898 
4899 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4900 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4901 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4902 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4903 	}
4904 
4905 	if (bp->flags & BNX2_FLAG_PCIX) {
4906 		u16 val16;
4907 
4908 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4909 				     &val16);
4910 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4911 				      val16 & ~PCI_X_CMD_ERO);
4912 	}
4913 
4914 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4915 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4916 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4917 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4918 
4919 	/* Initialize context mapping and zero out the quick contexts.  The
4920 	 * context block must have already been enabled. */
4921 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4922 		rc = bnx2_init_5709_context(bp);
4923 		if (rc)
4924 			return rc;
4925 	} else
4926 		bnx2_init_context(bp);
4927 
4928 	if ((rc = bnx2_init_cpus(bp)) != 0)
4929 		return rc;
4930 
4931 	bnx2_init_nvram(bp);
4932 
4933 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4934 
4935 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4936 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4937 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4938 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4939 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4940 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4941 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4942 	}
4943 
4944 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4945 
4946 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4947 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4948 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4949 
4950 	val = (BNX2_PAGE_BITS - 8) << 24;
4951 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4952 
4953 	/* Configure page size. */
4954 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4955 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4956 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4957 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4958 
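	/* Seed the EMAC backoff state from the MAC address, presumably so
	 * that stations pick different backoff slots after a collision.
	 */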
4959 	val = bp->mac_addr[0] +
4960 	      (bp->mac_addr[1] << 8) +
4961 	      (bp->mac_addr[2] << 16) +
4962 	      bp->mac_addr[3] +
4963 	      (bp->mac_addr[4] << 8) +
4964 	      (bp->mac_addr[5] << 16);
4965 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4966 
4967 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4968 	mtu = bp->dev->mtu;
4969 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4970 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4971 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4972 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4973 
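	/* Size the rx buffer configuration for at least a standard
	 * 1500-byte MTU, even when a smaller MTU is configured.
	 */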
4974 	if (mtu < 1500)
4975 		mtu = 1500;
4976 
4977 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4978 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4979 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4980 
4981 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4982 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4983 		bp->bnx2_napi[i].last_status_idx = 0;
4984 
4985 	bp->idle_chk_status_idx = 0xffff;
4986 
4987 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4988 
4989 	/* Set up how to generate a link change interrupt. */
4990 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4991 
4992 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4993 		(u64) bp->status_blk_mapping & 0xffffffff);
4994 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4995 
4996 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4997 		(u64) bp->stats_blk_mapping & 0xffffffff);
4998 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4999 		(u64) bp->stats_blk_mapping >> 32);
5000 
5001 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5002 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5003 
5004 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5005 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5006 
5007 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5008 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5009 
5010 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5011 
5012 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5013 
5014 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5015 		(bp->com_ticks_int << 16) | bp->com_ticks);
5016 
5017 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5018 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5019 
5020 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5021 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5022 	else
5023 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5024 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5025 
5026 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
5027 		val = BNX2_HC_CONFIG_COLLECT_STATS;
5028 	} else {
5029 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5030 		      BNX2_HC_CONFIG_COLLECT_STATS;
5031 	}
5032 
5033 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5034 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5035 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5036 
5037 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5038 	}
5039 
5040 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5041 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5042 
5043 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5044 
5045 	if (bp->rx_ticks < 25)
5046 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5047 	else
5048 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5049 
5050 	for (i = 1; i < bp->irq_nvecs; i++) {
5051 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5052 			   BNX2_HC_SB_CONFIG_1;
5053 
5054 		BNX2_WR(bp, base,
5055 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5056 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5057 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5058 
5059 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5060 			(bp->tx_quick_cons_trip_int << 16) |
5061 			 bp->tx_quick_cons_trip);
5062 
5063 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5064 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5065 
5066 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5067 			(bp->rx_quick_cons_trip_int << 16) |
5068 			bp->rx_quick_cons_trip);
5069 
5070 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5071 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5072 	}
5073 
5074 	/* Clear internal stats counters. */
5075 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5076 
5077 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5078 
5079 	/* Initialize the receive filter. */
5080 	bnx2_set_rx_mode(bp->dev);
5081 
5082 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5083 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5084 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5085 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5086 	}
5087 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5088 			  1, 0);
5089 
5090 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5091 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5092 
5093 	udelay(20);
5094 
5095 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5096 
5097 	return rc;
5098 }
5099 
5100 static void
5101 bnx2_clear_ring_states(struct bnx2 *bp)
5102 {
5103 	struct bnx2_napi *bnapi;
5104 	struct bnx2_tx_ring_info *txr;
5105 	struct bnx2_rx_ring_info *rxr;
5106 	int i;
5107 
5108 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5109 		bnapi = &bp->bnx2_napi[i];
5110 		txr = &bnapi->tx_ring;
5111 		rxr = &bnapi->rx_ring;
5112 
5113 		txr->tx_cons = 0;
5114 		txr->hw_tx_cons = 0;
5115 		rxr->rx_prod_bseq = 0;
5116 		rxr->rx_prod = 0;
5117 		rxr->rx_cons = 0;
5118 		rxr->rx_pg_prod = 0;
5119 		rxr->rx_pg_cons = 0;
5120 	}
5121 }
5122 
5123 static void
5124 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5125 {
5126 	u32 val, offset0, offset1, offset2, offset3;
5127 	u32 cid_addr = GET_CID_ADDR(cid);
5128 
5129 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5130 		offset0 = BNX2_L2CTX_TYPE_XI;
5131 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5132 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5133 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5134 	} else {
5135 		offset0 = BNX2_L2CTX_TYPE;
5136 		offset1 = BNX2_L2CTX_CMD_TYPE;
5137 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5138 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5139 	}
5140 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5141 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5142 
5143 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5144 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5145 
5146 	val = (u64) txr->tx_desc_mapping >> 32;
5147 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5148 
5149 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5150 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5151 }
5152 
5153 static void
5154 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5155 {
5156 	struct bnx2_tx_bd *txbd;
5157 	u32 cid = TX_CID;
5158 	struct bnx2_napi *bnapi;
5159 	struct bnx2_tx_ring_info *txr;
5160 
5161 	bnapi = &bp->bnx2_napi[ring_num];
5162 	txr = &bnapi->tx_ring;
5163 
5164 	if (ring_num == 0)
5165 		cid = TX_CID;
5166 	else
5167 		cid = TX_TSS_CID + ring_num - 1;
5168 
5169 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5170 
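	/* The last BD in the ring is a chain BD; point it back at the
	 * start of the ring so the hardware wraps in place.
	 */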
5171 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5172 
5173 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5174 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5175 
5176 	txr->tx_prod = 0;
5177 	txr->tx_prod_bseq = 0;
5178 
5179 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5180 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5181 
5182 	bnx2_init_tx_context(bp, cid, txr);
5183 }
5184 
5185 static void
5186 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5187 		     u32 buf_size, int num_rings)
5188 {
5189 	int i;
5190 	struct bnx2_rx_bd *rxbd;
5191 
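	/* Fill every page of the ring with identically sized BDs, then use
	 * the final BD of each page as a chain pointer to the next page,
	 * with the last page linking back to the first.
	 */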
5192 	for (i = 0; i < num_rings; i++) {
5193 		int j;
5194 
5195 		rxbd = &rx_ring[i][0];
5196 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5197 			rxbd->rx_bd_len = buf_size;
5198 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5199 		}
5200 		if (i == (num_rings - 1))
5201 			j = 0;
5202 		else
5203 			j = i + 1;
5204 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5205 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5206 	}
5207 }
5208 
5209 static void
5210 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5211 {
5212 	int i;
5213 	u16 prod, ring_prod;
5214 	u32 cid, rx_cid_addr, val;
5215 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5216 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5217 
5218 	if (ring_num == 0)
5219 		cid = RX_CID;
5220 	else
5221 		cid = RX_RSS_CID + ring_num - 1;
5222 
5223 	rx_cid_addr = GET_CID_ADDR(cid);
5224 
5225 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5226 			     bp->rx_buf_use_size, bp->rx_max_ring);
5227 
5228 	bnx2_init_rx_context(bp, cid);
5229 
5230 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5231 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5232 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5233 	}
5234 
5235 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5236 	if (bp->rx_pg_ring_size) {
5237 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5238 				     rxr->rx_pg_desc_mapping,
5239 				     PAGE_SIZE, bp->rx_max_pg_ring);
5240 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5241 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5242 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5243 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5244 
5245 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5246 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5247 
5248 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5249 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5250 
5251 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5252 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5253 	}
5254 
5255 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5256 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5257 
5258 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5259 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5260 
5261 	ring_prod = prod = rxr->rx_pg_prod;
5262 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5263 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5264 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5265 				    ring_num, i, bp->rx_pg_ring_size);
5266 			break;
5267 		}
5268 		prod = BNX2_NEXT_RX_BD(prod);
5269 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5270 	}
5271 	rxr->rx_pg_prod = prod;
5272 
5273 	ring_prod = prod = rxr->rx_prod;
5274 	for (i = 0; i < bp->rx_ring_size; i++) {
5275 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5276 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5277 				    ring_num, i, bp->rx_ring_size);
5278 			break;
5279 		}
5280 		prod = BNX2_NEXT_RX_BD(prod);
5281 		ring_prod = BNX2_RX_RING_IDX(prod);
5282 	}
5283 	rxr->rx_prod = prod;
5284 
5285 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5286 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5287 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5288 
5289 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5290 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5291 
5292 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5293 }
5294 
5295 static void
5296 bnx2_init_all_rings(struct bnx2 *bp)
5297 {
5298 	int i;
5299 	u32 val;
5300 
5301 	bnx2_clear_ring_states(bp);
5302 
5303 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5304 	for (i = 0; i < bp->num_tx_rings; i++)
5305 		bnx2_init_tx_ring(bp, i);
5306 
5307 	if (bp->num_tx_rings > 1)
5308 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5309 			(TX_TSS_CID << 7));
5310 
5311 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5312 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5313 
5314 	for (i = 0; i < bp->num_rx_rings; i++)
5315 		bnx2_init_rx_ring(bp, i);
5316 
5317 	if (bp->num_rx_rings > 1) {
5318 		u32 tbl_32 = 0;
5319 
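		/* Build the RSS indirection table: entries are 4 bits each,
		 * packed 8 to a 32-bit word and spread round-robin over the
		 * num_rx_rings - 1 RSS rings.  Each completed word is pushed
		 * through the RLUP data/command registers.
		 */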
5320 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5321 			int shift = (i % 8) << 2;
5322 
5323 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5324 			if ((i % 8) == 7) {
5325 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5326 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5327 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5328 					BNX2_RLUP_RSS_COMMAND_WRITE |
5329 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5330 				tbl_32 = 0;
5331 			}
5332 		}
5333 
5334 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5335 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5336 
5337 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5338 
5339 	}
5340 }
5341 
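/* Return the number of ring pages needed to hold ring_size descriptors,
 * rounded up to a power of two.  max_size is assumed to be a power of
 * two no smaller than the result.
 */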
5342 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5343 {
5344 	u32 max, num_rings = 1;
5345 
5346 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5347 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5348 		num_rings++;
5349 	}
5350 	/* round to next power of 2 */
5351 	max = max_size;
5352 	while ((max & num_rings) == 0)
5353 		max >>= 1;
5354 
5355 	if (num_rings != max)
5356 		max <<= 1;
5357 
5358 	return max;
5359 }
5360 
5361 static void
5362 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5363 {
5364 	u32 rx_size, rx_space, jumbo_size;
5365 
5366 	/* 8 for CRC and VLAN */
5367 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5368 
5369 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5370 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5371 
5372 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5373 	bp->rx_pg_ring_size = 0;
5374 	bp->rx_max_pg_ring = 0;
5375 	bp->rx_max_pg_ring_idx = 0;
5376 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
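		/* Estimate the pages consumed per jumbo frame; the 40-byte
		 * deduction roughly accounts for the header bytes that stay
		 * in the small linear buffer.
		 */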
5377 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5378 
5379 		jumbo_size = size * pages;
5380 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5381 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5382 
5383 		bp->rx_pg_ring_size = jumbo_size;
5384 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5385 							BNX2_MAX_RX_PG_RINGS);
5386 		bp->rx_max_pg_ring_idx =
5387 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5388 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5389 		bp->rx_copy_thresh = 0;
5390 	}
5391 
5392 	bp->rx_buf_use_size = rx_size;
5393 	/* hw alignment + build_skb() overhead */
5394 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5395 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5396 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5397 	bp->rx_ring_size = size;
5398 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5399 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5400 }
5401 
5402 static void
5403 bnx2_free_tx_skbs(struct bnx2 *bp)
5404 {
5405 	int i;
5406 
5407 	for (i = 0; i < bp->num_tx_rings; i++) {
5408 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5409 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5410 		int j;
5411 
5412 		if (txr->tx_buf_ring == NULL)
5413 			continue;
5414 
5415 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5416 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5417 			struct sk_buff *skb = tx_buf->skb;
5418 			int k, last;
5419 
5420 			if (skb == NULL) {
5421 				j = BNX2_NEXT_TX_BD(j);
5422 				continue;
5423 			}
5424 
5425 			dma_unmap_single(&bp->pdev->dev,
5426 					 dma_unmap_addr(tx_buf, mapping),
5427 					 skb_headlen(skb),
5428 					 PCI_DMA_TODEVICE);
5429 
5430 			tx_buf->skb = NULL;
5431 
5432 			last = tx_buf->nr_frags;
5433 			j = BNX2_NEXT_TX_BD(j);
5434 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5435 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5436 				dma_unmap_page(&bp->pdev->dev,
5437 					dma_unmap_addr(tx_buf, mapping),
5438 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5439 					PCI_DMA_TODEVICE);
5440 			}
5441 			dev_kfree_skb(skb);
5442 		}
5443 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5444 	}
5445 }
5446 
5447 static void
5448 bnx2_free_rx_skbs(struct bnx2 *bp)
5449 {
5450 	int i;
5451 
5452 	for (i = 0; i < bp->num_rx_rings; i++) {
5453 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5454 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5455 		int j;
5456 
5457 		if (rxr->rx_buf_ring == NULL)
5458 			return;
5459 
5460 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5461 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5462 			u8 *data = rx_buf->data;
5463 
5464 			if (data == NULL)
5465 				continue;
5466 
5467 			dma_unmap_single(&bp->pdev->dev,
5468 					 dma_unmap_addr(rx_buf, mapping),
5469 					 bp->rx_buf_use_size,
5470 					 PCI_DMA_FROMDEVICE);
5471 
5472 			rx_buf->data = NULL;
5473 
5474 			kfree(data);
5475 		}
5476 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5477 			bnx2_free_rx_page(bp, rxr, j);
5478 	}
5479 }
5480 
5481 static void
5482 bnx2_free_skbs(struct bnx2 *bp)
5483 {
5484 	bnx2_free_tx_skbs(bp);
5485 	bnx2_free_rx_skbs(bp);
5486 }
5487 
5488 static int
5489 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5490 {
5491 	int rc;
5492 
5493 	rc = bnx2_reset_chip(bp, reset_code);
5494 	bnx2_free_skbs(bp);
5495 	if (rc)
5496 		return rc;
5497 
5498 	if ((rc = bnx2_init_chip(bp)) != 0)
5499 		return rc;
5500 
5501 	bnx2_init_all_rings(bp);
5502 	return 0;
5503 }
5504 
5505 static int
5506 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5507 {
5508 	int rc;
5509 
5510 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5511 		return rc;
5512 
5513 	spin_lock_bh(&bp->phy_lock);
5514 	bnx2_init_phy(bp, reset_phy);
5515 	bnx2_set_link(bp);
5516 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5517 		bnx2_remote_phy_event(bp);
5518 	spin_unlock_bh(&bp->phy_lock);
5519 	return 0;
5520 }
5521 
5522 static int
5523 bnx2_shutdown_chip(struct bnx2 *bp)
5524 {
5525 	u32 reset_code;
5526 
5527 	if (bp->flags & BNX2_FLAG_NO_WOL)
5528 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5529 	else if (bp->wol)
5530 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5531 	else
5532 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5533 
5534 	return bnx2_reset_chip(bp, reset_code);
5535 }
5536 
5537 static int
5538 bnx2_test_registers(struct bnx2 *bp)
5539 {
5540 	int ret;
5541 	int i, is_5709;
5542 	static const struct {
5543 		u16   offset;
5544 		u16   flags;
5545 #define BNX2_FL_NOT_5709	1
5546 		u32   rw_mask;
5547 		u32   ro_mask;
5548 	} reg_tbl[] = {
5549 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5550 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5551 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5552 
5553 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5554 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5555 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5556 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5557 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5558 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5559 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5560 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5561 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5562 
5563 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5565 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5566 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5567 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5568 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5569 
5570 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5571 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5572 		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5573 
5574 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5575 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5576 
5577 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5578 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5579 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5580 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5581 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5582 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5583 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5584 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5585 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5586 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5587 
5588 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5589 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5590 
5591 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5592 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5593 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5594 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5595 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5596 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5597 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5598 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5599 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5600 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5601 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5602 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5603 
5604 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5605 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5606 
5607 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5608 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5609 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5610 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5611 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5612 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5613 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5614 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5615 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5616 
5617 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5618 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5619 
5620 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5621 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5622 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5623 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5624 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5625 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5626 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5627 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5628 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5629 
5630 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5631 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5632 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5633 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5634 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5635 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5636 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5637 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5638 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5639 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5640 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5641 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5642 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5643 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5644 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5645 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5646 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5647 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5648 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5649 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5650 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5651 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5652 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5653 
5654 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5655 	};
5656 
5657 	ret = 0;
5658 	is_5709 = 0;
5659 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5660 		is_5709 = 1;
5661 
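	/* For each register, write all-zeros then all-ones and verify that
	 * the read/write bits respond while the read-only bits keep the
	 * value saved beforehand; the original value is restored afterwards.
	 */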
5662 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5663 		u32 offset, rw_mask, ro_mask, save_val, val;
5664 		u16 flags = reg_tbl[i].flags;
5665 
5666 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5667 			continue;
5668 
5669 		offset = (u32) reg_tbl[i].offset;
5670 		rw_mask = reg_tbl[i].rw_mask;
5671 		ro_mask = reg_tbl[i].ro_mask;
5672 
5673 		save_val = readl(bp->regview + offset);
5674 
5675 		writel(0, bp->regview + offset);
5676 
5677 		val = readl(bp->regview + offset);
5678 		if ((val & rw_mask) != 0) {
5679 			goto reg_test_err;
5680 		}
5681 
5682 		if ((val & ro_mask) != (save_val & ro_mask)) {
5683 			goto reg_test_err;
5684 		}
5685 
5686 		writel(0xffffffff, bp->regview + offset);
5687 
5688 		val = readl(bp->regview + offset);
5689 		if ((val & rw_mask) != rw_mask) {
5690 			goto reg_test_err;
5691 		}
5692 
5693 		if ((val & ro_mask) != (save_val & ro_mask)) {
5694 			goto reg_test_err;
5695 		}
5696 
5697 		writel(save_val, bp->regview + offset);
5698 		continue;
5699 
5700 reg_test_err:
5701 		writel(save_val, bp->regview + offset);
5702 		ret = -ENODEV;
5703 		break;
5704 	}
5705 	return ret;
5706 }
5707 
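/* Write each test pattern to every word in [start, start + size) through
 * the indirect register interface and verify the readback; fail with
 * -ENODEV on the first mismatch.
 */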
5708 static int
5709 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5710 {
5711 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5712 		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5713 	int i;
5714 
5715 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5716 		u32 offset;
5717 
5718 		for (offset = 0; offset < size; offset += 4) {
5719 
5720 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5721 
5722 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5723 				test_pattern[i]) {
5724 				return -ENODEV;
5725 			}
5726 		}
5727 	}
5728 	return 0;
5729 }
5730 
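/* Run the pattern test over each on-chip memory region in the per-chip
 * table; the 5709 table omits the region at 0x160000.
 */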
5731 static int
5732 bnx2_test_memory(struct bnx2 *bp)
5733 {
5734 	int ret = 0;
5735 	int i;
5736 	static struct mem_entry {
5737 		u32   offset;
5738 		u32   len;
5739 	} mem_tbl_5706[] = {
5740 		{ 0x60000,  0x4000 },
5741 		{ 0xa0000,  0x3000 },
5742 		{ 0xe0000,  0x4000 },
5743 		{ 0x120000, 0x4000 },
5744 		{ 0x1a0000, 0x4000 },
5745 		{ 0x160000, 0x4000 },
5746 		{ 0xffffffff, 0    },
5747 	},
5748 	mem_tbl_5709[] = {
5749 		{ 0x60000,  0x4000 },
5750 		{ 0xa0000,  0x3000 },
5751 		{ 0xe0000,  0x4000 },
5752 		{ 0x120000, 0x4000 },
5753 		{ 0x1a0000, 0x4000 },
5754 		{ 0xffffffff, 0    },
5755 	};
5756 	struct mem_entry *mem_tbl;
5757 
5758 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5759 		mem_tbl = mem_tbl_5709;
5760 	else
5761 		mem_tbl = mem_tbl_5706;
5762 
5763 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5764 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5765 			mem_tbl[i].len)) != 0) {
5766 			return ret;
5767 		}
5768 	}
5769 
5770 	return ret;
5771 }
5772 
5773 #define BNX2_MAC_LOOPBACK	0
5774 #define BNX2_PHY_LOOPBACK	1
5775 
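/* Send one self-addressed test frame through the selected loopback path
 * (MAC-internal or PHY) and verify that it arrives on the rx ring intact:
 * no l2_fhdr errors, the expected length, and the expected payload.  The
 * rings are polled directly; the host coalescing block is kicked manually
 * instead of waiting for interrupts.
 */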
5776 static int
5777 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5778 {
5779 	unsigned int pkt_size, num_pkts, i;
5780 	struct sk_buff *skb;
5781 	u8 *data;
5782 	unsigned char *packet;
5783 	u16 rx_start_idx, rx_idx;
5784 	dma_addr_t map;
5785 	struct bnx2_tx_bd *txbd;
5786 	struct bnx2_sw_bd *rx_buf;
5787 	struct l2_fhdr *rx_hdr;
5788 	int ret = -ENODEV;
5789 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi = bnapi;
5790 	struct bnx2_tx_ring_info *txr = &tx_napi->tx_ring;
5791 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5792 
5797 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5798 		bp->loopback = MAC_LOOPBACK;
5799 		bnx2_set_mac_loopback(bp);
5800 	}
5801 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5802 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5803 			return 0;
5804 
5805 		bp->loopback = PHY_LOOPBACK;
5806 		bnx2_set_phy_loopback(bp);
5807 	}
5808 	else
5809 		return -EINVAL;
5810 
5811 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5812 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5813 	if (!skb)
5814 		return -ENOMEM;
5815 	packet = skb_put(skb, pkt_size);
5816 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5817 	memset(packet + ETH_ALEN, 0x0, 8);
5818 	for (i = 14; i < pkt_size; i++)
5819 		packet[i] = (unsigned char) (i & 0xff);
5820 
5821 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5822 			     PCI_DMA_TODEVICE);
5823 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5824 		dev_kfree_skb(skb);
5825 		return -EIO;
5826 	}
5827 
5828 	BNX2_WR(bp, BNX2_HC_COMMAND,
5829 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5830 
5831 	BNX2_RD(bp, BNX2_HC_COMMAND);
5832 
5833 	udelay(5);
5834 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5835 
5836 	num_pkts = 0;
5837 
5838 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5839 
5840 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5841 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5842 	txbd->tx_bd_mss_nbytes = pkt_size;
5843 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5844 
5845 	num_pkts++;
5846 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5847 	txr->tx_prod_bseq += pkt_size;
5848 
5849 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5850 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5851 
5852 	udelay(100);
5853 
5854 	BNX2_WR(bp, BNX2_HC_COMMAND,
5855 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5856 
5857 	BNX2_RD(bp, BNX2_HC_COMMAND);
5858 
5859 	udelay(5);
5860 
5861 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5862 	dev_kfree_skb(skb);
5863 
5864 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5865 		goto loopback_test_done;
5866 
5867 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5868 	if (rx_idx != rx_start_idx + num_pkts) {
5869 		goto loopback_test_done;
5870 	}
5871 
5872 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5873 	data = rx_buf->data;
5874 
5875 	rx_hdr = get_l2_fhdr(data);
5876 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5877 
5878 	dma_sync_single_for_cpu(&bp->pdev->dev,
5879 		dma_unmap_addr(rx_buf, mapping),
5880 		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5881 
5882 	if (rx_hdr->l2_fhdr_status &
5883 		(L2_FHDR_ERRORS_BAD_CRC |
5884 		L2_FHDR_ERRORS_PHY_DECODE |
5885 		L2_FHDR_ERRORS_ALIGNMENT |
5886 		L2_FHDR_ERRORS_TOO_SHORT |
5887 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5888 
5889 		goto loopback_test_done;
5890 	}
5891 
5892 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5893 		goto loopback_test_done;
5894 	}
5895 
5896 	for (i = 14; i < pkt_size; i++) {
5897 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5898 			goto loopback_test_done;
5899 		}
5900 	}
5901 
5902 	ret = 0;
5903 
5904 loopback_test_done:
5905 	bp->loopback = 0;
5906 	return ret;
5907 }
5908 
5909 #define BNX2_MAC_LOOPBACK_FAILED	1
5910 #define BNX2_PHY_LOOPBACK_FAILED	2
5911 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5912 					 BNX2_PHY_LOOPBACK_FAILED)
5913 
5914 static int
5915 bnx2_test_loopback(struct bnx2 *bp)
5916 {
5917 	int rc = 0;
5918 
5919 	if (!netif_running(bp->dev))
5920 		return BNX2_LOOPBACK_FAILED;
5921 
5922 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5923 	spin_lock_bh(&bp->phy_lock);
5924 	bnx2_init_phy(bp, 1);
5925 	spin_unlock_bh(&bp->phy_lock);
5926 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5927 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5928 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5929 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5930 	return rc;
5931 }
5932 
5933 #define NVRAM_SIZE 0x200
5934 #define CRC32_RESIDUAL 0xdebb20e3
5935 
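/* Sanity-check NVRAM: verify the magic value in the first word, then read
 * the 512-byte region at offset 0x100 and CRC each 256-byte half.  A block
 * carrying a valid appended checksum always yields the standard CRC32
 * residual 0xdebb20e3.
 */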
5936 static int
5937 bnx2_test_nvram(struct bnx2 *bp)
5938 {
5939 	__be32 buf[NVRAM_SIZE / 4];
5940 	u8 *data = (u8 *) buf;
5941 	int rc = 0;
5942 	u32 magic, csum;
5943 
5944 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5945 		goto test_nvram_done;
5946 
5947 	magic = be32_to_cpu(buf[0]);
5948 	if (magic != 0x669955aa) {
5949 		rc = -ENODEV;
5950 		goto test_nvram_done;
5951 	}
5952 
5953 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5954 		goto test_nvram_done;
5955 
5956 	csum = ether_crc_le(0x100, data);
5957 	if (csum != CRC32_RESIDUAL) {
5958 		rc = -ENODEV;
5959 		goto test_nvram_done;
5960 	}
5961 
5962 	csum = ether_crc_le(0x100, data + 0x100);
5963 	if (csum != CRC32_RESIDUAL) {
5964 		rc = -ENODEV;
5965 	}
5966 
5967 test_nvram_done:
5968 	return rc;
5969 }
5970 
5971 static int
5972 bnx2_test_link(struct bnx2 *bp)
5973 {
5974 	u32 bmsr;
5975 
5976 	if (!netif_running(bp->dev))
5977 		return -ENODEV;
5978 
5979 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5980 		if (bp->link_up)
5981 			return 0;
5982 		return -ENODEV;
5983 	}
5984 	spin_lock_bh(&bp->phy_lock);
5985 	bnx2_enable_bmsr1(bp);
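	/* BMSR latches link-down events; read twice for the current state. */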
5986 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5987 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5988 	bnx2_disable_bmsr1(bp);
5989 	spin_unlock_bh(&bp->phy_lock);
5990 
5991 	if (bmsr & BMSR_LSTATUS) {
5992 		return 0;
5993 	}
5994 	return -ENODEV;
5995 }
5996 
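/* Fire a "coalesce now" interrupt and poll the index in the PCICFG
 * interrupt-ack register for up to ~100 ms; if it never changes, no
 * interrupt was delivered.
 */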
5997 static int
5998 bnx2_test_intr(struct bnx2 *bp)
5999 {
6000 	int i;
6001 	u16 status_idx;
6002 
6003 	if (!netif_running(bp->dev))
6004 		return -ENODEV;
6005 
6006 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6007 
6008 	/* This register is not touched at run time. */
6009 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6010 	BNX2_RD(bp, BNX2_HC_COMMAND);
6011 
6012 	for (i = 0; i < 10; i++) {
6013 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6014 			status_idx) {
6015 
6016 			break;
6017 		}
6018 
6019 		msleep_interruptible(10);
6020 	}
6021 	if (i < 10)
6022 		return 0;
6023 
6024 	return -ENODEV;
6025 }
6026 
6027 /* Determine link state for parallel detection. */
6028 static int
6029 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6030 {
6031 	u32 mode_ctl, an_dbg, exp;
6032 
6033 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6034 		return 0;
6035 
6036 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6037 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6038 
6039 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6040 		return 0;
6041 
6042 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6043 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6044 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6045 
6046 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6047 		return 0;
6048 
6049 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6050 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6051 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6052 
6053 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6054 		return 0;
6055 
6056 	return 1;
6057 }
6058 
6059 static void
6060 bnx2_5706_serdes_timer(struct bnx2 *bp)
6061 {
6062 	int check_link = 1;
6063 
6064 	spin_lock(&bp->phy_lock);
6065 	if (bp->serdes_an_pending) {
6066 		bp->serdes_an_pending--;
6067 		check_link = 0;
6068 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6069 		u32 bmcr;
6070 
6071 		bp->current_interval = BNX2_TIMER_INTERVAL;
6072 
6073 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6074 
6075 		if (bmcr & BMCR_ANENABLE) {
6076 			if (bnx2_5706_serdes_has_link(bp)) {
6077 				bmcr &= ~BMCR_ANENABLE;
6078 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6079 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6080 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6081 			}
6082 		}
6083 	}
6084 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6085 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6086 		u32 phy2;
6087 
6088 		bnx2_write_phy(bp, 0x17, 0x0f01);
6089 		bnx2_read_phy(bp, 0x15, &phy2);
6090 		if (phy2 & 0x20) {
6091 			u32 bmcr;
6092 
6093 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6094 			bmcr |= BMCR_ANENABLE;
6095 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6096 
6097 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6098 		}
6099 	} else
6100 		bp->current_interval = BNX2_TIMER_INTERVAL;
6101 
6102 	if (check_link) {
6103 		u32 val;
6104 
6105 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6106 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6107 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6108 
6109 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6110 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6111 				bnx2_5706s_force_link_dn(bp, 1);
6112 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6113 			} else
6114 				bnx2_set_link(bp);
6115 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6116 			bnx2_set_link(bp);
6117 	}
6118 	spin_unlock(&bp->phy_lock);
6119 }
6120 
6121 static void
6122 bnx2_5708_serdes_timer(struct bnx2 *bp)
6123 {
6124 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6125 		return;
6126 
6127 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6128 		bp->serdes_an_pending = 0;
6129 		return;
6130 	}
6131 
6132 	spin_lock(&bp->phy_lock);
6133 	if (bp->serdes_an_pending)
6134 		bp->serdes_an_pending--;
6135 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6136 		u32 bmcr;
6137 
6138 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6139 		if (bmcr & BMCR_ANENABLE) {
6140 			bnx2_enable_forced_2g5(bp);
6141 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6142 		} else {
6143 			bnx2_disable_forced_2g5(bp);
6144 			bp->serdes_an_pending = 2;
6145 			bp->current_interval = BNX2_TIMER_INTERVAL;
6146 		}
6147 
6148 	} else
6149 		bp->current_interval = BNX2_TIMER_INTERVAL;
6150 
6151 	spin_unlock(&bp->phy_lock);
6152 }
6153 
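/* Periodic housekeeping, rearmed every bp->current_interval: check for
 * missed MSIs, send the firmware heartbeat, refresh the firmware rx-drop
 * counter, apply the broken-statistics workaround, and run the per-chip
 * SerDes link poll.  All but the rearm is skipped while intr_sem is raised.
 */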
6154 static void
6155 bnx2_timer(unsigned long data)
6156 {
6157 	struct bnx2 *bp = (struct bnx2 *) data;
6158 
6159 	if (!netif_running(bp->dev))
6160 		return;
6161 
6162 	if (atomic_read(&bp->intr_sem) != 0)
6163 		goto bnx2_restart_timer;
6164 
6165 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6166 	     BNX2_FLAG_USING_MSI)
6167 		bnx2_chk_missed_msi(bp);
6168 
6169 	bnx2_send_heart_beat(bp);
6170 
6171 	bp->stats_blk->stat_FwRxDrop =
6172 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6173 
6174 	/* Work around occasionally corrupted counters. */
6175 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6176 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6177 			BNX2_HC_COMMAND_STATS_NOW);
6178 
6179 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6180 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6181 			bnx2_5706_serdes_timer(bp);
6182 		else
6183 			bnx2_5708_serdes_timer(bp);
6184 	}
6185 
6186 bnx2_restart_timer:
6187 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6188 }
6189 
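/* Request one IRQ per vector in irq_tbl.  IRQF_SHARED is needed only for
 * legacy INTx; MSI and MSI-X vectors are exclusive to the device.
 */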
6190 static int
6191 bnx2_request_irq(struct bnx2 *bp)
6192 {
6193 	unsigned long flags;
6194 	struct bnx2_irq *irq;
6195 	int rc = 0, i;
6196 
6197 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6198 		flags = 0;
6199 	else
6200 		flags = IRQF_SHARED;
6201 
6202 	for (i = 0; i < bp->irq_nvecs; i++) {
6203 		irq = &bp->irq_tbl[i];
6204 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6205 				 &bp->bnx2_napi[i]);
6206 		if (rc)
6207 			break;
6208 		irq->requested = 1;
6209 	}
6210 	return rc;
6211 }
6212 
6213 static void
6214 __bnx2_free_irq(struct bnx2 *bp)
6215 {
6216 	struct bnx2_irq *irq;
6217 	int i;
6218 
6219 	for (i = 0; i < bp->irq_nvecs; i++) {
6220 		irq = &bp->irq_tbl[i];
6221 		if (irq->requested)
6222 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6223 		irq->requested = 0;
6224 	}
6225 }
6226 
6227 static void
6228 bnx2_free_irq(struct bnx2 *bp)
6229 {
6230 
6231 	__bnx2_free_irq(bp);
6232 	if (bp->flags & BNX2_FLAG_USING_MSI)
6233 		pci_disable_msi(bp->pdev);
6234 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6235 		pci_disable_msix(bp->pdev);
6236 
6237 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6238 }
6239 
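/* Program the MSI-X table and PBA locations into the GRC windows, then try
 * to allocate up to msix_vecs vectors (plus one for CNIC when it is built
 * in).  On failure this returns quietly and the caller falls back to MSI
 * or INTx.
 */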
6240 static void
6241 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6242 {
6243 	int i, total_vecs;
6244 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6245 	struct net_device *dev = bp->dev;
6246 	const int len = sizeof(bp->irq_tbl[0].name);
6247 
6248 	bnx2_setup_msix_tbl(bp);
6249 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6250 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6251 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6252 
6253 	/* Need to flush the previous three writes to ensure MSI-X
6254 	 * is set up properly. */
6255 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6256 
6257 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6258 		msix_ent[i].entry = i;
6259 		msix_ent[i].vector = 0;
6260 	}
6261 
6262 	total_vecs = msix_vecs;
6263 #ifdef BCM_CNIC
6264 	total_vecs++;
6265 #endif
6266 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6267 					   BNX2_MIN_MSIX_VEC, total_vecs);
6268 	if (total_vecs < 0)
6269 		return;
6270 
6271 	msix_vecs = total_vecs;
6272 #ifdef BCM_CNIC
6273 	msix_vecs--;
6274 #endif
6275 	bp->irq_nvecs = msix_vecs;
6276 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6277 	for (i = 0; i < total_vecs; i++) {
6278 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6279 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6280 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6281 	}
6282 }
6283 
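/* Choose the interrupt mode and ring counts: start from legacy INTx, then
 * upgrade to MSI-X or MSI when the chip supports it and the user has not
 * disabled it.  The vector count is derived from the default RSS queue
 * count and any user-requested ring counts, capped at RX_MAX_RINGS.
 */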
6284 static int
6285 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6286 {
6287 	int cpus = netif_get_num_default_rss_queues();
6288 	int msix_vecs;
6289 
6290 	if (!bp->num_req_rx_rings)
6291 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6292 	else if (!bp->num_req_tx_rings)
6293 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6294 	else
6295 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6296 
6297 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6298 
6299 	bp->irq_tbl[0].handler = bnx2_interrupt;
6300 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6301 	bp->irq_nvecs = 1;
6302 	bp->irq_tbl[0].vector = bp->pdev->irq;
6303 
6304 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6305 		bnx2_enable_msix(bp, msix_vecs);
6306 
6307 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6308 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6309 		if (pci_enable_msi(bp->pdev) == 0) {
6310 			bp->flags |= BNX2_FLAG_USING_MSI;
6311 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6312 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6313 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6314 			} else
6315 				bp->irq_tbl[0].handler = bnx2_msi;
6316 
6317 			bp->irq_tbl[0].vector = bp->pdev->irq;
6318 		}
6319 	}
6320 
6321 	if (!bp->num_req_tx_rings)
6322 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6323 	else
6324 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6325 
6326 	if (!bp->num_req_rx_rings)
6327 		bp->num_rx_rings = bp->irq_nvecs;
6328 	else
6329 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6330 
6331 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6332 
6333 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6334 }
6335 
6336 /* Called with rtnl_lock */
6337 static int
6338 bnx2_open(struct net_device *dev)
6339 {
6340 	struct bnx2 *bp = netdev_priv(dev);
6341 	int rc;
6342 
6343 	rc = bnx2_request_firmware(bp);
6344 	if (rc < 0)
6345 		goto out;
6346 
6347 	netif_carrier_off(dev);
6348 
6349 	bnx2_disable_int(bp);
6350 
6351 	rc = bnx2_setup_int_mode(bp, disable_msi);
6352 	if (rc)
6353 		goto open_err;
6354 	bnx2_init_napi(bp);
6355 	bnx2_napi_enable(bp);
6356 	rc = bnx2_alloc_mem(bp);
6357 	if (rc)
6358 		goto open_err;
6359 
6360 	rc = bnx2_request_irq(bp);
6361 	if (rc)
6362 		goto open_err;
6363 
6364 	rc = bnx2_init_nic(bp, 1);
6365 	if (rc)
6366 		goto open_err;
6367 
6368 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6369 
6370 	atomic_set(&bp->intr_sem, 0);
6371 
6372 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6373 
6374 	bnx2_enable_int(bp);
6375 
6376 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6377 		/* Test MSI to make sure it is working.
6378 		 * If the MSI test fails, go back to INTx mode.
6379 		 */
6380 		if (bnx2_test_intr(bp) != 0) {
6381 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6382 
6383 			bnx2_disable_int(bp);
6384 			bnx2_free_irq(bp);
6385 
6386 			bnx2_setup_int_mode(bp, 1);
6387 
6388 			rc = bnx2_init_nic(bp, 0);
6389 
6390 			if (!rc)
6391 				rc = bnx2_request_irq(bp);
6392 
6393 			if (rc) {
6394 				del_timer_sync(&bp->timer);
6395 				goto open_err;
6396 			}
6397 			bnx2_enable_int(bp);
6398 		}
6399 	}
6400 	if (bp->flags & BNX2_FLAG_USING_MSI)
6401 		netdev_info(dev, "using MSI\n");
6402 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6403 		netdev_info(dev, "using MSIX\n");
6404 
6405 	netif_tx_start_all_queues(dev);
6406 out:
6407 	return rc;
6408 
6409 open_err:
6410 	bnx2_napi_disable(bp);
6411 	bnx2_free_skbs(bp);
6412 	bnx2_free_irq(bp);
6413 	bnx2_free_mem(bp);
6414 	bnx2_del_napi(bp);
6415 	bnx2_release_firmware(bp);
6416 	goto out;
6417 }
6418 
6419 static void
6420 bnx2_reset_task(struct work_struct *work)
6421 {
6422 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6423 	int rc;
6424 	u16 pcicmd;
6425 
6426 	rtnl_lock();
6427 	if (!netif_running(bp->dev)) {
6428 		rtnl_unlock();
6429 		return;
6430 	}
6431 
6432 	bnx2_netif_stop(bp, true);
6433 
6434 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6435 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6436 		/* in case the PCI block has been reset */
6437 		pci_restore_state(bp->pdev);
6438 		pci_save_state(bp->pdev);
6439 	}
6440 	rc = bnx2_init_nic(bp, 1);
6441 	if (rc) {
6442 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6443 		bnx2_napi_enable(bp);
6444 		dev_close(bp->dev);
6445 		rtnl_unlock();
6446 		return;
6447 	}
6448 
6449 	atomic_set(&bp->intr_sem, 1);
6450 	bnx2_netif_start(bp, true);
6451 	rtnl_unlock();
6452 }
6453 
6454 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6455 
6456 static void
6457 bnx2_dump_ftq(struct bnx2 *bp)
6458 {
6459 	int i;
6460 	u32 reg, bdidx, cid, valid;
6461 	struct net_device *dev = bp->dev;
6462 	static const struct ftq_reg {
6463 		char *name;
6464 		u32 off;
6465 	} ftq_arr[] = {
6466 		BNX2_FTQ_ENTRY(RV2P_P),
6467 		BNX2_FTQ_ENTRY(RV2P_T),
6468 		BNX2_FTQ_ENTRY(RV2P_M),
6469 		BNX2_FTQ_ENTRY(TBDR_),
6470 		BNX2_FTQ_ENTRY(TDMA_),
6471 		BNX2_FTQ_ENTRY(TXP_),
6472 		BNX2_FTQ_ENTRY(TXP_),
6473 		BNX2_FTQ_ENTRY(TPAT_),
6474 		BNX2_FTQ_ENTRY(RXP_C),
6475 		BNX2_FTQ_ENTRY(RXP_),
6476 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6477 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6478 		BNX2_FTQ_ENTRY(COM_COMQ_),
6479 		BNX2_FTQ_ENTRY(CP_CPQ_),
6480 	};
6481 
6482 	netdev_err(dev, "<--- start FTQ dump --->\n");
6483 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6484 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6485 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6486 
6487 	netdev_err(dev, "CPU states:\n");
6488 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6489 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6490 			   reg, bnx2_reg_rd_ind(bp, reg),
6491 			   bnx2_reg_rd_ind(bp, reg + 4),
6492 			   bnx2_reg_rd_ind(bp, reg + 8),
6493 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6494 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6495 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6496 
6497 	netdev_err(dev, "<--- end FTQ dump --->\n");
6498 	netdev_err(dev, "<--- start TBDC dump --->\n");
6499 	netdev_err(dev, "TBDC free cnt: %ld\n",
6500 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6501 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6502 	for (i = 0; i < 0x20; i++) {
6503 		int j = 0;
6504 
6505 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6506 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6507 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6508 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6509 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6510 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6511 			j++;
6512 
6513 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6514 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6515 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6516 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6517 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6518 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6519 	}
6520 	netdev_err(dev, "<--- end TBDC dump --->\n");
6521 }
6522 
6523 static void
6524 bnx2_dump_state(struct bnx2 *bp)
6525 {
6526 	struct net_device *dev = bp->dev;
6527 	u32 val1, val2;
6528 
6529 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6530 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6531 		   atomic_read(&bp->intr_sem), val1);
6532 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6533 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6534 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6535 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6536 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6537 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6538 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6539 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6540 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6541 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6542 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6543 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6544 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6545 }
6546 
6547 static void
6548 bnx2_tx_timeout(struct net_device *dev)
6549 {
6550 	struct bnx2 *bp = netdev_priv(dev);
6551 
6552 	bnx2_dump_ftq(bp);
6553 	bnx2_dump_state(bp);
6554 	bnx2_dump_mcp_state(bp);
6555 
6556 	/* This allows the netif to be shut down gracefully before resetting */
6557 	schedule_work(&bp->reset_task);
6558 }
6559 
6560 /* Called with netif_tx_lock.
6561  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6562  * netif_wake_queue().
6563  */
6564 static netdev_tx_t
6565 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6566 {
6567 	struct bnx2 *bp = netdev_priv(dev);
6568 	dma_addr_t mapping;
6569 	struct bnx2_tx_bd *txbd;
6570 	struct bnx2_sw_tx_bd *tx_buf;
6571 	u32 len, vlan_tag_flags, last_frag, mss;
6572 	u16 prod, ring_prod;
6573 	int i;
6574 	struct bnx2_napi *bnapi;
6575 	struct bnx2_tx_ring_info *txr;
6576 	struct netdev_queue *txq;
6577 
6578 	/* Determine which tx ring this packet will be placed on */
6579 	i = skb_get_queue_mapping(skb);
6580 	bnapi = &bp->bnx2_napi[i];
6581 	txr = &bnapi->tx_ring;
6582 	txq = netdev_get_tx_queue(dev, i);
6583 
6584 	if (unlikely(bnx2_tx_avail(bp, txr) <
6585 	    (skb_shinfo(skb)->nr_frags + 1))) {
6586 		netif_tx_stop_queue(txq);
6587 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6588 
6589 		return NETDEV_TX_BUSY;
6590 	}
6591 	len = skb_headlen(skb);
6592 	prod = txr->tx_prod;
6593 	ring_prod = BNX2_TX_RING_IDX(prod);
6594 
6595 	vlan_tag_flags = 0;
6596 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6597 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6598 	}
6599 
6600 	if (skb_vlan_tag_present(skb)) {
6601 		vlan_tag_flags |=
6602 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6603 	}
6604 
6605 	if ((mss = skb_shinfo(skb)->gso_size)) {
6606 		u32 tcp_opt_len;
6607 		struct iphdr *iph;
6608 
6609 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6610 
6611 		tcp_opt_len = tcp_optlen(skb);
6612 
6613 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6614 			u32 tcp_off = skb_transport_offset(skb) -
6615 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6616 
6617 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6618 					  TX_BD_FLAGS_SW_FLAGS;
6619 			if (likely(tcp_off == 0))
6620 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6621 			else {
6622 				tcp_off >>= 3;
6623 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6624 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6625 						  ((tcp_off & 0x10) <<
6626 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6627 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6628 			}
6629 		} else {
6630 			iph = ip_hdr(skb);
6631 			if (tcp_opt_len || (iph->ihl > 5)) {
6632 				vlan_tag_flags |= ((iph->ihl - 5) +
6633 						   (tcp_opt_len >> 2)) << 8;
6634 			}
6635 		}
6636 	} else
6637 		mss = 0;
6638 
6639 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6640 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6641 		dev_kfree_skb_any(skb);
6642 		return NETDEV_TX_OK;
6643 	}
6644 
6645 	tx_buf = &txr->tx_buf_ring[ring_prod];
6646 	tx_buf->skb = skb;
6647 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6648 
6649 	txbd = &txr->tx_desc_ring[ring_prod];
6650 
6651 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6652 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6653 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6654 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6655 
6656 	last_frag = skb_shinfo(skb)->nr_frags;
6657 	tx_buf->nr_frags = last_frag;
6658 	tx_buf->is_gso = skb_is_gso(skb);
6659 
6660 	for (i = 0; i < last_frag; i++) {
6661 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6662 
6663 		prod = BNX2_NEXT_TX_BD(prod);
6664 		ring_prod = BNX2_TX_RING_IDX(prod);
6665 		txbd = &txr->tx_desc_ring[ring_prod];
6666 
6667 		len = skb_frag_size(frag);
6668 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6669 					   DMA_TO_DEVICE);
6670 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6671 			goto dma_error;
6672 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6673 				   mapping);
6674 
6675 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6676 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6677 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6678 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6679 
6680 	}
6681 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6682 
6683 	/* Sync BD data before updating TX mailbox */
6684 	wmb();
6685 
6686 	netdev_tx_sent_queue(txq, skb->len);
6687 
6688 	prod = BNX2_NEXT_TX_BD(prod);
6689 	txr->tx_prod_bseq += skb->len;
6690 
6691 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6692 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6693 
6694 	mmiowb();
6695 
6696 	txr->tx_prod = prod;
6697 
6698 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6699 		netif_tx_stop_queue(txq);
6700 
6701 		/* netif_tx_stop_queue() must be done before checking
6702 		 * tx index in bnx2_tx_avail() below, because in
6703 		 * bnx2_tx_int(), we update tx index before checking for
6704 		 * netif_tx_queue_stopped().
6705 		 */
6706 		smp_mb();
6707 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6708 			netif_tx_wake_queue(txq);
6709 	}
6710 
6711 	return NETDEV_TX_OK;
6712 dma_error:
6713 	/* save value of frag that failed */
6714 	last_frag = i;
6715 
6716 	/* start back at beginning and unmap skb */
6717 	prod = txr->tx_prod;
6718 	ring_prod = BNX2_TX_RING_IDX(prod);
6719 	tx_buf = &txr->tx_buf_ring[ring_prod];
6720 	tx_buf->skb = NULL;
6721 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6722 			 skb_headlen(skb), PCI_DMA_TODEVICE);
6723 
6724 	/* unmap remaining mapped pages */
6725 	for (i = 0; i < last_frag; i++) {
6726 		prod = BNX2_NEXT_TX_BD(prod);
6727 		ring_prod = BNX2_TX_RING_IDX(prod);
6728 		tx_buf = &txr->tx_buf_ring[ring_prod];
6729 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6730 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6731 			       PCI_DMA_TODEVICE);
6732 	}
6733 
6734 	dev_kfree_skb_any(skb);
6735 	return NETDEV_TX_OK;
6736 }
6737 
6738 /* Called with rtnl_lock */
6739 static int
6740 bnx2_close(struct net_device *dev)
6741 {
6742 	struct bnx2 *bp = netdev_priv(dev);
6743 
6744 	bnx2_disable_int_sync(bp);
6745 	bnx2_napi_disable(bp);
6746 	netif_tx_disable(dev);
6747 	del_timer_sync(&bp->timer);
6748 	bnx2_shutdown_chip(bp);
6749 	bnx2_free_irq(bp);
6750 	bnx2_free_skbs(bp);
6751 	bnx2_free_mem(bp);
6752 	bnx2_del_napi(bp);
6753 	bp->link_up = 0;
6754 	netif_carrier_off(bp->dev);
6755 	return 0;
6756 }
6757 
6758 static void
6759 bnx2_save_stats(struct bnx2 *bp)
6760 {
6761 	u32 *hw_stats = (u32 *) bp->stats_blk;
6762 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6763 	int i;
6764 
6765 	/* The first 10 counters are 64-bit counters (hi/lo u32 pairs). */
6766 	for (i = 0; i < 20; i += 2) {
6767 		u32 hi;
6768 		u64 lo;
6769 
6770 		hi = temp_stats[i] + hw_stats[i];
6771 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6772 		if (lo > 0xffffffff)
6773 			hi++;
6774 		temp_stats[i] = hi;
6775 		temp_stats[i + 1] = lo & 0xffffffff;
6776 	}
6777 
6778 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6779 		temp_stats[i] += hw_stats[i];
6780 }
6781 
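/* Each 64-bit hardware counter is a hi/lo pair of u32 fields; fold the
 * pair into a u64 and add the snapshot accumulated in temp_stats_blk
 * across chip resets.
 */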
6782 #define GET_64BIT_NET_STATS64(ctr)		\
6783 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6784 
6785 #define GET_64BIT_NET_STATS(ctr)				\
6786 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6787 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6788 
6789 #define GET_32BIT_NET_STATS(ctr)				\
6790 	(unsigned long) (bp->stats_blk->ctr +			\
6791 			 bp->temp_stats_blk->ctr)
6792 
6793 static struct rtnl_link_stats64 *
6794 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6795 {
6796 	struct bnx2 *bp = netdev_priv(dev);
6797 
6798 	if (bp->stats_blk == NULL)
6799 		return net_stats;
6800 
6801 	net_stats->rx_packets =
6802 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6803 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6804 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6805 
6806 	net_stats->tx_packets =
6807 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6808 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6809 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6810 
6811 	net_stats->rx_bytes =
6812 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6813 
6814 	net_stats->tx_bytes =
6815 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6816 
6817 	net_stats->multicast =
6818 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6819 
6820 	net_stats->collisions =
6821 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6822 
6823 	net_stats->rx_length_errors =
6824 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6825 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6826 
6827 	net_stats->rx_over_errors =
6828 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6829 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6830 
6831 	net_stats->rx_frame_errors =
6832 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6833 
6834 	net_stats->rx_crc_errors =
6835 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6836 
6837 	net_stats->rx_errors = net_stats->rx_length_errors +
6838 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6839 		net_stats->rx_crc_errors;
6840 
6841 	net_stats->tx_aborted_errors =
6842 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6843 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6844 
6845 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6846 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6847 		net_stats->tx_carrier_errors = 0;
6848 	else {
6849 		net_stats->tx_carrier_errors =
6850 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6851 	}
6852 
6853 	net_stats->tx_errors =
6854 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6855 		net_stats->tx_aborted_errors +
6856 		net_stats->tx_carrier_errors;
6857 
6858 	net_stats->rx_missed_errors =
6859 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6860 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6861 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6862 
6863 	return net_stats;
6864 }
6865 
6866 /* All ethtool functions called with rtnl_lock */
6867 
6868 static int
6869 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6870 {
6871 	struct bnx2 *bp = netdev_priv(dev);
6872 	int support_serdes = 0, support_copper = 0;
6873 
6874 	cmd->supported = SUPPORTED_Autoneg;
6875 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6876 		support_serdes = 1;
6877 		support_copper = 1;
6878 	} else if (bp->phy_port == PORT_FIBRE)
6879 		support_serdes = 1;
6880 	else
6881 		support_copper = 1;
6882 
6883 	if (support_serdes) {
6884 		cmd->supported |= SUPPORTED_1000baseT_Full |
6885 			SUPPORTED_FIBRE;
6886 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6887 			cmd->supported |= SUPPORTED_2500baseX_Full;
6888 
6889 	}
6890 	if (support_copper) {
6891 		cmd->supported |= SUPPORTED_10baseT_Half |
6892 			SUPPORTED_10baseT_Full |
6893 			SUPPORTED_100baseT_Half |
6894 			SUPPORTED_100baseT_Full |
6895 			SUPPORTED_1000baseT_Full |
6896 			SUPPORTED_TP;
6897 
6898 	}
6899 
6900 	spin_lock_bh(&bp->phy_lock);
6901 	cmd->port = bp->phy_port;
6902 	cmd->advertising = bp->advertising;
6903 
6904 	if (bp->autoneg & AUTONEG_SPEED) {
6905 		cmd->autoneg = AUTONEG_ENABLE;
6906 	} else {
6907 		cmd->autoneg = AUTONEG_DISABLE;
6908 	}
6909 
6910 	if (netif_carrier_ok(dev)) {
6911 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6912 		cmd->duplex = bp->duplex;
6913 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6914 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6915 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
6916 			else
6917 				cmd->eth_tp_mdix = ETH_TP_MDI;
6918 		}
6919 	}
6920 	else {
6921 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6922 		cmd->duplex = DUPLEX_UNKNOWN;
6923 	}
6924 	spin_unlock_bh(&bp->phy_lock);
6925 
6926 	cmd->transceiver = XCVR_INTERNAL;
6927 	cmd->phy_address = bp->phy_addr;
6928 
6929 	return 0;
6930 }
6931 
6932 static int
6933 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6934 {
6935 	struct bnx2 *bp = netdev_priv(dev);
6936 	u8 autoneg = bp->autoneg;
6937 	u8 req_duplex = bp->req_duplex;
6938 	u16 req_line_speed = bp->req_line_speed;
6939 	u32 advertising = bp->advertising;
6940 	int err = -EINVAL;
6941 
6942 	spin_lock_bh(&bp->phy_lock);
6943 
6944 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6945 		goto err_out_unlock;
6946 
6947 	if (cmd->port != bp->phy_port &&
6948 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6949 		goto err_out_unlock;
6950 
6951 	/* If device is down, we can store the settings only if the user
6952 	 * is setting the currently active port.
6953 	 */
6954 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6955 		goto err_out_unlock;
6956 
6957 	if (cmd->autoneg == AUTONEG_ENABLE) {
6958 		autoneg |= AUTONEG_SPEED;
6959 
6960 		advertising = cmd->advertising;
6961 		if (cmd->port == PORT_TP) {
6962 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6963 			if (!advertising)
6964 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6965 		} else {
6966 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6967 			if (!advertising)
6968 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6969 		}
6970 		advertising |= ADVERTISED_Autoneg;
6971 	}
6972 	else {
6973 		u32 speed = ethtool_cmd_speed(cmd);
6974 		if (cmd->port == PORT_FIBRE) {
6975 			if ((speed != SPEED_1000 &&
6976 			     speed != SPEED_2500) ||
6977 			    (cmd->duplex != DUPLEX_FULL))
6978 				goto err_out_unlock;
6979 
6980 			if (speed == SPEED_2500 &&
6981 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6982 				goto err_out_unlock;
6983 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6984 			goto err_out_unlock;
6985 
6986 		autoneg &= ~AUTONEG_SPEED;
6987 		req_line_speed = speed;
6988 		req_duplex = cmd->duplex;
6989 		advertising = 0;
6990 	}
6991 
6992 	bp->autoneg = autoneg;
6993 	bp->advertising = advertising;
6994 	bp->req_line_speed = req_line_speed;
6995 	bp->req_duplex = req_duplex;
6996 
6997 	err = 0;
6998 	/* If device is down, the new settings will be picked up when it is
6999 	 * brought up.
7000 	 */
7001 	if (netif_running(dev))
7002 		err = bnx2_setup_phy(bp, cmd->port);
7003 
7004 err_out_unlock:
7005 	spin_unlock_bh(&bp->phy_lock);
7006 
7007 	return err;
7008 }
7009 
7010 static void
7011 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7012 {
7013 	struct bnx2 *bp = netdev_priv(dev);
7014 
7015 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7016 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7017 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7018 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7019 }
7020 
7021 #define BNX2_REGDUMP_LEN		(32 * 1024)
7022 
7023 static int
7024 bnx2_get_regs_len(struct net_device *dev)
7025 {
7026 	return BNX2_REGDUMP_LEN;
7027 }
7028 
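/* Dump the first 32 KB of register space.  Only the ranges listed in
 * reg_boundaries (start/end pairs) are read; the gaps between them are
 * left zero-filled in the output buffer.
 */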
7029 static void
7030 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7031 {
7032 	u32 *p = _p, i, offset;
7033 	u8 *orig_p = _p;
7034 	struct bnx2 *bp = netdev_priv(dev);
7035 	static const u32 reg_boundaries[] = {
7036 		0x0000, 0x0098, 0x0400, 0x045c,
7037 		0x0800, 0x0880, 0x0c00, 0x0c10,
7038 		0x0c30, 0x0d08, 0x1000, 0x101c,
7039 		0x1040, 0x1048, 0x1080, 0x10a4,
7040 		0x1400, 0x1490, 0x1498, 0x14f0,
7041 		0x1500, 0x155c, 0x1580, 0x15dc,
7042 		0x1600, 0x1658, 0x1680, 0x16d8,
7043 		0x1800, 0x1820, 0x1840, 0x1854,
7044 		0x1880, 0x1894, 0x1900, 0x1984,
7045 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7046 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7047 		0x2000, 0x2030, 0x23c0, 0x2400,
7048 		0x2800, 0x2820, 0x2830, 0x2850,
7049 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7050 		0x3c00, 0x3c94, 0x4000, 0x4010,
7051 		0x4080, 0x4090, 0x43c0, 0x4458,
7052 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7053 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7054 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7055 		0x5fc0, 0x6000, 0x6400, 0x6428,
7056 		0x6800, 0x6848, 0x684c, 0x6860,
7057 		0x6888, 0x6910, 0x8000
7058 	};
7059 
7060 	regs->version = 0;
7061 
7062 	memset(p, 0, BNX2_REGDUMP_LEN);
7063 
7064 	if (!netif_running(bp->dev))
7065 		return;
7066 
7067 	i = 0;
7068 	offset = reg_boundaries[0];
7069 	p += offset;
7070 	while (offset < BNX2_REGDUMP_LEN) {
7071 		*p++ = BNX2_RD(bp, offset);
7072 		offset += 4;
7073 		if (offset == reg_boundaries[i + 1]) {
7074 			offset = reg_boundaries[i + 2];
7075 			p = (u32 *) (orig_p + offset);
7076 			i += 2;
7077 		}
7078 	}
7079 }
7080 
7081 static void
7082 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7083 {
7084 	struct bnx2 *bp = netdev_priv(dev);
7085 
7086 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7087 		wol->supported = 0;
7088 		wol->wolopts = 0;
7089 	}
7090 	else {
7091 		wol->supported = WAKE_MAGIC;
7092 		if (bp->wol)
7093 			wol->wolopts = WAKE_MAGIC;
7094 		else
7095 			wol->wolopts = 0;
7096 	}
7097 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7098 }
7099 
7100 static int
7101 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7102 {
7103 	struct bnx2 *bp = netdev_priv(dev);
7104 
7105 	if (wol->wolopts & ~WAKE_MAGIC)
7106 		return -EINVAL;
7107 
7108 	if (wol->wolopts & WAKE_MAGIC) {
7109 		if (bp->flags & BNX2_FLAG_NO_WOL)
7110 			return -EINVAL;
7111 
7112 		bp->wol = 1;
7113 	}
7114 	else {
7115 		bp->wol = 0;
7116 	}
7117 
7118 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7119 
7120 	return 0;
7121 }
7122 
7123 static int
7124 bnx2_nway_reset(struct net_device *dev)
7125 {
7126 	struct bnx2 *bp = netdev_priv(dev);
7127 	u32 bmcr;
7128 
7129 	if (!netif_running(dev))
7130 		return -EAGAIN;
7131 
7132 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7133 		return -EINVAL;
7134 	}
7135 
7136 	spin_lock_bh(&bp->phy_lock);
7137 
7138 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7139 		int rc;
7140 
7141 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7142 		spin_unlock_bh(&bp->phy_lock);
7143 		return rc;
7144 	}
7145 
7146 	/* Force a link down visible on the other side */
7147 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7148 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7149 		spin_unlock_bh(&bp->phy_lock);
7150 
7151 		msleep(20);
7152 
7153 		spin_lock_bh(&bp->phy_lock);
7154 
7155 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7156 		bp->serdes_an_pending = 1;
7157 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7158 	}
7159 
7160 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7161 	bmcr &= ~BMCR_LOOPBACK;
7162 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7163 
7164 	spin_unlock_bh(&bp->phy_lock);
7165 
7166 	return 0;
7167 }
7168 
7169 static u32
7170 bnx2_get_link(struct net_device *dev)
7171 {
7172 	struct bnx2 *bp = netdev_priv(dev);
7173 
7174 	return bp->link_up;
7175 }
7176 
7177 static int
7178 bnx2_get_eeprom_len(struct net_device *dev)
7179 {
7180 	struct bnx2 *bp = netdev_priv(dev);
7181 
7182 	if (bp->flash_info == NULL)
7183 		return 0;
7184 
7185 	return (int) bp->flash_size;
7186 }
7187 
7188 static int
7189 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7190 		u8 *eebuf)
7191 {
7192 	struct bnx2 *bp = netdev_priv(dev);
7193 	int rc;
7194 
7195 	/* parameters already validated in ethtool_get_eeprom */
7196 
7197 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7198 
7199 	return rc;
7200 }
7201 
7202 static int
7203 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7204 		u8 *eebuf)
7205 {
7206 	struct bnx2 *bp = netdev_priv(dev);
7207 	int rc;
7208 
7209 	/* parameters already validated in ethtool_set_eeprom */
7210 
7211 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7212 
7213 	return rc;
7214 }
7215 
7216 static int
7217 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7218 {
7219 	struct bnx2 *bp = netdev_priv(dev);
7220 
7221 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7222 
7223 	coal->rx_coalesce_usecs = bp->rx_ticks;
7224 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7225 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7226 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7227 
7228 	coal->tx_coalesce_usecs = bp->tx_ticks;
7229 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7230 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7231 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7232 
7233 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7234 
7235 	return 0;
7236 }
7237 
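/* Clamp the requested values to the hardware limits (10-bit tick counts,
 * 8-bit frame counts), special-case chips with a broken statistics block,
 * then restart the NIC so the host coalescing block picks up the new
 * parameters.
 */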
7238 static int
7239 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7240 {
7241 	struct bnx2 *bp = netdev_priv(dev);
7242 
7243 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7244 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7245 
7246 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7247 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7248 
7249 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7250 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7251 
7252 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7253 	if (bp->rx_quick_cons_trip_int > 0xff)
7254 		bp->rx_quick_cons_trip_int = 0xff;
7255 
7256 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7257 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7258 
7259 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7260 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7261 
7262 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7263 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7264 
7265 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7266 	if (bp->tx_quick_cons_trip_int > 0xff)
7267 		bp->tx_quick_cons_trip_int = 0xff;
7268 
7269 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7270 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7271 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7272 			bp->stats_ticks = USEC_PER_SEC;
7273 	}
7274 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7275 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7276 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7277 
7278 	if (netif_running(bp->dev)) {
7279 		bnx2_netif_stop(bp, true);
7280 		bnx2_init_nic(bp, 0);
7281 		bnx2_netif_start(bp, true);
7282 	}
7283 
7284 	return 0;
7285 }
7286 
7287 static void
7288 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7289 {
7290 	struct bnx2 *bp = netdev_priv(dev);
7291 
7292 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7293 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7294 
7295 	ering->rx_pending = bp->rx_ring_size;
7296 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7297 
7298 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7299 	ering->tx_pending = bp->tx_ring_size;
7300 }
7301 
7302 static int
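/* Quiesce the NIC, free the old rings (and, when reset_irq is set, the IRQ
 * vectors and NAPI contexts as well), apply the new ring sizes, then
 * reallocate and restart.  If re-initialization fails, the device is
 * closed.
 */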
7303 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7304 {
7305 	if (netif_running(bp->dev)) {
7306 		/* Reset will erase chipset stats; save them */
7307 		bnx2_save_stats(bp);
7308 
7309 		bnx2_netif_stop(bp, true);
7310 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7311 		if (reset_irq) {
7312 			bnx2_free_irq(bp);
7313 			bnx2_del_napi(bp);
7314 		} else {
7315 			__bnx2_free_irq(bp);
7316 		}
7317 		bnx2_free_skbs(bp);
7318 		bnx2_free_mem(bp);
7319 	}
7320 
7321 	bnx2_set_rx_ring_size(bp, rx);
7322 	bp->tx_ring_size = tx;
7323 
7324 	if (netif_running(bp->dev)) {
7325 		int rc = 0;
7326 
7327 		if (reset_irq) {
7328 			rc = bnx2_setup_int_mode(bp, disable_msi);
7329 			bnx2_init_napi(bp);
7330 		}
7331 
7332 		if (!rc)
7333 			rc = bnx2_alloc_mem(bp);
7334 
7335 		if (!rc)
7336 			rc = bnx2_request_irq(bp);
7337 
7338 		if (!rc)
7339 			rc = bnx2_init_nic(bp, 0);
7340 
7341 		if (rc) {
7342 			bnx2_napi_enable(bp);
7343 			dev_close(bp->dev);
7344 			return rc;
7345 		}
7346 #ifdef BCM_CNIC
7347 		mutex_lock(&bp->cnic_lock);
7348 		/* Let cnic know about the new status block. */
7349 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7350 			bnx2_setup_cnic_irq_info(bp);
7351 		mutex_unlock(&bp->cnic_lock);
7352 #endif
7353 		bnx2_netif_start(bp, true);
7354 	}
7355 	return 0;
7356 }
7357 
7358 static int
7359 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7360 {
7361 	struct bnx2 *bp = netdev_priv(dev);
7362 	int rc;
7363 
7364 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7365 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7366 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7367 
7368 		return -EINVAL;
7369 	}
7370 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7371 				   false);
7372 	return rc;
7373 }
7374 
7375 static void
7376 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7377 {
7378 	struct bnx2 *bp = netdev_priv(dev);
7379 
7380 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7381 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7382 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7383 }
7384 
7385 static int
7386 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7387 {
7388 	struct bnx2 *bp = netdev_priv(dev);
7389 
7390 	bp->req_flow_ctrl = 0;
7391 	if (epause->rx_pause)
7392 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7393 	if (epause->tx_pause)
7394 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7395 
7396 	if (epause->autoneg) {
7397 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7398 	}
7399 	else {
7400 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7401 	}
7402 
7403 	if (netif_running(dev)) {
7404 		spin_lock_bh(&bp->phy_lock);
7405 		bnx2_setup_phy(bp, bp->phy_port);
7406 		spin_unlock_bh(&bp->phy_lock);
7407 	}
7408 
7409 	return 0;
7410 }
7411 
7412 static struct {
7413 	char string[ETH_GSTRING_LEN];
7414 } bnx2_stats_str_arr[] = {
7415 	{ "rx_bytes" },
7416 	{ "rx_error_bytes" },
7417 	{ "tx_bytes" },
7418 	{ "tx_error_bytes" },
7419 	{ "rx_ucast_packets" },
7420 	{ "rx_mcast_packets" },
7421 	{ "rx_bcast_packets" },
7422 	{ "tx_ucast_packets" },
7423 	{ "tx_mcast_packets" },
7424 	{ "tx_bcast_packets" },
7425 	{ "tx_mac_errors" },
7426 	{ "tx_carrier_errors" },
7427 	{ "rx_crc_errors" },
7428 	{ "rx_align_errors" },
7429 	{ "tx_single_collisions" },
7430 	{ "tx_multi_collisions" },
7431 	{ "tx_deferred" },
7432 	{ "tx_excess_collisions" },
7433 	{ "tx_late_collisions" },
7434 	{ "tx_total_collisions" },
7435 	{ "rx_fragments" },
7436 	{ "rx_jabbers" },
7437 	{ "rx_undersize_packets" },
7438 	{ "rx_oversize_packets" },
7439 	{ "rx_64_byte_packets" },
7440 	{ "rx_65_to_127_byte_packets" },
7441 	{ "rx_128_to_255_byte_packets" },
7442 	{ "rx_256_to_511_byte_packets" },
7443 	{ "rx_512_to_1023_byte_packets" },
7444 	{ "rx_1024_to_1522_byte_packets" },
7445 	{ "rx_1523_to_9022_byte_packets" },
7446 	{ "tx_64_byte_packets" },
7447 	{ "tx_65_to_127_byte_packets" },
7448 	{ "tx_128_to_255_byte_packets" },
7449 	{ "tx_256_to_511_byte_packets" },
7450 	{ "tx_512_to_1023_byte_packets" },
7451 	{ "tx_1024_to_1522_byte_packets" },
7452 	{ "tx_1523_to_9022_byte_packets" },
7453 	{ "rx_xon_frames" },
7454 	{ "rx_xoff_frames" },
7455 	{ "tx_xon_frames" },
7456 	{ "tx_xoff_frames" },
7457 	{ "rx_mac_ctrl_frames" },
7458 	{ "rx_filtered_packets" },
7459 	{ "rx_ftq_discards" },
7460 	{ "rx_discards" },
7461 	{ "rx_fw_discards" },
7462 };
7463 
7464 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7465 
7466 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7467 
7468 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7469     STATS_OFFSET32(stat_IfHCInOctets_hi),
7470     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7471     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7472     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7473     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7474     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7475     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7476     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7477     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7478     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7479     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7480     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7481     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7482     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7483     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7484     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7485     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7486     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7487     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7488     STATS_OFFSET32(stat_EtherStatsCollisions),
7489     STATS_OFFSET32(stat_EtherStatsFragments),
7490     STATS_OFFSET32(stat_EtherStatsJabbers),
7491     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7492     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7493     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7494     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7495     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7496     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7497     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7498     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7499     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7500     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7501     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7502     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7503     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7504     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7505     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7506     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7507     STATS_OFFSET32(stat_XonPauseFramesReceived),
7508     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7509     STATS_OFFSET32(stat_OutXonSent),
7510     STATS_OFFSET32(stat_OutXoffSent),
7511     STATS_OFFSET32(stat_MacControlFramesReceived),
7512     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7513     STATS_OFFSET32(stat_IfInFTQDiscards),
7514     STATS_OFFSET32(stat_IfInMBUFDiscards),
7515     STATS_OFFSET32(stat_FwRxDrop),
7516 };
7517 
7518 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7519  * skipped because of errata.
7520  */
7521 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7522 	8,0,8,8,8,8,8,8,8,8,
7523 	4,0,4,4,4,4,4,4,4,4,
7524 	4,4,4,4,4,4,4,4,4,4,
7525 	4,4,4,4,4,4,4,4,4,4,
7526 	4,4,4,4,4,4,4,
7527 };
7528 
7529 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7530 	8,0,8,8,8,8,8,8,8,8,
7531 	4,4,4,4,4,4,4,4,4,4,
7532 	4,4,4,4,4,4,4,4,4,4,
7533 	4,4,4,4,4,4,4,4,4,4,
7534 	4,4,4,4,4,4,4,
7535 };
7536 
7537 #define BNX2_NUM_TESTS 6
7538 
7539 static struct {
7540 	char string[ETH_GSTRING_LEN];
7541 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7542 	{ "register_test (offline)" },
7543 	{ "memory_test (offline)" },
7544 	{ "loopback_test (offline)" },
7545 	{ "nvram_test (online)" },
7546 	{ "interrupt_test (online)" },
7547 	{ "link_test (online)" },
7548 };
7549 
7550 static int
7551 bnx2_get_sset_count(struct net_device *dev, int sset)
7552 {
7553 	switch (sset) {
7554 	case ETH_SS_TEST:
7555 		return BNX2_NUM_TESTS;
7556 	case ETH_SS_STATS:
7557 		return BNX2_NUM_STATS;
7558 	default:
7559 		return -EOPNOTSUPP;
7560 	}
7561 }
7562 
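/* ethtool self-test.  An offline test runs the register, memory, and
 * loopback tests (buf[0..2]) with the chip in diagnostic reset; the NVRAM,
 * interrupt, and link tests (buf[3..5]) always run.  A nonzero buf entry
 * marks the corresponding test as failed.
 */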
7563 static void
7564 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7565 {
7566 	struct bnx2 *bp = netdev_priv(dev);
7567 
7568 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7569 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7570 		int i;
7571 
7572 		bnx2_netif_stop(bp, true);
7573 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7574 		bnx2_free_skbs(bp);
7575 
7576 		if (bnx2_test_registers(bp) != 0) {
7577 			buf[0] = 1;
7578 			etest->flags |= ETH_TEST_FL_FAILED;
7579 		}
7580 		if (bnx2_test_memory(bp) != 0) {
7581 			buf[1] = 1;
7582 			etest->flags |= ETH_TEST_FL_FAILED;
7583 		}
7584 		buf[2] = bnx2_test_loopback(bp);
		if (buf[2] != 0)
7585 			etest->flags |= ETH_TEST_FL_FAILED;
7586 
7587 		if (!netif_running(bp->dev)) {
7588 			bnx2_shutdown_chip(bp);
7589 		} else {
7590 			bnx2_init_nic(bp, 1);
7591 			bnx2_netif_start(bp, true);
7592 		}
7593 
7594 		/* wait for link up */
7595 		for (i = 0; i < 7; i++) {
7596 			if (bp->link_up)
7597 				break;
7598 			msleep_interruptible(1000);
7599 		}
7600 	}
7601 
7602 	if (bnx2_test_nvram(bp) != 0) {
7603 		buf[3] = 1;
7604 		etest->flags |= ETH_TEST_FL_FAILED;
7605 	}
7606 	if (bnx2_test_intr(bp) != 0) {
7607 		buf[4] = 1;
7608 		etest->flags |= ETH_TEST_FL_FAILED;
7609 	}
7610 
7611 	if (bnx2_test_link(bp) != 0) {
7612 		buf[5] = 1;
7613 		etest->flags |= ETH_TEST_FL_FAILED;
7615 	}
7616 }
7617 
7618 static void
7619 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7620 {
7621 	switch (stringset) {
7622 	case ETH_SS_STATS:
7623 		memcpy(buf, bnx2_stats_str_arr,
7624 			sizeof(bnx2_stats_str_arr));
7625 		break;
7626 	case ETH_SS_TEST:
7627 		memcpy(buf, bnx2_tests_str_arr,
7628 			sizeof(bnx2_tests_str_arr));
7629 		break;
7630 	}
7631 }
7632 
7633 static void
7634 bnx2_get_ethtool_stats(struct net_device *dev,
7635 		struct ethtool_stats *stats, u64 *buf)
7636 {
7637 	struct bnx2 *bp = netdev_priv(dev);
7638 	int i;
7639 	u32 *hw_stats = (u32 *) bp->stats_blk;
7640 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7641 	u8 *stats_len_arr = NULL;
7642 
7643 	if (hw_stats == NULL) {
7644 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7645 		return;
7646 	}
7647 
7648 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7649 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7650 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7651 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7652 		stats_len_arr = bnx2_5706_stats_len_arr;
7653 	else
7654 		stats_len_arr = bnx2_5708_stats_len_arr;
7655 
7656 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7657 		unsigned long offset;
7658 
7659 		if (stats_len_arr[i] == 0) {
7660 			/* skip this counter */
7661 			buf[i] = 0;
7662 			continue;
7663 		}
7664 
7665 		offset = bnx2_stats_offset_arr[i];
7666 		if (stats_len_arr[i] == 4) {
7667 			/* 4-byte counter */
7668 			buf[i] = (u64) *(hw_stats + offset) +
7669 				 *(temp_stats + offset);
7670 			continue;
7671 		}
7672 		/* 8-byte counter */
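		/* The chip exports 64-bit counters as two consecutive
		 * 32-bit words with the high word first; temp_stats holds
		 * the totals carried over from before the last chip reset,
		 * so the sum below yields the cumulative count.
		 */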
7673 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7674 			 *(hw_stats + offset + 1) +
7675 			 (((u64) *(temp_stats + offset)) << 32) +
7676 			 *(temp_stats + offset + 1);
7677 	}
7678 }
7679 
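/* ethtool identify-LED callback.  A typical invocation (interface name
 * hypothetical) is "ethtool -p eth0 5": the core calls ETHTOOL_ID_ACTIVE
 * once, alternates ETHTOOL_ID_ON/ETHTOOL_ID_OFF at the rate given by the
 * ETHTOOL_ID_ACTIVE return value (1 = one on/off cycle per second), and
 * finishes with ETHTOOL_ID_INACTIVE so the saved LED mode is restored.
 */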
7680 static int
7681 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7682 {
7683 	struct bnx2 *bp = netdev_priv(dev);
7684 
7685 	switch (state) {
7686 	case ETHTOOL_ID_ACTIVE:
7687 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7688 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7689 		return 1;	/* cycle on/off once per second */
7690 
7691 	case ETHTOOL_ID_ON:
7692 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7693 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7694 			BNX2_EMAC_LED_100MB_OVERRIDE |
7695 			BNX2_EMAC_LED_10MB_OVERRIDE |
7696 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7697 			BNX2_EMAC_LED_TRAFFIC);
7698 		break;
7699 
7700 	case ETHTOOL_ID_OFF:
7701 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7702 		break;
7703 
7704 	case ETHTOOL_ID_INACTIVE:
7705 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7706 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7707 		break;
7708 	}
7709 
7710 	return 0;
7711 }
7712 
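/* When the chip cannot keep VLAN tags (e.g. management firmware is in
 * control of the port), RX tag stripping must stay enabled, so
 * bnx2_fix_features() forces NETIF_F_HW_VLAN_CTAG_RX on.  Note that
 * bnx2_set_features() returns 1 when it has already committed
 * dev->features itself, which tells the core not to overwrite them.
 */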
7713 static netdev_features_t
7714 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7715 {
7716 	struct bnx2 *bp = netdev_priv(dev);
7717 
7718 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7719 		features |= NETIF_F_HW_VLAN_CTAG_RX;
7720 
7721 	return features;
7722 }
7723 
7724 static int
7725 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7726 {
7727 	struct bnx2 *bp = netdev_priv(dev);
7728 
7729 	/* TSO with VLAN tag won't work with current firmware */
7730 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7731 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7732 	else
7733 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7734 
7735 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7736 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7737 	    netif_running(dev)) {
7738 		bnx2_netif_stop(bp, false);
7739 		dev->features = features;
7740 		bnx2_set_rx_mode(dev);
7741 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7742 		bnx2_netif_start(bp, false);
7743 		return 1;
7744 	}
7745 
7746 	return 0;
7747 }
7748 
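/* ethtool channel controls.  With MSI-X available these report up to
 * RX_MAX_RINGS/TX_MAX_RINGS; otherwise a single ring each.  A sample
 * invocation (interface name hypothetical) is "ethtool -L eth0 rx 4
 * tx 4", which takes effect immediately if the device is running.
 */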
7749 static void bnx2_get_channels(struct net_device *dev,
7750 			      struct ethtool_channels *channels)
7751 {
7752 	struct bnx2 *bp = netdev_priv(dev);
7753 	u32 max_rx_rings = 1;
7754 	u32 max_tx_rings = 1;
7755 
7756 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7757 		max_rx_rings = RX_MAX_RINGS;
7758 		max_tx_rings = TX_MAX_RINGS;
7759 	}
7760 
7761 	channels->max_rx = max_rx_rings;
7762 	channels->max_tx = max_tx_rings;
7763 	channels->max_other = 0;
7764 	channels->max_combined = 0;
7765 	channels->rx_count = bp->num_rx_rings;
7766 	channels->tx_count = bp->num_tx_rings;
7767 	channels->other_count = 0;
7768 	channels->combined_count = 0;
7769 }
7770 
7771 static int bnx2_set_channels(struct net_device *dev,
7772 			      struct ethtool_channels *channels)
7773 {
7774 	struct bnx2 *bp = netdev_priv(dev);
7775 	u32 max_rx_rings = 1;
7776 	u32 max_tx_rings = 1;
7777 	int rc = 0;
7778 
7779 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7780 		max_rx_rings = RX_MAX_RINGS;
7781 		max_tx_rings = TX_MAX_RINGS;
7782 	}
7783 	if (channels->rx_count > max_rx_rings ||
7784 	    channels->tx_count > max_tx_rings)
7785 		return -EINVAL;
7786 
7787 	bp->num_req_rx_rings = channels->rx_count;
7788 	bp->num_req_tx_rings = channels->tx_count;
7789 
7790 	if (netif_running(dev))
7791 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7792 					   bp->tx_ring_size, true);
7793 
7794 	return rc;
7795 }
7796 
7797 static const struct ethtool_ops bnx2_ethtool_ops = {
7798 	.get_settings		= bnx2_get_settings,
7799 	.set_settings		= bnx2_set_settings,
7800 	.get_drvinfo		= bnx2_get_drvinfo,
7801 	.get_regs_len		= bnx2_get_regs_len,
7802 	.get_regs		= bnx2_get_regs,
7803 	.get_wol		= bnx2_get_wol,
7804 	.set_wol		= bnx2_set_wol,
7805 	.nway_reset		= bnx2_nway_reset,
7806 	.get_link		= bnx2_get_link,
7807 	.get_eeprom_len		= bnx2_get_eeprom_len,
7808 	.get_eeprom		= bnx2_get_eeprom,
7809 	.set_eeprom		= bnx2_set_eeprom,
7810 	.get_coalesce		= bnx2_get_coalesce,
7811 	.set_coalesce		= bnx2_set_coalesce,
7812 	.get_ringparam		= bnx2_get_ringparam,
7813 	.set_ringparam		= bnx2_set_ringparam,
7814 	.get_pauseparam		= bnx2_get_pauseparam,
7815 	.set_pauseparam		= bnx2_set_pauseparam,
7816 	.self_test		= bnx2_self_test,
7817 	.get_strings		= bnx2_get_strings,
7818 	.set_phys_id		= bnx2_set_phys_id,
7819 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7820 	.get_sset_count		= bnx2_get_sset_count,
7821 	.get_channels		= bnx2_get_channels,
7822 	.set_channels		= bnx2_set_channels,
7823 };
7824 
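/* MII access via the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls, e.g.
 * from the userspace "mii-tool" utility.  SIOCGMIIPHY returns the PHY
 * address and falls through to read the requested register; both
 * register ioctls are refused with -EOPNOTSUPP when a remote PHY owns
 * the link and with -EAGAIN while the interface is down.
 */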
7825 /* Called with rtnl_lock */
7826 static int
7827 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7828 {
7829 	struct mii_ioctl_data *data = if_mii(ifr);
7830 	struct bnx2 *bp = netdev_priv(dev);
7831 	int err;
7832 
7833 	switch (cmd) {
7834 	case SIOCGMIIPHY:
7835 		data->phy_id = bp->phy_addr;
7836 
7837 		/* fallthru */
7838 	case SIOCGMIIREG: {
7839 		u32 mii_regval;
7840 
7841 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7842 			return -EOPNOTSUPP;
7843 
7844 		if (!netif_running(dev))
7845 			return -EAGAIN;
7846 
7847 		spin_lock_bh(&bp->phy_lock);
7848 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7849 		spin_unlock_bh(&bp->phy_lock);
7850 
7851 		data->val_out = mii_regval;
7852 
7853 		return err;
7854 	}
7855 
7856 	case SIOCSMIIREG:
7857 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7858 			return -EOPNOTSUPP;
7859 
7860 		if (!netif_running(dev))
7861 			return -EAGAIN;
7862 
7863 		spin_lock_bh(&bp->phy_lock);
7864 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7865 		spin_unlock_bh(&bp->phy_lock);
7866 
7867 		return err;
7868 
7869 	default:
7870 		/* do nothing */
7871 		break;
7872 	}
7873 	return -EOPNOTSUPP;
7874 }
7875 
7876 /* Called with rtnl_lock */
7877 static int
7878 bnx2_change_mac_addr(struct net_device *dev, void *p)
7879 {
7880 	struct sockaddr *addr = p;
7881 	struct bnx2 *bp = netdev_priv(dev);
7882 
7883 	if (!is_valid_ether_addr(addr->sa_data))
7884 		return -EADDRNOTAVAIL;
7885 
7886 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7887 	if (netif_running(dev))
7888 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7889 
7890 	return 0;
7891 }
7892 
7893 /* Called with rtnl_lock */
7894 static int
7895 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7896 {
7897 	struct bnx2 *bp = netdev_priv(dev);
7898 
7899 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7900 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7901 		return -EINVAL;
7902 
7903 	dev->mtu = new_mtu;
7904 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7905 				     false);
7906 }
7907 
7908 #ifdef CONFIG_NET_POLL_CONTROLLER
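/* Netpoll hook (netconsole and friends): invoked when normal interrupt
 * delivery may be unavailable, so run each vector's handler
 * synchronously, bracketed by disable_irq()/enable_irq() to avoid
 * racing the real handler.
 */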
7909 static void
7910 poll_bnx2(struct net_device *dev)
7911 {
7912 	struct bnx2 *bp = netdev_priv(dev);
7913 	int i;
7914 
7915 	for (i = 0; i < bp->irq_nvecs; i++) {
7916 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7917 
7918 		disable_irq(irq->vector);
7919 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7920 		enable_irq(irq->vector);
7921 	}
7922 }
7923 #endif
7924 
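/* The 5709 comes in copper, SERDES, and dual-media packages.  The bond
 * ID in MISC_DUAL_MEDIA_CTRL identifies the single-media packages
 * directly; for dual-media parts the strap value (override or hardware
 * strap) selects the media, and the mapping of strap values to SERDES
 * differs between PCI function 0 and function 1, as decoded below.
 */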
7925 static void
7926 bnx2_get_5709_media(struct bnx2 *bp)
7927 {
7928 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7929 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7930 	u32 strap;
7931 
7932 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7933 		return;
7934 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7935 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7936 		return;
7937 	}
7938 
7939 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7940 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7941 	else
7942 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7943 
7944 	if (bp->func == 0) {
7945 		switch (strap) {
7946 		case 0x4:
7947 		case 0x5:
7948 		case 0x6:
7949 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7950 			return;
7951 		}
7952 	} else {
7953 		switch (strap) {
7954 		case 0x1:
7955 		case 0x2:
7956 		case 0x4:
7957 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7958 			return;
7959 		}
7960 	}
7961 }
7962 
7963 static void
7964 bnx2_get_pci_speed(struct bnx2 *bp)
7965 {
7966 	u32 reg;
7967 
7968 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7969 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7970 		u32 clkreg;
7971 
7972 		bp->flags |= BNX2_FLAG_PCIX;
7973 
7974 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7975 
7976 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7977 		switch (clkreg) {
7978 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7979 			bp->bus_speed_mhz = 133;
7980 			break;
7981 
7982 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7983 			bp->bus_speed_mhz = 100;
7984 			break;
7985 
7986 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7987 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7988 			bp->bus_speed_mhz = 66;
7989 			break;
7990 
7991 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7992 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7993 			bp->bus_speed_mhz = 50;
7994 			break;
7995 
7996 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7997 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7998 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7999 			bp->bus_speed_mhz = 33;
8000 			break;
8001 		}
8002 	} else {
8004 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8005 			bp->bus_speed_mhz = 66;
8006 		else
8007 			bp->bus_speed_mhz = 33;
8008 	}
8009 
8010 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8011 		bp->flags |= BNX2_FLAG_PCI_32BIT;
8013 }
8014 
8015 static void
8016 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8017 {
8018 	int rc, i, j;
8019 	u8 *data;
8020 	unsigned int block_end, rosize, len;
8021 
8022 #define BNX2_VPD_NVRAM_OFFSET	0x300
8023 #define BNX2_VPD_LEN		128
8024 #define BNX2_MAX_VER_SLEN	30
8025 
8026 	data = kmalloc(256, GFP_KERNEL);
8027 	if (!data)
8028 		return;
8029 
8030 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8031 			     BNX2_VPD_LEN);
8032 	if (rc)
8033 		goto vpd_done;
8034 
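	/* The image was read into the upper half of the buffer as 32-bit
	 * words whose bytes are reversed relative to the stream the
	 * pci_vpd_* parsers expect; swap each 4-byte group while copying
	 * it down to offset 0.
	 */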
8035 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8036 		data[i] = data[i + BNX2_VPD_LEN + 3];
8037 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8038 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8039 		data[i + 3] = data[i + BNX2_VPD_LEN];
8040 	}
8041 
8042 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8043 	if (i < 0)
8044 		goto vpd_done;
8045 
8046 	rosize = pci_vpd_lrdt_size(&data[i]);
8047 	i += PCI_VPD_LRDT_TAG_SIZE;
8048 	block_end = i + rosize;
8049 
8050 	if (block_end > BNX2_VPD_LEN)
8051 		goto vpd_done;
8052 
8053 	j = pci_vpd_find_info_keyword(data, i, rosize,
8054 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8055 	if (j < 0)
8056 		goto vpd_done;
8057 
8058 	len = pci_vpd_info_field_size(&data[j]);
8059 
8060 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8061 	if (j + len > block_end || len != 4 ||
8062 	    memcmp(&data[j], "1028", 4))
8063 		goto vpd_done;
8064 
8065 	j = pci_vpd_find_info_keyword(data, i, rosize,
8066 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8067 	if (j < 0)
8068 		goto vpd_done;
8069 
8070 	len = pci_vpd_info_field_size(&data[j]);
8071 
8072 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8073 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8074 		goto vpd_done;
8075 
8076 	memcpy(bp->fw_version, &data[j], len);
8077 	bp->fw_version[len] = ' ';
8078 
8079 vpd_done:
8080 	kfree(data);
8081 }
8082 
8083 static int
8084 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8085 {
8086 	struct bnx2 *bp;
8087 	int rc, i, j;
8088 	u32 reg;
8089 	u64 dma_mask, persist_dma_mask;
8090 	int err;
8091 
8092 	SET_NETDEV_DEV(dev, &pdev->dev);
8093 	bp = netdev_priv(dev);
8094 
8095 	bp->flags = 0;
8096 	bp->phy_flags = 0;
8097 
8098 	bp->temp_stats_blk =
8099 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8100 
8101 	if (bp->temp_stats_blk == NULL) {
8102 		rc = -ENOMEM;
8103 		goto err_out;
8104 	}
8105 
8106 	/* enable device (incl. PCI PM wakeup) and bus-mastering */
8107 	rc = pci_enable_device(pdev);
8108 	if (rc) {
8109 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8110 		goto err_out;
8111 	}
8112 
8113 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8114 		dev_err(&pdev->dev,
8115 			"Cannot find PCI device base address, aborting\n");
8116 		rc = -ENODEV;
8117 		goto err_out_disable;
8118 	}
8119 
8120 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8121 	if (rc) {
8122 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8123 		goto err_out_disable;
8124 	}
8125 
8126 	pci_set_master(pdev);
8127 
8128 	bp->pm_cap = pdev->pm_cap;
8129 	if (bp->pm_cap == 0) {
8130 		dev_err(&pdev->dev,
8131 			"Cannot find power management capability, aborting\n");
8132 		rc = -EIO;
8133 		goto err_out_release;
8134 	}
8135 
8136 	bp->dev = dev;
8137 	bp->pdev = pdev;
8138 
8139 	spin_lock_init(&bp->phy_lock);
8140 	spin_lock_init(&bp->indirect_lock);
8141 #ifdef BCM_CNIC
8142 	mutex_init(&bp->cnic_lock);
8143 #endif
8144 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8145 
8146 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8147 							 TX_MAX_TSS_RINGS + 1));
8148 	if (!bp->regview) {
8149 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8150 		rc = -ENOMEM;
8151 		goto err_out_release;
8152 	}
8153 
8154 	/* Configure byte swap and enable write to the reg_window registers.
8155 	 * Rely on the CPU to do target byte swapping on big endian systems.
8156 	 * The chip's target access swapping will not swap all accesses.
8157 	 */
8158 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8159 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8160 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8161 
8162 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8163 
8164 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8165 		if (!pci_is_pcie(pdev)) {
8166 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8167 			rc = -EIO;
8168 			goto err_out_unmap;
8169 		}
8170 		bp->flags |= BNX2_FLAG_PCIE;
8171 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8172 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8173 
8174 		/* AER (Advanced Error Reporting) hooks */
8175 		err = pci_enable_pcie_error_reporting(pdev);
8176 		if (!err)
8177 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8178 
8179 	} else {
8180 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8181 		if (bp->pcix_cap == 0) {
8182 			dev_err(&pdev->dev,
8183 				"Cannot find PCIX capability, aborting\n");
8184 			rc = -EIO;
8185 			goto err_out_unmap;
8186 		}
8187 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8188 	}
8189 
8190 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8191 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8192 		if (pdev->msix_cap)
8193 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8194 	}
8195 
8196 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8197 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8198 		if (pdev->msi_cap)
8199 			bp->flags |= BNX2_FLAG_MSI_CAP;
8200 	}
8201 
8202 	/* The 5708 cannot support DMA addresses above 40 bits. */
8203 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8204 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8205 	else
8206 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8207 
8208 	/* Configure DMA attributes. */
8209 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8210 		dev->features |= NETIF_F_HIGHDMA;
8211 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8212 		if (rc) {
8213 			dev_err(&pdev->dev,
8214 				"pci_set_consistent_dma_mask failed, aborting\n");
8215 			goto err_out_unmap;
8216 		}
8217 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8218 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8219 		goto err_out_unmap;
8220 	}
8221 
8222 	if (!(bp->flags & BNX2_FLAG_PCIE))
8223 		bnx2_get_pci_speed(bp);
8224 
8225 	/* 5706A0 may falsely detect SERR and PERR. */
8226 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8227 		reg = BNX2_RD(bp, PCI_COMMAND);
8228 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8229 		BNX2_WR(bp, PCI_COMMAND, reg);
8230 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8231 		!(bp->flags & BNX2_FLAG_PCIX)) {
8233 		dev_err(&pdev->dev,
8234 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8235 		goto err_out_unmap;
8236 	}
8237 
8238 	bnx2_init_nvram(bp);
8239 
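	/* Multi-function devices publish one shared-memory window per PCI
	 * function.  If the bootcode advertises the new-style shared
	 * memory header, look up this function's window from the header;
	 * otherwise fall back to the fixed legacy base address.
	 */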
8240 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8241 
8242 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8243 		bp->func = 1;
8244 
8245 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8246 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8247 		u32 off = bp->func << 2;
8248 
8249 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8250 	} else
8251 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8252 
8253 	/* Get the permanent MAC address.  First we need to make sure the
8254 	 * firmware is actually running.
8255 	 */
8256 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8257 
8258 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8259 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8260 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8261 		rc = -ENODEV;
8262 		goto err_out_unmap;
8263 	}
8264 
8265 	bnx2_read_vpd_fw_ver(bp);
8266 
8267 	j = strlen(bp->fw_version);
8268 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
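	/* Append the bootcode revision as "bc x.y.z": the top three bytes
	 * of BNX2_DEV_INFO_BC_REV are printed in decimal with redundant
	 * leading zeros suppressed, so e.g. reg == 0x01020300 appends
	 * "bc 1.2.3".
	 */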
8269 	for (i = 0; i < 3 && j < 24; i++) {
8270 		u8 num, k, skip0;
8271 
8272 		if (i == 0) {
8273 			bp->fw_version[j++] = 'b';
8274 			bp->fw_version[j++] = 'c';
8275 			bp->fw_version[j++] = ' ';
8276 		}
8277 		num = (u8) (reg >> (24 - (i * 8)));
8278 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8279 			if (num >= k || !skip0 || k == 1) {
8280 				bp->fw_version[j++] = (num / k) + '0';
8281 				skip0 = 0;
8282 			}
8283 		}
8284 		if (i != 2)
8285 			bp->fw_version[j++] = '.';
8286 	}
8287 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8288 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8289 		bp->wol = 1;
8290 
8291 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8292 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8293 
8294 		for (i = 0; i < 30; i++) {
8295 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8296 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8297 				break;
8298 			msleep(10);
8299 		}
8300 	}
8301 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8302 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8303 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8304 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8305 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8306 
8307 		if (j < 32)
8308 			bp->fw_version[j++] = ' ';
8309 		for (i = 0; i < 3 && j < 28; i++) {
8310 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8311 			reg = be32_to_cpu(reg);
8312 			memcpy(&bp->fw_version[j], &reg, 4);
8313 			j += 4;
8314 		}
8315 	}
8316 
8317 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8318 	bp->mac_addr[0] = (u8) (reg >> 8);
8319 	bp->mac_addr[1] = (u8) reg;
8320 
8321 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8322 	bp->mac_addr[2] = (u8) (reg >> 24);
8323 	bp->mac_addr[3] = (u8) (reg >> 16);
8324 	bp->mac_addr[4] = (u8) (reg >> 8);
8325 	bp->mac_addr[5] = (u8) reg;
8326 
8327 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8328 	bnx2_set_rx_ring_size(bp, 255);
8329 
8330 	bp->tx_quick_cons_trip_int = 2;
8331 	bp->tx_quick_cons_trip = 20;
8332 	bp->tx_ticks_int = 18;
8333 	bp->tx_ticks = 80;
8334 
8335 	bp->rx_quick_cons_trip_int = 2;
8336 	bp->rx_quick_cons_trip = 12;
8337 	bp->rx_ticks_int = 18;
8338 	bp->rx_ticks = 18;
8339 
8340 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8341 
8342 	bp->current_interval = BNX2_TIMER_INTERVAL;
8343 
8344 	bp->phy_addr = 1;
8345 
8346 	/* Determine the media type; SERDES chips may have WOL disabled below. */
8347 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8348 		bnx2_get_5709_media(bp);
8349 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8350 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8351 
8352 	bp->phy_port = PORT_TP;
8353 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8354 		bp->phy_port = PORT_FIBRE;
8355 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8356 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8357 			bp->flags |= BNX2_FLAG_NO_WOL;
8358 			bp->wol = 0;
8359 		}
8360 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8361 			/* Don't do parallel detect on this board because of
8362 			 * some board problems.  The link will not go down
8363 			 * if we do parallel detect.
8364 			 */
8365 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8366 			    pdev->subsystem_device == 0x310c)
8367 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8368 		} else {
8369 			bp->phy_addr = 2;
8370 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8371 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8372 		}
8373 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8374 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8375 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8376 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8377 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8378 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8379 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8380 
8381 	bnx2_init_fw_cap(bp);
8382 
8383 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8384 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8385 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8386 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8387 		bp->flags |= BNX2_FLAG_NO_WOL;
8388 		bp->wol = 0;
8389 	}
8390 
8391 	if (bp->flags & BNX2_FLAG_NO_WOL)
8392 		device_set_wakeup_capable(&bp->pdev->dev, false);
8393 	else
8394 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8395 
8396 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8397 		bp->tx_quick_cons_trip_int =
8398 			bp->tx_quick_cons_trip;
8399 		bp->tx_ticks_int = bp->tx_ticks;
8400 		bp->rx_quick_cons_trip_int =
8401 			bp->rx_quick_cons_trip;
8402 		bp->rx_ticks_int = bp->rx_ticks;
8403 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8404 		bp->com_ticks_int = bp->com_ticks;
8405 		bp->cmd_ticks_int = bp->cmd_ticks;
8406 	}
8407 
8408 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8409 	 *
8410 	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8411 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8412 	 * but causes problems on the AMD 8132 which will eventually stop
8413 	 * responding after a while.
8414 	 *
8415 	 * AMD believes this incompatibility is unique to the 5706, and
8416 	 * prefers to locally disable MSI rather than globally disabling it.
8417 	 */
8418 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8419 		struct pci_dev *amd_8132 = NULL;
8420 
8421 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8422 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8423 						  amd_8132))) {
8424 
8425 			if (amd_8132->revision >= 0x10 &&
8426 			    amd_8132->revision <= 0x13) {
8427 				disable_msi = 1;
8428 				pci_dev_put(amd_8132);
8429 				break;
8430 			}
8431 		}
8432 	}
8433 
8434 	bnx2_set_default_link(bp);
8435 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8436 
8437 	init_timer(&bp->timer);
8438 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8439 	bp->timer.data = (unsigned long) bp;
8440 	bp->timer.function = bnx2_timer;
8441 
8442 #ifdef BCM_CNIC
8443 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8444 		bp->cnic_eth_dev.max_iscsi_conn =
8445 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8446 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8447 	bp->cnic_probe = bnx2_cnic_probe;
8448 #endif
8449 	pci_save_state(pdev);
8450 
8451 	return 0;
8452 
8453 err_out_unmap:
8454 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8455 		pci_disable_pcie_error_reporting(pdev);
8456 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8457 	}
8458 
8459 	pci_iounmap(pdev, bp->regview);
8460 	bp->regview = NULL;
8461 
8462 err_out_release:
8463 	pci_release_regions(pdev);
8464 
8465 err_out_disable:
8466 	pci_disable_device(pdev);
8467 
8468 err_out:
8469 	return rc;
8470 }
8471 
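/* Format the bus type for the probe banner, e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz".  The caller supplies the buffer
 * (bnx2_init_one() passes a 40-byte array, comfortably larger than the
 * longest string produced here).
 */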
8472 static char *
8473 bnx2_bus_string(struct bnx2 *bp, char *str)
8474 {
8475 	char *s = str;
8476 
8477 	if (bp->flags & BNX2_FLAG_PCIE) {
8478 		s += sprintf(s, "PCI Express");
8479 	} else {
8480 		s += sprintf(s, "PCI");
8481 		if (bp->flags & BNX2_FLAG_PCIX)
8482 			s += sprintf(s, "-X");
8483 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8484 			s += sprintf(s, " 32-bit");
8485 		else
8486 			s += sprintf(s, " 64-bit");
8487 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8488 	}
8489 	return str;
8490 }
8491 
8492 static void
8493 bnx2_del_napi(struct bnx2 *bp)
8494 {
8495 	int i;
8496 
8497 	for (i = 0; i < bp->irq_nvecs; i++)
8498 		netif_napi_del(&bp->bnx2_napi[i].napi);
8499 }
8500 
8501 static void
8502 bnx2_init_napi(struct bnx2 *bp)
8503 {
8504 	int i;
8505 
8506 	for (i = 0; i < bp->irq_nvecs; i++) {
8507 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8508 		int (*poll)(struct napi_struct *, int);
8509 
8510 		if (i == 0)
8511 			poll = bnx2_poll;
8512 		else
8513 			poll = bnx2_poll_msix;
8514 
8515 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8516 		bnapi->bp = bp;
8517 	}
8518 }
8519 
8520 static const struct net_device_ops bnx2_netdev_ops = {
8521 	.ndo_open		= bnx2_open,
8522 	.ndo_start_xmit		= bnx2_start_xmit,
8523 	.ndo_stop		= bnx2_close,
8524 	.ndo_get_stats64	= bnx2_get_stats64,
8525 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8526 	.ndo_do_ioctl		= bnx2_ioctl,
8527 	.ndo_validate_addr	= eth_validate_addr,
8528 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8529 	.ndo_change_mtu		= bnx2_change_mtu,
8530 	.ndo_fix_features	= bnx2_fix_features,
8531 	.ndo_set_features	= bnx2_set_features,
8532 	.ndo_tx_timeout		= bnx2_tx_timeout,
8533 #ifdef CONFIG_NET_POLL_CONTROLLER
8534 	.ndo_poll_controller	= poll_bnx2,
8535 #endif
8536 };
8537 
8538 static int
8539 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8540 {
8541 	static int version_printed = 0;
8542 	struct net_device *dev;
8543 	struct bnx2 *bp;
8544 	int rc;
8545 	char str[40];
8546 
8547 	if (version_printed++ == 0)
8548 		pr_info("%s", version);
8549 
8550 	/* dev is zeroed by alloc_etherdev_mq() */
8551 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8552 	if (!dev)
8553 		return -ENOMEM;
8554 
8555 	rc = bnx2_init_board(pdev, dev);
8556 	if (rc < 0)
8557 		goto err_free;
8558 
8559 	dev->netdev_ops = &bnx2_netdev_ops;
8560 	dev->watchdog_timeo = TX_TIMEOUT;
8561 	dev->ethtool_ops = &bnx2_ethtool_ops;
8562 
8563 	bp = netdev_priv(dev);
8564 
8565 	pci_set_drvdata(pdev, dev);
8566 
8567 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8568 
8569 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8570 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8571 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8572 
8573 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8574 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8575 
8576 	dev->vlan_features = dev->hw_features;
8577 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8578 	dev->features |= dev->hw_features;
8579 	dev->priv_flags |= IFF_UNICAST_FLT;
8580 
8581 	if ((rc = register_netdev(dev))) {
8582 		dev_err(&pdev->dev, "Cannot register net device\n");
8583 		goto error;
8584 	}
8585 
8586 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8587 		    "node addr %pM\n", board_info[ent->driver_data].name,
8588 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8589 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8590 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8591 		    pdev->irq, dev->dev_addr);
8592 
8593 	return 0;
8594 
8595 error:
8596 	pci_iounmap(pdev, bp->regview);
8597 	pci_release_regions(pdev);
8598 	pci_disable_device(pdev);
8599 err_free:
8600 	free_netdev(dev);
8601 	return rc;
8602 }
8603 
8604 static void
8605 bnx2_remove_one(struct pci_dev *pdev)
8606 {
8607 	struct net_device *dev = pci_get_drvdata(pdev);
8608 	struct bnx2 *bp = netdev_priv(dev);
8609 
8610 	unregister_netdev(dev);
8611 
8612 	del_timer_sync(&bp->timer);
8613 	cancel_work_sync(&bp->reset_task);
8614 
8615 	pci_iounmap(bp->pdev, bp->regview);
8616 
8617 	kfree(bp->temp_stats_blk);
8618 
8619 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8620 		pci_disable_pcie_error_reporting(pdev);
8621 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8622 	}
8623 
8624 	bnx2_release_firmware(bp);
8625 
8626 	free_netdev(dev);
8627 
8628 	pci_release_regions(pdev);
8629 	pci_disable_device(pdev);
8630 }
8631 
8632 #ifdef CONFIG_PM_SLEEP
8633 static int
8634 bnx2_suspend(struct device *device)
8635 {
8636 	struct pci_dev *pdev = to_pci_dev(device);
8637 	struct net_device *dev = pci_get_drvdata(pdev);
8638 	struct bnx2 *bp = netdev_priv(dev);
8639 
8640 	if (netif_running(dev)) {
8641 		cancel_work_sync(&bp->reset_task);
8642 		bnx2_netif_stop(bp, true);
8643 		netif_device_detach(dev);
8644 		del_timer_sync(&bp->timer);
8645 		bnx2_shutdown_chip(bp);
8646 		__bnx2_free_irq(bp);
8647 		bnx2_free_skbs(bp);
8648 	}
8649 	bnx2_setup_wol(bp);
8650 	return 0;
8651 }
8652 
8653 static int
8654 bnx2_resume(struct device *device)
8655 {
8656 	struct pci_dev *pdev = to_pci_dev(device);
8657 	struct net_device *dev = pci_get_drvdata(pdev);
8658 	struct bnx2 *bp = netdev_priv(dev);
8659 
8660 	if (!netif_running(dev))
8661 		return 0;
8662 
8663 	bnx2_set_power_state(bp, PCI_D0);
8664 	netif_device_attach(dev);
8665 	bnx2_request_irq(bp);
8666 	bnx2_init_nic(bp, 1);
8667 	bnx2_netif_start(bp, true);
8668 	return 0;
8669 }
8670 
8671 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8672 #define BNX2_PM_OPS (&bnx2_pm_ops)
8673 
8674 #else
8675 
8676 #define BNX2_PM_OPS NULL
8677 
8678 #endif /* CONFIG_PM_SLEEP */

8679 /**
8680  * bnx2_io_error_detected - called when PCI error is detected
8681  * @pdev: Pointer to PCI device
8682  * @state: The current PCI connection state
8683  *
8684  * This function is called after a PCI bus error affecting
8685  * this device has been detected.
8686  */
8687 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8688 					       pci_channel_state_t state)
8689 {
8690 	struct net_device *dev = pci_get_drvdata(pdev);
8691 	struct bnx2 *bp = netdev_priv(dev);
8692 
8693 	rtnl_lock();
8694 	netif_device_detach(dev);
8695 
8696 	if (state == pci_channel_io_perm_failure) {
8697 		rtnl_unlock();
8698 		return PCI_ERS_RESULT_DISCONNECT;
8699 	}
8700 
8701 	if (netif_running(dev)) {
8702 		bnx2_netif_stop(bp, true);
8703 		del_timer_sync(&bp->timer);
8704 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8705 	}
8706 
8707 	pci_disable_device(pdev);
8708 	rtnl_unlock();
8709 
8710 	/* Request a slot reset. */
8711 	return PCI_ERS_RESULT_NEED_RESET;
8712 }
8713 
8714 /**
8715  * bnx2_io_slot_reset - called after the pci bus has been reset.
8716  * @pdev: Pointer to PCI device
8717  *
8718  * Restart the card from scratch, as if from a cold-boot.
8719  */
8720 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8721 {
8722 	struct net_device *dev = pci_get_drvdata(pdev);
8723 	struct bnx2 *bp = netdev_priv(dev);
8724 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8725 	int err = 0;
8726 
8727 	rtnl_lock();
8728 	if (pci_enable_device(pdev)) {
8729 		dev_err(&pdev->dev,
8730 			"Cannot re-enable PCI device after reset\n");
8731 	} else {
8732 		pci_set_master(pdev);
8733 		pci_restore_state(pdev);
8734 		pci_save_state(pdev);
8735 
8736 		if (netif_running(dev))
8737 			err = bnx2_init_nic(bp, 1);
8738 
8739 		if (!err)
8740 			result = PCI_ERS_RESULT_RECOVERED;
8741 	}
8742 
8743 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8744 		bnx2_napi_enable(bp);
8745 		dev_close(dev);
8746 	}
8747 	rtnl_unlock();
8748 
8749 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8750 		return result;
8751 
8752 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8753 	if (err) {
8754 		dev_err(&pdev->dev,
8755 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8756 			 err); /* non-fatal, continue */
8757 	}
8758 
8759 	return result;
8760 }
8761 
8762 /**
8763  * bnx2_io_resume - called when traffic can start flowing again.
8764  * @pdev: Pointer to PCI device
8765  *
8766  * This callback is called when the error recovery driver tells us that
8767  * it's OK to resume normal operation.
8768  */
8769 static void bnx2_io_resume(struct pci_dev *pdev)
8770 {
8771 	struct net_device *dev = pci_get_drvdata(pdev);
8772 	struct bnx2 *bp = netdev_priv(dev);
8773 
8774 	rtnl_lock();
8775 	if (netif_running(dev))
8776 		bnx2_netif_start(bp, true);
8777 
8778 	netif_device_attach(dev);
8779 	rtnl_unlock();
8780 }
8781 
8782 static void bnx2_shutdown(struct pci_dev *pdev)
8783 {
8784 	struct net_device *dev = pci_get_drvdata(pdev);
8785 	struct bnx2 *bp;
8786 
8787 	if (!dev)
8788 		return;
8789 
8790 	bp = netdev_priv(dev);
8791 	if (!bp)
8792 		return;
8793 
8794 	rtnl_lock();
8795 	if (netif_running(dev))
8796 		dev_close(bp->dev);
8797 
8798 	if (system_state == SYSTEM_POWER_OFF)
8799 		bnx2_set_power_state(bp, PCI_D3hot);
8800 
8801 	rtnl_unlock();
8802 }
8803 
8804 static const struct pci_error_handlers bnx2_err_handler = {
8805 	.error_detected	= bnx2_io_error_detected,
8806 	.slot_reset	= bnx2_io_slot_reset,
8807 	.resume		= bnx2_io_resume,
8808 };
8809 
8810 static struct pci_driver bnx2_pci_driver = {
8811 	.name		= DRV_MODULE_NAME,
8812 	.id_table	= bnx2_pci_tbl,
8813 	.probe		= bnx2_init_one,
8814 	.remove		= bnx2_remove_one,
8815 	.driver.pm	= BNX2_PM_OPS,
8816 	.err_handler	= &bnx2_err_handler,
8817 	.shutdown	= bnx2_shutdown,
8818 };
8819 
8820 module_pci_driver(bnx2_pci_driver);
8821