xref: /linux/drivers/net/ethernet/broadcom/bnx2.c (revision 99ce286d2d30a31eba4171036bc3f32eeb59e5f3)
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/crash_dump.h>
52 
53 #if IS_ENABLED(CONFIG_CNIC)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59 
60 #define DRV_MODULE_NAME		"bnx2"
61 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
62 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
63 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
64 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
66 
67 #define RUN_AT(x) (jiffies + (x))
68 
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT  (5*HZ)
71 
72 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
73 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
74 MODULE_LICENSE("GPL");
75 MODULE_FIRMWARE(FW_MIPS_FILE_06);
76 MODULE_FIRMWARE(FW_RV2P_FILE_06);
77 MODULE_FIRMWARE(FW_MIPS_FILE_09);
78 MODULE_FIRMWARE(FW_RV2P_FILE_09);
79 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
80 
81 static int disable_msi = 0;
82 
83 module_param(disable_msi, int, 0444);
84 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
85 
/* Board type.  These values are stored as driver_data in
 * bnx2_pci_tbl[] and used to index board_info[] below, so the order
 * of the enumerators must match that table exactly.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
99 
/* Human-readable adapter names, indexed by board_t, above.
 * Constified: every entry points at a string literal and the table is
 * only ever read, so both the array and the pointed-to strings should
 * be const (writing through a non-const pointer to a literal is UB).
 */
static const struct {
	const char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
116 
/* PCI IDs the driver claims.  The HP OEM boards (NC370x) are listed
 * before the PCI_ANY_ID catch-all entries for the same device ID so
 * the subsystem-specific match wins; the final field is the board_t
 * index into board_info[].  0x163b/0x163c are the BCM5716/BCM5716S
 * device IDs (no PCI_DEVICE_ID_* macro used here).
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
142 
/* NVRAM auto-detection table.  The first word of each entry encodes
 * the flash strapping value that identifies the part; the next four
 * are raw controller configuration/command register values
 * (NOTE(review): exact field names/order are in struct flash_spec in
 * bnx2.h — confirm there), followed by access flags, page geometry,
 * total size, and a printable name.  "Expansion entry" rows are
 * placeholders for strap values with no known part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
231 
/* The 5709 has a fixed buffered-flash interface, so it bypasses the
 * strap-based flash_table[] lookup and uses this single descriptor.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
240 
241 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
242 
243 static void bnx2_init_napi(struct bnx2 *bp);
244 static void bnx2_del_napi(struct bnx2 *bp);
245 
246 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
247 {
248 	u32 diff;
249 
250 	/* The ring uses 256 indices for 255 entries, one of them
251 	 * needs to be skipped.
252 	 */
253 	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
254 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
255 		diff &= 0xffff;
256 		if (diff == BNX2_TX_DESC_CNT)
257 			diff = BNX2_MAX_TX_DESC_CNT;
258 	}
259 	return bp->tx_ring_size - diff;
260 }
261 
262 static u32
263 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
264 {
265 	unsigned long flags;
266 	u32 val;
267 
268 	spin_lock_irqsave(&bp->indirect_lock, flags);
269 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
270 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
271 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
272 	return val;
273 }
274 
275 static void
276 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
277 {
278 	unsigned long flags;
279 
280 	spin_lock_irqsave(&bp->indirect_lock, flags);
281 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
282 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
283 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
284 }
285 
/* Write one word into the firmware shared-memory region (offsets are
 * relative to bp->shmem_base, discovered at probe time).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
291 
/* Read one word from the firmware shared-memory region; counterpart
 * of bnx2_shmem_wr().
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
297 
/* Write a 32-bit value into on-chip context memory at
 * @cid_addr + @offset.
 *
 * On the 5709 the context is written through the CTX_CTX_DATA /
 * CTX_CTX_CTRL register pair; the hardware clears WRITE_REQ when the
 * write has retired, so we poll it up to 5 times with 5us delays.
 * Older chips use a simple address/data register pair instead.
 * indirect_lock serializes the paired register accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the chip to consume the write; give up
		 * silently after ~25us (best effort, as in other
		 * drivers for this family).
		 */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
323 
324 #ifdef BCM_CNIC
325 static int
326 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
327 {
328 	struct bnx2 *bp = netdev_priv(dev);
329 	struct drv_ctl_io *io = &info->data.io;
330 
331 	switch (info->cmd) {
332 	case DRV_CTL_IO_WR_CMD:
333 		bnx2_reg_wr_ind(bp, io->offset, io->data);
334 		break;
335 	case DRV_CTL_IO_RD_CMD:
336 		io->data = bnx2_reg_rd_ind(bp, io->offset);
337 		break;
338 	case DRV_CTL_CTX_WR_CMD:
339 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
340 		break;
341 	default:
342 		return -EINVAL;
343 	}
344 	return 0;
345 }
346 
/* Describe to the CNIC driver which IRQ vector and status block it
 * should use.  With MSI-X, cnic gets the vector at index
 * bp->irq_nvecs (one past the net driver's own vectors) and its own
 * aligned status block; without MSI-X it shares vector 0 and the net
 * driver tracks cnic work via bnapi->cnic_tag / cnic_present.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out back to back at
	 * BNX2_SBLK_MSIX_ALIGN_SIZE intervals; pick the sb_id'th one.
	 */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
373 
374 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
375 			      void *data)
376 {
377 	struct bnx2 *bp = netdev_priv(dev);
378 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
379 
380 	if (!ops)
381 		return -EINVAL;
382 
383 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
384 		return -EBUSY;
385 
386 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
387 		return -ENODEV;
388 
389 	bp->cnic_data = data;
390 	rcu_assign_pointer(bp->cnic_ops, ops);
391 
392 	cp->num_irq = 0;
393 	cp->drv_state = CNIC_DRV_STATE_REGD;
394 
395 	bnx2_setup_cnic_irq_info(bp);
396 
397 	return 0;
398 }
399 
/* cnic hook: detach the CNIC driver.  State is cleared under
 * cnic_lock, the ops pointer is nulled for RCU readers, and
 * synchronize_rcu() guarantees no reader still uses the old ops
 * before we return.  Always succeeds.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
414 
415 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
416 {
417 	struct bnx2 *bp = netdev_priv(dev);
418 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
419 
420 	if (!cp->max_iscsi_conn)
421 		return NULL;
422 
423 	cp->drv_owner = THIS_MODULE;
424 	cp->chip_id = bp->chip_id;
425 	cp->pdev = bp->pdev;
426 	cp->io_base = bp->regview;
427 	cp->drv_ctl = bnx2_drv_ctl;
428 	cp->drv_register_cnic = bnx2_register_cnic;
429 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
430 
431 	return cp;
432 }
433 
434 static void
435 bnx2_cnic_stop(struct bnx2 *bp)
436 {
437 	struct cnic_ops *c_ops;
438 	struct cnic_ctl_info info;
439 
440 	mutex_lock(&bp->cnic_lock);
441 	c_ops = rcu_dereference_protected(bp->cnic_ops,
442 					  lockdep_is_held(&bp->cnic_lock));
443 	if (c_ops) {
444 		info.cmd = CNIC_CTL_STOP_CMD;
445 		c_ops->cnic_ctl(bp->cnic_data, &info);
446 	}
447 	mutex_unlock(&bp->cnic_lock);
448 }
449 
450 static void
451 bnx2_cnic_start(struct bnx2 *bp)
452 {
453 	struct cnic_ops *c_ops;
454 	struct cnic_ctl_info info;
455 
456 	mutex_lock(&bp->cnic_lock);
457 	c_ops = rcu_dereference_protected(bp->cnic_ops,
458 					  lockdep_is_held(&bp->cnic_lock));
459 	if (c_ops) {
460 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
461 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
462 
463 			bnapi->cnic_tag = bnapi->last_status_idx;
464 		}
465 		info.cmd = CNIC_CTL_START_CMD;
466 		c_ops->cnic_ctl(bp->cnic_data, &info);
467 	}
468 	mutex_unlock(&bp->cnic_lock);
469 }
470 
471 #else
472 
/* No-op stub when the driver is built without CONFIG_CNIC. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
477 
/* No-op stub when the driver is built without CONFIG_CNIC. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
482 
483 #endif
484 
485 static int
486 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
487 {
488 	u32 val1;
489 	int i, ret;
490 
491 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
492 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
493 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
494 
495 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
496 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
497 
498 		udelay(40);
499 	}
500 
501 	val1 = (bp->phy_addr << 21) | (reg << 16) |
502 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
503 		BNX2_EMAC_MDIO_COMM_START_BUSY;
504 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
505 
506 	for (i = 0; i < 50; i++) {
507 		udelay(10);
508 
509 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
510 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
511 			udelay(5);
512 
513 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
514 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
515 
516 			break;
517 		}
518 	}
519 
520 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
521 		*val = 0x0;
522 		ret = -EBUSY;
523 	}
524 	else {
525 		*val = val1;
526 		ret = 0;
527 	}
528 
529 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
530 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
531 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
532 
533 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
534 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
535 
536 		udelay(40);
537 	}
538 
539 	return ret;
540 }
541 
542 static int
543 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
544 {
545 	u32 val1;
546 	int i, ret;
547 
548 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
549 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
550 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
551 
552 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
553 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
554 
555 		udelay(40);
556 	}
557 
558 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
559 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
560 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
561 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
562 
563 	for (i = 0; i < 50; i++) {
564 		udelay(10);
565 
566 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
567 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
568 			udelay(5);
569 			break;
570 		}
571 	}
572 
573 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
574 		ret = -EBUSY;
575 	else
576 		ret = 0;
577 
578 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
579 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
580 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
581 
582 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
583 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
584 
585 		udelay(40);
586 	}
587 
588 	return ret;
589 }
590 
591 static void
592 bnx2_disable_int(struct bnx2 *bp)
593 {
594 	int i;
595 	struct bnx2_napi *bnapi;
596 
597 	for (i = 0; i < bp->irq_nvecs; i++) {
598 		bnapi = &bp->bnx2_napi[i];
599 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
600 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
601 	}
602 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
603 }
604 
/* Unmask interrupts on all vectors.  Each vector is written twice:
 * first acking the latest status index with MASK_INT still set, then
 * again without MASK_INT to actually unmask.  The final COAL_NOW kick
 * to the host coalescing block presumably forces an interrupt for any
 * events that arrived while masked — NOTE(review): confirm against
 * the HC register documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
625 
/* Disable interrupts and wait for all in-flight handlers to finish.
 * intr_sem is raised before anything else so that a concurrent
 * bnx2_netif_start() will not re-enable interrupts until the matching
 * decrement; when the device isn't running there is nothing to sync.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
639 
640 static void
641 bnx2_napi_disable(struct bnx2 *bp)
642 {
643 	int i;
644 
645 	for (i = 0; i < bp->irq_nvecs; i++)
646 		napi_disable(&bp->bnx2_napi[i].napi);
647 }
648 
649 static void
650 bnx2_napi_enable(struct bnx2 *bp)
651 {
652 	int i;
653 
654 	for (i = 0; i < bp->irq_nvecs; i++)
655 		napi_enable(&bp->bnx2_napi[i].napi);
656 }
657 
/* Quiesce the interface: optionally stop the CNIC driver first, then
 * stop NAPI and the TX queues, disable and synchronize interrupts,
 * and finally drop the carrier so the stack's TX watchdog does not
 * fire while we are down.  Order matters: NAPI/TX must stop before
 * interrupts are synchronized.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
670 
/* Undo bnx2_netif_stop().  intr_sem balances the increment done in
 * bnx2_disable_int_sync(); the interface is only restarted when this
 * is the last outstanding stop (the counter reaches zero).  Carrier
 * state is restored under phy_lock to stay consistent with link
 * handling, then NAPI, interrupts and (optionally) CNIC resume.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
688 
689 static void
690 bnx2_free_tx_mem(struct bnx2 *bp)
691 {
692 	int i;
693 
694 	for (i = 0; i < bp->num_tx_rings; i++) {
695 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
696 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
697 
698 		if (txr->tx_desc_ring) {
699 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
700 					  txr->tx_desc_ring,
701 					  txr->tx_desc_mapping);
702 			txr->tx_desc_ring = NULL;
703 		}
704 		kfree(txr->tx_buf_ring);
705 		txr->tx_buf_ring = NULL;
706 	}
707 }
708 
709 static void
710 bnx2_free_rx_mem(struct bnx2 *bp)
711 {
712 	int i;
713 
714 	for (i = 0; i < bp->num_rx_rings; i++) {
715 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
716 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
717 		int j;
718 
719 		for (j = 0; j < bp->rx_max_ring; j++) {
720 			if (rxr->rx_desc_ring[j])
721 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
722 						  rxr->rx_desc_ring[j],
723 						  rxr->rx_desc_mapping[j]);
724 			rxr->rx_desc_ring[j] = NULL;
725 		}
726 		vfree(rxr->rx_buf_ring);
727 		rxr->rx_buf_ring = NULL;
728 
729 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
730 			if (rxr->rx_pg_desc_ring[j])
731 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
732 						  rxr->rx_pg_desc_ring[j],
733 						  rxr->rx_pg_desc_mapping[j]);
734 			rxr->rx_pg_desc_ring[j] = NULL;
735 		}
736 		vfree(rxr->rx_pg_ring);
737 		rxr->rx_pg_ring = NULL;
738 	}
739 }
740 
741 static int
742 bnx2_alloc_tx_mem(struct bnx2 *bp)
743 {
744 	int i;
745 
746 	for (i = 0; i < bp->num_tx_rings; i++) {
747 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
748 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
749 
750 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
751 		if (!txr->tx_buf_ring)
752 			return -ENOMEM;
753 
754 		txr->tx_desc_ring =
755 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
756 					   &txr->tx_desc_mapping, GFP_KERNEL);
757 		if (!txr->tx_desc_ring)
758 			return -ENOMEM;
759 	}
760 	return 0;
761 }
762 
763 static int
764 bnx2_alloc_rx_mem(struct bnx2 *bp)
765 {
766 	int i;
767 
768 	for (i = 0; i < bp->num_rx_rings; i++) {
769 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
770 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
771 		int j;
772 
773 		rxr->rx_buf_ring =
774 			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
775 		if (!rxr->rx_buf_ring)
776 			return -ENOMEM;
777 
778 		for (j = 0; j < bp->rx_max_ring; j++) {
779 			rxr->rx_desc_ring[j] =
780 				dma_alloc_coherent(&bp->pdev->dev,
781 						   RXBD_RING_SIZE,
782 						   &rxr->rx_desc_mapping[j],
783 						   GFP_KERNEL);
784 			if (!rxr->rx_desc_ring[j])
785 				return -ENOMEM;
786 
787 		}
788 
789 		if (bp->rx_pg_ring_size) {
790 			rxr->rx_pg_ring =
791 				vzalloc(array_size(SW_RXPG_RING_SIZE,
792 						   bp->rx_max_pg_ring));
793 			if (!rxr->rx_pg_ring)
794 				return -ENOMEM;
795 
796 		}
797 
798 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
799 			rxr->rx_pg_desc_ring[j] =
800 				dma_alloc_coherent(&bp->pdev->dev,
801 						   RXBD_RING_SIZE,
802 						   &rxr->rx_pg_desc_mapping[j],
803 						   GFP_KERNEL);
804 			if (!rxr->rx_pg_desc_ring[j])
805 				return -ENOMEM;
806 
807 		}
808 	}
809 	return 0;
810 }
811 
812 static void
813 bnx2_free_stats_blk(struct net_device *dev)
814 {
815 	struct bnx2 *bp = netdev_priv(dev);
816 
817 	if (bp->status_blk) {
818 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
819 				  bp->status_blk,
820 				  bp->status_blk_mapping);
821 		bp->status_blk = NULL;
822 		bp->stats_blk = NULL;
823 	}
824 }
825 
826 static int
827 bnx2_alloc_stats_blk(struct net_device *dev)
828 {
829 	int status_blk_size;
830 	void *status_blk;
831 	struct bnx2 *bp = netdev_priv(dev);
832 
833 	/* Combine status and statistics blocks into one allocation. */
834 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
835 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
836 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
837 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
838 	bp->status_stats_size = status_blk_size +
839 				sizeof(struct statistics_block);
840 	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
841 					&bp->status_blk_mapping, GFP_KERNEL);
842 	if (!status_blk)
843 		return -ENOMEM;
844 
845 	bp->status_blk = status_blk;
846 	bp->stats_blk = status_blk + status_blk_size;
847 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
848 
849 	return 0;
850 }
851 
852 static void
853 bnx2_free_mem(struct bnx2 *bp)
854 {
855 	int i;
856 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
857 
858 	bnx2_free_tx_mem(bp);
859 	bnx2_free_rx_mem(bp);
860 
861 	for (i = 0; i < bp->ctx_pages; i++) {
862 		if (bp->ctx_blk[i]) {
863 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
864 					  bp->ctx_blk[i],
865 					  bp->ctx_blk_mapping[i]);
866 			bp->ctx_blk[i] = NULL;
867 		}
868 	}
869 
870 	if (bnapi->status_blk.msi)
871 		bnapi->status_blk.msi = NULL;
872 }
873 
/* Wire up per-vector status-block pointers and allocate the remaining
 * DMA memory: 5709 context pages, then RX and TX rings.  On any
 * failure everything allocated so far is released and -ENOMEM is
 * returned.  Assumes bp->status_blk was already allocated by
 * bnx2_alloc_stats_blk().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 uses the base (MSI/INTx) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors each get their own aligned MSI-X
		 * status block within the same allocation.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* int_num is the vector number in bits 31:24 of
			 * the INT_ACK_CMD register writes.
			 */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 0x2000 bytes of context memory, split into
		 * BNX2_PAGE_SIZE pages (at least one).
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
930 
/* Report the current link state to the bootcode via shared memory so
 * firmware (e.g. for management traffic) knows the negotiated speed,
 * duplex and autoneg status.  Skipped entirely when the PHY is
 * managed remotely.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Map speed + duplex onto the firmware's encoding. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice back to back — presumably
			 * because the register latches status and the
			 * second read returns the current value; confirm
			 * against the PHY datasheet.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
989 
990 static char *
991 bnx2_xceiver_str(struct bnx2 *bp)
992 {
993 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
994 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
995 		 "Copper");
996 }
997 
/* Log the link state, update the carrier flag, and forward the state
 * to firmware via bnx2_report_fw_link().  The log line is assembled
 * with pr_cont(), so the pieces below concatenate into a single
 * message — do not reorder them.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1028 
/* Resolve the effective flow-control setting into bp->flow_ctrl.
 *
 * If speed/flow-control autoneg is not fully enabled, the requested
 * setting is used directly (full duplex only).  Otherwise the result
 * is derived from the local and link-partner pause advertisements.
 * The 5708 SerDes reports the resolved result directly in a status
 * register; SerDes advertisement bits are first translated to the
 * copper ADVERTISE_PAUSE_* encoding so one resolution path serves
 * both media.  Pause is only meaningful at full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: hardware already resolved pause; read it back. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000Base-X pause bits to the copper encoding. */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1104 
/* Record line speed and duplex for a 5709 SerDes PHY that has link.
 * The result is read from the GP_STATUS PHY block; the block address
 * register is restored to COMBO_IEEEB0 before returning.  Always
 * returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Select GP_STATUS, read TOP_AN_STATUS1, restore default block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested values, not the PHY status. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1143 
1144 static int
1145 bnx2_5708s_linkup(struct bnx2 *bp)
1146 {
1147 	u32 val;
1148 
1149 	bp->link_up = 1;
1150 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1151 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1152 		case BCM5708S_1000X_STAT1_SPEED_10:
1153 			bp->line_speed = SPEED_10;
1154 			break;
1155 		case BCM5708S_1000X_STAT1_SPEED_100:
1156 			bp->line_speed = SPEED_100;
1157 			break;
1158 		case BCM5708S_1000X_STAT1_SPEED_1G:
1159 			bp->line_speed = SPEED_1000;
1160 			break;
1161 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1162 			bp->line_speed = SPEED_2500;
1163 			break;
1164 	}
1165 	if (val & BCM5708S_1000X_STAT1_FD)
1166 		bp->duplex = DUPLEX_FULL;
1167 	else
1168 		bp->duplex = DUPLEX_HALF;
1169 
1170 	return 0;
1171 }
1172 
1173 static int
1174 bnx2_5706s_linkup(struct bnx2 *bp)
1175 {
1176 	u32 bmcr, local_adv, remote_adv, common;
1177 
1178 	bp->link_up = 1;
1179 	bp->line_speed = SPEED_1000;
1180 
1181 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1182 	if (bmcr & BMCR_FULLDPLX) {
1183 		bp->duplex = DUPLEX_FULL;
1184 	}
1185 	else {
1186 		bp->duplex = DUPLEX_HALF;
1187 	}
1188 
1189 	if (!(bmcr & BMCR_ANENABLE)) {
1190 		return 0;
1191 	}
1192 
1193 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1194 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1195 
1196 	common = local_adv & remote_adv;
1197 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1198 
1199 		if (common & ADVERTISE_1000XFULL) {
1200 			bp->duplex = DUPLEX_FULL;
1201 		}
1202 		else {
1203 			bp->duplex = DUPLEX_HALF;
1204 		}
1205 	}
1206 
1207 	return 0;
1208 }
1209 
/* Record line speed, duplex, and MDI-X state for a copper PHY that has
 * link.  With autoneg enabled, the highest common ability wins (1000,
 * then 100, then 10); with autoneg off, speed/duplex come straight
 * from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T first.  The link-partner ability bits in
		 * MII_STAT1000 sit two bits above the corresponding
		 * advertisement bits in MII_CTRL1000, hence the >> 2 to
		 * align them before AND-ing.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the 10/100
			 * advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: forced speed/duplex are encoded in BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Latch whether the PHY resolved to crossover (MDI-X). */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1285 
1286 static void
1287 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1288 {
1289 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1290 
1291 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1292 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1293 	val |= 0x02 << 8;
1294 
1295 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1296 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1297 
1298 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1299 }
1300 
1301 static void
1302 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1303 {
1304 	int i;
1305 	u32 cid;
1306 
1307 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1308 		if (i == 1)
1309 			cid = RX_RSS_CID;
1310 		bnx2_init_rx_context(bp, cid);
1311 	}
1312 }
1313 
/* Program the EMAC to match the current link state in bp (speed,
 * duplex, pause), then ack the link-change interrupt and refresh the
 * rx contexts so their flow-control setting stays in sync.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* EMAC_TX_LENGTHS values are chip-specific timing settings;
	 * 0x26ff is used only for 1G half duplex, 0x2620 otherwise.
	 * (Values from Broadcom — assumed IPG/slot-time related.)
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode and
				 * uses plain MII for 10M as well.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				fallthrough;
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G_MODE bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				fallthrough;
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* TX flow control is also latched into the rx contexts. */
	bnx2_init_all_rx_contexts(bp);
}
1380 
1381 static void
1382 bnx2_enable_bmsr1(struct bnx2 *bp)
1383 {
1384 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1385 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1386 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1387 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1388 }
1389 
1390 static void
1391 bnx2_disable_bmsr1(struct bnx2 *bp)
1392 {
1393 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1394 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1395 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1396 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1397 }
1398 
/* Make sure 2.5G advertisement is enabled in the PHY's UP1 register.
 * Returns 1 if it was already enabled, 0 if it had to be turned on
 * just now or if the PHY is not 2.5G capable.  On the 5709 the UP1
 * register is in the OVER1G block, which is selected and then
 * restored.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* Keep the software advertisement mask in sync. */
	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default block address on 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1427 
/* Make sure 2.5G advertisement is disabled in the PHY's UP1 register.
 * Returns 1 if it was enabled and had to be cleared, 0 otherwise
 * (including when the PHY is not 2.5G capable).  On the 5709 the UP1
 * register is in the OVER1G block, which is selected and then
 * restored.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default block address on 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1453 
/* Force the SerDes PHY to 2.5G.  On the 5709 this is done through the
 * SERDES_DIG_MISC1 force bits; on the 5708 through a vendor bit in
 * BMCR.  Other chips (and non-2.5G-capable PHYs) are left untouched.
 * When autoneg speed was requested, autoneg is disabled in BMCR since
 * forcing and autonegotiation are mutually exclusive.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Set the force-speed field to 2.5G in the SERDES_DIG
		 * block, then restore the default block address.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Could not read BMCR; leave it alone. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1497 
/* Undo bnx2_enable_forced_2g5(): clear the 2.5G force configuration
 * and, if autoneg speed was requested, re-enable and restart
 * autonegotiation at 1G.  Mirrors the chip-specific paths of the
 * enable function.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block, then
		 * restore the default block address.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Could not read BMCR; leave it alone. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1536 
/* Read-modify-write the 5706S SerDes control expansion register via
 * the DSP access registers.  With start == 0 the bits 0xc0 are set,
 * with start != 0 the bits 0x00f0 are cleared.  The magic values come
 * from Broadcom; presumably they force the SerDes link down / restore
 * it — TODO confirm against callers and chip documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	/* Address the SERDES_CTL expansion register, then access its
	 * contents through the DSP read/write port.
	 */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1549 
/* Central link-state update: sample the PHY, update bp->link_up and
 * the derived speed/duplex/flow-control state, report transitions,
 * and reprogram the MAC.  Called with bp->phy_lock held.  Always
 * returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY mode: firmware owns the PHY; link changes arrive
	 * as firmware events instead (bnx2_remote_phy_event).
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events; read it twice to get the
	 * current status.  On 5709 SerDes the register is in another
	 * block, hence the enable/disable bracket.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link from the EMAC status and
	 * the autoneg-debug NOSYNC indication instead of trusting BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is a latched shadow register; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Fill in speed/duplex via the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop back from forced 2.5G so autoneg can
		 * find a partner at any speed.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* If we had linked up via parallel detection, go back to
		 * autonegotiation now that the link is gone.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/notify on actual transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1633 
1634 static int
1635 bnx2_reset_phy(struct bnx2 *bp)
1636 {
1637 	int i;
1638 	u32 reg;
1639 
1640         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1641 
1642 #define PHY_RESET_MAX_WAIT 100
1643 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1644 		udelay(10);
1645 
1646 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1647 		if (!(reg & BMCR_RESET)) {
1648 			udelay(20);
1649 			break;
1650 		}
1651 	}
1652 	if (i == PHY_RESET_MAX_WAIT) {
1653 		return -EBUSY;
1654 	}
1655 	return 0;
1656 }
1657 
1658 static u32
1659 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1660 {
1661 	u32 adv = 0;
1662 
1663 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1664 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1665 
1666 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667 			adv = ADVERTISE_1000XPAUSE;
1668 		}
1669 		else {
1670 			adv = ADVERTISE_PAUSE_CAP;
1671 		}
1672 	}
1673 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1674 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675 			adv = ADVERTISE_1000XPSE_ASYM;
1676 		}
1677 		else {
1678 			adv = ADVERTISE_PAUSE_ASYM;
1679 		}
1680 	}
1681 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1682 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1683 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1684 		}
1685 		else {
1686 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1687 		}
1688 	}
1689 	return adv;
1690 }
1691 
1692 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1693 
/* Configure the link settings of a firmware-managed (remote) PHY by
 * encoding the requested speed/duplex/pause into a netlink-style
 * bitmask, writing it to the driver mailbox, and issuing a SET_LINK
 * firmware command.  bp->phy_lock is dropped around the (sleeping)
 * firmware sync and reacquired, as annotated.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced: encode exactly one speed/duplex combination. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	/* Copper additionally enables remote-PHY app and wirespeed. */
	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() can sleep; drop the phy_lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1752 
/* Configure a SerDes PHY according to the requested link parameters.
 * Remote (firmware-managed) PHYs are delegated to
 * bnx2_setup_remote_phy().  For forced speed the PHY is programmed
 * directly; for autoneg the advertisement is rewritten and autoneg
 * restarted when anything changed.  bp->phy_lock may be dropped and
 * reacquired for the forced-link-down sleep, as annotated.  Always
 * returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific 2.5G force configuration. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just refresh pause and MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1869 
/* All fibre speeds this NIC can advertise via ethtool; 2.5G only when
 * the PHY is 2.5G capable.  NOTE: expands against a local variable
 * named 'bp' in the using function.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds this NIC can advertise via ethtool. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* Mask of all 10/100 ability bits (plus CSMA) in MII_ADVERTISE. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* Mask of all 1000BASE-T ability bits in MII_CTRL1000. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1884 
/* Initialize the default link parameters of a firmware-managed PHY
 * from the per-port-type defaults stored in shared memory, decoding
 * the netlink-style bitmask into autoneg/advertising or a forced
 * speed/duplex.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: translate each speed bit into the
		 * corresponding ethtool advertisement flag.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default: the highest speed bit present wins
		 * (later checks overwrite earlier ones).
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1931 
/* Initialize the default link parameters: autoneg with all supported
 * speeds advertised, unless the hardware config in shared memory
 * requests a forced 1G default on SerDes.  Remote PHYs use the
 * firmware-provided defaults instead.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* NVRAM hardware config may override with forced 1G. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1957 
/* Send a driver-alive pulse to the firmware by writing the next pulse
 * sequence number to the pulse mailbox in shared memory.  The write
 * goes through the PCI config register window, so indirect_lock
 * serializes it against other window users.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1971 
/* Handle a link event from a firmware-managed PHY: decode the link
 * status word from shared memory into bp's link/speed/duplex/pause
 * state, report a transition if any, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets half duplex then falls through to
		 * the matching FULL case for the speed itself.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Same policy as bnx2_resolve_flow_ctrl(): forced pause
		 * unless both speed and pause were autonegotiated, in
		 * which case the firmware-reported result is used.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware can switch between the serdes and copper
		 * media; reload the defaults if the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2052 
2053 static int
2054 bnx2_set_remote_link(struct bnx2 *bp)
2055 {
2056 	u32 evt_code;
2057 
2058 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2059 	switch (evt_code) {
2060 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2061 			bnx2_remote_phy_event(bp);
2062 			break;
2063 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2064 		default:
2065 			bnx2_send_heart_beat(bp);
2066 			break;
2067 	}
2068 	return 0;
2069 }
2070 
/* Configure a copper PHY according to the requested link parameters.
 * With autoneg, the 10/100 and 1000 advertisement registers are
 * rewritten and autoneg restarted only when something changed.  With
 * forced speed, the link may be bounced (via loopback) so the partner
 * notices the change.  bp->phy_lock is dropped and reacquired around
 * the sleep, as annotated.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Mask the current advertisement down to the bits we manage. */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Only rewrite/restart when the advertisement actually
		 * changed or autoneg was off.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2162 
2163 static int
2164 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2165 __releases(&bp->phy_lock)
2166 __acquires(&bp->phy_lock)
2167 {
2168 	if (bp->loopback == MAC_LOOPBACK)
2169 		return 0;
2170 
2171 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2172 		return bnx2_setup_serdes_phy(bp, port);
2173 	}
2174 	else {
2175 		return bnx2_setup_copper_phy(bp);
2176 	}
2177 }
2178 
/* One-time initialization of the 5709 SerDes PHY.  Sets up the
 * shifted MII register offsets used by this PHY, selects the AN MMD
 * through the AER block, optionally resets the PHY, and programs
 * fiber mode, 2.5G capability, and next-page/CL73 BAM settings.
 * The block address is left at COMBO_IEEEB0.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The 5709 SerDes maps the IEEE registers at an offset of 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fixed fiber mode, no autodetect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G per the PHY capability flag. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM configuration. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the default block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2228 
/* Initialize the 5708 SerDes PHY: force fiber mode, enable PLL early
 * link detect, advertise 2.5G if capable, and apply board/revision
 * specific TX amplitude tweaks.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Force fiber mode with auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detect on the PLL. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G only on 2.5G-capable boards. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only for backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2286 
/* Initialize the 5706 SerDes PHY.  The 0x18/0x1c accesses below are
 * vendor shadow-register sequences (write selects the shadow, read
 * returns it) — presumably per Broadcom PHY documentation; the exact
 * bit meanings are not visible here.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* Jumbo frames need the extended packet length bit set. */
	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		/* Standard MTU: clear the extended packet length bit. */
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2324 
/* Initialize a copper PHY: apply CRC and early-DAC workarounds where
 * flagged, configure extended packet length for jumbo MTUs, and enable
 * ethernet@wirespeed (and auto-MDIX on the 5709).
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Opaque vendor DSP write sequence for the CRC fix;
		 * register 0x17 selects the DSP address, 0x15 the data.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 8 to disable
		 * the early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2382 
2383 
/* Top-level PHY init: program default MII register addresses, read the
 * PHY ID, run the chip-specific init routine, then apply the current
 * link settings via bnx2_setup_phy().  Skips local PHY init entirely
 * when the PHY is managed remotely by the firmware.
 * Caller holds phy_lock; callees may drop and re-take it.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE register addresses; the 5709 SerDes init routine
	 * overrides these with its remapped offsets.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2429 
2430 static int
2431 bnx2_set_mac_loopback(struct bnx2 *bp)
2432 {
2433 	u32 mac_mode;
2434 
2435 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2436 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2437 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2438 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439 	bp->link_up = 1;
2440 	return 0;
2441 }
2442 
2443 static int bnx2_test_link(struct bnx2 *);
2444 
2445 static int
2446 bnx2_set_phy_loopback(struct bnx2 *bp)
2447 {
2448 	u32 mac_mode;
2449 	int rc, i;
2450 
2451 	spin_lock_bh(&bp->phy_lock);
2452 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2453 			    BMCR_SPEED1000);
2454 	spin_unlock_bh(&bp->phy_lock);
2455 	if (rc)
2456 		return rc;
2457 
2458 	for (i = 0; i < 10; i++) {
2459 		if (bnx2_test_link(bp) == 0)
2460 			break;
2461 		msleep(100);
2462 	}
2463 
2464 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2465 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2466 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2467 		      BNX2_EMAC_MODE_25G_MODE);
2468 
2469 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2470 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2471 	bp->link_up = 1;
2472 	return 0;
2473 }
2474 
2475 static void
2476 bnx2_dump_mcp_state(struct bnx2 *bp)
2477 {
2478 	struct net_device *dev = bp->dev;
2479 	u32 mcp_p0, mcp_p1;
2480 
2481 	netdev_err(dev, "<--- start MCP states dump --->\n");
2482 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2483 		mcp_p0 = BNX2_MCP_STATE_P0;
2484 		mcp_p1 = BNX2_MCP_STATE_P1;
2485 	} else {
2486 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2487 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2488 	}
2489 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2490 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2491 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2492 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2493 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2494 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2495 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2496 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2497 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2499 	netdev_err(dev, "DEBUG: shmem states:\n");
2500 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2501 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2502 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2503 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2504 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2505 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2506 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2507 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2508 	pr_cont(" condition[%08x]\n",
2509 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2510 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2511 	DP_SHMEM_LINE(bp, 0x3cc);
2512 	DP_SHMEM_LINE(bp, 0x3dc);
2513 	DP_SHMEM_LINE(bp, 0x3ec);
2514 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2515 	netdev_err(dev, "<--- end MCP states dump --->\n");
2516 }
2517 
/* Post a message to the bootcode mailbox and optionally wait for the
 * firmware to acknowledge it.  Returns 0 on success (or for WAIT0
 * messages regardless of ack), -EBUSY if the firmware never acked,
 * -EIO if it acked with a failure status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a rolling sequence number so the ack in
	 * BNX2_FW_MB can be matched against this particular request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are best-effort: report success even when the
	 * firmware did not acknowledge in time.
	 */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	/* Acked, but the firmware reported a failure status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2564 
/* Initialize the 5709 host-paged context memory: start the hardware
 * memory init, then program each pre-allocated context page into the
 * chip's host page table, polling for each write to complete.
 * Returns 0 on success, -EBUSY on hardware timeout, -ENOMEM if a
 * context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to clear the MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the page's DMA address (low then high) and kick
		 * off the page table entry write.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware consumes the write request. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2612 
/* Initialize on-chip context memory (pre-5709 chips): zero the context
 * for all 96 connection IDs.  The 5706 A0 needs some virtual CIDs
 * remapped to different physical CIDs to work around bad context RAM.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 workaround: remap CIDs with bit 3 set onto
			 * a spare physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
	    		vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2655 
/* Work around bad RX buffer memory blocks: drain the firmware's mbuf
 * pool, remember which buffers are good, then free only the good ones
 * back — leaving the bad blocks permanently allocated so the firmware
 * never hands them out.  Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
		return -ENOMEM;

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by the free register: the buffer
		 * value repeated in the high bits plus a low valid bit —
		 * TODO confirm against the RBUF interface spec.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2704 
2705 static void
2706 bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2707 {
2708 	u32 val;
2709 
2710 	val = (mac_addr[0] << 8) | mac_addr[1];
2711 
2712 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2713 
2714 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2715 		(mac_addr[4] << 8) | mac_addr[5];
2716 
2717 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2718 }
2719 
/* Allocate and DMA-map one page for the rx page ring slot @index and
 * program its buffer descriptor.  Returns 0 on success, -ENOMEM if the
 * page allocation fails, -EIO if the DMA mapping fails (page is freed).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record the page and mapping, then give the 64-bit DMA address
	 * to the hardware via the buffer descriptor.
	 */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2744 
2745 static void
2746 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2747 {
2748 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2749 	struct page *page = rx_pg->page;
2750 
2751 	if (!page)
2752 		return;
2753 
2754 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2755 		       PAGE_SIZE, DMA_FROM_DEVICE);
2756 
2757 	__free_page(page);
2758 	rx_pg->page = NULL;
2759 }
2760 
/* Allocate and DMA-map one rx data buffer for ring slot @index and
 * program its buffer descriptor.  The buffer starts with the l2_fhdr
 * the chip writes, followed by packet data.  Returns 0 on success,
 * -ENOMEM on allocation failure, -EIO on DMA mapping failure.
 */
static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	/* Map only the part the device writes, starting at the l2_fhdr. */
	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Advance the byte sequence the chip uses for flow accounting. */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2793 
2794 static int
2795 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2796 {
2797 	struct status_block *sblk = bnapi->status_blk.msi;
2798 	u32 new_link_state, old_link_state;
2799 	int is_set = 1;
2800 
2801 	new_link_state = sblk->status_attn_bits & event;
2802 	old_link_state = sblk->status_attn_bits_ack & event;
2803 	if (new_link_state != old_link_state) {
2804 		if (new_link_state)
2805 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2806 		else
2807 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2808 	} else
2809 		is_set = 0;
2810 
2811 	return is_set;
2812 }
2813 
/* Service PHY-related attentions from NAPI context: link state changes
 * and (for remote-PHY setups) firmware link events.  phy_lock serializes
 * against the PHY setup paths.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2827 
2828 static inline u16
2829 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2830 {
2831 	u16 cons;
2832 
2833 	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2834 
2835 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2836 		cons++;
2837 	return cons;
2838 }
2839 
/* Reclaim completed tx descriptors for this NAPI instance's tx ring:
 * unmap each completed skb (head + fragments), free it, report the
 * work to BQL, and wake the tx queue if it was stopped and enough
 * descriptors are now free.  Returns the number of packets reclaimed,
 * bounded by @budget.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One tx queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until the last BD of the GSO
			 * packet has completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment BD that followed the head BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				DMA_TO_DEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-sample the hardware index to batch up more work. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	/* Report completed packets/bytes to byte queue limits (BQL). */
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2934 
/* Recycle @count consumed rx page-ring entries back onto the producer
 * side (reusing their pages and DMA mappings) after an error or an
 * allocation failure.  If @skb is non-NULL, its last frag page is
 * reclaimed into the ring and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page, its mapping and its BD address from
		 * the consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2992 
/* Recycle an rx data buffer from consumer slot @cons to producer slot
 * @prod: hand ownership of the prefix back to the device, move the
 * buffer, its DMA mapping, and its BD address across, and advance the
 * producer byte sequence.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the CPU-synced prefix (header region) back to the device. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3022 
/* Build an skb from a received data buffer.  First replenishes the rx
 * ring slot (recycling the buffer on failure), then wraps @data in an
 * skb.  For split/jumbo packets (@hdr_len != 0) the payload beyond the
 * header is attached as page fragments taken from the page ring, each
 * replenished as it is consumed.  @ring_idx packs the consumer index
 * in the high 16 bits and the producer index in the low 16 bits.
 * Returns the skb, or NULL on allocation failure (ring state is
 * recycled so no buffers are lost).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Refill the slot before taking ownership of @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* Also recycle the page-ring entries this packet used.
		 * raw_len includes the 4-byte CRC still counted here.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 DMA_FROM_DEVICE);
	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip past the l2_fhdr and alignment padding to the packet. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Payload past the header, plus the 4-byte CRC. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* Last fragment holds only (part of) the CRC:
			 * trim it from the skb instead of attaching it.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, DMA_FROM_DEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3127 
3128 static inline u16
3129 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3130 {
3131 	u16 cons;
3132 
3133 	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3134 
3135 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3136 		cons++;
3137 	return cons;
3138 }
3139 
/* NAPI rx handler for one ring: walk completed descriptors, validate
 * each l2_fhdr, build skbs (copying small packets, wrapping/attaching
 * pages for large ones), apply VLAN/checksum/hash offload results, and
 * hand packets to GRO.  Updates the producer indices in hardware at
 * the end.  Returns the number of packets processed (<= @budget).
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	if (budget <= 0)
		return rx_pkt;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the header region for the CPU; the rest is
		 * synced by unmap (or stays with the device on reuse).
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			DMA_FROM_DEVICE);

		/* Warm the cache for the next descriptor's header. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* hdr_len != 0 means the payload continues in the page
		 * ring (split header or jumbo frame).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Bad frame: recycle the buffer (and any pages). */
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a fresh skb and recycle
			 * the rx buffer in place.
			 */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (!skb) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged. */
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(0x8100) &&
		    skb->protocol != htons(ETH_P_8021AD)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when it verified a
		 * TCP/UDP packet without xsum errors.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	return rx_pkt;

}
3304 
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask further interrupts; NAPI completion re-enables them. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3327 
/* One-shot MSI ISR.  Unlike bnx2_msi(), no explicit ack/mask register
 * write is issued here — presumably the hardware masks itself in
 * one-shot mode (NOTE(review): confirm against chip docs).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3344 
/* INTx ISR.  The line may be shared, so interrupts that are not ours
 * must be detected and rejected with IRQ_NONE.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* no new status and INTA not ours */

	/* Ack and mask further interrupts until NAPI completes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index this schedule is based on, so the
	 * poll loop can detect work posted afterwards.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3383 
3384 static inline int
3385 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3386 {
3387 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3388 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3389 
3390 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3391 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3392 		return 1;
3393 	return 0;
3394 }
3395 
/* Attention events the driver services from the slow path. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return 1 if this vector has any pending work: RX/TX completions,
 * a CNIC status-block update, or an unacknowledged attention event.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention event is pending when its asserted bit differs
	 * from its acked bit.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3418 
/* Workaround for missed MSIs: if work is pending but the status index
 * has not advanced since the previous idle check, assume the MSI was
 * lost.  Bounce the MSI enable bit and invoke the MSI handler directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI off and back on, then fake the IRQ. */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3440 
#ifdef BCM_CNIC
/* Pass the status block to the registered CNIC (offload) driver, if
 * any, and record the status index it consumed in bnapi->cnic_tag.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops is RCU-protected; it may be unregistered concurrently. */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3457 
/* Service unacknowledged link/timer attention events via bnx2_phy_int(). */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3477 
/* Service TX completions, then up to (budget - work_done) RX packets
 * on this vector.  Returns the updated work count; only RX packets
 * count against the NAPI budget.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3492 
/* NAPI poll handler for MSI-X vectors: fast-path (RX/TX) work only;
 * link and CNIC processing happen on vector 0 (see bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete_done(napi, work_done);
			/* Re-arm this vector, reporting the last status
			 * index we processed.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3519 
/* NAPI poll handler for INTx/MSI: services link attention, fast-path
 * RX/TX and (when built in) CNIC work, then re-enables interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			/* MSI/MSI-X: one write reports the index and
			 * re-enables the interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with the interrupt
			 * still masked, then write again to unmask.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3568 
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode, the multicast hash registers and the
 * RPM sort-user register from the netdev's flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous/keep-VLAN cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address with CRC32; the low 8 bits select
		 * one of 256 filter bits (8 registers x 32 bits).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for perfect filtering: fall back
	 * to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort register: clear, load, then enable. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3657 
3658 static int
3659 check_fw_section(const struct firmware *fw,
3660 		 const struct bnx2_fw_file_section *section,
3661 		 u32 alignment, bool non_empty)
3662 {
3663 	u32 offset = be32_to_cpu(section->offset);
3664 	u32 len = be32_to_cpu(section->len);
3665 
3666 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3667 		return -EINVAL;
3668 	if ((non_empty && len == 0) || len > fw->size - offset ||
3669 	    len & (alignment - 1))
3670 		return -EINVAL;
3671 	return 0;
3672 }
3673 
3674 static int
3675 check_mips_fw_entry(const struct firmware *fw,
3676 		    const struct bnx2_mips_fw_file_entry *entry)
3677 {
3678 	if (check_fw_section(fw, &entry->text, 4, true) ||
3679 	    check_fw_section(fw, &entry->data, 4, false) ||
3680 	    check_fw_section(fw, &entry->rodata, 4, false))
3681 		return -EINVAL;
3682 	return 0;
3683 }
3684 
3685 static void bnx2_release_firmware(struct bnx2 *bp)
3686 {
3687 	if (bp->rv2p_firmware) {
3688 		release_firmware(bp->mips_firmware);
3689 		release_firmware(bp->rv2p_firmware);
3690 		bp->rv2p_firmware = NULL;
3691 	}
3692 }
3693 
3694 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3695 {
3696 	const char *mips_fw_file, *rv2p_fw_file;
3697 	const struct bnx2_mips_fw_file *mips_fw;
3698 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3699 	int rc;
3700 
3701 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3702 		mips_fw_file = FW_MIPS_FILE_09;
3703 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3704 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3705 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3706 		else
3707 			rv2p_fw_file = FW_RV2P_FILE_09;
3708 	} else {
3709 		mips_fw_file = FW_MIPS_FILE_06;
3710 		rv2p_fw_file = FW_RV2P_FILE_06;
3711 	}
3712 
3713 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3714 	if (rc) {
3715 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3716 		goto out;
3717 	}
3718 
3719 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3720 	if (rc) {
3721 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3722 		goto err_release_mips_firmware;
3723 	}
3724 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3725 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3726 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3727 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3728 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3729 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3730 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3731 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3732 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3733 		rc = -EINVAL;
3734 		goto err_release_firmware;
3735 	}
3736 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3737 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3738 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3739 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3740 		rc = -EINVAL;
3741 		goto err_release_firmware;
3742 	}
3743 out:
3744 	return rc;
3745 
3746 err_release_firmware:
3747 	release_firmware(bp->rv2p_firmware);
3748 	bp->rv2p_firmware = NULL;
3749 err_release_mips_firmware:
3750 	release_firmware(bp->mips_firmware);
3751 	goto out;
3752 }
3753 
3754 static int bnx2_request_firmware(struct bnx2 *bp)
3755 {
3756 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3757 }
3758 
3759 static u32
3760 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3761 {
3762 	switch (idx) {
3763 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3764 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3765 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3766 		break;
3767 	}
3768 	return rv2p_code;
3769 }
3770 
/* Download one RV2P firmware image.  Instructions are 64 bits wide
 * and are written one pair at a time through the INSTR_HIGH/INSTR_LOW
 * registers, addressed via the per-processor ADDR_CMD register.  Up
 * to 8 fixup locations are then patched (see rv2p_fw_fixup()) and the
 * processor is left in reset; it is un-stalled later.  Always
 * returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each RV2P processor has its own command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image, 8 bytes (one instruction) per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Re-write the instructions at the fixup locations, patched by
	 * rv2p_fw_fixup().  Fixup locations are in 32-bit word units.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3830 
3831 static int
3832 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3833 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3834 {
3835 	u32 addr, len, file_offset;
3836 	__be32 *data;
3837 	u32 offset;
3838 	u32 val;
3839 
3840 	/* Halt the CPU. */
3841 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3842 	val |= cpu_reg->mode_value_halt;
3843 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3844 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3845 
3846 	/* Load the Text area. */
3847 	addr = be32_to_cpu(fw_entry->text.addr);
3848 	len = be32_to_cpu(fw_entry->text.len);
3849 	file_offset = be32_to_cpu(fw_entry->text.offset);
3850 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3851 
3852 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3853 	if (len) {
3854 		int j;
3855 
3856 		for (j = 0; j < (len / 4); j++, offset += 4)
3857 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3858 	}
3859 
3860 	/* Load the Data area. */
3861 	addr = be32_to_cpu(fw_entry->data.addr);
3862 	len = be32_to_cpu(fw_entry->data.len);
3863 	file_offset = be32_to_cpu(fw_entry->data.offset);
3864 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3865 
3866 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3867 	if (len) {
3868 		int j;
3869 
3870 		for (j = 0; j < (len / 4); j++, offset += 4)
3871 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3872 	}
3873 
3874 	/* Load the Read-Only area. */
3875 	addr = be32_to_cpu(fw_entry->rodata.addr);
3876 	len = be32_to_cpu(fw_entry->rodata.len);
3877 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3878 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3879 
3880 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3881 	if (len) {
3882 		int j;
3883 
3884 		for (j = 0; j < (len / 4); j++, offset += 4)
3885 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3886 	}
3887 
3888 	/* Clear the pre-fetch instruction. */
3889 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3890 
3891 	val = be32_to_cpu(fw_entry->start_addr);
3892 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3893 
3894 	/* Start the CPU. */
3895 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3896 	val &= ~cpu_reg->mode_value_halt;
3897 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3898 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3899 
3900 	return 0;
3901 }
3902 
/* Download firmware into all on-chip processors: both RV2P engines
 * first, then each MIPS CPU (RX, TX, TX patch-up, completion and
 * command) in turn.  The RV2P loads have no failure mode —
 * load_rv2p_fw() always returns 0.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3942 
/* Configure the chip for Wake-on-LAN (or no-WOL) before suspend and
 * notify the firmware of the chosen suspend mode.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Save autoneg settings; on copper, temporarily advertise
		 * only 10/100 while reprogramming the PHY, then restore.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Sort on broadcast and multicast: clear, load, enable. */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4037 
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Undo the WOL EMAC setup done in bnx2_setup_wol(). */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* On 5706 A0/A1, only enter D3hot when WOL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4093 
4094 static int
4095 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4096 {
4097 	u32 val;
4098 	int j;
4099 
4100 	/* Request access to the flash interface. */
4101 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4102 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4103 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4104 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4105 			break;
4106 
4107 		udelay(5);
4108 	}
4109 
4110 	if (j >= NVRAM_TIMEOUT_COUNT)
4111 		return -EBUSY;
4112 
4113 	return 0;
4114 }
4115 
4116 static int
4117 bnx2_release_nvram_lock(struct bnx2 *bp)
4118 {
4119 	int j;
4120 	u32 val;
4121 
4122 	/* Relinquish nvram interface. */
4123 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4124 
4125 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4126 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4127 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4128 			break;
4129 
4130 		udelay(5);
4131 	}
4132 
4133 	if (j >= NVRAM_TIMEOUT_COUNT)
4134 		return -EBUSY;
4135 
4136 	return 0;
4137 }
4138 
4139 
/* Enable writes to the flash.  Sets the PCI-level write-enable bit
 * and, for flash parts flagged BNX2_NV_WREN, additionally issues a
 * WREN command and waits for it to complete.  Returns 0 on success
 * or -EBUSY if the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		BNX2_WR(bp, BNX2_NVM_COMMAND,
			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4168 
4169 static void
4170 bnx2_disable_nvram_write(struct bnx2 *bp)
4171 {
4172 	u32 val;
4173 
4174 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4175 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4176 }
4177 
4178 
4179 static void
4180 bnx2_enable_nvram_access(struct bnx2 *bp)
4181 {
4182 	u32 val;
4183 
4184 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4185 	/* Enable both bits, even on read. */
4186 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4187 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4188 }
4189 
4190 static void
4191 bnx2_disable_nvram_access(struct bnx2 *bp)
4192 {
4193 	u32 val;
4194 
4195 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4196 	/* Disable both bits, even after read. */
4197 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4198 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4199 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4200 }
4201 
/* Erase the flash page containing @offset.  Buffered flash parts need
 * no erase and return 0 immediately.  Returns 0 on success or -EBUSY
 * if the erase command never completes.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4241 
/* Read one 32-bit word of NVRAM at @offset into @ret_val (stored as 4
 * big-endian bytes).  @cmd_flags carries extra NVM command bits
 * supplied by the caller.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4285 
4286 
/* Write one 32-bit word (4 big-endian bytes at @val) to NVRAM at
 * @offset.  @cmd_flags carries extra NVM command bits supplied by the
 * caller.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4330 
/* Identify the attached NVRAM/flash part, record it in bp->flash_info,
 * and determine the usable flash size.
 *
 * On the 5709 the flash parameters are fixed (flash_5709).  On older
 * chips the NVM_CFG1 strapping is matched against flash_table[]; if the
 * interface has not yet been reconfigured by firmware, the matching
 * entry's config registers are programmed under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 always uses the same flash spec; skip strap probing. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured: match against the
		 * config1 strap bits of each known part. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured: match the raw strapping and
		 * program the interface ourselves. */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either scan loop leaves j == entry_count iff nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size published in shared memory; fall back to
	 * the total size from the matched flash spec. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4413 
4414 static int
4415 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4416 		int buf_size)
4417 {
4418 	int rc = 0;
4419 	u32 cmd_flags, offset32, len32, extra;
4420 
4421 	if (buf_size == 0)
4422 		return 0;
4423 
4424 	/* Request access to the flash interface. */
4425 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4426 		return rc;
4427 
4428 	/* Enable access to flash interface */
4429 	bnx2_enable_nvram_access(bp);
4430 
4431 	len32 = buf_size;
4432 	offset32 = offset;
4433 	extra = 0;
4434 
4435 	cmd_flags = 0;
4436 
4437 	if (offset32 & 3) {
4438 		u8 buf[4];
4439 		u32 pre_len;
4440 
4441 		offset32 &= ~3;
4442 		pre_len = 4 - (offset & 3);
4443 
4444 		if (pre_len >= len32) {
4445 			pre_len = len32;
4446 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4447 				    BNX2_NVM_COMMAND_LAST;
4448 		}
4449 		else {
4450 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4451 		}
4452 
4453 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4454 
4455 		if (rc)
4456 			return rc;
4457 
4458 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4459 
4460 		offset32 += 4;
4461 		ret_buf += pre_len;
4462 		len32 -= pre_len;
4463 	}
4464 	if (len32 & 3) {
4465 		extra = 4 - (len32 & 3);
4466 		len32 = (len32 + 4) & ~3;
4467 	}
4468 
4469 	if (len32 == 4) {
4470 		u8 buf[4];
4471 
4472 		if (cmd_flags)
4473 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4474 		else
4475 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4476 				    BNX2_NVM_COMMAND_LAST;
4477 
4478 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4479 
4480 		memcpy(ret_buf, buf, 4 - extra);
4481 	}
4482 	else if (len32 > 0) {
4483 		u8 buf[4];
4484 
4485 		/* Read the first word. */
4486 		if (cmd_flags)
4487 			cmd_flags = 0;
4488 		else
4489 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4490 
4491 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4492 
4493 		/* Advance to the next dword. */
4494 		offset32 += 4;
4495 		ret_buf += 4;
4496 		len32 -= 4;
4497 
4498 		while (len32 > 4 && rc == 0) {
4499 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4500 
4501 			/* Advance to the next dword. */
4502 			offset32 += 4;
4503 			ret_buf += 4;
4504 			len32 -= 4;
4505 		}
4506 
4507 		if (rc)
4508 			return rc;
4509 
4510 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4511 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4512 
4513 		memcpy(ret_buf, buf, 4 - extra);
4514 	}
4515 
4516 	/* Disable access to flash interface */
4517 	bnx2_disable_nvram_access(bp);
4518 
4519 	bnx2_release_nvram_lock(bp);
4520 
4521 	return rc;
4522 }
4523 
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are preserved by first reading the
 * surrounding dwords and merging everything into align_buf
 * (read-modify-write).  For non-buffered flash parts, writes proceed a
 * page at a time: the page is read into flash_buffer, erased, and
 * rewritten with the merged contents.  The NVRAM lock is acquired and
 * released once per page so other agents are not starved for the whole
 * transfer.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range down to a dword boundary and
	 * fetch the existing leading bytes that must be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen the range up to a dword boundary and
	 * fetch the existing trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the preserved head/tail bytes with the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (!align_buf)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer holding one whole
	 * page for the read-erase-rewrite cycle. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (!flash_buffer) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4703 
4704 static void
4705 bnx2_init_fw_cap(struct bnx2 *bp)
4706 {
4707 	u32 val, sig = 0;
4708 
4709 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4710 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4711 
4712 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4713 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4714 
4715 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4716 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4717 		return;
4718 
4719 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4720 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4721 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4722 	}
4723 
4724 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4725 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4726 		u32 link;
4727 
4728 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4729 
4730 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4731 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4732 			bp->phy_port = PORT_FIBRE;
4733 		else
4734 			bp->phy_port = PORT_TP;
4735 
4736 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4737 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4738 	}
4739 
4740 	if (netif_running(bp->dev) && sig)
4741 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4742 }
4743 
/* Point the PCI GRC windows at the chip's MSI-X vector table and PBA so
 * that the host can reach them through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Use separate windows for the MSI-X table and the PBA. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4752 
4753 static void
4754 bnx2_wait_dma_complete(struct bnx2 *bp)
4755 {
4756 	u32 val;
4757 	int i;
4758 
4759 	/*
4760 	 * Wait for the current PCI transaction to complete before
4761 	 * issuing a reset.
4762 	 */
4763 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4764 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4765 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4766 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4767 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4768 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4769 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4770 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4771 		udelay(5);
4772 	} else {  /* 5709 */
4773 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4774 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4775 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4776 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4777 
4778 		for (i = 0; i < 100; i++) {
4779 			msleep(1);
4780 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4781 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4782 				break;
4783 		}
4784 	}
4785 
4786 	return;
4787 }
4788 
4789 
/* Perform a soft reset of the chip and wait for the bootcode to come
 * back up.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware in the
 *	WAIT0/WAIT1 handshakes so it knows why the driver is resetting.
 *
 * Returns 0 on success or a negative errno (reset timeout, endianness
 * check failure, firmware handshake failure, or a failure from
 * bnx2_alloc_bad_rbuf() on 5706 A0).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709: reset through the MISC command register, then
		 * restore the PCICFG window/swap configuration. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* 5706/5708: request the core reset through PCICFG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the reset may have changed the
	 * remote-PHY port type, in which case the default link must be
	 * re-established. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4893 
/* Bring the chip from post-reset state to fully operational: configure
 * DMA, load the internal CPUs' firmware, program the MAC address, MTU,
 * status/statistics block addresses, and host-coalescing parameters,
 * then complete the WAIT2 handshake with the bootcode and enable the
 * remaining blocks.
 *
 * Returns 0 on success or a negative errno from CPU firmware init,
 * context init, or the firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the relaxed-ordering enable bit in PCI-X mode. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Download firmware to the RX/TX/completion processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the TX backoff state machine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* The RBUF thresholds are always sized for at least the
	 * standard Ethernet MTU. */
	if (mtu < ETH_DATA_LEN)
		mtu = ETH_DATA_LEN;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the shared status block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host addresses of the status and statistics blocks. */
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip counts and timer ticks (regular vs.
	 * during-interrupt values packed high/low). */
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	/* Hint the RX firmware about the latency mode in use. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-vector status-block coalescing config for MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* Re-enable DMA (disabled by bnx2_wait_dma_complete). */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5122 
5123 static void
5124 bnx2_clear_ring_states(struct bnx2 *bp)
5125 {
5126 	struct bnx2_napi *bnapi;
5127 	struct bnx2_tx_ring_info *txr;
5128 	struct bnx2_rx_ring_info *rxr;
5129 	int i;
5130 
5131 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5132 		bnapi = &bp->bnx2_napi[i];
5133 		txr = &bnapi->tx_ring;
5134 		rxr = &bnapi->rx_ring;
5135 
5136 		txr->tx_cons = 0;
5137 		txr->hw_tx_cons = 0;
5138 		rxr->rx_prod_bseq = 0;
5139 		rxr->rx_prod = 0;
5140 		rxr->rx_cons = 0;
5141 		rxr->rx_pg_prod = 0;
5142 		rxr->rx_pg_cons = 0;
5143 	}
5144 }
5145 
5146 static void
5147 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5148 {
5149 	u32 val, offset0, offset1, offset2, offset3;
5150 	u32 cid_addr = GET_CID_ADDR(cid);
5151 
5152 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5153 		offset0 = BNX2_L2CTX_TYPE_XI;
5154 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5155 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5156 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5157 	} else {
5158 		offset0 = BNX2_L2CTX_TYPE;
5159 		offset1 = BNX2_L2CTX_CMD_TYPE;
5160 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5161 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5162 	}
5163 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5164 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5165 
5166 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5167 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5168 
5169 	val = (u64) txr->tx_desc_mapping >> 32;
5170 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5171 
5172 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5173 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5174 }
5175 
5176 static void
5177 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5178 {
5179 	struct bnx2_tx_bd *txbd;
5180 	u32 cid = TX_CID;
5181 	struct bnx2_napi *bnapi;
5182 	struct bnx2_tx_ring_info *txr;
5183 
5184 	bnapi = &bp->bnx2_napi[ring_num];
5185 	txr = &bnapi->tx_ring;
5186 
5187 	if (ring_num == 0)
5188 		cid = TX_CID;
5189 	else
5190 		cid = TX_TSS_CID + ring_num - 1;
5191 
5192 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5193 
5194 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5195 
5196 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5197 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5198 
5199 	txr->tx_prod = 0;
5200 	txr->tx_prod_bseq = 0;
5201 
5202 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5203 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5204 
5205 	bnx2_init_tx_context(bp, cid, txr);
5206 }
5207 
5208 static void
5209 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5210 		     u32 buf_size, int num_rings)
5211 {
5212 	int i;
5213 	struct bnx2_rx_bd *rxbd;
5214 
5215 	for (i = 0; i < num_rings; i++) {
5216 		int j;
5217 
5218 		rxbd = &rx_ring[i][0];
5219 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5220 			rxbd->rx_bd_len = buf_size;
5221 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5222 		}
5223 		if (i == (num_rings - 1))
5224 			j = 0;
5225 		else
5226 			j = i + 1;
5227 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5228 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5229 	}
5230 }
5231 
/* Set up RX ring @ring_num: initialize the BD chains (data ring and,
 * when jumbo frames are in use, the page ring), program the hardware RX
 * context with the ring addresses, pre-fill the rings with receive
 * buffers, and write the initial producer indices to the mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Zero the page-buffer size first; it is programmed below only
	 * when the jumbo page ring is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Host address of the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning rather than treated as fatal. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the data ring, same partial-fill policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox doorbell addresses for the producer indices. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5317 
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are (re)initialized. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* With multiple tx rings, enable TSS: number of extra rings in
	 * bits 24+, base CID in bits 7+.
	 */
	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS and clear the indirection table size before the rx
	 * rings are (re)initialized.
	 */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Fill the RSS indirection table: each 32-bit word packs 8
		 * entries of 4 bits, written to the hardware one word at a
		 * time (every 8th entry).  Entries round-robin over the
		 * non-default rx rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Finally enable RSS hashing for all IPv4/IPv6 types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5364 
5365 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5366 {
5367 	u32 max, num_rings = 1;
5368 
5369 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5370 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5371 		num_rings++;
5372 	}
5373 	/* round to next power of 2 */
5374 	max = max_size;
5375 	while ((max & num_rings) == 0)
5376 		max >>= 1;
5377 
5378 	if (num_rings != max)
5379 		max <<= 1;
5380 
5381 	return max;
5382 }
5383 
/* Compute and store all rx buffer/ring sizing parameters for the current
 * MTU and the requested ring size.  Switches to the paged (jumbo) rx
 * scheme when a single buffer would not fit in one page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full per-buffer footprint: hw alignment padding plus the
	 * skb_shared_info block that build_skb() appends.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Paged mode: only headers stay in the head buffer, the
		 * rest of the frame goes into page-sized buffers.  The
		 * "- 40" presumably discounts header bytes kept in the
		 * head buffer -- TODO confirm against rx path.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* Head buffer shrinks to just the copy-threshold area. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = kmalloc_size_roundup(
		SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5425 
/* Release every skb still held by the tx rings, unmapping their DMA
 * buffers first.  Called while the NIC is being reset or torn down.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring may not have been allocated (e.g. failed open). */
		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			/* Empty slot - just advance. */
			if (!skb) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* First BD maps the linear part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			/* Unmap each page fragment occupying the BDs that
			 * follow, then free the skb itself.
			 */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					DMA_TO_DEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5470 
/* Release every rx data buffer and rx page still held by the rx rings,
 * unmapping their DMA buffers first.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): an unallocated ring ends the whole scan
		 * (return, not continue) - presumably rings are allocated
		 * in order so nothing follows; confirm against alloc path.
		 */
		if (!rxr->rx_buf_ring)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 DMA_FROM_DEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		/* Paged (jumbo) buffers are freed separately. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5504 
/* Free all buffers held by both the tx and rx rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5511 
5512 static int
5513 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5514 {
5515 	int rc;
5516 
5517 	rc = bnx2_reset_chip(bp, reset_code);
5518 	bnx2_free_skbs(bp);
5519 	if (rc)
5520 		return rc;
5521 
5522 	if ((rc = bnx2_init_chip(bp)) != 0)
5523 		return rc;
5524 
5525 	bnx2_init_all_rings(bp);
5526 	return 0;
5527 }
5528 
5529 static int
5530 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5531 {
5532 	int rc;
5533 
5534 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5535 		return rc;
5536 
5537 	spin_lock_bh(&bp->phy_lock);
5538 	bnx2_init_phy(bp, reset_phy);
5539 	bnx2_set_link(bp);
5540 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5541 		bnx2_remote_phy_event(bp);
5542 	spin_unlock_bh(&bp->phy_lock);
5543 	return 0;
5544 }
5545 
5546 static int
5547 bnx2_shutdown_chip(struct bnx2 *bp)
5548 {
5549 	u32 reset_code;
5550 
5551 	if (bp->flags & BNX2_FLAG_NO_WOL)
5552 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5553 	else if (bp->wol)
5554 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5555 	else
5556 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5557 
5558 	return bnx2_reset_chip(bp, reset_code);
5559 }
5560 
/* Self-test: for each register in the table, verify that the read/write
 * bits can be cleared and set while the read-only bits keep their value.
 * The original register contents are restored afterwards.  Returns 0 on
 * success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table of registers to probe: offset, chip applicability flag,
	 * mask of writable bits and mask of read-only bits.  Terminated
	 * by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip entries not applicable to this chip. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits must
		 * keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits must
		 * still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5731 
5732 static int
5733 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5734 {
5735 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5736 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5737 	int i;
5738 
5739 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5740 		u32 offset;
5741 
5742 		for (offset = 0; offset < size; offset += 4) {
5743 
5744 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5745 
5746 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5747 				test_pattern[i]) {
5748 				return -ENODEV;
5749 			}
5750 		}
5751 	}
5752 	return 0;
5753 }
5754 
5755 static int
5756 bnx2_test_memory(struct bnx2 *bp)
5757 {
5758 	int ret = 0;
5759 	int i;
5760 	static struct mem_entry {
5761 		u32   offset;
5762 		u32   len;
5763 	} mem_tbl_5706[] = {
5764 		{ 0x60000,  0x4000 },
5765 		{ 0xa0000,  0x3000 },
5766 		{ 0xe0000,  0x4000 },
5767 		{ 0x120000, 0x4000 },
5768 		{ 0x1a0000, 0x4000 },
5769 		{ 0x160000, 0x4000 },
5770 		{ 0xffffffff, 0    },
5771 	},
5772 	mem_tbl_5709[] = {
5773 		{ 0x60000,  0x4000 },
5774 		{ 0xa0000,  0x3000 },
5775 		{ 0xe0000,  0x4000 },
5776 		{ 0x120000, 0x4000 },
5777 		{ 0x1a0000, 0x4000 },
5778 		{ 0xffffffff, 0    },
5779 	};
5780 	struct mem_entry *mem_tbl;
5781 
5782 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5783 		mem_tbl = mem_tbl_5709;
5784 	else
5785 		mem_tbl = mem_tbl_5706;
5786 
5787 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5788 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5789 			mem_tbl[i].len)) != 0) {
5790 			return ret;
5791 		}
5792 	}
5793 
5794 	return ret;
5795 }
5796 
5797 #define BNX2_MAC_LOOPBACK	0
5798 #define BNX2_PHY_LOOPBACK	1
5799 
/* Loopback self-test: transmit one self-addressed test packet in MAC or
 * PHY loopback mode and verify it is received back intact on ring 0.
 * Returns 0 on success, -ENODEV on any mismatch, -EINVAL/-ENOMEM/-EIO
 * on setup failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remote-managed PHY cannot be put in loopback; report
		 * success so the test is effectively skipped.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: own MAC as destination, then a counting
	 * byte pattern in the payload (bytes 14..pkt_size-1).
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update to get a stable rx consumer index
	 * before transmitting.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Fill one tx descriptor for the whole frame and ring the
	 * doorbell (bidx + bseq writes below).
	 */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the completion. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts packets received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, DMA_FROM_DEVICE);

	/* Any rx error flagged in the frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check (-4 strips the CRC appended by the MAC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5932 
5933 #define BNX2_MAC_LOOPBACK_FAILED	1
5934 #define BNX2_PHY_LOOPBACK_FAILED	2
5935 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5936 					 BNX2_PHY_LOOPBACK_FAILED)
5937 
/* Run both loopback self-tests after a clean NIC reset.  Returns a
 * bitmask of BNX2_MAC_LOOPBACK_FAILED / BNX2_PHY_LOOPBACK_FAILED, or
 * 0 when both pass.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	/* Start from a known chip/PHY state before looping back. */
	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
5956 
5957 #define NVRAM_SIZE 0x200
5958 #define CRC32_RESIDUAL 0xdebb20e3
5959 
5960 static int
5961 bnx2_test_nvram(struct bnx2 *bp)
5962 {
5963 	__be32 buf[NVRAM_SIZE / 4];
5964 	u8 *data = (u8 *) buf;
5965 	int rc = 0;
5966 	u32 magic, csum;
5967 
5968 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5969 		goto test_nvram_done;
5970 
5971         magic = be32_to_cpu(buf[0]);
5972 	if (magic != 0x669955aa) {
5973 		rc = -ENODEV;
5974 		goto test_nvram_done;
5975 	}
5976 
5977 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5978 		goto test_nvram_done;
5979 
5980 	csum = ether_crc_le(0x100, data);
5981 	if (csum != CRC32_RESIDUAL) {
5982 		rc = -ENODEV;
5983 		goto test_nvram_done;
5984 	}
5985 
5986 	csum = ether_crc_le(0x100, data + 0x100);
5987 	if (csum != CRC32_RESIDUAL) {
5988 		rc = -ENODEV;
5989 	}
5990 
5991 test_nvram_done:
5992 	return rc;
5993 }
5994 
5995 static int
5996 bnx2_test_link(struct bnx2 *bp)
5997 {
5998 	u32 bmsr;
5999 
6000 	if (!netif_running(bp->dev))
6001 		return -ENODEV;
6002 
6003 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6004 		if (bp->link_up)
6005 			return 0;
6006 		return -ENODEV;
6007 	}
6008 	spin_lock_bh(&bp->phy_lock);
6009 	bnx2_enable_bmsr1(bp);
6010 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6011 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6012 	bnx2_disable_bmsr1(bp);
6013 	spin_unlock_bh(&bp->phy_lock);
6014 
6015 	if (bmsr & BMSR_LSTATUS) {
6016 		return 0;
6017 	}
6018 	return -ENODEV;
6019 }
6020 
6021 static int
6022 bnx2_test_intr(struct bnx2 *bp)
6023 {
6024 	int i;
6025 	u16 status_idx;
6026 
6027 	if (!netif_running(bp->dev))
6028 		return -ENODEV;
6029 
6030 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6031 
6032 	/* This register is not touched during run-time. */
6033 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6034 	BNX2_RD(bp, BNX2_HC_COMMAND);
6035 
6036 	for (i = 0; i < 10; i++) {
6037 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6038 			status_idx) {
6039 
6040 			break;
6041 		}
6042 
6043 		msleep_interruptible(10);
6044 	}
6045 	if (i < 10)
6046 		return 0;
6047 
6048 	return -ENODEV;
6049 }
6050 
/* Determining link for parallel detection.  Returns 1 when the 5706
 * SerDes PHY shows a usable link partner without autonegotiation,
 * 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice - presumably the AN debug bits are latched, so the
	 * second read reflects current state (TODO confirm).
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6082 
/* Periodic 5706 SerDes maintenance, run from bnx2_timer(): implements
 * parallel detection (forcing 1G full duplex when the partner does not
 * autonegotiate) and recovers from loss of sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg still settling - skip this tick. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		/* Link down with autoneg on: if the partner looks alive
		 * but is not negotiating, force 1G full duplex (parallel
		 * detection).
		 */
		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection: if the partner now
		 * advertises autoneg (bit 0x20 of shadow reg 0x15 via
		 * 0x17/0x0f01 - TODO confirm meaning), re-enable it.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the latched AN debug bits (see
		 * bnx2_5706_serdes_has_link()).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		/* Lost sync while link is up: force link down once, then
		 * fall back to normal link handling on the next tick.
		 */
		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6144 
/* Periodic 5708 SerDes maintenance, run from bnx2_timer(): when link
 * does not come up via autoneg, alternate between forced 2.5G and
 * autoneg until the link is established.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* The remote firmware manages the PHY - nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Toggle between forced 2.5G and autoneg; after switching
		 * back to autoneg, wait 2 ticks before toggling again.
		 */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6177 
/* Periodic driver timer: firmware heartbeat, stats refresh, MSI and
 * stats-counter workarounds, and SerDes link maintenance.  Re-arms
 * itself with bp->current_interval.
 */
static void
bnx2_timer(struct timer_list *t)
{
	struct bnx2 *bp = from_timer(bp, t, timer);

	if (!netif_running(bp->dev))
		return;

	/* Interrupts disabled (reset in progress) - just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Workaround for lost MSI edges: only plain (non one-shot) MSI
	 * is affected.
	 */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6213 
6214 static int
6215 bnx2_request_irq(struct bnx2 *bp)
6216 {
6217 	unsigned long flags;
6218 	struct bnx2_irq *irq;
6219 	int rc = 0, i;
6220 
6221 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6222 		flags = 0;
6223 	else
6224 		flags = IRQF_SHARED;
6225 
6226 	for (i = 0; i < bp->irq_nvecs; i++) {
6227 		irq = &bp->irq_tbl[i];
6228 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6229 				 &bp->bnx2_napi[i]);
6230 		if (rc)
6231 			break;
6232 		irq->requested = 1;
6233 	}
6234 	return rc;
6235 }
6236 
6237 static void
6238 __bnx2_free_irq(struct bnx2 *bp)
6239 {
6240 	struct bnx2_irq *irq;
6241 	int i;
6242 
6243 	for (i = 0; i < bp->irq_nvecs; i++) {
6244 		irq = &bp->irq_tbl[i];
6245 		if (irq->requested)
6246 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6247 		irq->requested = 0;
6248 	}
6249 }
6250 
/* Free all IRQ vectors and disable MSI/MSI-X, returning the device to
 * a no-interrupt-mode state.
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{

	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
6263 
/* Try to switch the device to MSI-X with msix_vecs vectors (plus one
 * extra for CNIC when built in).  On success fills bp->irq_tbl and sets
 * the MSIX/one-shot flags; on failure leaves the INTx setup untouched.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* Reserve one additional vector for the CNIC driver. */
	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	/* The range call may have granted fewer vectors; the CNIC vector
	 * (if any) is not counted in irq_nvecs.
	 */
	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6307 
/* Choose the interrupt mode (MSI-X, MSI, or INTx) and the resulting
 * number of tx/rx rings.  Defaults to INTx with one vector, upgrading
 * to MSI-X or MSI when the hardware supports it and dis_msi is not set.
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Size the vector count from the requested ring counts, falling
	 * back to CPU-based defaults when a count was not requested.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Default: single INTx vector; overridden below on success. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Plain MSI only if MSI-X was not enabled above; 5709 supports
	 * the one-shot MSI handler.
	 */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Derive the ring counts from the vectors actually obtained. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6359 
/* Called with rtnl_lock */
/* ndo_open: bring the interface up - load firmware, pick the interrupt
 * mode, allocate memory, request IRQs, init the NIC, and verify MSI
 * actually fires (falling back to INTx when it does not).  Any failure
 * unwinds everything via open_err.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces INTx; re-init the NIC for the
			 * new interrupt mode.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above, in reverse order. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6442 
/* Workqueue handler that fully re-initializes the NIC after a detected
 * failure (e.g. tx timeout).  Runs under rtnl_lock; closes the device
 * if re-initialization fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	/* If memory-space access got disabled, the PCI block itself was
	 * reset: restore (and re-save) the config space first.
	 */
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* dev_close() expects NAPI enabled. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6477 
6478 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6479 
/* Dump the chip's flow-through queue (FTQ) control registers, on-chip CPU
 * state, and the TBDC (TX BD cache) CAM contents to the kernel log at err
 * level.  Called from bnx2_tx_timeout() as a diagnostic aid.
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	/* Name/offset pairs for each FTQ control register to dump. */
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ is listed twice, so its register is
		 * printed twice - looks deliberate but worth confirming.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* Walk the on-chip CPUs (register blocks 0x40000 apart) dumping
	 * mode, state, event mask, pc (read twice - presumably so a moving
	 * pc shows the CPU is making progress; confirm) and instruction.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	/* Read each of the 32 TBDC CAM lines through the indirect command
	 * interface, polling for completion (bounded at 100 iterations).
	 */
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6546 
/* Log a snapshot of interrupt, PCI and EMAC state for tx-timeout debugging. */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The PBA register is only meaningful when running in MSI-X mode. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6570 
/* netdev watchdog callback: dump diagnostic state, then defer the actual
 * chip reset to the reset_task workqueue (this context cannot sleep).
 */
static void
bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6583 
6584 /* Called with netif_tx_lock.
6585  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6586  * netif_wake_queue().
6587  */
/* Transmit one skb: build a chain of TX buffer descriptors (head BD plus
 * one BD per page fragment, with optional checksum/VLAN/TSO flags), then
 * ring the hardware doorbell.  On a fragment DMA mapping failure, all
 * mappings made so far are undone and the packet is dropped.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD for the head plus one per fragment; the queue should
	 * have been stopped before the ring got this full.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* The VLAN tag occupies the upper 16 bits of the flags word. */
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
	}

	/* TSO setup: encode the TCP option length (and, for IPv6, the
	 * transport header offset) into the BD flags and mss fields.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* A non-zero offset is split across three
				 * bit-fields in the flags and mss words.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear head; on failure just drop the packet. */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	/* Fill the head BD: 64-bit DMA address split hi/lo, length+mss. */
	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD in the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: publish the new producer index and byte count. */
	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
6760 
6761 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce in order: interrupts, NAPI, tx queues, timer, then shut
	 * down the chip before releasing IRQs, buffers and memory.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6780 
6781 static void
6782 bnx2_save_stats(struct bnx2 *bp)
6783 {
6784 	u32 *hw_stats = (u32 *) bp->stats_blk;
6785 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6786 	int i;
6787 
6788 	/* The 1st 10 counters are 64-bit counters */
6789 	for (i = 0; i < 20; i += 2) {
6790 		u32 hi;
6791 		u64 lo;
6792 
6793 		hi = temp_stats[i] + hw_stats[i];
6794 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6795 		if (lo > 0xffffffff)
6796 			hi++;
6797 		temp_stats[i] = hi;
6798 		temp_stats[i + 1] = lo & 0xffffffff;
6799 	}
6800 
6801 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6802 		temp_stats[i] += hw_stats[i];
6803 }
6804 
6805 #define GET_64BIT_NET_STATS64(ctr)		\
6806 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6807 
6808 #define GET_64BIT_NET_STATS(ctr)				\
6809 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6810 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6811 
6812 #define GET_32BIT_NET_STATS(ctr)				\
6813 	(unsigned long) (bp->stats_blk->ctr +			\
6814 			 bp->temp_stats_blk->ctr)
6815 
/* ndo_get_stats64: populate net_stats by summing the live hardware stats
 * block with the counters saved across resets in temp_stats_blk (see the
 * GET_64BIT_NET_STATS/GET_32BIT_NET_STATS macros and bnx2_save_stats()).
 */
static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Stats block not allocated yet; leave net_stats untouched. */
	if (!bp->stats_blk)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is the sum of the individual rx error categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are reported as 0 on 5706 and 5708 A0 -
	 * presumably the counter is unreliable on those chips; confirm
	 * against the chip errata.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}
6887 
6888 /* All ethtool functions called with rtnl_lock */
6889 
/* ethtool get_link_ksettings: build the supported/advertising masks from
 * the PHY type and report current port, speed and duplex.  Link state is
 * sampled under phy_lock for a consistent snapshot.  Always returns 0.
 */
static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	/* A remote-PHY capable device may be attached to either media. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex (and MDI-X for copper PHYs) are only meaningful
	 * while the link is up; otherwise report them as unknown.
	 */
	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
6957 
/* ethtool set_link_ksettings: validate and store autoneg/speed/duplex/port
 * settings, then apply them through bnx2_setup_phy() if the interface is
 * running.  Returns 0 on success or -EINVAL for invalid combinations.
 */
static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated once validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports requires remote-PHY capability. */
	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		/* Restrict the advertised modes to the selected medium;
		 * an empty request means advertise everything supported.
		 */
		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = cmd->base.speed;

		/* Forced mode: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper disallows those.
		 */
		if (cmd->base.port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7039 
/* ethtool get_drvinfo: report driver name, PCI bus address and firmware
 * version (firmware string was captured at probe time into bp->fw_version).
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
7049 
7050 #define BNX2_REGDUMP_LEN		(32 * 1024)
7051 
/* ethtool get_regs_len: the register dump is always a fixed 32 KB. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7057 
/* ethtool get_regs: dump readable register ranges into the 32 KB buffer.
 * reg_boundaries lists [start, end) pairs of dumpable ranges; offsets in
 * the gaps are skipped and their buffer positions stay zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Touching the registers of a downed device is not safe. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* End of this range: jump to the start of the next one and
		 * reposition the output pointer to match the new offset.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7109 
7110 static void
7111 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7112 {
7113 	struct bnx2 *bp = netdev_priv(dev);
7114 
7115 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7116 		wol->supported = 0;
7117 		wol->wolopts = 0;
7118 	}
7119 	else {
7120 		wol->supported = WAKE_MAGIC;
7121 		if (bp->wol)
7122 			wol->wolopts = WAKE_MAGIC;
7123 		else
7124 			wol->wolopts = 0;
7125 	}
7126 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7127 }
7128 
7129 static int
7130 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7131 {
7132 	struct bnx2 *bp = netdev_priv(dev);
7133 
7134 	if (wol->wolopts & ~WAKE_MAGIC)
7135 		return -EINVAL;
7136 
7137 	if (wol->wolopts & WAKE_MAGIC) {
7138 		if (bp->flags & BNX2_FLAG_NO_WOL)
7139 			return -EINVAL;
7140 
7141 		bp->wol = 1;
7142 	}
7143 	else {
7144 		bp->wol = 0;
7145 	}
7146 
7147 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7148 
7149 	return 0;
7150 }
7151 
/* ethtool nway_reset: restart autonegotiation.  For serdes PHYs the link
 * is first forced down via loopback so the peer notices the restart.
 * Returns 0, -EAGAIN if the device is down, or -EINVAL if autoneg is off.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* A restart only makes sense while autonegotiation is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the firmware-managed PHY. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be called
		 * with a BH spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7197 
/* ethtool get_link: report the link state cached by the driver. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7205 
7206 static int
7207 bnx2_get_eeprom_len(struct net_device *dev)
7208 {
7209 	struct bnx2 *bp = netdev_priv(dev);
7210 
7211 	if (!bp->flash_info)
7212 		return 0;
7213 
7214 	return (int) bp->flash_size;
7215 }
7216 
7217 static int
7218 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7219 		u8 *eebuf)
7220 {
7221 	struct bnx2 *bp = netdev_priv(dev);
7222 	int rc;
7223 
7224 	/* parameters already validated in ethtool_get_eeprom */
7225 
7226 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7227 
7228 	return rc;
7229 }
7230 
7231 static int
7232 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7233 		u8 *eebuf)
7234 {
7235 	struct bnx2 *bp = netdev_priv(dev);
7236 	int rc;
7237 
7238 	/* parameters already validated in ethtool_set_eeprom */
7239 
7240 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7241 
7242 	return rc;
7243 }
7244 
/* ethtool get_coalesce: report the cached interrupt coalescing settings.
 * Always returns 0.
 */
static int bnx2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero unset fields so unsupported parameters read as 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7268 
/* ethtool set_coalesce: store new coalescing parameters and, if the
 * interface is running, re-init the NIC so they take effect.  Each value
 * is truncated to u16 and then clamped to its hardware field width
 * (10 bits for tick values, 8 bits for frame counts).  Always returns 0.
 */
static int bnx2_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* Chips with broken stats coalescing only support off or 1 second. */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Re-init so the host coalescing block picks up the new values. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7319 
/* ethtool get_ringparam: report maximum and currently configured sizes of
 * the rx, rx-jumbo (page) and tx rings.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
		   struct kernel_ethtool_ringparam *kernel_ering,
		   struct netlink_ext_ack *extack)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7336 
/* Resize the rx/tx rings.  If the interface is up, the NIC is torn down
 * (statistics are saved first, since a chip reset clears the hardware
 * counters) and brought back up with the new sizes; reset_irq selects
 * whether the IRQ/NAPI setup is rebuilt as well.  On re-init failure the
 * device is closed.  Returns 0 or a negative errno.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* dev_close() expects NAPI enabled. */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7392 
7393 static int
7394 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7395 		   struct kernel_ethtool_ringparam *kernel_ering,
7396 		   struct netlink_ext_ack *extack)
7397 {
7398 	struct bnx2 *bp = netdev_priv(dev);
7399 	int rc;
7400 
7401 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7402 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7403 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7404 
7405 		return -EINVAL;
7406 	}
7407 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7408 				   false);
7409 	return rc;
7410 }
7411 
7412 static void
7413 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7414 {
7415 	struct bnx2 *bp = netdev_priv(dev);
7416 
7417 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7418 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7419 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7420 }
7421 
7422 static int
7423 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7424 {
7425 	struct bnx2 *bp = netdev_priv(dev);
7426 
7427 	bp->req_flow_ctrl = 0;
7428 	if (epause->rx_pause)
7429 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7430 	if (epause->tx_pause)
7431 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7432 
7433 	if (epause->autoneg) {
7434 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7435 	}
7436 	else {
7437 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7438 	}
7439 
7440 	if (netif_running(dev)) {
7441 		spin_lock_bh(&bp->phy_lock);
7442 		bnx2_setup_phy(bp, bp->phy_port);
7443 		spin_unlock_bh(&bp->phy_lock);
7444 	}
7445 
7446 	return 0;
7447 }
7448 
/* ethtool statistics names; each entry's position must match the
 * corresponding offset in bnx2_stats_offset_arr below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7500 
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each ethtool stat in the hardware statistics block.
 * For 64-bit counters the offset points at the _hi (upper) word; the
 * _lo word follows at offset + 1.  Order matches bnx2_stats_str_arr[].
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7554 
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Each entry gives the width in bytes of the hardware counter:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skip (reported as 0).
 * Order matches bnx2_stats_str_arr[].
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

/* 5708 and newer: only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7573 
#define BNX2_NUM_TESTS 6

/* Test names for "ethtool -t"; indexes match the buf[] slots filled in
 * by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7586 
7587 static int
7588 bnx2_get_sset_count(struct net_device *dev, int sset)
7589 {
7590 	switch (sset) {
7591 	case ETH_SS_TEST:
7592 		return BNX2_NUM_TESTS;
7593 	case ETH_SS_STATS:
7594 		return BNX2_NUM_STATS;
7595 	default:
7596 		return -EOPNOTSUPP;
7597 	}
7598 }
7599 
/* ethtool -t handler.  Each buf[] slot reports one test result
 * (0 = pass, non-zero = fail) in the order of bnx2_tests_str_arr[].
 * Offline tests reset the chip, so the interface is stopped first and
 * re-initialized afterwards if it was running.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce traffic and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the netdev
		 * is not up).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests: safe to run without disturbing traffic. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7654 
7655 static void
7656 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7657 {
7658 	switch (stringset) {
7659 	case ETH_SS_STATS:
7660 		memcpy(buf, bnx2_stats_str_arr,
7661 			sizeof(bnx2_stats_str_arr));
7662 		break;
7663 	case ETH_SS_TEST:
7664 		memcpy(buf, bnx2_tests_str_arr,
7665 			sizeof(bnx2_tests_str_arr));
7666 		break;
7667 	}
7668 }
7669 
7670 static void
7671 bnx2_get_ethtool_stats(struct net_device *dev,
7672 		struct ethtool_stats *stats, u64 *buf)
7673 {
7674 	struct bnx2 *bp = netdev_priv(dev);
7675 	int i;
7676 	u32 *hw_stats = (u32 *) bp->stats_blk;
7677 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7678 	u8 *stats_len_arr = NULL;
7679 
7680 	if (!hw_stats) {
7681 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7682 		return;
7683 	}
7684 
7685 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7686 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7687 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7688 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7689 		stats_len_arr = bnx2_5706_stats_len_arr;
7690 	else
7691 		stats_len_arr = bnx2_5708_stats_len_arr;
7692 
7693 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7694 		unsigned long offset;
7695 
7696 		if (stats_len_arr[i] == 0) {
7697 			/* skip this counter */
7698 			buf[i] = 0;
7699 			continue;
7700 		}
7701 
7702 		offset = bnx2_stats_offset_arr[i];
7703 		if (stats_len_arr[i] == 4) {
7704 			/* 4-byte counter */
7705 			buf[i] = (u64) *(hw_stats + offset) +
7706 				 *(temp_stats + offset);
7707 			continue;
7708 		}
7709 		/* 8-byte counter */
7710 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7711 			 *(hw_stats + offset + 1) +
7712 			 (((u64) *(temp_stats + offset)) << 32) +
7713 			 *(temp_stats + offset + 1);
7714 	}
7715 }
7716 
/* ethtool LED identify handler (ethtool -p).  Takes manual control of
 * the port LEDs for the duration of the blink and restores the saved
 * MISC_CFG LED mode when finished.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* save current LED configuration and switch to MAC mode */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* force every speed/traffic LED on */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* return LED control to the hardware */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7749 
/* ndo_set_features: reconcile TSO/VLAN feature dependencies and push a
 * VLAN-stripping change to the hardware.  Returns 1 when the features
 * were applied here (so the core skips its own update), 0 otherwise.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* If the requested RX VLAN stripping state differs from what the
	 * chip is doing, reprogram the RX mode and tell the firmware.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7774 
7775 static void bnx2_get_channels(struct net_device *dev,
7776 			      struct ethtool_channels *channels)
7777 {
7778 	struct bnx2 *bp = netdev_priv(dev);
7779 	u32 max_rx_rings = 1;
7780 	u32 max_tx_rings = 1;
7781 
7782 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7783 		max_rx_rings = RX_MAX_RINGS;
7784 		max_tx_rings = TX_MAX_RINGS;
7785 	}
7786 
7787 	channels->max_rx = max_rx_rings;
7788 	channels->max_tx = max_tx_rings;
7789 	channels->max_other = 0;
7790 	channels->max_combined = 0;
7791 	channels->rx_count = bp->num_rx_rings;
7792 	channels->tx_count = bp->num_tx_rings;
7793 	channels->other_count = 0;
7794 	channels->combined_count = 0;
7795 }
7796 
7797 static int bnx2_set_channels(struct net_device *dev,
7798 			      struct ethtool_channels *channels)
7799 {
7800 	struct bnx2 *bp = netdev_priv(dev);
7801 	u32 max_rx_rings = 1;
7802 	u32 max_tx_rings = 1;
7803 	int rc = 0;
7804 
7805 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7806 		max_rx_rings = RX_MAX_RINGS;
7807 		max_tx_rings = TX_MAX_RINGS;
7808 	}
7809 	if (channels->rx_count > max_rx_rings ||
7810 	    channels->tx_count > max_tx_rings)
7811 		return -EINVAL;
7812 
7813 	bp->num_req_rx_rings = channels->rx_count;
7814 	bp->num_req_tx_rings = channels->tx_count;
7815 
7816 	if (netif_running(dev))
7817 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7818 					   bp->tx_ring_size, true);
7819 
7820 	return rc;
7821 }
7822 
/* ethtool entry points.  supported_coalesce_params declares which
 * ETHTOOL_COALESCE_* fields bnx2_set_coalesce accepts; the core rejects
 * requests touching anything else.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
7855 
7856 /* Called with rtnl_lock */
/* Called with rtnl_lock.  MII ioctl handler: PHY id query plus raw
 * MDIO register read/write under the PHY lock.  Not supported when the
 * PHY is owned by remote (management) firmware.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		/* MDIO access requires the chip to be initialized. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7906 
7907 /* Called with rtnl_lock */
7908 static int
7909 bnx2_change_mac_addr(struct net_device *dev, void *p)
7910 {
7911 	struct sockaddr *addr = p;
7912 	struct bnx2 *bp = netdev_priv(dev);
7913 
7914 	if (!is_valid_ether_addr(addr->sa_data))
7915 		return -EADDRNOTAVAIL;
7916 
7917 	eth_hw_addr_set(dev, addr->sa_data);
7918 	if (netif_running(dev))
7919 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7920 
7921 	return 0;
7922 }
7923 
7924 /* Called with rtnl_lock */
/* ndo_change_mtu: record the new MTU and rebuild the rings so RX
 * buffer sizing matches the new frame size.  Range checking is done by
 * the core against dev->min_mtu/max_mtu.
 */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}
7934 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke every vector's interrupt handler directly, with
 * that IRQ masked, so netconsole/kgdboe can make progress when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7951 
/* Determine whether this 5709 port is copper or SerDes from the
 * dual-media bond id / strap bits and set BNX2_PHY_FLAG_SERDES
 * accordingly.  The default (no flag) means copper.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		/* copper-bonded part */
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual-media part: decode the PHY strap, preferring the software
	 * override value when one is set.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-to-media mapping differs between the two PCI
	 * functions (ports) of the device.
	 */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7989 
/* Probe the PCI/PCI-X bus mode, clock speed and width from the chip's
 * PCICFG status registers; records the result in bp->flags and
 * bp->bus_speed_mhz (used for reporting and errata checks).
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock to a nominal PCI-X bus speed. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* conventional PCI: speed follows the M66EN pin */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8041 
/* Read the VPD area from NVRAM and, if this is a Dell-branded board
 * ("1028" is Dell's PCI vendor id), copy the vendor-specific firmware
 * version string (V0 keyword) into bp->fw_version.  Best effort: any
 * failure just leaves bp->fw_version untouched.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	unsigned int len;
	int rc, i, j;
	u8 *data;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD bytes swapped within each 32-bit word;
	 * undo that so the pci_vpd parser sees the standard layout.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4)
		swab32s((u32 *)&data[i]);

	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (j < 0)
		goto vpd_done;

	if (len != 4 || memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
					 PCI_VPD_RO_KEYWORD_VENDOR0,
					 &len);
	if (j < 0)
		goto vpd_done;

	if (len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Leave a trailing space so the bootcode revision can be
	 * appended after this string by the caller.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8087 
/* One-time board setup called from bnx2_init_one(): enables the PCI
 * device, maps registers, identifies the chip, configures DMA masks,
 * reads shared memory (firmware version, MAC address, WOL/ASF config),
 * determines the PHY type and applies chip errata workarounds.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here are released before returning.
 */
static int
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* Scratch statistics block used to accumulate counters across
	 * chip resets (see bnx2_get_ethtool_stats).
	 */
	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (!bp->temp_stats_blk) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	/* power management capability is required for WOL handling */
	bp->pm_cap = pdev->pm_cap;
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	/* Map enough of BAR 0 to cover all TX context doorbells. */
	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
							 TX_MAX_TSS_RINGS + 1));
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);

	/* 5709 is PCIe; everything else is PCI/PCI-X with broken
	 * hardware statistics (BNX2_FLAG_BROKEN_STATS).
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	/* MSI-X only on 5709 B-step and later. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
		if (pdev->msix_cap)
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	/* MSI is broken on 5706 A0/A1. */
	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
		if (pdev->msi_cap)
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit.  */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"dma_set_coherent_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		reg = BNX2_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	/* Determine which PCI function (port) this is. */
	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

	/* Locate the per-function shared memory window. */
	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = bp->func << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

	/* Append "bc X.Y.Z" (bootcode revision, decoded one decimal
	 * byte at a time) to whatever the VPD read put in fw_version.
	 */
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	/* With ASF management firmware enabled, wait (up to 300 ms) for
	 * it to come up before reading its state below.
	 */
	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	/* If management firmware is running, append its version too. */
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	/* Permanent MAC address from shared memory. */
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	/* Default interrupt coalescing parameters. */
	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* allocate stats_blk */
	rc = bnx2_alloc_stats_blk(dev);
	if (rc)
		goto err_out_unmap;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_get_5709_media(bp);
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	/* WOL is unusable on these steppings or without VAUX. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (bp->flags & BNX2_FLAG_NO_WOL)
		device_set_wakeup_capable(&bp->pdev->dev, false);
	else
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	/* 5706 A0 uses the slow coalescing values in interrupt mode too. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	timer_setup(&bp->timer, bnx2_timer, 0);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);

#ifdef BCM_CNIC
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
	bp->cnic_probe = bnx2_cnic_probe;
#endif
	pci_save_state(pdev);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);

err_out:
	kfree(bp->temp_stats_blk);

	return rc;
}
8469 
8470 static char *
8471 bnx2_bus_string(struct bnx2 *bp, char *str)
8472 {
8473 	char *s = str;
8474 
8475 	if (bp->flags & BNX2_FLAG_PCIE) {
8476 		s += sprintf(s, "PCI Express");
8477 	} else {
8478 		s += sprintf(s, "PCI");
8479 		if (bp->flags & BNX2_FLAG_PCIX)
8480 			s += sprintf(s, "-X");
8481 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8482 			s += sprintf(s, " 32-bit");
8483 		else
8484 			s += sprintf(s, " 64-bit");
8485 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8486 	}
8487 	return str;
8488 }
8489 
8490 static void
8491 bnx2_del_napi(struct bnx2 *bp)
8492 {
8493 	int i;
8494 
8495 	for (i = 0; i < bp->irq_nvecs; i++)
8496 		netif_napi_del(&bp->bnx2_napi[i].napi);
8497 }
8498 
8499 static void
8500 bnx2_init_napi(struct bnx2 *bp)
8501 {
8502 	int i;
8503 
8504 	for (i = 0; i < bp->irq_nvecs; i++) {
8505 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8506 		int (*poll)(struct napi_struct *, int);
8507 
8508 		if (i == 0)
8509 			poll = bnx2_poll;
8510 		else
8511 			poll = bnx2_poll_msix;
8512 
8513 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
8514 		bnapi->bp = bp;
8515 	}
8516 }
8517 
/* net_device entry points wired up in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_eth_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8534 
/* PCI probe entry point: allocate the netdev, run the board setup,
 * advertise offload features and register the interface.  Returns 0 on
 * success or a negative errno with everything torn down.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
	 * New io-page table has been created before bnx2 does reset at open stage.
	 * We have to wait for the in-flight DMA to complete to avoid it look up
	 * into the newly created io-page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	eth_hw_addr_set(dev, bp->mac_addr);

	/* Checksum/TSO/RSS offloads supported by all chips ... */
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* ... plus IPv6 offloads on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	/* undo what bnx2_init_board() acquired */
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}
8611 
/* PCI remove callback: undo everything bnx2_init_one() set up, in the
 * reverse order.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Detach from the stack first so no new activity starts ... */
	unregister_netdev(dev);

	/* ... then stop deferred work that may still touch the device. */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8635 
8636 #ifdef CONFIG_PM_SLEEP
/* System-suspend callback: quiesce a running interface and configure
 * wake-on-LAN.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	/* Only a running interface needs to be torn down: stop pending
	 * reset work, halt traffic, kill the timer, shut the chip down
	 * and release IRQs and ring buffers.
	 */
	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* WoL setup happens even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8655 
8656 static int
8657 bnx2_resume(struct device *device)
8658 {
8659 	struct net_device *dev = dev_get_drvdata(device);
8660 	struct bnx2 *bp = netdev_priv(dev);
8661 
8662 	if (!netif_running(dev))
8663 		return 0;
8664 
8665 	bnx2_set_power_state(bp, PCI_D0);
8666 	netif_device_attach(dev);
8667 	bnx2_request_irq(bp);
8668 	bnx2_init_nic(bp, 1);
8669 	bnx2_netif_start(bp, true);
8670 	return 0;
8671 }
8672 
/* Bind the suspend/resume callbacks into a dev_pm_ops table.  With
 * CONFIG_PM_SLEEP disabled, the callbacks above are not built and the
 * driver registers no PM ops at all.
 */
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
8681 /**
8682  * bnx2_io_error_detected - called when PCI error is detected
8683  * @pdev: Pointer to PCI device
8684  * @state: The current pci connection state
8685  *
8686  * This function is called after a PCI bus error affecting
8687  * this device has been detected.
8688  */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* Permanent failure: no recovery possible, tell the core to
	 * disconnect the device.
	 */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Quiesce a running interface and reset the chip before the
	 * slot reset is attempted.
	 */
	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8715 
8716 /**
8717  * bnx2_io_slot_reset - called after the pci bus has been reset.
8718  * @pdev: Pointer to PCI device
8719  *
8720  * Restart the card from scratch, as if from a cold-boot.
8721  */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		/* Restore the saved config space, then re-save it so a
		 * later restore sees consistent state.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	/* Recovery failed: re-enable NAPI (stopped by
	 * bnx2_io_error_detected()) so dev_close() can complete, then
	 * take the interface down.
	 */
	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	return result;
}
8753 
8754 /**
8755  * bnx2_io_resume - called when traffic can start flowing again.
8756  * @pdev: Pointer to PCI device
8757  *
8758  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8760  */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart queues/NAPI first if the interface was up, then mark
	 * the device as present again.
	 */
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8773 
8774 static void bnx2_shutdown(struct pci_dev *pdev)
8775 {
8776 	struct net_device *dev = pci_get_drvdata(pdev);
8777 	struct bnx2 *bp;
8778 
8779 	if (!dev)
8780 		return;
8781 
8782 	bp = netdev_priv(dev);
8783 	if (!bp)
8784 		return;
8785 
8786 	rtnl_lock();
8787 	if (netif_running(dev))
8788 		dev_close(bp->dev);
8789 
8790 	if (system_state == SYSTEM_POWER_OFF)
8791 		bnx2_set_power_state(bp, PCI_D3hot);
8792 
8793 	rtnl_unlock();
8794 }
8795 
/* PCI AER recovery callbacks (detect -> slot reset -> resume). */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8801 
/* PCI driver descriptor tying together probe/remove, PM, error
 * recovery and shutdown handling for the devices in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};
8811 
/* Generates the module init/exit boilerplate for this PCI driver. */
module_pci_driver(bnx2_pci_driver);
8813