xref: /linux/drivers/net/ethernet/broadcom/bnx2.c (revision c33a605dd6f36618f9f658e71c09fcdcb44fc500)
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53 
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60 
61 #define DRV_MODULE_NAME		"bnx2"
62 #define DRV_MODULE_VERSION	"2.2.6"
63 #define DRV_MODULE_RELDATE	"January 29, 2014"
64 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
65 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
66 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
67 #define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
68 #define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
69 
70 #define RUN_AT(x) (jiffies + (x))
71 
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74 
75 static char version[] =
76 	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77 
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87 
88 static int disable_msi = 0;
89 
90 module_param(disable_msi, int, S_IRUGO);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 
93 typedef enum {
94 	BCM5706 = 0,
95 	NC370T,
96 	NC370I,
97 	BCM5706S,
98 	NC370F,
99 	BCM5708,
100 	BCM5708S,
101 	BCM5709,
102 	BCM5709S,
103 	BCM5716,
104 	BCM5716S,
105 } board_t;
106 
107 /* indexed by board_t, above */
108 static struct {
109 	char *name;
110 } board_info[] = {
111 	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
112 	{ "HP NC370T Multifunction Gigabit Server Adapter" },
113 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
114 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
115 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
116 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
117 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
118 	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
119 	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
120 	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
121 	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
122 	};
123 
124 static const struct pci_device_id bnx2_pci_tbl[] = {
125 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
127 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
129 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
130 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
131 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
132 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
133 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
135 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
136 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
137 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
138 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
139 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
140 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
141 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
142 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
143 	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
144 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
145 	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
146 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
147 	{ 0, }
148 };
149 
150 static const struct flash_spec flash_table[] =
151 {
152 #define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
153 #define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
154 	/* Slow EEPROM */
155 	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
156 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158 	 "EEPROM - slow"},
159 	/* Expansion entry 0001 */
160 	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
161 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163 	 "Entry 0001"},
164 	/* Saifun SA25F010 (non-buffered flash) */
165 	/* strap, cfg1, & write1 need updates */
166 	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
167 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
168 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
169 	 "Non-buffered flash (128kB)"},
170 	/* Saifun SA25F020 (non-buffered flash) */
171 	/* strap, cfg1, & write1 need updates */
172 	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
173 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
175 	 "Non-buffered flash (256kB)"},
176 	/* Expansion entry 0100 */
177 	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
178 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 	 "Entry 0100"},
181 	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
182 	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
183 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
184 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
185 	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
186 	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
187 	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
188 	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
189 	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
190 	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
191 	/* Saifun SA25F005 (non-buffered flash) */
192 	/* strap, cfg1, & write1 need updates */
193 	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
194 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
196 	 "Non-buffered flash (64kB)"},
197 	/* Fast EEPROM */
198 	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
199 	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
200 	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201 	 "EEPROM - fast"},
202 	/* Expansion entry 1001 */
203 	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
204 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
205 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 	 "Entry 1001"},
207 	/* Expansion entry 1010 */
208 	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
209 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211 	 "Entry 1010"},
212 	/* ATMEL AT45DB011B (buffered flash) */
213 	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
214 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
215 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
216 	 "Buffered flash (128kB)"},
217 	/* Expansion entry 1100 */
218 	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
219 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 	 "Entry 1100"},
222 	/* Expansion entry 1101 */
223 	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
224 	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 	 "Entry 1101"},
227 	/* Ateml Expansion entry 1110 */
228 	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
229 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230 	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
231 	 "Entry 1110 (Atmel)"},
232 	/* ATMEL AT45DB021B (buffered flash) */
233 	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
234 	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
235 	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
236 	 "Buffered flash (256kB)"},
237 };
238 
239 static const struct flash_spec flash_5709 = {
240 	.flags		= BNX2_NV_BUFFERED,
241 	.page_bits	= BCM5709_FLASH_PAGE_BITS,
242 	.page_size	= BCM5709_FLASH_PAGE_SIZE,
243 	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
244 	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
245 	.name		= "5709 Buffered flash (256kB)",
246 };
247 
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249 
250 static void bnx2_init_napi(struct bnx2 *bp);
251 static void bnx2_del_napi(struct bnx2 *bp);
252 
253 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
254 {
255 	u32 diff;
256 
257 	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
258 	barrier();
259 
260 	/* The ring uses 256 indices for 255 entries, one of them
261 	 * needs to be skipped.
262 	 */
263 	diff = txr->tx_prod - txr->tx_cons;
264 	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
265 		diff &= 0xffff;
266 		if (diff == BNX2_TX_DESC_CNT)
267 			diff = BNX2_MAX_TX_DESC_CNT;
268 	}
269 	return bp->tx_ring_size - diff;
270 }
271 
272 static u32
273 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
274 {
275 	unsigned long flags;
276 	u32 val;
277 
278 	spin_lock_irqsave(&bp->indirect_lock, flags);
279 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
280 	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
281 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
282 	return val;
283 }
284 
285 static void
286 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
287 {
288 	unsigned long flags;
289 
290 	spin_lock_irqsave(&bp->indirect_lock, flags);
291 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
292 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
293 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
294 }
295 
296 static void
297 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
298 {
299 	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
300 }
301 
302 static u32
303 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
304 {
305 	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
306 }
307 
308 static void
309 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
310 {
311 	unsigned long flags;
312 
313 	offset += cid_addr;
314 	spin_lock_irqsave(&bp->indirect_lock, flags);
315 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
316 		int i;
317 
318 		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
319 		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
320 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
321 		for (i = 0; i < 5; i++) {
322 			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
323 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
324 				break;
325 			udelay(5);
326 		}
327 	} else {
328 		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
329 		BNX2_WR(bp, BNX2_CTX_DATA, val);
330 	}
331 	spin_unlock_irqrestore(&bp->indirect_lock, flags);
332 }
333 
334 #ifdef BCM_CNIC
335 static int
336 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
337 {
338 	struct bnx2 *bp = netdev_priv(dev);
339 	struct drv_ctl_io *io = &info->data.io;
340 
341 	switch (info->cmd) {
342 	case DRV_CTL_IO_WR_CMD:
343 		bnx2_reg_wr_ind(bp, io->offset, io->data);
344 		break;
345 	case DRV_CTL_IO_RD_CMD:
346 		io->data = bnx2_reg_rd_ind(bp, io->offset);
347 		break;
348 	case DRV_CTL_CTX_WR_CMD:
349 		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
350 		break;
351 	default:
352 		return -EINVAL;
353 	}
354 	return 0;
355 }
356 
357 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
358 {
359 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
360 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
361 	int sb_id;
362 
363 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
364 		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
365 		bnapi->cnic_present = 0;
366 		sb_id = bp->irq_nvecs;
367 		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
368 	} else {
369 		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
370 		bnapi->cnic_tag = bnapi->last_status_idx;
371 		bnapi->cnic_present = 1;
372 		sb_id = 0;
373 		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
374 	}
375 
376 	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
377 	cp->irq_arr[0].status_blk = (void *)
378 		((unsigned long) bnapi->status_blk.msi +
379 		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
380 	cp->irq_arr[0].status_blk_num = sb_id;
381 	cp->num_irq = 1;
382 }
383 
384 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
385 			      void *data)
386 {
387 	struct bnx2 *bp = netdev_priv(dev);
388 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
389 
390 	if (ops == NULL)
391 		return -EINVAL;
392 
393 	if (cp->drv_state & CNIC_DRV_STATE_REGD)
394 		return -EBUSY;
395 
396 	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
397 		return -ENODEV;
398 
399 	bp->cnic_data = data;
400 	rcu_assign_pointer(bp->cnic_ops, ops);
401 
402 	cp->num_irq = 0;
403 	cp->drv_state = CNIC_DRV_STATE_REGD;
404 
405 	bnx2_setup_cnic_irq_info(bp);
406 
407 	return 0;
408 }
409 
410 static int bnx2_unregister_cnic(struct net_device *dev)
411 {
412 	struct bnx2 *bp = netdev_priv(dev);
413 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
414 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
415 
416 	mutex_lock(&bp->cnic_lock);
417 	cp->drv_state = 0;
418 	bnapi->cnic_present = 0;
419 	RCU_INIT_POINTER(bp->cnic_ops, NULL);
420 	mutex_unlock(&bp->cnic_lock);
421 	synchronize_rcu();
422 	return 0;
423 }
424 
425 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
426 {
427 	struct bnx2 *bp = netdev_priv(dev);
428 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
429 
430 	if (!cp->max_iscsi_conn)
431 		return NULL;
432 
433 	cp->drv_owner = THIS_MODULE;
434 	cp->chip_id = bp->chip_id;
435 	cp->pdev = bp->pdev;
436 	cp->io_base = bp->regview;
437 	cp->drv_ctl = bnx2_drv_ctl;
438 	cp->drv_register_cnic = bnx2_register_cnic;
439 	cp->drv_unregister_cnic = bnx2_unregister_cnic;
440 
441 	return cp;
442 }
443 
444 static void
445 bnx2_cnic_stop(struct bnx2 *bp)
446 {
447 	struct cnic_ops *c_ops;
448 	struct cnic_ctl_info info;
449 
450 	mutex_lock(&bp->cnic_lock);
451 	c_ops = rcu_dereference_protected(bp->cnic_ops,
452 					  lockdep_is_held(&bp->cnic_lock));
453 	if (c_ops) {
454 		info.cmd = CNIC_CTL_STOP_CMD;
455 		c_ops->cnic_ctl(bp->cnic_data, &info);
456 	}
457 	mutex_unlock(&bp->cnic_lock);
458 }
459 
460 static void
461 bnx2_cnic_start(struct bnx2 *bp)
462 {
463 	struct cnic_ops *c_ops;
464 	struct cnic_ctl_info info;
465 
466 	mutex_lock(&bp->cnic_lock);
467 	c_ops = rcu_dereference_protected(bp->cnic_ops,
468 					  lockdep_is_held(&bp->cnic_lock));
469 	if (c_ops) {
470 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
471 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
472 
473 			bnapi->cnic_tag = bnapi->last_status_idx;
474 		}
475 		info.cmd = CNIC_CTL_START_CMD;
476 		c_ops->cnic_ctl(bp->cnic_data, &info);
477 	}
478 	mutex_unlock(&bp->cnic_lock);
479 }
480 
481 #else
482 
483 static void
484 bnx2_cnic_stop(struct bnx2 *bp)
485 {
486 }
487 
488 static void
489 bnx2_cnic_start(struct bnx2 *bp)
490 {
491 }
492 
493 #endif
494 
495 static int
496 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
497 {
498 	u32 val1;
499 	int i, ret;
500 
501 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
502 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
503 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
504 
505 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
506 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
507 
508 		udelay(40);
509 	}
510 
511 	val1 = (bp->phy_addr << 21) | (reg << 16) |
512 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
513 		BNX2_EMAC_MDIO_COMM_START_BUSY;
514 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
515 
516 	for (i = 0; i < 50; i++) {
517 		udelay(10);
518 
519 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
520 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
521 			udelay(5);
522 
523 			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
524 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
525 
526 			break;
527 		}
528 	}
529 
530 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
531 		*val = 0x0;
532 		ret = -EBUSY;
533 	}
534 	else {
535 		*val = val1;
536 		ret = 0;
537 	}
538 
539 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
540 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
541 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
542 
543 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
544 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
545 
546 		udelay(40);
547 	}
548 
549 	return ret;
550 }
551 
552 static int
553 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
554 {
555 	u32 val1;
556 	int i, ret;
557 
558 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
559 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
560 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
561 
562 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
563 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
564 
565 		udelay(40);
566 	}
567 
568 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
569 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
570 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
571 	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
572 
573 	for (i = 0; i < 50; i++) {
574 		udelay(10);
575 
576 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
577 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
578 			udelay(5);
579 			break;
580 		}
581 	}
582 
583 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
584         	ret = -EBUSY;
585 	else
586 		ret = 0;
587 
588 	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
589 		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
590 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
591 
592 		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
593 		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
594 
595 		udelay(40);
596 	}
597 
598 	return ret;
599 }
600 
601 static void
602 bnx2_disable_int(struct bnx2 *bp)
603 {
604 	int i;
605 	struct bnx2_napi *bnapi;
606 
607 	for (i = 0; i < bp->irq_nvecs; i++) {
608 		bnapi = &bp->bnx2_napi[i];
609 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
610 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
611 	}
612 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
613 }
614 
615 static void
616 bnx2_enable_int(struct bnx2 *bp)
617 {
618 	int i;
619 	struct bnx2_napi *bnapi;
620 
621 	for (i = 0; i < bp->irq_nvecs; i++) {
622 		bnapi = &bp->bnx2_napi[i];
623 
624 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
625 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
626 			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
627 			bnapi->last_status_idx);
628 
629 		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
630 			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
631 			bnapi->last_status_idx);
632 	}
633 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
634 }
635 
636 static void
637 bnx2_disable_int_sync(struct bnx2 *bp)
638 {
639 	int i;
640 
641 	atomic_inc(&bp->intr_sem);
642 	if (!netif_running(bp->dev))
643 		return;
644 
645 	bnx2_disable_int(bp);
646 	for (i = 0; i < bp->irq_nvecs; i++)
647 		synchronize_irq(bp->irq_tbl[i].vector);
648 }
649 
650 static void
651 bnx2_napi_disable(struct bnx2 *bp)
652 {
653 	int i;
654 
655 	for (i = 0; i < bp->irq_nvecs; i++)
656 		napi_disable(&bp->bnx2_napi[i].napi);
657 }
658 
659 static void
660 bnx2_napi_enable(struct bnx2 *bp)
661 {
662 	int i;
663 
664 	for (i = 0; i < bp->irq_nvecs; i++)
665 		napi_enable(&bp->bnx2_napi[i].napi);
666 }
667 
668 static void
669 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
670 {
671 	if (stop_cnic)
672 		bnx2_cnic_stop(bp);
673 	if (netif_running(bp->dev)) {
674 		bnx2_napi_disable(bp);
675 		netif_tx_disable(bp->dev);
676 	}
677 	bnx2_disable_int_sync(bp);
678 	netif_carrier_off(bp->dev);	/* prevent tx timeout */
679 }
680 
681 static void
682 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
683 {
684 	if (atomic_dec_and_test(&bp->intr_sem)) {
685 		if (netif_running(bp->dev)) {
686 			netif_tx_wake_all_queues(bp->dev);
687 			spin_lock_bh(&bp->phy_lock);
688 			if (bp->link_up)
689 				netif_carrier_on(bp->dev);
690 			spin_unlock_bh(&bp->phy_lock);
691 			bnx2_napi_enable(bp);
692 			bnx2_enable_int(bp);
693 			if (start_cnic)
694 				bnx2_cnic_start(bp);
695 		}
696 	}
697 }
698 
699 static void
700 bnx2_free_tx_mem(struct bnx2 *bp)
701 {
702 	int i;
703 
704 	for (i = 0; i < bp->num_tx_rings; i++) {
705 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
706 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
707 
708 		if (txr->tx_desc_ring) {
709 			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
710 					  txr->tx_desc_ring,
711 					  txr->tx_desc_mapping);
712 			txr->tx_desc_ring = NULL;
713 		}
714 		kfree(txr->tx_buf_ring);
715 		txr->tx_buf_ring = NULL;
716 	}
717 }
718 
719 static void
720 bnx2_free_rx_mem(struct bnx2 *bp)
721 {
722 	int i;
723 
724 	for (i = 0; i < bp->num_rx_rings; i++) {
725 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
726 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
727 		int j;
728 
729 		for (j = 0; j < bp->rx_max_ring; j++) {
730 			if (rxr->rx_desc_ring[j])
731 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
732 						  rxr->rx_desc_ring[j],
733 						  rxr->rx_desc_mapping[j]);
734 			rxr->rx_desc_ring[j] = NULL;
735 		}
736 		vfree(rxr->rx_buf_ring);
737 		rxr->rx_buf_ring = NULL;
738 
739 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
740 			if (rxr->rx_pg_desc_ring[j])
741 				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
742 						  rxr->rx_pg_desc_ring[j],
743 						  rxr->rx_pg_desc_mapping[j]);
744 			rxr->rx_pg_desc_ring[j] = NULL;
745 		}
746 		vfree(rxr->rx_pg_ring);
747 		rxr->rx_pg_ring = NULL;
748 	}
749 }
750 
751 static int
752 bnx2_alloc_tx_mem(struct bnx2 *bp)
753 {
754 	int i;
755 
756 	for (i = 0; i < bp->num_tx_rings; i++) {
757 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
758 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
759 
760 		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
761 		if (txr->tx_buf_ring == NULL)
762 			return -ENOMEM;
763 
764 		txr->tx_desc_ring =
765 			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
766 					   &txr->tx_desc_mapping, GFP_KERNEL);
767 		if (txr->tx_desc_ring == NULL)
768 			return -ENOMEM;
769 	}
770 	return 0;
771 }
772 
773 static int
774 bnx2_alloc_rx_mem(struct bnx2 *bp)
775 {
776 	int i;
777 
778 	for (i = 0; i < bp->num_rx_rings; i++) {
779 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
780 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
781 		int j;
782 
783 		rxr->rx_buf_ring =
784 			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
785 		if (rxr->rx_buf_ring == NULL)
786 			return -ENOMEM;
787 
788 		for (j = 0; j < bp->rx_max_ring; j++) {
789 			rxr->rx_desc_ring[j] =
790 				dma_alloc_coherent(&bp->pdev->dev,
791 						   RXBD_RING_SIZE,
792 						   &rxr->rx_desc_mapping[j],
793 						   GFP_KERNEL);
794 			if (rxr->rx_desc_ring[j] == NULL)
795 				return -ENOMEM;
796 
797 		}
798 
799 		if (bp->rx_pg_ring_size) {
800 			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
801 						  bp->rx_max_pg_ring);
802 			if (rxr->rx_pg_ring == NULL)
803 				return -ENOMEM;
804 
805 		}
806 
807 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
808 			rxr->rx_pg_desc_ring[j] =
809 				dma_alloc_coherent(&bp->pdev->dev,
810 						   RXBD_RING_SIZE,
811 						   &rxr->rx_pg_desc_mapping[j],
812 						   GFP_KERNEL);
813 			if (rxr->rx_pg_desc_ring[j] == NULL)
814 				return -ENOMEM;
815 
816 		}
817 	}
818 	return 0;
819 }
820 
821 static void
822 bnx2_free_stats_blk(struct net_device *dev)
823 {
824 	struct bnx2 *bp = netdev_priv(dev);
825 
826 	if (bp->status_blk) {
827 		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
828 				  bp->status_blk,
829 				  bp->status_blk_mapping);
830 		bp->status_blk = NULL;
831 		bp->stats_blk = NULL;
832 	}
833 }
834 
835 static int
836 bnx2_alloc_stats_blk(struct net_device *dev)
837 {
838 	int status_blk_size;
839 	void *status_blk;
840 	struct bnx2 *bp = netdev_priv(dev);
841 
842 	/* Combine status and statistics blocks into one allocation. */
843 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
844 	if (bp->flags & BNX2_FLAG_MSIX_CAP)
845 		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
846 						 BNX2_SBLK_MSIX_ALIGN_SIZE);
847 	bp->status_stats_size = status_blk_size +
848 				sizeof(struct statistics_block);
849 	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
850 					 &bp->status_blk_mapping, GFP_KERNEL);
851 	if (status_blk == NULL)
852 		return -ENOMEM;
853 
854 	bp->status_blk = status_blk;
855 	bp->stats_blk = status_blk + status_blk_size;
856 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
857 
858 	return 0;
859 }
860 
861 static void
862 bnx2_free_mem(struct bnx2 *bp)
863 {
864 	int i;
865 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
866 
867 	bnx2_free_tx_mem(bp);
868 	bnx2_free_rx_mem(bp);
869 
870 	for (i = 0; i < bp->ctx_pages; i++) {
871 		if (bp->ctx_blk[i]) {
872 			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
873 					  bp->ctx_blk[i],
874 					  bp->ctx_blk_mapping[i]);
875 			bp->ctx_blk[i] = NULL;
876 		}
877 	}
878 
879 	if (bnapi->status_blk.msi)
880 		bnapi->status_blk.msi = NULL;
881 }
882 
883 static int
884 bnx2_alloc_mem(struct bnx2 *bp)
885 {
886 	int i, err;
887 	struct bnx2_napi *bnapi;
888 
889 	bnapi = &bp->bnx2_napi[0];
890 	bnapi->status_blk.msi = bp->status_blk;
891 	bnapi->hw_tx_cons_ptr =
892 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
893 	bnapi->hw_rx_cons_ptr =
894 		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
895 	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
896 		for (i = 1; i < bp->irq_nvecs; i++) {
897 			struct status_block_msix *sblk;
898 
899 			bnapi = &bp->bnx2_napi[i];
900 
901 			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
902 			bnapi->status_blk.msix = sblk;
903 			bnapi->hw_tx_cons_ptr =
904 				&sblk->status_tx_quick_consumer_index;
905 			bnapi->hw_rx_cons_ptr =
906 				&sblk->status_rx_quick_consumer_index;
907 			bnapi->int_num = i << 24;
908 		}
909 	}
910 
911 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
912 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
913 		if (bp->ctx_pages == 0)
914 			bp->ctx_pages = 1;
915 		for (i = 0; i < bp->ctx_pages; i++) {
916 			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
917 						BNX2_PAGE_SIZE,
918 						&bp->ctx_blk_mapping[i],
919 						GFP_KERNEL);
920 			if (bp->ctx_blk[i] == NULL)
921 				goto alloc_mem_err;
922 		}
923 	}
924 
925 	err = bnx2_alloc_rx_mem(bp);
926 	if (err)
927 		goto alloc_mem_err;
928 
929 	err = bnx2_alloc_tx_mem(bp);
930 	if (err)
931 		goto alloc_mem_err;
932 
933 	return 0;
934 
935 alloc_mem_err:
936 	bnx2_free_mem(bp);
937 	return -ENOMEM;
938 }
939 
940 static void
941 bnx2_report_fw_link(struct bnx2 *bp)
942 {
943 	u32 fw_link_status = 0;
944 
945 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
946 		return;
947 
948 	if (bp->link_up) {
949 		u32 bmsr;
950 
951 		switch (bp->line_speed) {
952 		case SPEED_10:
953 			if (bp->duplex == DUPLEX_HALF)
954 				fw_link_status = BNX2_LINK_STATUS_10HALF;
955 			else
956 				fw_link_status = BNX2_LINK_STATUS_10FULL;
957 			break;
958 		case SPEED_100:
959 			if (bp->duplex == DUPLEX_HALF)
960 				fw_link_status = BNX2_LINK_STATUS_100HALF;
961 			else
962 				fw_link_status = BNX2_LINK_STATUS_100FULL;
963 			break;
964 		case SPEED_1000:
965 			if (bp->duplex == DUPLEX_HALF)
966 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
967 			else
968 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
969 			break;
970 		case SPEED_2500:
971 			if (bp->duplex == DUPLEX_HALF)
972 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
973 			else
974 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
975 			break;
976 		}
977 
978 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
979 
980 		if (bp->autoneg) {
981 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
982 
983 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
984 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
985 
986 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
987 			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
988 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
989 			else
990 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
991 		}
992 	}
993 	else
994 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
995 
996 	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
997 }
998 
999 static char *
1000 bnx2_xceiver_str(struct bnx2 *bp)
1001 {
1002 	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
1003 		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
1004 		 "Copper");
1005 }
1006 
1007 static void
1008 bnx2_report_link(struct bnx2 *bp)
1009 {
1010 	if (bp->link_up) {
1011 		netif_carrier_on(bp->dev);
1012 		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1013 			    bnx2_xceiver_str(bp),
1014 			    bp->line_speed,
1015 			    bp->duplex == DUPLEX_FULL ? "full" : "half");
1016 
1017 		if (bp->flow_ctrl) {
1018 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
1019 				pr_cont(", receive ");
1020 				if (bp->flow_ctrl & FLOW_CTRL_TX)
1021 					pr_cont("& transmit ");
1022 			}
1023 			else {
1024 				pr_cont(", transmit ");
1025 			}
1026 			pr_cont("flow control ON");
1027 		}
1028 		pr_cont("\n");
1029 	} else {
1030 		netif_carrier_off(bp->dev);
1031 		netdev_err(bp->dev, "NIC %s Link is Down\n",
1032 			   bnx2_xceiver_str(bp));
1033 	}
1034 
1035 	bnx2_report_fw_link(bp);
1036 }
1037 
1038 static void
1039 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1040 {
1041 	u32 local_adv, remote_adv;
1042 
1043 	bp->flow_ctrl = 0;
1044 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1045 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1046 
1047 		if (bp->duplex == DUPLEX_FULL) {
1048 			bp->flow_ctrl = bp->req_flow_ctrl;
1049 		}
1050 		return;
1051 	}
1052 
1053 	if (bp->duplex != DUPLEX_FULL) {
1054 		return;
1055 	}
1056 
1057 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1058 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1059 		u32 val;
1060 
1061 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1062 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1063 			bp->flow_ctrl |= FLOW_CTRL_TX;
1064 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1065 			bp->flow_ctrl |= FLOW_CTRL_RX;
1066 		return;
1067 	}
1068 
1069 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1070 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1071 
1072 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1073 		u32 new_local_adv = 0;
1074 		u32 new_remote_adv = 0;
1075 
1076 		if (local_adv & ADVERTISE_1000XPAUSE)
1077 			new_local_adv |= ADVERTISE_PAUSE_CAP;
1078 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1079 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1080 		if (remote_adv & ADVERTISE_1000XPAUSE)
1081 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1082 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1083 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1084 
1085 		local_adv = new_local_adv;
1086 		remote_adv = new_remote_adv;
1087 	}
1088 
1089 	/* See Table 28B-3 of 802.3ab-1999 spec. */
1090 	if (local_adv & ADVERTISE_PAUSE_CAP) {
1091 		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1092 	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1093 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1094 			}
1095 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1096 				bp->flow_ctrl = FLOW_CTRL_RX;
1097 			}
1098 		}
1099 		else {
1100 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1101 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1102 			}
1103 		}
1104 	}
1105 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1106 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1107 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1108 
1109 			bp->flow_ctrl = FLOW_CTRL_TX;
1110 		}
1111 	}
1112 }
1113 
1114 static int
1115 bnx2_5709s_linkup(struct bnx2 *bp)
1116 {
1117 	u32 val, speed;
1118 
1119 	bp->link_up = 1;
1120 
1121 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1122 	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1123 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1124 
1125 	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1126 		bp->line_speed = bp->req_line_speed;
1127 		bp->duplex = bp->req_duplex;
1128 		return 0;
1129 	}
1130 	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1131 	switch (speed) {
1132 		case MII_BNX2_GP_TOP_AN_SPEED_10:
1133 			bp->line_speed = SPEED_10;
1134 			break;
1135 		case MII_BNX2_GP_TOP_AN_SPEED_100:
1136 			bp->line_speed = SPEED_100;
1137 			break;
1138 		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1139 		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1140 			bp->line_speed = SPEED_1000;
1141 			break;
1142 		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1143 			bp->line_speed = SPEED_2500;
1144 			break;
1145 	}
1146 	if (val & MII_BNX2_GP_TOP_AN_FD)
1147 		bp->duplex = DUPLEX_FULL;
1148 	else
1149 		bp->duplex = DUPLEX_HALF;
1150 	return 0;
1151 }
1152 
1153 static int
1154 bnx2_5708s_linkup(struct bnx2 *bp)
1155 {
1156 	u32 val;
1157 
1158 	bp->link_up = 1;
1159 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1160 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1161 		case BCM5708S_1000X_STAT1_SPEED_10:
1162 			bp->line_speed = SPEED_10;
1163 			break;
1164 		case BCM5708S_1000X_STAT1_SPEED_100:
1165 			bp->line_speed = SPEED_100;
1166 			break;
1167 		case BCM5708S_1000X_STAT1_SPEED_1G:
1168 			bp->line_speed = SPEED_1000;
1169 			break;
1170 		case BCM5708S_1000X_STAT1_SPEED_2G5:
1171 			bp->line_speed = SPEED_2500;
1172 			break;
1173 	}
1174 	if (val & BCM5708S_1000X_STAT1_FD)
1175 		bp->duplex = DUPLEX_FULL;
1176 	else
1177 		bp->duplex = DUPLEX_HALF;
1178 
1179 	return 0;
1180 }
1181 
1182 static int
1183 bnx2_5706s_linkup(struct bnx2 *bp)
1184 {
1185 	u32 bmcr, local_adv, remote_adv, common;
1186 
1187 	bp->link_up = 1;
1188 	bp->line_speed = SPEED_1000;
1189 
1190 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1191 	if (bmcr & BMCR_FULLDPLX) {
1192 		bp->duplex = DUPLEX_FULL;
1193 	}
1194 	else {
1195 		bp->duplex = DUPLEX_HALF;
1196 	}
1197 
1198 	if (!(bmcr & BMCR_ANENABLE)) {
1199 		return 0;
1200 	}
1201 
1202 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1203 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1204 
1205 	common = local_adv & remote_adv;
1206 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1207 
1208 		if (common & ADVERTISE_1000XFULL) {
1209 			bp->duplex = DUPLEX_FULL;
1210 		}
1211 		else {
1212 			bp->duplex = DUPLEX_HALF;
1213 		}
1214 	}
1215 
1216 	return 0;
1217 }
1218 
1219 static int
1220 bnx2_copper_linkup(struct bnx2 *bp)
1221 {
1222 	u32 bmcr;
1223 
1224 	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1225 
1226 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1227 	if (bmcr & BMCR_ANENABLE) {
1228 		u32 local_adv, remote_adv, common;
1229 
1230 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1231 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1232 
1233 		common = local_adv & (remote_adv >> 2);
1234 		if (common & ADVERTISE_1000FULL) {
1235 			bp->line_speed = SPEED_1000;
1236 			bp->duplex = DUPLEX_FULL;
1237 		}
1238 		else if (common & ADVERTISE_1000HALF) {
1239 			bp->line_speed = SPEED_1000;
1240 			bp->duplex = DUPLEX_HALF;
1241 		}
1242 		else {
1243 			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1244 			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1245 
1246 			common = local_adv & remote_adv;
1247 			if (common & ADVERTISE_100FULL) {
1248 				bp->line_speed = SPEED_100;
1249 				bp->duplex = DUPLEX_FULL;
1250 			}
1251 			else if (common & ADVERTISE_100HALF) {
1252 				bp->line_speed = SPEED_100;
1253 				bp->duplex = DUPLEX_HALF;
1254 			}
1255 			else if (common & ADVERTISE_10FULL) {
1256 				bp->line_speed = SPEED_10;
1257 				bp->duplex = DUPLEX_FULL;
1258 			}
1259 			else if (common & ADVERTISE_10HALF) {
1260 				bp->line_speed = SPEED_10;
1261 				bp->duplex = DUPLEX_HALF;
1262 			}
1263 			else {
1264 				bp->line_speed = 0;
1265 				bp->link_up = 0;
1266 			}
1267 		}
1268 	}
1269 	else {
1270 		if (bmcr & BMCR_SPEED100) {
1271 			bp->line_speed = SPEED_100;
1272 		}
1273 		else {
1274 			bp->line_speed = SPEED_10;
1275 		}
1276 		if (bmcr & BMCR_FULLDPLX) {
1277 			bp->duplex = DUPLEX_FULL;
1278 		}
1279 		else {
1280 			bp->duplex = DUPLEX_HALF;
1281 		}
1282 	}
1283 
1284 	if (bp->link_up) {
1285 		u32 ext_status;
1286 
1287 		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1288 		if (ext_status & EXT_STATUS_MDIX)
1289 			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1290 	}
1291 
1292 	return 0;
1293 }
1294 
1295 static void
1296 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1297 {
1298 	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1299 
1300 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1301 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1302 	val |= 0x02 << 8;
1303 
1304 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1305 		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1306 
1307 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1308 }
1309 
1310 static void
1311 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1312 {
1313 	int i;
1314 	u32 cid;
1315 
1316 	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1317 		if (i == 1)
1318 			cid = RX_RSS_CID;
1319 		bnx2_init_rx_context(bp, cid);
1320 	}
1321 }
1322 
1323 static void
1324 bnx2_set_mac_link(struct bnx2 *bp)
1325 {
1326 	u32 val;
1327 
1328 	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1329 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1330 		(bp->duplex == DUPLEX_HALF)) {
1331 		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1332 	}
1333 
1334 	/* Configure the EMAC mode register. */
1335 	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1336 
1337 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1338 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1339 		BNX2_EMAC_MODE_25G_MODE);
1340 
1341 	if (bp->link_up) {
1342 		switch (bp->line_speed) {
1343 			case SPEED_10:
1344 				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1345 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1346 					break;
1347 				}
1348 				/* fall through */
1349 			case SPEED_100:
1350 				val |= BNX2_EMAC_MODE_PORT_MII;
1351 				break;
1352 			case SPEED_2500:
1353 				val |= BNX2_EMAC_MODE_25G_MODE;
1354 				/* fall through */
1355 			case SPEED_1000:
1356 				val |= BNX2_EMAC_MODE_PORT_GMII;
1357 				break;
1358 		}
1359 	}
1360 	else {
1361 		val |= BNX2_EMAC_MODE_PORT_GMII;
1362 	}
1363 
1364 	/* Set the MAC to operate in the appropriate duplex mode. */
1365 	if (bp->duplex == DUPLEX_HALF)
1366 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1367 	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1368 
1369 	/* Enable/disable rx PAUSE. */
1370 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1371 
1372 	if (bp->flow_ctrl & FLOW_CTRL_RX)
1373 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1374 	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1375 
1376 	/* Enable/disable tx PAUSE. */
1377 	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1378 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1379 
1380 	if (bp->flow_ctrl & FLOW_CTRL_TX)
1381 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1382 	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1383 
1384 	/* Acknowledge the interrupt. */
1385 	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1386 
1387 	bnx2_init_all_rx_contexts(bp);
1388 }
1389 
1390 static void
1391 bnx2_enable_bmsr1(struct bnx2 *bp)
1392 {
1393 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1394 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1395 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1396 			       MII_BNX2_BLK_ADDR_GP_STATUS);
1397 }
1398 
1399 static void
1400 bnx2_disable_bmsr1(struct bnx2 *bp)
1401 {
1402 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1403 	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1404 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1405 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1406 }
1407 
1408 static int
1409 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1410 {
1411 	u32 up1;
1412 	int ret = 1;
1413 
1414 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1415 		return 0;
1416 
1417 	if (bp->autoneg & AUTONEG_SPEED)
1418 		bp->advertising |= ADVERTISED_2500baseX_Full;
1419 
1420 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1421 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1422 
1423 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1424 	if (!(up1 & BCM5708S_UP1_2G5)) {
1425 		up1 |= BCM5708S_UP1_2G5;
1426 		bnx2_write_phy(bp, bp->mii_up1, up1);
1427 		ret = 0;
1428 	}
1429 
1430 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1431 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1432 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1433 
1434 	return ret;
1435 }
1436 
1437 static int
1438 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1439 {
1440 	u32 up1;
1441 	int ret = 0;
1442 
1443 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1444 		return 0;
1445 
1446 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1447 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1448 
1449 	bnx2_read_phy(bp, bp->mii_up1, &up1);
1450 	if (up1 & BCM5708S_UP1_2G5) {
1451 		up1 &= ~BCM5708S_UP1_2G5;
1452 		bnx2_write_phy(bp, bp->mii_up1, up1);
1453 		ret = 1;
1454 	}
1455 
1456 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1457 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1458 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1459 
1460 	return ret;
1461 }
1462 
1463 static void
1464 bnx2_enable_forced_2g5(struct bnx2 *bp)
1465 {
1466 	u32 uninitialized_var(bmcr);
1467 	int err;
1468 
1469 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1470 		return;
1471 
1472 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1473 		u32 val;
1474 
1475 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1476 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1477 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1478 			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1479 			val |= MII_BNX2_SD_MISC1_FORCE |
1480 				MII_BNX2_SD_MISC1_FORCE_2_5G;
1481 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1482 		}
1483 
1484 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1485 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1486 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1487 
1488 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1489 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1490 		if (!err)
1491 			bmcr |= BCM5708S_BMCR_FORCE_2500;
1492 	} else {
1493 		return;
1494 	}
1495 
1496 	if (err)
1497 		return;
1498 
1499 	if (bp->autoneg & AUTONEG_SPEED) {
1500 		bmcr &= ~BMCR_ANENABLE;
1501 		if (bp->req_duplex == DUPLEX_FULL)
1502 			bmcr |= BMCR_FULLDPLX;
1503 	}
1504 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1505 }
1506 
1507 static void
1508 bnx2_disable_forced_2g5(struct bnx2 *bp)
1509 {
1510 	u32 uninitialized_var(bmcr);
1511 	int err;
1512 
1513 	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1514 		return;
1515 
1516 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1517 		u32 val;
1518 
1519 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1520 			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1521 		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1522 			val &= ~MII_BNX2_SD_MISC1_FORCE;
1523 			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1524 		}
1525 
1526 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1527 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1528 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1529 
1530 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1531 		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1532 		if (!err)
1533 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1534 	} else {
1535 		return;
1536 	}
1537 
1538 	if (err)
1539 		return;
1540 
1541 	if (bp->autoneg & AUTONEG_SPEED)
1542 		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1543 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1544 }
1545 
1546 static void
1547 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1548 {
1549 	u32 val;
1550 
1551 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1552 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1553 	if (start)
1554 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1555 	else
1556 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1557 }
1558 
1559 static int
1560 bnx2_set_link(struct bnx2 *bp)
1561 {
1562 	u32 bmsr;
1563 	u8 link_up;
1564 
1565 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1566 		bp->link_up = 1;
1567 		return 0;
1568 	}
1569 
1570 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1571 		return 0;
1572 
1573 	link_up = bp->link_up;
1574 
1575 	bnx2_enable_bmsr1(bp);
1576 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1577 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1578 	bnx2_disable_bmsr1(bp);
1579 
1580 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1581 	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1582 		u32 val, an_dbg;
1583 
1584 		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1585 			bnx2_5706s_force_link_dn(bp, 0);
1586 			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1587 		}
1588 		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1589 
1590 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1591 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1592 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1593 
1594 		if ((val & BNX2_EMAC_STATUS_LINK) &&
1595 		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1596 			bmsr |= BMSR_LSTATUS;
1597 		else
1598 			bmsr &= ~BMSR_LSTATUS;
1599 	}
1600 
1601 	if (bmsr & BMSR_LSTATUS) {
1602 		bp->link_up = 1;
1603 
1604 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1605 			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1606 				bnx2_5706s_linkup(bp);
1607 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1608 				bnx2_5708s_linkup(bp);
1609 			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1610 				bnx2_5709s_linkup(bp);
1611 		}
1612 		else {
1613 			bnx2_copper_linkup(bp);
1614 		}
1615 		bnx2_resolve_flow_ctrl(bp);
1616 	}
1617 	else {
1618 		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1619 		    (bp->autoneg & AUTONEG_SPEED))
1620 			bnx2_disable_forced_2g5(bp);
1621 
1622 		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1623 			u32 bmcr;
1624 
1625 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1626 			bmcr |= BMCR_ANENABLE;
1627 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1628 
1629 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1630 		}
1631 		bp->link_up = 0;
1632 	}
1633 
1634 	if (bp->link_up != link_up) {
1635 		bnx2_report_link(bp);
1636 	}
1637 
1638 	bnx2_set_mac_link(bp);
1639 
1640 	return 0;
1641 }
1642 
1643 static int
1644 bnx2_reset_phy(struct bnx2 *bp)
1645 {
1646 	int i;
1647 	u32 reg;
1648 
1649         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1650 
1651 #define PHY_RESET_MAX_WAIT 100
1652 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1653 		udelay(10);
1654 
1655 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1656 		if (!(reg & BMCR_RESET)) {
1657 			udelay(20);
1658 			break;
1659 		}
1660 	}
1661 	if (i == PHY_RESET_MAX_WAIT) {
1662 		return -EBUSY;
1663 	}
1664 	return 0;
1665 }
1666 
1667 static u32
1668 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1669 {
1670 	u32 adv = 0;
1671 
1672 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1673 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1674 
1675 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676 			adv = ADVERTISE_1000XPAUSE;
1677 		}
1678 		else {
1679 			adv = ADVERTISE_PAUSE_CAP;
1680 		}
1681 	}
1682 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1683 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684 			adv = ADVERTISE_1000XPSE_ASYM;
1685 		}
1686 		else {
1687 			adv = ADVERTISE_PAUSE_ASYM;
1688 		}
1689 	}
1690 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1691 		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1692 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693 		}
1694 		else {
1695 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1696 		}
1697 	}
1698 	return adv;
1699 }
1700 
1701 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1702 
1703 static int
1704 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1705 __releases(&bp->phy_lock)
1706 __acquires(&bp->phy_lock)
1707 {
1708 	u32 speed_arg = 0, pause_adv;
1709 
1710 	pause_adv = bnx2_phy_get_pause_adv(bp);
1711 
1712 	if (bp->autoneg & AUTONEG_SPEED) {
1713 		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1714 		if (bp->advertising & ADVERTISED_10baseT_Half)
1715 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1716 		if (bp->advertising & ADVERTISED_10baseT_Full)
1717 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1718 		if (bp->advertising & ADVERTISED_100baseT_Half)
1719 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1720 		if (bp->advertising & ADVERTISED_100baseT_Full)
1721 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1722 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1723 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1724 		if (bp->advertising & ADVERTISED_2500baseX_Full)
1725 			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1726 	} else {
1727 		if (bp->req_line_speed == SPEED_2500)
1728 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1729 		else if (bp->req_line_speed == SPEED_1000)
1730 			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1731 		else if (bp->req_line_speed == SPEED_100) {
1732 			if (bp->req_duplex == DUPLEX_FULL)
1733 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1734 			else
1735 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1736 		} else if (bp->req_line_speed == SPEED_10) {
1737 			if (bp->req_duplex == DUPLEX_FULL)
1738 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1739 			else
1740 				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1741 		}
1742 	}
1743 
1744 	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1745 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1746 	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1747 		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1748 
1749 	if (port == PORT_TP)
1750 		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1751 			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1752 
1753 	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1754 
1755 	spin_unlock_bh(&bp->phy_lock);
1756 	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1757 	spin_lock_bh(&bp->phy_lock);
1758 
1759 	return 0;
1760 }
1761 
1762 static int
1763 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1764 __releases(&bp->phy_lock)
1765 __acquires(&bp->phy_lock)
1766 {
1767 	u32 adv, bmcr;
1768 	u32 new_adv = 0;
1769 
1770 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1771 		return bnx2_setup_remote_phy(bp, port);
1772 
1773 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1774 		u32 new_bmcr;
1775 		int force_link_down = 0;
1776 
1777 		if (bp->req_line_speed == SPEED_2500) {
1778 			if (!bnx2_test_and_enable_2g5(bp))
1779 				force_link_down = 1;
1780 		} else if (bp->req_line_speed == SPEED_1000) {
1781 			if (bnx2_test_and_disable_2g5(bp))
1782 				force_link_down = 1;
1783 		}
1784 		bnx2_read_phy(bp, bp->mii_adv, &adv);
1785 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1786 
1787 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1788 		new_bmcr = bmcr & ~BMCR_ANENABLE;
1789 		new_bmcr |= BMCR_SPEED1000;
1790 
1791 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1792 			if (bp->req_line_speed == SPEED_2500)
1793 				bnx2_enable_forced_2g5(bp);
1794 			else if (bp->req_line_speed == SPEED_1000) {
1795 				bnx2_disable_forced_2g5(bp);
1796 				new_bmcr &= ~0x2000;
1797 			}
1798 
1799 		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1800 			if (bp->req_line_speed == SPEED_2500)
1801 				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1802 			else
1803 				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1804 		}
1805 
1806 		if (bp->req_duplex == DUPLEX_FULL) {
1807 			adv |= ADVERTISE_1000XFULL;
1808 			new_bmcr |= BMCR_FULLDPLX;
1809 		}
1810 		else {
1811 			adv |= ADVERTISE_1000XHALF;
1812 			new_bmcr &= ~BMCR_FULLDPLX;
1813 		}
1814 		if ((new_bmcr != bmcr) || (force_link_down)) {
1815 			/* Force a link down visible on the other side */
1816 			if (bp->link_up) {
1817 				bnx2_write_phy(bp, bp->mii_adv, adv &
1818 					       ~(ADVERTISE_1000XFULL |
1819 						 ADVERTISE_1000XHALF));
1820 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1821 					BMCR_ANRESTART | BMCR_ANENABLE);
1822 
1823 				bp->link_up = 0;
1824 				netif_carrier_off(bp->dev);
1825 				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1826 				bnx2_report_link(bp);
1827 			}
1828 			bnx2_write_phy(bp, bp->mii_adv, adv);
1829 			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1830 		} else {
1831 			bnx2_resolve_flow_ctrl(bp);
1832 			bnx2_set_mac_link(bp);
1833 		}
1834 		return 0;
1835 	}
1836 
1837 	bnx2_test_and_enable_2g5(bp);
1838 
1839 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1840 		new_adv |= ADVERTISE_1000XFULL;
1841 
1842 	new_adv |= bnx2_phy_get_pause_adv(bp);
1843 
1844 	bnx2_read_phy(bp, bp->mii_adv, &adv);
1845 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1846 
1847 	bp->serdes_an_pending = 0;
1848 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1849 		/* Force a link down visible on the other side */
1850 		if (bp->link_up) {
1851 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1852 			spin_unlock_bh(&bp->phy_lock);
1853 			msleep(20);
1854 			spin_lock_bh(&bp->phy_lock);
1855 		}
1856 
1857 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1858 		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1859 			BMCR_ANENABLE);
1860 		/* Speed up link-up time when the link partner
1861 		 * does not autonegotiate which is very common
1862 		 * in blade servers. Some blade servers use
1863 		 * IPMI for kerboard input and it's important
1864 		 * to minimize link disruptions. Autoneg. involves
1865 		 * exchanging base pages plus 3 next pages and
1866 		 * normally completes in about 120 msec.
1867 		 */
1868 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1869 		bp->serdes_an_pending = 1;
1870 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1871 	} else {
1872 		bnx2_resolve_flow_ctrl(bp);
1873 		bnx2_set_mac_link(bp);
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 #define ETHTOOL_ALL_FIBRE_SPEED						\
1880 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1881 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1882 		(ADVERTISED_1000baseT_Full)
1883 
1884 #define ETHTOOL_ALL_COPPER_SPEED					\
1885 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1886 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1887 	ADVERTISED_1000baseT_Full)
1888 
1889 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1890 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1891 
1892 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1893 
1894 static void
1895 bnx2_set_default_remote_link(struct bnx2 *bp)
1896 {
1897 	u32 link;
1898 
1899 	if (bp->phy_port == PORT_TP)
1900 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1901 	else
1902 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1903 
1904 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1905 		bp->req_line_speed = 0;
1906 		bp->autoneg |= AUTONEG_SPEED;
1907 		bp->advertising = ADVERTISED_Autoneg;
1908 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1909 			bp->advertising |= ADVERTISED_10baseT_Half;
1910 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1911 			bp->advertising |= ADVERTISED_10baseT_Full;
1912 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1913 			bp->advertising |= ADVERTISED_100baseT_Half;
1914 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1915 			bp->advertising |= ADVERTISED_100baseT_Full;
1916 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1917 			bp->advertising |= ADVERTISED_1000baseT_Full;
1918 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1919 			bp->advertising |= ADVERTISED_2500baseX_Full;
1920 	} else {
1921 		bp->autoneg = 0;
1922 		bp->advertising = 0;
1923 		bp->req_duplex = DUPLEX_FULL;
1924 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1925 			bp->req_line_speed = SPEED_10;
1926 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1927 				bp->req_duplex = DUPLEX_HALF;
1928 		}
1929 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1930 			bp->req_line_speed = SPEED_100;
1931 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1932 				bp->req_duplex = DUPLEX_HALF;
1933 		}
1934 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1935 			bp->req_line_speed = SPEED_1000;
1936 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1937 			bp->req_line_speed = SPEED_2500;
1938 	}
1939 }
1940 
1941 static void
1942 bnx2_set_default_link(struct bnx2 *bp)
1943 {
1944 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1945 		bnx2_set_default_remote_link(bp);
1946 		return;
1947 	}
1948 
1949 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1950 	bp->req_line_speed = 0;
1951 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1952 		u32 reg;
1953 
1954 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1955 
1956 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1957 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1958 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1959 			bp->autoneg = 0;
1960 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1961 			bp->req_duplex = DUPLEX_FULL;
1962 		}
1963 	} else
1964 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1965 }
1966 
1967 static void
1968 bnx2_send_heart_beat(struct bnx2 *bp)
1969 {
1970 	u32 msg;
1971 	u32 addr;
1972 
1973 	spin_lock(&bp->indirect_lock);
1974 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1975 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1976 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1977 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1978 	spin_unlock(&bp->indirect_lock);
1979 }
1980 
1981 static void
1982 bnx2_remote_phy_event(struct bnx2 *bp)
1983 {
1984 	u32 msg;
1985 	u8 link_up = bp->link_up;
1986 	u8 old_port;
1987 
1988 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1989 
1990 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1991 		bnx2_send_heart_beat(bp);
1992 
1993 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1994 
1995 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1996 		bp->link_up = 0;
1997 	else {
1998 		u32 speed;
1999 
2000 		bp->link_up = 1;
2001 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
2002 		bp->duplex = DUPLEX_FULL;
2003 		switch (speed) {
2004 			case BNX2_LINK_STATUS_10HALF:
2005 				bp->duplex = DUPLEX_HALF;
2006 				/* fall through */
2007 			case BNX2_LINK_STATUS_10FULL:
2008 				bp->line_speed = SPEED_10;
2009 				break;
2010 			case BNX2_LINK_STATUS_100HALF:
2011 				bp->duplex = DUPLEX_HALF;
2012 				/* fall through */
2013 			case BNX2_LINK_STATUS_100BASE_T4:
2014 			case BNX2_LINK_STATUS_100FULL:
2015 				bp->line_speed = SPEED_100;
2016 				break;
2017 			case BNX2_LINK_STATUS_1000HALF:
2018 				bp->duplex = DUPLEX_HALF;
2019 				/* fall through */
2020 			case BNX2_LINK_STATUS_1000FULL:
2021 				bp->line_speed = SPEED_1000;
2022 				break;
2023 			case BNX2_LINK_STATUS_2500HALF:
2024 				bp->duplex = DUPLEX_HALF;
2025 				/* fall through */
2026 			case BNX2_LINK_STATUS_2500FULL:
2027 				bp->line_speed = SPEED_2500;
2028 				break;
2029 			default:
2030 				bp->line_speed = 0;
2031 				break;
2032 		}
2033 
2034 		bp->flow_ctrl = 0;
2035 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2036 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2037 			if (bp->duplex == DUPLEX_FULL)
2038 				bp->flow_ctrl = bp->req_flow_ctrl;
2039 		} else {
2040 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2041 				bp->flow_ctrl |= FLOW_CTRL_TX;
2042 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2043 				bp->flow_ctrl |= FLOW_CTRL_RX;
2044 		}
2045 
2046 		old_port = bp->phy_port;
2047 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2048 			bp->phy_port = PORT_FIBRE;
2049 		else
2050 			bp->phy_port = PORT_TP;
2051 
2052 		if (old_port != bp->phy_port)
2053 			bnx2_set_default_link(bp);
2054 
2055 	}
2056 	if (bp->link_up != link_up)
2057 		bnx2_report_link(bp);
2058 
2059 	bnx2_set_mac_link(bp);
2060 }
2061 
2062 static int
2063 bnx2_set_remote_link(struct bnx2 *bp)
2064 {
2065 	u32 evt_code;
2066 
2067 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2068 	switch (evt_code) {
2069 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2070 			bnx2_remote_phy_event(bp);
2071 			break;
2072 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2073 		default:
2074 			bnx2_send_heart_beat(bp);
2075 			break;
2076 	}
2077 	return 0;
2078 }
2079 
2080 static int
2081 bnx2_setup_copper_phy(struct bnx2 *bp)
2082 __releases(&bp->phy_lock)
2083 __acquires(&bp->phy_lock)
2084 {
2085 	u32 bmcr, adv_reg, new_adv = 0;
2086 	u32 new_bmcr;
2087 
2088 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2089 
2090 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2091 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2092 		    ADVERTISE_PAUSE_ASYM);
2093 
2094 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2095 
2096 	if (bp->autoneg & AUTONEG_SPEED) {
2097 		u32 adv1000_reg;
2098 		u32 new_adv1000 = 0;
2099 
2100 		new_adv |= bnx2_phy_get_pause_adv(bp);
2101 
2102 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2103 		adv1000_reg &= PHY_ALL_1000_SPEED;
2104 
2105 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2106 		if ((adv1000_reg != new_adv1000) ||
2107 			(adv_reg != new_adv) ||
2108 			((bmcr & BMCR_ANENABLE) == 0)) {
2109 
2110 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2111 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2112 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2113 				BMCR_ANENABLE);
2114 		}
2115 		else if (bp->link_up) {
2116 			/* Flow ctrl may have changed from auto to forced */
2117 			/* or vice-versa. */
2118 
2119 			bnx2_resolve_flow_ctrl(bp);
2120 			bnx2_set_mac_link(bp);
2121 		}
2122 		return 0;
2123 	}
2124 
2125 	/* advertise nothing when forcing speed */
2126 	if (adv_reg != new_adv)
2127 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2128 
2129 	new_bmcr = 0;
2130 	if (bp->req_line_speed == SPEED_100) {
2131 		new_bmcr |= BMCR_SPEED100;
2132 	}
2133 	if (bp->req_duplex == DUPLEX_FULL) {
2134 		new_bmcr |= BMCR_FULLDPLX;
2135 	}
2136 	if (new_bmcr != bmcr) {
2137 		u32 bmsr;
2138 
2139 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2140 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2141 
2142 		if (bmsr & BMSR_LSTATUS) {
2143 			/* Force link down */
2144 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2145 			spin_unlock_bh(&bp->phy_lock);
2146 			msleep(50);
2147 			spin_lock_bh(&bp->phy_lock);
2148 
2149 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2150 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2151 		}
2152 
2153 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2154 
2155 		/* Normally, the new speed is setup after the link has
2156 		 * gone down and up again. In some cases, link will not go
2157 		 * down so we need to set up the new speed here.
2158 		 */
2159 		if (bmsr & BMSR_LSTATUS) {
2160 			bp->line_speed = bp->req_line_speed;
2161 			bp->duplex = bp->req_duplex;
2162 			bnx2_resolve_flow_ctrl(bp);
2163 			bnx2_set_mac_link(bp);
2164 		}
2165 	} else {
2166 		bnx2_resolve_flow_ctrl(bp);
2167 		bnx2_set_mac_link(bp);
2168 	}
2169 	return 0;
2170 }
2171 
2172 static int
2173 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2174 __releases(&bp->phy_lock)
2175 __acquires(&bp->phy_lock)
2176 {
2177 	if (bp->loopback == MAC_LOOPBACK)
2178 		return 0;
2179 
2180 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2181 		return bnx2_setup_serdes_phy(bp, port);
2182 	}
2183 	else {
2184 		return bnx2_setup_copper_phy(bp);
2185 	}
2186 }
2187 
2188 static int
2189 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2190 {
2191 	u32 val;
2192 
2193 	bp->mii_bmcr = MII_BMCR + 0x10;
2194 	bp->mii_bmsr = MII_BMSR + 0x10;
2195 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2196 	bp->mii_adv = MII_ADVERTISE + 0x10;
2197 	bp->mii_lpa = MII_LPA + 0x10;
2198 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2199 
2200 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2201 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2202 
2203 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2204 	if (reset_phy)
2205 		bnx2_reset_phy(bp);
2206 
2207 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2208 
2209 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2210 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2211 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2212 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2213 
2214 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2215 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2216 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2217 		val |= BCM5708S_UP1_2G5;
2218 	else
2219 		val &= ~BCM5708S_UP1_2G5;
2220 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2221 
2222 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2223 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2224 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2225 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2226 
2227 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2228 
2229 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2230 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2231 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2232 
2233 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2234 
2235 	return 0;
2236 }
2237 
2238 static int
2239 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2240 {
2241 	u32 val;
2242 
2243 	if (reset_phy)
2244 		bnx2_reset_phy(bp);
2245 
2246 	bp->mii_up1 = BCM5708S_UP1;
2247 
2248 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2249 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2250 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2251 
2252 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2253 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2254 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2255 
2256 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2257 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2258 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2259 
2260 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2261 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2262 		val |= BCM5708S_UP1_2G5;
2263 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2264 	}
2265 
2266 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2267 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2268 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2269 		/* increase tx signal amplitude */
2270 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2271 			       BCM5708S_BLK_ADDR_TX_MISC);
2272 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2273 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2274 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2275 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2276 	}
2277 
2278 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2279 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2280 
2281 	if (val) {
2282 		u32 is_backplane;
2283 
2284 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2285 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2286 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2287 				       BCM5708S_BLK_ADDR_TX_MISC);
2288 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2289 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2290 				       BCM5708S_BLK_ADDR_DIG);
2291 		}
2292 	}
2293 	return 0;
2294 }
2295 
2296 static int
2297 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2298 {
2299 	if (reset_phy)
2300 		bnx2_reset_phy(bp);
2301 
2302 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2303 
2304 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2305 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2306 
2307 	if (bp->dev->mtu > 1500) {
2308 		u32 val;
2309 
2310 		/* Set extended packet length bit */
2311 		bnx2_write_phy(bp, 0x18, 0x7);
2312 		bnx2_read_phy(bp, 0x18, &val);
2313 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2314 
2315 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2316 		bnx2_read_phy(bp, 0x1c, &val);
2317 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2318 	}
2319 	else {
2320 		u32 val;
2321 
2322 		bnx2_write_phy(bp, 0x18, 0x7);
2323 		bnx2_read_phy(bp, 0x18, &val);
2324 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2325 
2326 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2327 		bnx2_read_phy(bp, 0x1c, &val);
2328 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static int
2335 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2336 {
2337 	u32 val;
2338 
2339 	if (reset_phy)
2340 		bnx2_reset_phy(bp);
2341 
2342 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2343 		bnx2_write_phy(bp, 0x18, 0x0c00);
2344 		bnx2_write_phy(bp, 0x17, 0x000a);
2345 		bnx2_write_phy(bp, 0x15, 0x310b);
2346 		bnx2_write_phy(bp, 0x17, 0x201f);
2347 		bnx2_write_phy(bp, 0x15, 0x9506);
2348 		bnx2_write_phy(bp, 0x17, 0x401f);
2349 		bnx2_write_phy(bp, 0x15, 0x14e2);
2350 		bnx2_write_phy(bp, 0x18, 0x0400);
2351 	}
2352 
2353 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2354 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2355 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2356 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2357 		val &= ~(1 << 8);
2358 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2359 	}
2360 
2361 	if (bp->dev->mtu > 1500) {
2362 		/* Set extended packet length bit */
2363 		bnx2_write_phy(bp, 0x18, 0x7);
2364 		bnx2_read_phy(bp, 0x18, &val);
2365 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2366 
2367 		bnx2_read_phy(bp, 0x10, &val);
2368 		bnx2_write_phy(bp, 0x10, val | 0x1);
2369 	}
2370 	else {
2371 		bnx2_write_phy(bp, 0x18, 0x7);
2372 		bnx2_read_phy(bp, 0x18, &val);
2373 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2374 
2375 		bnx2_read_phy(bp, 0x10, &val);
2376 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2377 	}
2378 
2379 	/* ethernet@wirespeed */
2380 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2381 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2382 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2383 
2384 	/* auto-mdix */
2385 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2386 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2387 
2388 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2389 	return 0;
2390 }
2391 
2392 
2393 static int
2394 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2395 __releases(&bp->phy_lock)
2396 __acquires(&bp->phy_lock)
2397 {
2398 	u32 val;
2399 	int rc = 0;
2400 
2401 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2402 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2403 
2404 	bp->mii_bmcr = MII_BMCR;
2405 	bp->mii_bmsr = MII_BMSR;
2406 	bp->mii_bmsr1 = MII_BMSR;
2407 	bp->mii_adv = MII_ADVERTISE;
2408 	bp->mii_lpa = MII_LPA;
2409 
2410 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2411 
2412 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2413 		goto setup_phy;
2414 
2415 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2416 	bp->phy_id = val << 16;
2417 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2418 	bp->phy_id |= val & 0xffff;
2419 
2420 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2421 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2422 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2423 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2424 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2425 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2426 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2427 	}
2428 	else {
2429 		rc = bnx2_init_copper_phy(bp, reset_phy);
2430 	}
2431 
2432 setup_phy:
2433 	if (!rc)
2434 		rc = bnx2_setup_phy(bp, bp->phy_port);
2435 
2436 	return rc;
2437 }
2438 
2439 static int
2440 bnx2_set_mac_loopback(struct bnx2 *bp)
2441 {
2442 	u32 mac_mode;
2443 
2444 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2445 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2446 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2447 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2448 	bp->link_up = 1;
2449 	return 0;
2450 }
2451 
2452 static int bnx2_test_link(struct bnx2 *);
2453 
2454 static int
2455 bnx2_set_phy_loopback(struct bnx2 *bp)
2456 {
2457 	u32 mac_mode;
2458 	int rc, i;
2459 
2460 	spin_lock_bh(&bp->phy_lock);
2461 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2462 			    BMCR_SPEED1000);
2463 	spin_unlock_bh(&bp->phy_lock);
2464 	if (rc)
2465 		return rc;
2466 
2467 	for (i = 0; i < 10; i++) {
2468 		if (bnx2_test_link(bp) == 0)
2469 			break;
2470 		msleep(100);
2471 	}
2472 
2473 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2474 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2475 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2476 		      BNX2_EMAC_MODE_25G_MODE);
2477 
2478 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2479 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2480 	bp->link_up = 1;
2481 	return 0;
2482 }
2483 
2484 static void
2485 bnx2_dump_mcp_state(struct bnx2 *bp)
2486 {
2487 	struct net_device *dev = bp->dev;
2488 	u32 mcp_p0, mcp_p1;
2489 
2490 	netdev_err(dev, "<--- start MCP states dump --->\n");
2491 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2492 		mcp_p0 = BNX2_MCP_STATE_P0;
2493 		mcp_p1 = BNX2_MCP_STATE_P1;
2494 	} else {
2495 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2496 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2497 	}
2498 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2499 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2500 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2501 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2502 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2503 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2504 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2505 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2506 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2507 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2508 	netdev_err(dev, "DEBUG: shmem states:\n");
2509 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2510 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2511 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2512 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2513 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2514 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2515 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2516 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2517 	pr_cont(" condition[%08x]\n",
2518 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2519 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2520 	DP_SHMEM_LINE(bp, 0x3cc);
2521 	DP_SHMEM_LINE(bp, 0x3dc);
2522 	DP_SHMEM_LINE(bp, 0x3ec);
2523 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2524 	netdev_err(dev, "<--- end MCP states dump --->\n");
2525 }
2526 
2527 static int
2528 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2529 {
2530 	int i;
2531 	u32 val;
2532 
2533 	bp->fw_wr_seq++;
2534 	msg_data |= bp->fw_wr_seq;
2535 	bp->fw_last_msg = msg_data;
2536 
2537 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2538 
2539 	if (!ack)
2540 		return 0;
2541 
2542 	/* wait for an acknowledgement. */
2543 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2544 		msleep(10);
2545 
2546 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2547 
2548 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2549 			break;
2550 	}
2551 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2552 		return 0;
2553 
2554 	/* If we timed out, inform the firmware that this is the case. */
2555 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2556 		msg_data &= ~BNX2_DRV_MSG_CODE;
2557 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2558 
2559 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2560 		if (!silent) {
2561 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2562 			bnx2_dump_mcp_state(bp);
2563 		}
2564 
2565 		return -EBUSY;
2566 	}
2567 
2568 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2569 		return -EIO;
2570 
2571 	return 0;
2572 }
2573 
2574 static int
2575 bnx2_init_5709_context(struct bnx2 *bp)
2576 {
2577 	int i, ret = 0;
2578 	u32 val;
2579 
2580 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2581 	val |= (BNX2_PAGE_BITS - 8) << 16;
2582 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2583 	for (i = 0; i < 10; i++) {
2584 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2585 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2586 			break;
2587 		udelay(2);
2588 	}
2589 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2590 		return -EBUSY;
2591 
2592 	for (i = 0; i < bp->ctx_pages; i++) {
2593 		int j;
2594 
2595 		if (bp->ctx_blk[i])
2596 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2597 		else
2598 			return -ENOMEM;
2599 
2600 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2601 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2602 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2603 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2604 			(u64) bp->ctx_blk_mapping[i] >> 32);
2605 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2606 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2607 		for (j = 0; j < 10; j++) {
2608 
2609 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2610 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2611 				break;
2612 			udelay(5);
2613 		}
2614 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2615 			ret = -EBUSY;
2616 			break;
2617 		}
2618 	}
2619 	return ret;
2620 }
2621 
2622 static void
2623 bnx2_init_context(struct bnx2 *bp)
2624 {
2625 	u32 vcid;
2626 
2627 	vcid = 96;
2628 	while (vcid) {
2629 		u32 vcid_addr, pcid_addr, offset;
2630 		int i;
2631 
2632 		vcid--;
2633 
2634 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2635 			u32 new_vcid;
2636 
2637 			vcid_addr = GET_PCID_ADDR(vcid);
2638 			if (vcid & 0x8) {
2639 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2640 			}
2641 			else {
2642 				new_vcid = vcid;
2643 			}
2644 			pcid_addr = GET_PCID_ADDR(new_vcid);
2645 		}
2646 		else {
2647 	    		vcid_addr = GET_CID_ADDR(vcid);
2648 			pcid_addr = vcid_addr;
2649 		}
2650 
2651 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2652 			vcid_addr += (i << PHY_CTX_SHIFT);
2653 			pcid_addr += (i << PHY_CTX_SHIFT);
2654 
2655 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2656 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2657 
2658 			/* Zero out the context. */
2659 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2660 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2661 		}
2662 	}
2663 }
2664 
2665 static int
2666 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2667 {
2668 	u16 *good_mbuf;
2669 	u32 good_mbuf_cnt;
2670 	u32 val;
2671 
2672 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2673 	if (good_mbuf == NULL)
2674 		return -ENOMEM;
2675 
2676 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2677 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2678 
2679 	good_mbuf_cnt = 0;
2680 
2681 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2682 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2683 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2684 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2685 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2686 
2687 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2688 
2689 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2690 
2691 		/* The addresses with Bit 9 set are bad memory blocks. */
2692 		if (!(val & (1 << 9))) {
2693 			good_mbuf[good_mbuf_cnt] = (u16) val;
2694 			good_mbuf_cnt++;
2695 		}
2696 
2697 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2698 	}
2699 
2700 	/* Free the good ones back to the mbuf pool thus discarding
2701 	 * all the bad ones. */
2702 	while (good_mbuf_cnt) {
2703 		good_mbuf_cnt--;
2704 
2705 		val = good_mbuf[good_mbuf_cnt];
2706 		val = (val << 9) | val | 1;
2707 
2708 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2709 	}
2710 	kfree(good_mbuf);
2711 	return 0;
2712 }
2713 
2714 static void
2715 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2716 {
2717 	u32 val;
2718 
2719 	val = (mac_addr[0] << 8) | mac_addr[1];
2720 
2721 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2722 
2723 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2724 		(mac_addr[4] << 8) | mac_addr[5];
2725 
2726 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2727 }
2728 
2729 static inline int
2730 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2731 {
2732 	dma_addr_t mapping;
2733 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2734 	struct bnx2_rx_bd *rxbd =
2735 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2736 	struct page *page = alloc_page(gfp);
2737 
2738 	if (!page)
2739 		return -ENOMEM;
2740 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2741 			       PCI_DMA_FROMDEVICE);
2742 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2743 		__free_page(page);
2744 		return -EIO;
2745 	}
2746 
2747 	rx_pg->page = page;
2748 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2749 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2750 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2751 	return 0;
2752 }
2753 
2754 static void
2755 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2756 {
2757 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2758 	struct page *page = rx_pg->page;
2759 
2760 	if (!page)
2761 		return;
2762 
2763 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2764 		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2765 
2766 	__free_page(page);
2767 	rx_pg->page = NULL;
2768 }
2769 
2770 static inline int
2771 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2772 {
2773 	u8 *data;
2774 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2775 	dma_addr_t mapping;
2776 	struct bnx2_rx_bd *rxbd =
2777 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2778 
2779 	data = kmalloc(bp->rx_buf_size, gfp);
2780 	if (!data)
2781 		return -ENOMEM;
2782 
2783 	mapping = dma_map_single(&bp->pdev->dev,
2784 				 get_l2_fhdr(data),
2785 				 bp->rx_buf_use_size,
2786 				 PCI_DMA_FROMDEVICE);
2787 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2788 		kfree(data);
2789 		return -EIO;
2790 	}
2791 
2792 	rx_buf->data = data;
2793 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2794 
2795 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2796 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2797 
2798 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2799 
2800 	return 0;
2801 }
2802 
2803 static int
2804 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2805 {
2806 	struct status_block *sblk = bnapi->status_blk.msi;
2807 	u32 new_link_state, old_link_state;
2808 	int is_set = 1;
2809 
2810 	new_link_state = sblk->status_attn_bits & event;
2811 	old_link_state = sblk->status_attn_bits_ack & event;
2812 	if (new_link_state != old_link_state) {
2813 		if (new_link_state)
2814 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2815 		else
2816 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2817 	} else
2818 		is_set = 0;
2819 
2820 	return is_set;
2821 }
2822 
2823 static void
2824 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2825 {
2826 	spin_lock(&bp->phy_lock);
2827 
2828 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2829 		bnx2_set_link(bp);
2830 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2831 		bnx2_set_remote_link(bp);
2832 
2833 	spin_unlock(&bp->phy_lock);
2834 
2835 }
2836 
2837 static inline u16
2838 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2839 {
2840 	u16 cons;
2841 
2842 	/* Tell compiler that status block fields can change. */
2843 	barrier();
2844 	cons = *bnapi->hw_tx_cons_ptr;
2845 	barrier();
2846 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2847 		cons++;
2848 	return cons;
2849 }
2850 
2851 static int
2852 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2853 {
2854 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2855 	u16 hw_cons, sw_cons, sw_ring_cons;
2856 	int tx_pkt = 0, index;
2857 	unsigned int tx_bytes = 0;
2858 	struct netdev_queue *txq;
2859 
2860 	index = (bnapi - bp->bnx2_napi);
2861 	txq = netdev_get_tx_queue(bp->dev, index);
2862 
2863 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2864 	sw_cons = txr->tx_cons;
2865 
2866 	while (sw_cons != hw_cons) {
2867 		struct bnx2_sw_tx_bd *tx_buf;
2868 		struct sk_buff *skb;
2869 		int i, last;
2870 
2871 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2872 
2873 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2874 		skb = tx_buf->skb;
2875 
2876 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2877 		prefetch(&skb->end);
2878 
2879 		/* partial BD completions possible with TSO packets */
2880 		if (tx_buf->is_gso) {
2881 			u16 last_idx, last_ring_idx;
2882 
2883 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2884 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2885 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2886 				last_idx++;
2887 			}
2888 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2889 				break;
2890 			}
2891 		}
2892 
2893 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2894 			skb_headlen(skb), PCI_DMA_TODEVICE);
2895 
2896 		tx_buf->skb = NULL;
2897 		last = tx_buf->nr_frags;
2898 
2899 		for (i = 0; i < last; i++) {
2900 			struct bnx2_sw_tx_bd *tx_buf;
2901 
2902 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2903 
2904 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2905 			dma_unmap_page(&bp->pdev->dev,
2906 				dma_unmap_addr(tx_buf, mapping),
2907 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2908 				PCI_DMA_TODEVICE);
2909 		}
2910 
2911 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2912 
2913 		tx_bytes += skb->len;
2914 		dev_kfree_skb_any(skb);
2915 		tx_pkt++;
2916 		if (tx_pkt == budget)
2917 			break;
2918 
2919 		if (hw_cons == sw_cons)
2920 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2921 	}
2922 
2923 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2924 	txr->hw_tx_cons = hw_cons;
2925 	txr->tx_cons = sw_cons;
2926 
2927 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2928 	 * before checking for netif_tx_queue_stopped().  Without the
2929 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2930 	 * will miss it and cause the queue to be stopped forever.
2931 	 */
2932 	smp_mb();
2933 
2934 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2935 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2936 		__netif_tx_lock(txq, smp_processor_id());
2937 		if ((netif_tx_queue_stopped(txq)) &&
2938 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2939 			netif_tx_wake_queue(txq);
2940 		__netif_tx_unlock(txq);
2941 	}
2942 
2943 	return tx_pkt;
2944 }
2945 
2946 static void
2947 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2948 			struct sk_buff *skb, int count)
2949 {
2950 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2951 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2952 	int i;
2953 	u16 hw_prod, prod;
2954 	u16 cons = rxr->rx_pg_cons;
2955 
2956 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2957 
2958 	/* The caller was unable to allocate a new page to replace the
2959 	 * last one in the frags array, so we need to recycle that page
2960 	 * and then free the skb.
2961 	 */
2962 	if (skb) {
2963 		struct page *page;
2964 		struct skb_shared_info *shinfo;
2965 
2966 		shinfo = skb_shinfo(skb);
2967 		shinfo->nr_frags--;
2968 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2969 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2970 
2971 		cons_rx_pg->page = page;
2972 		dev_kfree_skb(skb);
2973 	}
2974 
2975 	hw_prod = rxr->rx_pg_prod;
2976 
2977 	for (i = 0; i < count; i++) {
2978 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2979 
2980 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2981 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2982 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2983 						[BNX2_RX_IDX(cons)];
2984 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2985 						[BNX2_RX_IDX(prod)];
2986 
2987 		if (prod != cons) {
2988 			prod_rx_pg->page = cons_rx_pg->page;
2989 			cons_rx_pg->page = NULL;
2990 			dma_unmap_addr_set(prod_rx_pg, mapping,
2991 				dma_unmap_addr(cons_rx_pg, mapping));
2992 
2993 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2994 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2995 
2996 		}
2997 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2998 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2999 	}
3000 	rxr->rx_pg_prod = hw_prod;
3001 	rxr->rx_pg_cons = cons;
3002 }
3003 
3004 static inline void
3005 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3006 		   u8 *data, u16 cons, u16 prod)
3007 {
3008 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3009 	struct bnx2_rx_bd *cons_bd, *prod_bd;
3010 
3011 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3012 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3013 
3014 	dma_sync_single_for_device(&bp->pdev->dev,
3015 		dma_unmap_addr(cons_rx_buf, mapping),
3016 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3017 
3018 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3019 
3020 	prod_rx_buf->data = data;
3021 
3022 	if (cons == prod)
3023 		return;
3024 
3025 	dma_unmap_addr_set(prod_rx_buf, mapping,
3026 			dma_unmap_addr(cons_rx_buf, mapping));
3027 
3028 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3029 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3030 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3031 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3032 }
3033 
3034 static struct sk_buff *
3035 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3036 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3037 	    u32 ring_idx)
3038 {
3039 	int err;
3040 	u16 prod = ring_idx & 0xffff;
3041 	struct sk_buff *skb;
3042 
3043 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3044 	if (unlikely(err)) {
3045 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3046 error:
3047 		if (hdr_len) {
3048 			unsigned int raw_len = len + 4;
3049 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3050 
3051 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3052 		}
3053 		return NULL;
3054 	}
3055 
3056 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3057 			 PCI_DMA_FROMDEVICE);
3058 	skb = build_skb(data, 0);
3059 	if (!skb) {
3060 		kfree(data);
3061 		goto error;
3062 	}
3063 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3064 	if (hdr_len == 0) {
3065 		skb_put(skb, len);
3066 		return skb;
3067 	} else {
3068 		unsigned int i, frag_len, frag_size, pages;
3069 		struct bnx2_sw_pg *rx_pg;
3070 		u16 pg_cons = rxr->rx_pg_cons;
3071 		u16 pg_prod = rxr->rx_pg_prod;
3072 
3073 		frag_size = len + 4 - hdr_len;
3074 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3075 		skb_put(skb, hdr_len);
3076 
3077 		for (i = 0; i < pages; i++) {
3078 			dma_addr_t mapping_old;
3079 
3080 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3081 			if (unlikely(frag_len <= 4)) {
3082 				unsigned int tail = 4 - frag_len;
3083 
3084 				rxr->rx_pg_cons = pg_cons;
3085 				rxr->rx_pg_prod = pg_prod;
3086 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3087 							pages - i);
3088 				skb->len -= tail;
3089 				if (i == 0) {
3090 					skb->tail -= tail;
3091 				} else {
3092 					skb_frag_t *frag =
3093 						&skb_shinfo(skb)->frags[i - 1];
3094 					skb_frag_size_sub(frag, tail);
3095 					skb->data_len -= tail;
3096 				}
3097 				return skb;
3098 			}
3099 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3100 
3101 			/* Don't unmap yet.  If we're unable to allocate a new
3102 			 * page, we need to recycle the page and the DMA addr.
3103 			 */
3104 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3105 			if (i == pages - 1)
3106 				frag_len -= 4;
3107 
3108 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3109 			rx_pg->page = NULL;
3110 
3111 			err = bnx2_alloc_rx_page(bp, rxr,
3112 						 BNX2_RX_PG_RING_IDX(pg_prod),
3113 						 GFP_ATOMIC);
3114 			if (unlikely(err)) {
3115 				rxr->rx_pg_cons = pg_cons;
3116 				rxr->rx_pg_prod = pg_prod;
3117 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3118 							pages - i);
3119 				return NULL;
3120 			}
3121 
3122 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3123 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3124 
3125 			frag_size -= frag_len;
3126 			skb->data_len += frag_len;
3127 			skb->truesize += PAGE_SIZE;
3128 			skb->len += frag_len;
3129 
3130 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3131 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3132 		}
3133 		rxr->rx_pg_prod = pg_prod;
3134 		rxr->rx_pg_cons = pg_cons;
3135 	}
3136 	return skb;
3137 }
3138 
3139 static inline u16
3140 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3141 {
3142 	u16 cons;
3143 
3144 	/* Tell compiler that status block fields can change. */
3145 	barrier();
3146 	cons = *bnapi->hw_rx_cons_ptr;
3147 	barrier();
3148 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3149 		cons++;
3150 	return cons;
3151 }
3152 
3153 static int
3154 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3155 {
3156 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3157 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3158 	struct l2_fhdr *rx_hdr;
3159 	int rx_pkt = 0, pg_ring_used = 0;
3160 
3161 	if (budget <= 0)
3162 		return rx_pkt;
3163 
3164 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3165 	sw_cons = rxr->rx_cons;
3166 	sw_prod = rxr->rx_prod;
3167 
3168 	/* Memory barrier necessary as speculative reads of the rx
3169 	 * buffer can be ahead of the index in the status block
3170 	 */
3171 	rmb();
3172 	while (sw_cons != hw_cons) {
3173 		unsigned int len, hdr_len;
3174 		u32 status;
3175 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3176 		struct sk_buff *skb;
3177 		dma_addr_t dma_addr;
3178 		u8 *data;
3179 		u16 next_ring_idx;
3180 
3181 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3182 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3183 
3184 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3185 		data = rx_buf->data;
3186 		rx_buf->data = NULL;
3187 
3188 		rx_hdr = get_l2_fhdr(data);
3189 		prefetch(rx_hdr);
3190 
3191 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3192 
3193 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3194 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3195 			PCI_DMA_FROMDEVICE);
3196 
3197 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3198 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3199 		prefetch(get_l2_fhdr(next_rx_buf->data));
3200 
3201 		len = rx_hdr->l2_fhdr_pkt_len;
3202 		status = rx_hdr->l2_fhdr_status;
3203 
3204 		hdr_len = 0;
3205 		if (status & L2_FHDR_STATUS_SPLIT) {
3206 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3207 			pg_ring_used = 1;
3208 		} else if (len > bp->rx_jumbo_thresh) {
3209 			hdr_len = bp->rx_jumbo_thresh;
3210 			pg_ring_used = 1;
3211 		}
3212 
3213 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3214 				       L2_FHDR_ERRORS_PHY_DECODE |
3215 				       L2_FHDR_ERRORS_ALIGNMENT |
3216 				       L2_FHDR_ERRORS_TOO_SHORT |
3217 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3218 
3219 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3220 					  sw_ring_prod);
3221 			if (pg_ring_used) {
3222 				int pages;
3223 
3224 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3225 
3226 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3227 			}
3228 			goto next_rx;
3229 		}
3230 
3231 		len -= 4;
3232 
3233 		if (len <= bp->rx_copy_thresh) {
3234 			skb = netdev_alloc_skb(bp->dev, len + 6);
3235 			if (skb == NULL) {
3236 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3237 						  sw_ring_prod);
3238 				goto next_rx;
3239 			}
3240 
3241 			/* aligned copy */
3242 			memcpy(skb->data,
3243 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3244 			       len + 6);
3245 			skb_reserve(skb, 6);
3246 			skb_put(skb, len);
3247 
3248 			bnx2_reuse_rx_data(bp, rxr, data,
3249 				sw_ring_cons, sw_ring_prod);
3250 
3251 		} else {
3252 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3253 					  (sw_ring_cons << 16) | sw_ring_prod);
3254 			if (!skb)
3255 				goto next_rx;
3256 		}
3257 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3258 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3259 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3260 
3261 		skb->protocol = eth_type_trans(skb, bp->dev);
3262 
3263 		if (len > (bp->dev->mtu + ETH_HLEN) &&
3264 		    skb->protocol != htons(0x8100) &&
3265 		    skb->protocol != htons(ETH_P_8021AD)) {
3266 
3267 			dev_kfree_skb(skb);
3268 			goto next_rx;
3269 
3270 		}
3271 
3272 		skb_checksum_none_assert(skb);
3273 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3274 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3275 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3276 
3277 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3278 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3279 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3280 		}
3281 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3282 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3283 		     L2_FHDR_STATUS_USE_RXHASH))
3284 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3285 				     PKT_HASH_TYPE_L3);
3286 
3287 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3288 		napi_gro_receive(&bnapi->napi, skb);
3289 		rx_pkt++;
3290 
3291 next_rx:
3292 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3293 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3294 
3295 		if ((rx_pkt == budget))
3296 			break;
3297 
3298 		/* Refresh hw_cons to see if there is new work */
3299 		if (sw_cons == hw_cons) {
3300 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3301 			rmb();
3302 		}
3303 	}
3304 	rxr->rx_cons = sw_cons;
3305 	rxr->rx_prod = sw_prod;
3306 
3307 	if (pg_ring_used)
3308 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3309 
3310 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3311 
3312 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3313 
3314 	mmiowb();
3315 
3316 	return rx_pkt;
3317 
3318 }
3319 
3320 /* MSI ISR - The only difference between this and the INTx ISR
3321  * is that the MSI interrupt is always serviced.
3322  */
3323 static irqreturn_t
3324 bnx2_msi(int irq, void *dev_instance)
3325 {
3326 	struct bnx2_napi *bnapi = dev_instance;
3327 	struct bnx2 *bp = bnapi->bp;
3328 
3329 	prefetch(bnapi->status_blk.msi);
3330 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3331 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3332 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3333 
3334 	/* Return here if interrupt is disabled. */
3335 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3336 		return IRQ_HANDLED;
3337 
3338 	napi_schedule(&bnapi->napi);
3339 
3340 	return IRQ_HANDLED;
3341 }
3342 
3343 static irqreturn_t
3344 bnx2_msi_1shot(int irq, void *dev_instance)
3345 {
3346 	struct bnx2_napi *bnapi = dev_instance;
3347 	struct bnx2 *bp = bnapi->bp;
3348 
3349 	prefetch(bnapi->status_blk.msi);
3350 
3351 	/* Return here if interrupt is disabled. */
3352 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3353 		return IRQ_HANDLED;
3354 
3355 	napi_schedule(&bnapi->napi);
3356 
3357 	return IRQ_HANDLED;
3358 }
3359 
3360 static irqreturn_t
3361 bnx2_interrupt(int irq, void *dev_instance)
3362 {
3363 	struct bnx2_napi *bnapi = dev_instance;
3364 	struct bnx2 *bp = bnapi->bp;
3365 	struct status_block *sblk = bnapi->status_blk.msi;
3366 
3367 	/* When using INTx, it is possible for the interrupt to arrive
3368 	 * at the CPU before the status block posted prior to the
3369 	 * interrupt. Reading a register will flush the status block.
3370 	 * When using MSI, the MSI message will always complete after
3371 	 * the status block write.
3372 	 */
3373 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3374 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3375 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3376 		return IRQ_NONE;
3377 
3378 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3379 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3380 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3381 
3382 	/* Read back to deassert IRQ immediately to avoid too many
3383 	 * spurious interrupts.
3384 	 */
3385 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3386 
3387 	/* Return here if interrupt is shared and is disabled. */
3388 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3389 		return IRQ_HANDLED;
3390 
3391 	if (napi_schedule_prep(&bnapi->napi)) {
3392 		bnapi->last_status_idx = sblk->status_idx;
3393 		__napi_schedule(&bnapi->napi);
3394 	}
3395 
3396 	return IRQ_HANDLED;
3397 }
3398 
3399 static inline int
3400 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3401 {
3402 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3403 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3404 
3405 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3406 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3407 		return 1;
3408 	return 0;
3409 }
3410 
3411 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3412 				 STATUS_ATTN_BITS_TIMER_ABORT)
3413 
3414 static inline int
3415 bnx2_has_work(struct bnx2_napi *bnapi)
3416 {
3417 	struct status_block *sblk = bnapi->status_blk.msi;
3418 
3419 	if (bnx2_has_fast_work(bnapi))
3420 		return 1;
3421 
3422 #ifdef BCM_CNIC
3423 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3424 		return 1;
3425 #endif
3426 
3427 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3428 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3429 		return 1;
3430 
3431 	return 0;
3432 }
3433 
3434 static void
3435 bnx2_chk_missed_msi(struct bnx2 *bp)
3436 {
3437 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3438 	u32 msi_ctrl;
3439 
3440 	if (bnx2_has_work(bnapi)) {
3441 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3442 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3443 			return;
3444 
3445 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3446 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3447 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3448 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3449 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3450 		}
3451 	}
3452 
3453 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3454 }
3455 
3456 #ifdef BCM_CNIC
3457 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3458 {
3459 	struct cnic_ops *c_ops;
3460 
3461 	if (!bnapi->cnic_present)
3462 		return;
3463 
3464 	rcu_read_lock();
3465 	c_ops = rcu_dereference(bp->cnic_ops);
3466 	if (c_ops)
3467 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3468 						      bnapi->status_blk.msi);
3469 	rcu_read_unlock();
3470 }
3471 #endif
3472 
3473 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3474 {
3475 	struct status_block *sblk = bnapi->status_blk.msi;
3476 	u32 status_attn_bits = sblk->status_attn_bits;
3477 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3478 
3479 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3480 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3481 
3482 		bnx2_phy_int(bp, bnapi);
3483 
3484 		/* This is needed to take care of transient status
3485 		 * during link changes.
3486 		 */
3487 		BNX2_WR(bp, BNX2_HC_COMMAND,
3488 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3489 		BNX2_RD(bp, BNX2_HC_COMMAND);
3490 	}
3491 }
3492 
3493 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3494 			  int work_done, int budget)
3495 {
3496 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3497 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3498 
3499 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3500 		bnx2_tx_int(bp, bnapi, 0);
3501 
3502 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3503 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3504 
3505 	return work_done;
3506 }
3507 
3508 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3509 {
3510 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3511 	struct bnx2 *bp = bnapi->bp;
3512 	int work_done = 0;
3513 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3514 
3515 	while (1) {
3516 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3517 		if (unlikely(work_done >= budget))
3518 			break;
3519 
3520 		bnapi->last_status_idx = sblk->status_idx;
3521 		/* status idx must be read before checking for more work. */
3522 		rmb();
3523 		if (likely(!bnx2_has_fast_work(bnapi))) {
3524 
3525 			napi_complete(napi);
3526 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3527 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3528 				bnapi->last_status_idx);
3529 			break;
3530 		}
3531 	}
3532 	return work_done;
3533 }
3534 
3535 static int bnx2_poll(struct napi_struct *napi, int budget)
3536 {
3537 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3538 	struct bnx2 *bp = bnapi->bp;
3539 	int work_done = 0;
3540 	struct status_block *sblk = bnapi->status_blk.msi;
3541 
3542 	while (1) {
3543 		bnx2_poll_link(bp, bnapi);
3544 
3545 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3546 
3547 #ifdef BCM_CNIC
3548 		bnx2_poll_cnic(bp, bnapi);
3549 #endif
3550 
3551 		/* bnapi->last_status_idx is used below to tell the hw how
3552 		 * much work has been processed, so we must read it before
3553 		 * checking for more work.
3554 		 */
3555 		bnapi->last_status_idx = sblk->status_idx;
3556 
3557 		if (unlikely(work_done >= budget))
3558 			break;
3559 
3560 		rmb();
3561 		if (likely(!bnx2_has_work(bnapi))) {
3562 			napi_complete(napi);
3563 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3564 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3565 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3566 					bnapi->last_status_idx);
3567 				break;
3568 			}
3569 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3570 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3571 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3572 				bnapi->last_status_idx);
3573 
3574 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3575 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3576 				bnapi->last_status_idx);
3577 			break;
3578 		}
3579 	}
3580 
3581 	return work_done;
3582 }
3583 
3584 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3585  * from set_multicast.
3586  */
3587 static void
3588 bnx2_set_rx_mode(struct net_device *dev)
3589 {
3590 	struct bnx2 *bp = netdev_priv(dev);
3591 	u32 rx_mode, sort_mode;
3592 	struct netdev_hw_addr *ha;
3593 	int i;
3594 
3595 	if (!netif_running(dev))
3596 		return;
3597 
3598 	spin_lock_bh(&bp->phy_lock);
3599 
3600 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3601 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3602 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3603 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3604 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3605 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3606 	if (dev->flags & IFF_PROMISC) {
3607 		/* Promiscuous mode. */
3608 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3609 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3610 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3611 	}
3612 	else if (dev->flags & IFF_ALLMULTI) {
3613 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3614 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3615 				0xffffffff);
3616         	}
3617 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3618 	}
3619 	else {
3620 		/* Accept one or more multicast(s). */
3621 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3622 		u32 regidx;
3623 		u32 bit;
3624 		u32 crc;
3625 
3626 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3627 
3628 		netdev_for_each_mc_addr(ha, dev) {
3629 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3630 			bit = crc & 0xff;
3631 			regidx = (bit & 0xe0) >> 5;
3632 			bit &= 0x1f;
3633 			mc_filter[regidx] |= (1 << bit);
3634 		}
3635 
3636 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3637 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3638 				mc_filter[i]);
3639 		}
3640 
3641 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3642 	}
3643 
3644 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3645 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3646 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3647 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3648 	} else if (!(dev->flags & IFF_PROMISC)) {
3649 		/* Add all entries into to the match filter list */
3650 		i = 0;
3651 		netdev_for_each_uc_addr(ha, dev) {
3652 			bnx2_set_mac_addr(bp, ha->addr,
3653 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3654 			sort_mode |= (1 <<
3655 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3656 			i++;
3657 		}
3658 
3659 	}
3660 
3661 	if (rx_mode != bp->rx_mode) {
3662 		bp->rx_mode = rx_mode;
3663 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3664 	}
3665 
3666 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3667 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3668 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3669 
3670 	spin_unlock_bh(&bp->phy_lock);
3671 }
3672 
3673 static int
3674 check_fw_section(const struct firmware *fw,
3675 		 const struct bnx2_fw_file_section *section,
3676 		 u32 alignment, bool non_empty)
3677 {
3678 	u32 offset = be32_to_cpu(section->offset);
3679 	u32 len = be32_to_cpu(section->len);
3680 
3681 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3682 		return -EINVAL;
3683 	if ((non_empty && len == 0) || len > fw->size - offset ||
3684 	    len & (alignment - 1))
3685 		return -EINVAL;
3686 	return 0;
3687 }
3688 
3689 static int
3690 check_mips_fw_entry(const struct firmware *fw,
3691 		    const struct bnx2_mips_fw_file_entry *entry)
3692 {
3693 	if (check_fw_section(fw, &entry->text, 4, true) ||
3694 	    check_fw_section(fw, &entry->data, 4, false) ||
3695 	    check_fw_section(fw, &entry->rodata, 4, false))
3696 		return -EINVAL;
3697 	return 0;
3698 }
3699 
3700 static void bnx2_release_firmware(struct bnx2 *bp)
3701 {
3702 	if (bp->rv2p_firmware) {
3703 		release_firmware(bp->mips_firmware);
3704 		release_firmware(bp->rv2p_firmware);
3705 		bp->rv2p_firmware = NULL;
3706 	}
3707 }
3708 
3709 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3710 {
3711 	const char *mips_fw_file, *rv2p_fw_file;
3712 	const struct bnx2_mips_fw_file *mips_fw;
3713 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3714 	int rc;
3715 
3716 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3717 		mips_fw_file = FW_MIPS_FILE_09;
3718 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3719 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3720 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3721 		else
3722 			rv2p_fw_file = FW_RV2P_FILE_09;
3723 	} else {
3724 		mips_fw_file = FW_MIPS_FILE_06;
3725 		rv2p_fw_file = FW_RV2P_FILE_06;
3726 	}
3727 
3728 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3729 	if (rc) {
3730 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3731 		goto out;
3732 	}
3733 
3734 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3735 	if (rc) {
3736 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3737 		goto err_release_mips_firmware;
3738 	}
3739 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3740 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3741 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3742 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3743 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3744 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3745 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3746 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3747 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3748 		rc = -EINVAL;
3749 		goto err_release_firmware;
3750 	}
3751 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3752 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3753 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3754 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3755 		rc = -EINVAL;
3756 		goto err_release_firmware;
3757 	}
3758 out:
3759 	return rc;
3760 
3761 err_release_firmware:
3762 	release_firmware(bp->rv2p_firmware);
3763 	bp->rv2p_firmware = NULL;
3764 err_release_mips_firmware:
3765 	release_firmware(bp->mips_firmware);
3766 	goto out;
3767 }
3768 
3769 static int bnx2_request_firmware(struct bnx2 *bp)
3770 {
3771 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3772 }
3773 
3774 static u32
3775 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3776 {
3777 	switch (idx) {
3778 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3779 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3780 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3781 		break;
3782 	}
3783 	return rv2p_code;
3784 }
3785 
3786 static int
3787 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3788 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3789 {
3790 	u32 rv2p_code_len, file_offset;
3791 	__be32 *rv2p_code;
3792 	int i;
3793 	u32 val, cmd, addr;
3794 
3795 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3796 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3797 
3798 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3799 
3800 	if (rv2p_proc == RV2P_PROC1) {
3801 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3802 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3803 	} else {
3804 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3805 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3806 	}
3807 
3808 	for (i = 0; i < rv2p_code_len; i += 8) {
3809 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3810 		rv2p_code++;
3811 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3812 		rv2p_code++;
3813 
3814 		val = (i / 8) | cmd;
3815 		BNX2_WR(bp, addr, val);
3816 	}
3817 
3818 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3819 	for (i = 0; i < 8; i++) {
3820 		u32 loc, code;
3821 
3822 		loc = be32_to_cpu(fw_entry->fixup[i]);
3823 		if (loc && ((loc * 4) < rv2p_code_len)) {
3824 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3825 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3826 			code = be32_to_cpu(*(rv2p_code + loc));
3827 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3828 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3829 
3830 			val = (loc / 2) | cmd;
3831 			BNX2_WR(bp, addr, val);
3832 		}
3833 	}
3834 
3835 	/* Reset the processor, un-stall is done later. */
3836 	if (rv2p_proc == RV2P_PROC1) {
3837 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3838 	}
3839 	else {
3840 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3841 	}
3842 
3843 	return 0;
3844 }
3845 
3846 static int
3847 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3848 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3849 {
3850 	u32 addr, len, file_offset;
3851 	__be32 *data;
3852 	u32 offset;
3853 	u32 val;
3854 
3855 	/* Halt the CPU. */
3856 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3857 	val |= cpu_reg->mode_value_halt;
3858 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3859 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3860 
3861 	/* Load the Text area. */
3862 	addr = be32_to_cpu(fw_entry->text.addr);
3863 	len = be32_to_cpu(fw_entry->text.len);
3864 	file_offset = be32_to_cpu(fw_entry->text.offset);
3865 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3866 
3867 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3868 	if (len) {
3869 		int j;
3870 
3871 		for (j = 0; j < (len / 4); j++, offset += 4)
3872 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3873 	}
3874 
3875 	/* Load the Data area. */
3876 	addr = be32_to_cpu(fw_entry->data.addr);
3877 	len = be32_to_cpu(fw_entry->data.len);
3878 	file_offset = be32_to_cpu(fw_entry->data.offset);
3879 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3880 
3881 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3882 	if (len) {
3883 		int j;
3884 
3885 		for (j = 0; j < (len / 4); j++, offset += 4)
3886 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3887 	}
3888 
3889 	/* Load the Read-Only area. */
3890 	addr = be32_to_cpu(fw_entry->rodata.addr);
3891 	len = be32_to_cpu(fw_entry->rodata.len);
3892 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3893 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3894 
3895 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3896 	if (len) {
3897 		int j;
3898 
3899 		for (j = 0; j < (len / 4); j++, offset += 4)
3900 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3901 	}
3902 
3903 	/* Clear the pre-fetch instruction. */
3904 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3905 
3906 	val = be32_to_cpu(fw_entry->start_addr);
3907 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3908 
3909 	/* Start the CPU. */
3910 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3911 	val &= ~cpu_reg->mode_value_halt;
3912 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3913 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3914 
3915 	return 0;
3916 }
3917 
3918 static int
3919 bnx2_init_cpus(struct bnx2 *bp)
3920 {
3921 	const struct bnx2_mips_fw_file *mips_fw =
3922 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3923 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3924 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3925 	int rc;
3926 
3927 	/* Initialize the RV2P processor. */
3928 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3929 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3930 
3931 	/* Initialize the RX Processor. */
3932 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3933 	if (rc)
3934 		goto init_cpu_err;
3935 
3936 	/* Initialize the TX Processor. */
3937 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3938 	if (rc)
3939 		goto init_cpu_err;
3940 
3941 	/* Initialize the TX Patch-up Processor. */
3942 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3943 	if (rc)
3944 		goto init_cpu_err;
3945 
3946 	/* Initialize the Completion Processor. */
3947 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3948 	if (rc)
3949 		goto init_cpu_err;
3950 
3951 	/* Initialize the Command Processor. */
3952 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3953 
3954 init_cpu_err:
3955 	return rc;
3956 }
3957 
3958 static void
3959 bnx2_setup_wol(struct bnx2 *bp)
3960 {
3961 	int i;
3962 	u32 val, wol_msg;
3963 
3964 	if (bp->wol) {
3965 		u32 advertising;
3966 		u8 autoneg;
3967 
3968 		autoneg = bp->autoneg;
3969 		advertising = bp->advertising;
3970 
3971 		if (bp->phy_port == PORT_TP) {
3972 			bp->autoneg = AUTONEG_SPEED;
3973 			bp->advertising = ADVERTISED_10baseT_Half |
3974 				ADVERTISED_10baseT_Full |
3975 				ADVERTISED_100baseT_Half |
3976 				ADVERTISED_100baseT_Full |
3977 				ADVERTISED_Autoneg;
3978 		}
3979 
3980 		spin_lock_bh(&bp->phy_lock);
3981 		bnx2_setup_phy(bp, bp->phy_port);
3982 		spin_unlock_bh(&bp->phy_lock);
3983 
3984 		bp->autoneg = autoneg;
3985 		bp->advertising = advertising;
3986 
3987 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3988 
3989 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3990 
3991 		/* Enable port mode. */
3992 		val &= ~BNX2_EMAC_MODE_PORT;
3993 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3994 		       BNX2_EMAC_MODE_ACPI_RCVD |
3995 		       BNX2_EMAC_MODE_MPKT;
3996 		if (bp->phy_port == PORT_TP) {
3997 			val |= BNX2_EMAC_MODE_PORT_MII;
3998 		} else {
3999 			val |= BNX2_EMAC_MODE_PORT_GMII;
4000 			if (bp->line_speed == SPEED_2500)
4001 				val |= BNX2_EMAC_MODE_25G_MODE;
4002 		}
4003 
4004 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4005 
4006 		/* receive all multicast */
4007 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4008 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4009 				0xffffffff);
4010 		}
4011 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4012 
4013 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4014 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4015 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4016 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4017 
4018 		/* Need to enable EMAC and RPM for WOL. */
4019 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4020 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4021 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4022 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4023 
4024 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4025 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4026 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4027 
4028 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4029 	} else {
4030 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4031 	}
4032 
4033 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4034 		u32 val;
4035 
4036 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4037 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4038 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4039 			return;
4040 		}
4041 		/* Tell firmware not to power down the PHY yet, otherwise
4042 		 * the chip will take a long time to respond to MMIO reads.
4043 		 */
4044 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4045 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4046 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4047 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4048 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4049 	}
4050 
4051 }
4052 
4053 static int
4054 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4055 {
4056 	switch (state) {
4057 	case PCI_D0: {
4058 		u32 val;
4059 
4060 		pci_enable_wake(bp->pdev, PCI_D0, false);
4061 		pci_set_power_state(bp->pdev, PCI_D0);
4062 
4063 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4064 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4065 		val &= ~BNX2_EMAC_MODE_MPKT;
4066 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4067 
4068 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4069 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4070 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4071 		break;
4072 	}
4073 	case PCI_D3hot: {
4074 		bnx2_setup_wol(bp);
4075 		pci_wake_from_d3(bp->pdev, bp->wol);
4076 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4077 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4078 
4079 			if (bp->wol)
4080 				pci_set_power_state(bp->pdev, PCI_D3hot);
4081 			break;
4082 
4083 		}
4084 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4085 			u32 val;
4086 
4087 			/* Tell firmware not to power down the PHY yet,
4088 			 * otherwise the other port may not respond to
4089 			 * MMIO reads.
4090 			 */
4091 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4092 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4093 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4094 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4095 		}
4096 		pci_set_power_state(bp->pdev, PCI_D3hot);
4097 
4098 		/* No more memory access after this point until
4099 		 * device is brought back to D0.
4100 		 */
4101 		break;
4102 	}
4103 	default:
4104 		return -EINVAL;
4105 	}
4106 	return 0;
4107 }
4108 
4109 static int
4110 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4111 {
4112 	u32 val;
4113 	int j;
4114 
4115 	/* Request access to the flash interface. */
4116 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4117 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4118 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4119 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4120 			break;
4121 
4122 		udelay(5);
4123 	}
4124 
4125 	if (j >= NVRAM_TIMEOUT_COUNT)
4126 		return -EBUSY;
4127 
4128 	return 0;
4129 }
4130 
4131 static int
4132 bnx2_release_nvram_lock(struct bnx2 *bp)
4133 {
4134 	int j;
4135 	u32 val;
4136 
4137 	/* Relinquish nvram interface. */
4138 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4139 
4140 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4141 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4142 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4143 			break;
4144 
4145 		udelay(5);
4146 	}
4147 
4148 	if (j >= NVRAM_TIMEOUT_COUNT)
4149 		return -EBUSY;
4150 
4151 	return 0;
4152 }
4153 
4154 
4155 static int
4156 bnx2_enable_nvram_write(struct bnx2 *bp)
4157 {
4158 	u32 val;
4159 
4160 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4161 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4162 
4163 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4164 		int j;
4165 
4166 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4167 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4168 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4169 
4170 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4171 			udelay(5);
4172 
4173 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4174 			if (val & BNX2_NVM_COMMAND_DONE)
4175 				break;
4176 		}
4177 
4178 		if (j >= NVRAM_TIMEOUT_COUNT)
4179 			return -EBUSY;
4180 	}
4181 	return 0;
4182 }
4183 
4184 static void
4185 bnx2_disable_nvram_write(struct bnx2 *bp)
4186 {
4187 	u32 val;
4188 
4189 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4190 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4191 }
4192 
4193 
4194 static void
4195 bnx2_enable_nvram_access(struct bnx2 *bp)
4196 {
4197 	u32 val;
4198 
4199 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4200 	/* Enable both bits, even on read. */
4201 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4202 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4203 }
4204 
4205 static void
4206 bnx2_disable_nvram_access(struct bnx2 *bp)
4207 {
4208 	u32 val;
4209 
4210 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4211 	/* Disable both bits, even after read. */
4212 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4213 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4214 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4215 }
4216 
4217 static int
4218 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4219 {
4220 	u32 cmd;
4221 	int j;
4222 
4223 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4224 		/* Buffered flash, no erase needed */
4225 		return 0;
4226 
4227 	/* Build an erase command */
4228 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4229 	      BNX2_NVM_COMMAND_DOIT;
4230 
4231 	/* Need to clear DONE bit separately. */
4232 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4233 
4234 	/* Address of the NVRAM to read from. */
4235 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4236 
4237 	/* Issue an erase command. */
4238 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4239 
4240 	/* Wait for completion. */
4241 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4242 		u32 val;
4243 
4244 		udelay(5);
4245 
4246 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4247 		if (val & BNX2_NVM_COMMAND_DONE)
4248 			break;
4249 	}
4250 
4251 	if (j >= NVRAM_TIMEOUT_COUNT)
4252 		return -EBUSY;
4253 
4254 	return 0;
4255 }
4256 
4257 static int
4258 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4259 {
4260 	u32 cmd;
4261 	int j;
4262 
4263 	/* Build the command word. */
4264 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4265 
4266 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4267 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4268 		offset = ((offset / bp->flash_info->page_size) <<
4269 			   bp->flash_info->page_bits) +
4270 			  (offset % bp->flash_info->page_size);
4271 	}
4272 
4273 	/* Need to clear DONE bit separately. */
4274 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4275 
4276 	/* Address of the NVRAM to read from. */
4277 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4278 
4279 	/* Issue a read command. */
4280 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4281 
4282 	/* Wait for completion. */
4283 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4284 		u32 val;
4285 
4286 		udelay(5);
4287 
4288 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4289 		if (val & BNX2_NVM_COMMAND_DONE) {
4290 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4291 			memcpy(ret_val, &v, 4);
4292 			break;
4293 		}
4294 	}
4295 	if (j >= NVRAM_TIMEOUT_COUNT)
4296 		return -EBUSY;
4297 
4298 	return 0;
4299 }
4300 
4301 
4302 static int
4303 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4304 {
4305 	u32 cmd;
4306 	__be32 val32;
4307 	int j;
4308 
4309 	/* Build the command word. */
4310 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4311 
4312 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4313 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4314 		offset = ((offset / bp->flash_info->page_size) <<
4315 			  bp->flash_info->page_bits) +
4316 			 (offset % bp->flash_info->page_size);
4317 	}
4318 
4319 	/* Need to clear DONE bit separately. */
4320 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4321 
4322 	memcpy(&val32, val, 4);
4323 
4324 	/* Write the data. */
4325 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4326 
4327 	/* Address of the NVRAM to write to. */
4328 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4329 
4330 	/* Issue the write command. */
4331 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4332 
4333 	/* Wait for completion. */
4334 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4335 		udelay(5);
4336 
4337 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4338 			break;
4339 	}
4340 	if (j >= NVRAM_TIMEOUT_COUNT)
4341 		return -EBUSY;
4342 
4343 	return 0;
4344 }
4345 
4346 static int
4347 bnx2_init_nvram(struct bnx2 *bp)
4348 {
4349 	u32 val;
4350 	int j, entry_count, rc = 0;
4351 	const struct flash_spec *flash;
4352 
4353 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4354 		bp->flash_info = &flash_5709;
4355 		goto get_flash_size;
4356 	}
4357 
4358 	/* Determine the selected interface. */
4359 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4360 
4361 	entry_count = ARRAY_SIZE(flash_table);
4362 
4363 	if (val & 0x40000000) {
4364 
4365 		/* Flash interface has been reconfigured */
4366 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4367 		     j++, flash++) {
4368 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4369 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4370 				bp->flash_info = flash;
4371 				break;
4372 			}
4373 		}
4374 	}
4375 	else {
4376 		u32 mask;
4377 		/* Not yet been reconfigured */
4378 
4379 		if (val & (1 << 23))
4380 			mask = FLASH_BACKUP_STRAP_MASK;
4381 		else
4382 			mask = FLASH_STRAP_MASK;
4383 
4384 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4385 			j++, flash++) {
4386 
4387 			if ((val & mask) == (flash->strapping & mask)) {
4388 				bp->flash_info = flash;
4389 
4390 				/* Request access to the flash interface. */
4391 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4392 					return rc;
4393 
4394 				/* Enable access to flash interface */
4395 				bnx2_enable_nvram_access(bp);
4396 
4397 				/* Reconfigure the flash interface */
4398 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4399 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4400 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4401 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4402 
4403 				/* Disable access to flash interface */
4404 				bnx2_disable_nvram_access(bp);
4405 				bnx2_release_nvram_lock(bp);
4406 
4407 				break;
4408 			}
4409 		}
4410 	} /* if (val & 0x40000000) */
4411 
4412 	if (j == entry_count) {
4413 		bp->flash_info = NULL;
4414 		pr_alert("Unknown flash/EEPROM type\n");
4415 		return -ENODEV;
4416 	}
4417 
4418 get_flash_size:
4419 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4420 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4421 	if (val)
4422 		bp->flash_size = val;
4423 	else
4424 		bp->flash_size = bp->flash_info->total_size;
4425 
4426 	return rc;
4427 }
4428 
4429 static int
4430 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4431 		int buf_size)
4432 {
4433 	int rc = 0;
4434 	u32 cmd_flags, offset32, len32, extra;
4435 
4436 	if (buf_size == 0)
4437 		return 0;
4438 
4439 	/* Request access to the flash interface. */
4440 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4441 		return rc;
4442 
4443 	/* Enable access to flash interface */
4444 	bnx2_enable_nvram_access(bp);
4445 
4446 	len32 = buf_size;
4447 	offset32 = offset;
4448 	extra = 0;
4449 
4450 	cmd_flags = 0;
4451 
4452 	if (offset32 & 3) {
4453 		u8 buf[4];
4454 		u32 pre_len;
4455 
4456 		offset32 &= ~3;
4457 		pre_len = 4 - (offset & 3);
4458 
4459 		if (pre_len >= len32) {
4460 			pre_len = len32;
4461 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4462 				    BNX2_NVM_COMMAND_LAST;
4463 		}
4464 		else {
4465 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4466 		}
4467 
4468 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469 
4470 		if (rc)
4471 			return rc;
4472 
4473 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4474 
4475 		offset32 += 4;
4476 		ret_buf += pre_len;
4477 		len32 -= pre_len;
4478 	}
4479 	if (len32 & 3) {
4480 		extra = 4 - (len32 & 3);
4481 		len32 = (len32 + 4) & ~3;
4482 	}
4483 
4484 	if (len32 == 4) {
4485 		u8 buf[4];
4486 
4487 		if (cmd_flags)
4488 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4489 		else
4490 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4491 				    BNX2_NVM_COMMAND_LAST;
4492 
4493 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4494 
4495 		memcpy(ret_buf, buf, 4 - extra);
4496 	}
4497 	else if (len32 > 0) {
4498 		u8 buf[4];
4499 
4500 		/* Read the first word. */
4501 		if (cmd_flags)
4502 			cmd_flags = 0;
4503 		else
4504 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4505 
4506 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4507 
4508 		/* Advance to the next dword. */
4509 		offset32 += 4;
4510 		ret_buf += 4;
4511 		len32 -= 4;
4512 
4513 		while (len32 > 4 && rc == 0) {
4514 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4515 
4516 			/* Advance to the next dword. */
4517 			offset32 += 4;
4518 			ret_buf += 4;
4519 			len32 -= 4;
4520 		}
4521 
4522 		if (rc)
4523 			return rc;
4524 
4525 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4526 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4527 
4528 		memcpy(ret_buf, buf, 4 - extra);
4529 	}
4530 
4531 	/* Disable access to flash interface */
4532 	bnx2_disable_nvram_access(bp);
4533 
4534 	bnx2_release_nvram_lock(bp);
4535 
4536 	return rc;
4537 }
4538 
4539 static int
4540 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4541 		int buf_size)
4542 {
4543 	u32 written, offset32, len32;
4544 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4545 	int rc = 0;
4546 	int align_start, align_end;
4547 
4548 	buf = data_buf;
4549 	offset32 = offset;
4550 	len32 = buf_size;
4551 	align_start = align_end = 0;
4552 
4553 	if ((align_start = (offset32 & 3))) {
4554 		offset32 &= ~3;
4555 		len32 += align_start;
4556 		if (len32 < 4)
4557 			len32 = 4;
4558 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4559 			return rc;
4560 	}
4561 
4562 	if (len32 & 3) {
4563 		align_end = 4 - (len32 & 3);
4564 		len32 += align_end;
4565 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4566 			return rc;
4567 	}
4568 
4569 	if (align_start || align_end) {
4570 		align_buf = kmalloc(len32, GFP_KERNEL);
4571 		if (align_buf == NULL)
4572 			return -ENOMEM;
4573 		if (align_start) {
4574 			memcpy(align_buf, start, 4);
4575 		}
4576 		if (align_end) {
4577 			memcpy(align_buf + len32 - 4, end, 4);
4578 		}
4579 		memcpy(align_buf + align_start, data_buf, buf_size);
4580 		buf = align_buf;
4581 	}
4582 
4583 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4584 		flash_buffer = kmalloc(264, GFP_KERNEL);
4585 		if (flash_buffer == NULL) {
4586 			rc = -ENOMEM;
4587 			goto nvram_write_end;
4588 		}
4589 	}
4590 
4591 	written = 0;
4592 	while ((written < len32) && (rc == 0)) {
4593 		u32 page_start, page_end, data_start, data_end;
4594 		u32 addr, cmd_flags;
4595 		int i;
4596 
4597 	        /* Find the page_start addr */
4598 		page_start = offset32 + written;
4599 		page_start -= (page_start % bp->flash_info->page_size);
4600 		/* Find the page_end addr */
4601 		page_end = page_start + bp->flash_info->page_size;
4602 		/* Find the data_start addr */
4603 		data_start = (written == 0) ? offset32 : page_start;
4604 		/* Find the data_end addr */
4605 		data_end = (page_end > offset32 + len32) ?
4606 			(offset32 + len32) : page_end;
4607 
4608 		/* Request access to the flash interface. */
4609 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4610 			goto nvram_write_end;
4611 
4612 		/* Enable access to flash interface */
4613 		bnx2_enable_nvram_access(bp);
4614 
4615 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4616 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4617 			int j;
4618 
4619 			/* Read the whole page into the buffer
4620 			 * (non-buffer flash only) */
4621 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4622 				if (j == (bp->flash_info->page_size - 4)) {
4623 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4624 				}
4625 				rc = bnx2_nvram_read_dword(bp,
4626 					page_start + j,
4627 					&flash_buffer[j],
4628 					cmd_flags);
4629 
4630 				if (rc)
4631 					goto nvram_write_end;
4632 
4633 				cmd_flags = 0;
4634 			}
4635 		}
4636 
4637 		/* Enable writes to flash interface (unlock write-protect) */
4638 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4639 			goto nvram_write_end;
4640 
4641 		/* Loop to write back the buffer data from page_start to
4642 		 * data_start */
4643 		i = 0;
4644 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4645 			/* Erase the page */
4646 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4647 				goto nvram_write_end;
4648 
4649 			/* Re-enable the write again for the actual write */
4650 			bnx2_enable_nvram_write(bp);
4651 
4652 			for (addr = page_start; addr < data_start;
4653 				addr += 4, i += 4) {
4654 
4655 				rc = bnx2_nvram_write_dword(bp, addr,
4656 					&flash_buffer[i], cmd_flags);
4657 
4658 				if (rc != 0)
4659 					goto nvram_write_end;
4660 
4661 				cmd_flags = 0;
4662 			}
4663 		}
4664 
4665 		/* Loop to write the new data from data_start to data_end */
4666 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4667 			if ((addr == page_end - 4) ||
4668 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4669 				 (addr == data_end - 4))) {
4670 
4671 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4672 			}
4673 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4674 				cmd_flags);
4675 
4676 			if (rc != 0)
4677 				goto nvram_write_end;
4678 
4679 			cmd_flags = 0;
4680 			buf += 4;
4681 		}
4682 
4683 		/* Loop to write back the buffer data from data_end
4684 		 * to page_end */
4685 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4686 			for (addr = data_end; addr < page_end;
4687 				addr += 4, i += 4) {
4688 
4689 				if (addr == page_end-4) {
4690 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4691                 		}
4692 				rc = bnx2_nvram_write_dword(bp, addr,
4693 					&flash_buffer[i], cmd_flags);
4694 
4695 				if (rc != 0)
4696 					goto nvram_write_end;
4697 
4698 				cmd_flags = 0;
4699 			}
4700 		}
4701 
4702 		/* Disable writes to flash interface (lock write-protect) */
4703 		bnx2_disable_nvram_write(bp);
4704 
4705 		/* Disable access to flash interface */
4706 		bnx2_disable_nvram_access(bp);
4707 		bnx2_release_nvram_lock(bp);
4708 
4709 		/* Increment written */
4710 		written += data_end - data_start;
4711 	}
4712 
4713 nvram_write_end:
4714 	kfree(flash_buffer);
4715 	kfree(align_buf);
4716 	return rc;
4717 }
4718 
4719 static void
4720 bnx2_init_fw_cap(struct bnx2 *bp)
4721 {
4722 	u32 val, sig = 0;
4723 
4724 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4725 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4726 
4727 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4728 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4729 
4730 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4731 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4732 		return;
4733 
4734 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4735 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4736 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4737 	}
4738 
4739 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4740 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4741 		u32 link;
4742 
4743 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4744 
4745 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4746 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4747 			bp->phy_port = PORT_FIBRE;
4748 		else
4749 			bp->phy_port = PORT_TP;
4750 
4751 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4752 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4753 	}
4754 
4755 	if (netif_running(bp->dev) && sig)
4756 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4757 }
4758 
4759 static void
4760 bnx2_setup_msix_tbl(struct bnx2 *bp)
4761 {
4762 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4763 
4764 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4765 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4766 }
4767 
4768 static void
4769 bnx2_wait_dma_complete(struct bnx2 *bp)
4770 {
4771 	u32 val;
4772 	int i;
4773 
4774 	/*
4775 	 * Wait for the current PCI transaction to complete before
4776 	 * issuing a reset.
4777 	 */
4778 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4779 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4780 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4781 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4782 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4783 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4784 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4785 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4786 		udelay(5);
4787 	} else {  /* 5709 */
4788 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4789 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4790 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4791 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4792 
4793 		for (i = 0; i < 100; i++) {
4794 			msleep(1);
4795 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4796 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4797 				break;
4798 		}
4799 	}
4800 
4801 	return;
4802 }
4803 
4804 
4805 static int
4806 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4807 {
4808 	u32 val;
4809 	int i, rc = 0;
4810 	u8 old_port;
4811 
4812 	/* Wait for the current PCI transaction to complete before
4813 	 * issuing a reset. */
4814 	bnx2_wait_dma_complete(bp);
4815 
4816 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4817 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4818 
4819 	/* Deposit a driver reset signature so the firmware knows that
4820 	 * this is a soft reset. */
4821 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4822 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4823 
4824 	/* Do a dummy read to force the chip to complete all current transaction
4825 	 * before we issue a reset. */
4826 	val = BNX2_RD(bp, BNX2_MISC_ID);
4827 
4828 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4829 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4830 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4831 		udelay(5);
4832 
4833 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4834 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4835 
4836 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4837 
4838 	} else {
4839 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4840 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4841 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4842 
4843 		/* Chip reset. */
4844 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4845 
4846 		/* Reading back any register after chip reset will hang the
4847 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4848 		 * of margin for write posting.
4849 		 */
4850 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4851 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4852 			msleep(20);
4853 
4854 		/* Reset takes approximate 30 usec */
4855 		for (i = 0; i < 10; i++) {
4856 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4857 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4858 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4859 				break;
4860 			udelay(10);
4861 		}
4862 
4863 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4864 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4865 			pr_err("Chip reset did not complete\n");
4866 			return -EBUSY;
4867 		}
4868 	}
4869 
4870 	/* Make sure byte swapping is properly configured. */
4871 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4872 	if (val != 0x01020304) {
4873 		pr_err("Chip not in correct endian mode\n");
4874 		return -ENODEV;
4875 	}
4876 
4877 	/* Wait for the firmware to finish its initialization. */
4878 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4879 	if (rc)
4880 		return rc;
4881 
4882 	spin_lock_bh(&bp->phy_lock);
4883 	old_port = bp->phy_port;
4884 	bnx2_init_fw_cap(bp);
4885 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4886 	    old_port != bp->phy_port)
4887 		bnx2_set_default_remote_link(bp);
4888 	spin_unlock_bh(&bp->phy_lock);
4889 
4890 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4891 		/* Adjust the voltage regular to two steps lower.  The default
4892 		 * of this register is 0x0000000e. */
4893 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4894 
4895 		/* Remove bad rbuf memory from the free pool. */
4896 		rc = bnx2_alloc_bad_rbuf(bp);
4897 	}
4898 
4899 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4900 		bnx2_setup_msix_tbl(bp);
4901 		/* Prevent MSIX table reads and write from timing out */
4902 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4903 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4904 	}
4905 
4906 	return rc;
4907 }
4908 
4909 static int
4910 bnx2_init_chip(struct bnx2 *bp)
4911 {
4912 	u32 val, mtu;
4913 	int rc, i;
4914 
4915 	/* Make sure the interrupt is not active. */
4916 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4917 
4918 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4919 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4920 #ifdef __BIG_ENDIAN
4921 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4922 #endif
4923 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4924 	      DMA_READ_CHANS << 12 |
4925 	      DMA_WRITE_CHANS << 16;
4926 
4927 	val |= (0x2 << 20) | (1 << 11);
4928 
4929 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4930 		val |= (1 << 23);
4931 
4932 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4933 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4934 	    !(bp->flags & BNX2_FLAG_PCIX))
4935 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4936 
4937 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4938 
4939 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4940 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4941 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4942 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4943 	}
4944 
4945 	if (bp->flags & BNX2_FLAG_PCIX) {
4946 		u16 val16;
4947 
4948 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4949 				     &val16);
4950 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4951 				      val16 & ~PCI_X_CMD_ERO);
4952 	}
4953 
4954 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4955 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4956 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4957 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4958 
4959 	/* Initialize context mapping and zero out the quick contexts.  The
4960 	 * context block must have already been enabled. */
4961 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4962 		rc = bnx2_init_5709_context(bp);
4963 		if (rc)
4964 			return rc;
4965 	} else
4966 		bnx2_init_context(bp);
4967 
4968 	if ((rc = bnx2_init_cpus(bp)) != 0)
4969 		return rc;
4970 
4971 	bnx2_init_nvram(bp);
4972 
4973 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4974 
4975 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4976 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4977 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4978 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4979 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4980 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4981 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4982 	}
4983 
4984 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4985 
4986 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4987 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4988 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4989 
4990 	val = (BNX2_PAGE_BITS - 8) << 24;
4991 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4992 
4993 	/* Configure page size. */
4994 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4995 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4996 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4997 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4998 
4999 	val = bp->mac_addr[0] +
5000 	      (bp->mac_addr[1] << 8) +
5001 	      (bp->mac_addr[2] << 16) +
5002 	      bp->mac_addr[3] +
5003 	      (bp->mac_addr[4] << 8) +
5004 	      (bp->mac_addr[5] << 16);
5005 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
5006 
5007 	/* Program the MTU.  Also include 4 bytes for CRC32. */
5008 	mtu = bp->dev->mtu;
5009 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
5010 	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
5011 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
5012 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
5013 
5014 	if (mtu < 1500)
5015 		mtu = 1500;
5016 
5017 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5018 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5019 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5020 
5021 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5022 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5023 		bp->bnx2_napi[i].last_status_idx = 0;
5024 
5025 	bp->idle_chk_status_idx = 0xffff;
5026 
5027 	/* Set up how to generate a link change interrupt. */
5028 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5029 
5030 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5031 		(u64) bp->status_blk_mapping & 0xffffffff);
5032 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5033 
5034 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5035 		(u64) bp->stats_blk_mapping & 0xffffffff);
5036 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5037 		(u64) bp->stats_blk_mapping >> 32);
5038 
5039 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5040 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5041 
5042 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5043 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5044 
5045 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5046 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5047 
5048 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5049 
5050 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5051 
5052 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5053 		(bp->com_ticks_int << 16) | bp->com_ticks);
5054 
5055 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5056 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5057 
5058 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5059 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5060 	else
5061 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5062 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5063 
5064 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5065 		val = BNX2_HC_CONFIG_COLLECT_STATS;
5066 	else {
5067 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5068 		      BNX2_HC_CONFIG_COLLECT_STATS;
5069 	}
5070 
5071 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5072 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5073 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5074 
5075 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5076 	}
5077 
5078 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5079 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5080 
5081 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5082 
5083 	if (bp->rx_ticks < 25)
5084 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5085 	else
5086 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5087 
5088 	for (i = 1; i < bp->irq_nvecs; i++) {
5089 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5090 			   BNX2_HC_SB_CONFIG_1;
5091 
5092 		BNX2_WR(bp, base,
5093 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5094 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5095 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5096 
5097 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5098 			(bp->tx_quick_cons_trip_int << 16) |
5099 			 bp->tx_quick_cons_trip);
5100 
5101 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5102 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5103 
5104 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5105 			(bp->rx_quick_cons_trip_int << 16) |
5106 			bp->rx_quick_cons_trip);
5107 
5108 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5109 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5110 	}
5111 
5112 	/* Clear internal stats counters. */
5113 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5114 
5115 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5116 
5117 	/* Initialize the receive filter. */
5118 	bnx2_set_rx_mode(bp->dev);
5119 
5120 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5121 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5122 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5123 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5124 	}
5125 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5126 			  1, 0);
5127 
5128 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5129 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5130 
5131 	udelay(20);
5132 
5133 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5134 
5135 	return rc;
5136 }
5137 
5138 static void
5139 bnx2_clear_ring_states(struct bnx2 *bp)
5140 {
5141 	struct bnx2_napi *bnapi;
5142 	struct bnx2_tx_ring_info *txr;
5143 	struct bnx2_rx_ring_info *rxr;
5144 	int i;
5145 
5146 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5147 		bnapi = &bp->bnx2_napi[i];
5148 		txr = &bnapi->tx_ring;
5149 		rxr = &bnapi->rx_ring;
5150 
5151 		txr->tx_cons = 0;
5152 		txr->hw_tx_cons = 0;
5153 		rxr->rx_prod_bseq = 0;
5154 		rxr->rx_prod = 0;
5155 		rxr->rx_cons = 0;
5156 		rxr->rx_pg_prod = 0;
5157 		rxr->rx_pg_cons = 0;
5158 	}
5159 }
5160 
5161 static void
5162 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5163 {
5164 	u32 val, offset0, offset1, offset2, offset3;
5165 	u32 cid_addr = GET_CID_ADDR(cid);
5166 
5167 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5168 		offset0 = BNX2_L2CTX_TYPE_XI;
5169 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5170 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5171 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5172 	} else {
5173 		offset0 = BNX2_L2CTX_TYPE;
5174 		offset1 = BNX2_L2CTX_CMD_TYPE;
5175 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5176 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5177 	}
5178 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5179 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5180 
5181 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5182 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5183 
5184 	val = (u64) txr->tx_desc_mapping >> 32;
5185 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5186 
5187 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5188 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5189 }
5190 
5191 static void
5192 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5193 {
5194 	struct bnx2_tx_bd *txbd;
5195 	u32 cid = TX_CID;
5196 	struct bnx2_napi *bnapi;
5197 	struct bnx2_tx_ring_info *txr;
5198 
5199 	bnapi = &bp->bnx2_napi[ring_num];
5200 	txr = &bnapi->tx_ring;
5201 
5202 	if (ring_num == 0)
5203 		cid = TX_CID;
5204 	else
5205 		cid = TX_TSS_CID + ring_num - 1;
5206 
5207 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5208 
5209 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5210 
5211 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5212 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5213 
5214 	txr->tx_prod = 0;
5215 	txr->tx_prod_bseq = 0;
5216 
5217 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5218 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5219 
5220 	bnx2_init_tx_context(bp, cid, txr);
5221 }
5222 
5223 static void
5224 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5225 		     u32 buf_size, int num_rings)
5226 {
5227 	int i;
5228 	struct bnx2_rx_bd *rxbd;
5229 
5230 	for (i = 0; i < num_rings; i++) {
5231 		int j;
5232 
5233 		rxbd = &rx_ring[i][0];
5234 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5235 			rxbd->rx_bd_len = buf_size;
5236 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5237 		}
5238 		if (i == (num_rings - 1))
5239 			j = 0;
5240 		else
5241 			j = i + 1;
5242 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5243 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5244 	}
5245 }
5246 
5247 static void
5248 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5249 {
5250 	int i;
5251 	u16 prod, ring_prod;
5252 	u32 cid, rx_cid_addr, val;
5253 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5254 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5255 
5256 	if (ring_num == 0)
5257 		cid = RX_CID;
5258 	else
5259 		cid = RX_RSS_CID + ring_num - 1;
5260 
5261 	rx_cid_addr = GET_CID_ADDR(cid);
5262 
5263 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5264 			     bp->rx_buf_use_size, bp->rx_max_ring);
5265 
5266 	bnx2_init_rx_context(bp, cid);
5267 
5268 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5269 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5270 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5271 	}
5272 
5273 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5274 	if (bp->rx_pg_ring_size) {
5275 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5276 				     rxr->rx_pg_desc_mapping,
5277 				     PAGE_SIZE, bp->rx_max_pg_ring);
5278 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5279 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5280 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5281 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5282 
5283 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5284 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5285 
5286 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5287 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5288 
5289 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5290 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5291 	}
5292 
5293 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5294 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5295 
5296 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5297 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5298 
5299 	ring_prod = prod = rxr->rx_pg_prod;
5300 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5301 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5302 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5303 				    ring_num, i, bp->rx_pg_ring_size);
5304 			break;
5305 		}
5306 		prod = BNX2_NEXT_RX_BD(prod);
5307 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5308 	}
5309 	rxr->rx_pg_prod = prod;
5310 
5311 	ring_prod = prod = rxr->rx_prod;
5312 	for (i = 0; i < bp->rx_ring_size; i++) {
5313 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5314 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5315 				    ring_num, i, bp->rx_ring_size);
5316 			break;
5317 		}
5318 		prod = BNX2_NEXT_RX_BD(prod);
5319 		ring_prod = BNX2_RX_RING_IDX(prod);
5320 	}
5321 	rxr->rx_prod = prod;
5322 
5323 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5324 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5325 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5326 
5327 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5328 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5329 
5330 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5331 }
5332 
5333 static void
5334 bnx2_init_all_rings(struct bnx2 *bp)
5335 {
5336 	int i;
5337 	u32 val;
5338 
5339 	bnx2_clear_ring_states(bp);
5340 
5341 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5342 	for (i = 0; i < bp->num_tx_rings; i++)
5343 		bnx2_init_tx_ring(bp, i);
5344 
5345 	if (bp->num_tx_rings > 1)
5346 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5347 			(TX_TSS_CID << 7));
5348 
5349 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5350 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5351 
5352 	for (i = 0; i < bp->num_rx_rings; i++)
5353 		bnx2_init_rx_ring(bp, i);
5354 
5355 	if (bp->num_rx_rings > 1) {
5356 		u32 tbl_32 = 0;
5357 
5358 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5359 			int shift = (i % 8) << 2;
5360 
5361 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5362 			if ((i % 8) == 7) {
5363 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5364 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5365 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5366 					BNX2_RLUP_RSS_COMMAND_WRITE |
5367 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5368 				tbl_32 = 0;
5369 			}
5370 		}
5371 
5372 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5373 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5374 
5375 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5376 
5377 	}
5378 }
5379 
5380 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5381 {
5382 	u32 max, num_rings = 1;
5383 
5384 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5385 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5386 		num_rings++;
5387 	}
5388 	/* round to next power of 2 */
5389 	max = max_size;
5390 	while ((max & num_rings) == 0)
5391 		max >>= 1;
5392 
5393 	if (num_rings != max)
5394 		max <<= 1;
5395 
5396 	return max;
5397 }
5398 
5399 static void
5400 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5401 {
5402 	u32 rx_size, rx_space, jumbo_size;
5403 
5404 	/* 8 for CRC and VLAN */
5405 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5406 
5407 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5408 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5409 
5410 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5411 	bp->rx_pg_ring_size = 0;
5412 	bp->rx_max_pg_ring = 0;
5413 	bp->rx_max_pg_ring_idx = 0;
5414 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5415 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5416 
5417 		jumbo_size = size * pages;
5418 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5419 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5420 
5421 		bp->rx_pg_ring_size = jumbo_size;
5422 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5423 							BNX2_MAX_RX_PG_RINGS);
5424 		bp->rx_max_pg_ring_idx =
5425 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5426 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5427 		bp->rx_copy_thresh = 0;
5428 	}
5429 
5430 	bp->rx_buf_use_size = rx_size;
5431 	/* hw alignment + build_skb() overhead*/
5432 	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5433 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5434 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5435 	bp->rx_ring_size = size;
5436 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5437 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5438 }
5439 
5440 static void
5441 bnx2_free_tx_skbs(struct bnx2 *bp)
5442 {
5443 	int i;
5444 
5445 	for (i = 0; i < bp->num_tx_rings; i++) {
5446 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5447 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5448 		int j;
5449 
5450 		if (txr->tx_buf_ring == NULL)
5451 			continue;
5452 
5453 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5454 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5455 			struct sk_buff *skb = tx_buf->skb;
5456 			int k, last;
5457 
5458 			if (skb == NULL) {
5459 				j = BNX2_NEXT_TX_BD(j);
5460 				continue;
5461 			}
5462 
5463 			dma_unmap_single(&bp->pdev->dev,
5464 					 dma_unmap_addr(tx_buf, mapping),
5465 					 skb_headlen(skb),
5466 					 PCI_DMA_TODEVICE);
5467 
5468 			tx_buf->skb = NULL;
5469 
5470 			last = tx_buf->nr_frags;
5471 			j = BNX2_NEXT_TX_BD(j);
5472 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5473 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5474 				dma_unmap_page(&bp->pdev->dev,
5475 					dma_unmap_addr(tx_buf, mapping),
5476 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5477 					PCI_DMA_TODEVICE);
5478 			}
5479 			dev_kfree_skb(skb);
5480 		}
5481 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5482 	}
5483 }
5484 
5485 static void
5486 bnx2_free_rx_skbs(struct bnx2 *bp)
5487 {
5488 	int i;
5489 
5490 	for (i = 0; i < bp->num_rx_rings; i++) {
5491 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5492 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5493 		int j;
5494 
5495 		if (rxr->rx_buf_ring == NULL)
5496 			return;
5497 
5498 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5499 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5500 			u8 *data = rx_buf->data;
5501 
5502 			if (data == NULL)
5503 				continue;
5504 
5505 			dma_unmap_single(&bp->pdev->dev,
5506 					 dma_unmap_addr(rx_buf, mapping),
5507 					 bp->rx_buf_use_size,
5508 					 PCI_DMA_FROMDEVICE);
5509 
5510 			rx_buf->data = NULL;
5511 
5512 			kfree(data);
5513 		}
5514 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5515 			bnx2_free_rx_page(bp, rxr, j);
5516 	}
5517 }
5518 
5519 static void
5520 bnx2_free_skbs(struct bnx2 *bp)
5521 {
5522 	bnx2_free_tx_skbs(bp);
5523 	bnx2_free_rx_skbs(bp);
5524 }
5525 
5526 static int
5527 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5528 {
5529 	int rc;
5530 
5531 	rc = bnx2_reset_chip(bp, reset_code);
5532 	bnx2_free_skbs(bp);
5533 	if (rc)
5534 		return rc;
5535 
5536 	if ((rc = bnx2_init_chip(bp)) != 0)
5537 		return rc;
5538 
5539 	bnx2_init_all_rings(bp);
5540 	return 0;
5541 }
5542 
5543 static int
5544 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5545 {
5546 	int rc;
5547 
5548 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5549 		return rc;
5550 
5551 	spin_lock_bh(&bp->phy_lock);
5552 	bnx2_init_phy(bp, reset_phy);
5553 	bnx2_set_link(bp);
5554 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5555 		bnx2_remote_phy_event(bp);
5556 	spin_unlock_bh(&bp->phy_lock);
5557 	return 0;
5558 }
5559 
5560 static int
5561 bnx2_shutdown_chip(struct bnx2 *bp)
5562 {
5563 	u32 reset_code;
5564 
5565 	if (bp->flags & BNX2_FLAG_NO_WOL)
5566 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5567 	else if (bp->wol)
5568 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5569 	else
5570 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5571 
5572 	return bnx2_reset_chip(bp, reset_code);
5573 }
5574 
5575 static int
5576 bnx2_test_registers(struct bnx2 *bp)
5577 {
5578 	int ret;
5579 	int i, is_5709;
5580 	static const struct {
5581 		u16   offset;
5582 		u16   flags;
5583 #define BNX2_FL_NOT_5709	1
5584 		u32   rw_mask;
5585 		u32   ro_mask;
5586 	} reg_tbl[] = {
5587 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5588 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5589 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5590 
5591 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5592 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5593 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5594 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5595 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5596 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5597 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5598 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5599 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5600 
5601 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5602 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5603 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5604 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5605 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5606 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5607 
5608 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5609 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5610 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5611 
5612 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5613 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5614 
5615 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5616 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5617 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5618 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5619 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5620 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5621 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5622 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5623 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5624 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5625 
5626 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5627 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5628 
5629 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5630 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5631 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5632 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5633 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5634 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5635 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5636 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5637 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5638 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5639 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5640 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5641 
5642 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5643 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5644 
5645 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5646 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5647 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5648 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5649 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5650 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5651 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5652 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5653 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5654 
5655 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5656 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5657 
5658 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5659 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5660 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5661 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5662 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5663 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5664 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5665 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5666 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5667 
5668 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5669 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5670 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5671 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5672 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5673 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5674 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5675 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5676 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5677 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5678 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5679 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5680 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5681 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5682 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5683 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5684 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5685 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5686 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5687 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5688 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5689 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5690 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5691 
5692 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5693 	};
5694 
5695 	ret = 0;
5696 	is_5709 = 0;
5697 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5698 		is_5709 = 1;
5699 
5700 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5701 		u32 offset, rw_mask, ro_mask, save_val, val;
5702 		u16 flags = reg_tbl[i].flags;
5703 
5704 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5705 			continue;
5706 
5707 		offset = (u32) reg_tbl[i].offset;
5708 		rw_mask = reg_tbl[i].rw_mask;
5709 		ro_mask = reg_tbl[i].ro_mask;
5710 
5711 		save_val = readl(bp->regview + offset);
5712 
5713 		writel(0, bp->regview + offset);
5714 
5715 		val = readl(bp->regview + offset);
5716 		if ((val & rw_mask) != 0) {
5717 			goto reg_test_err;
5718 		}
5719 
5720 		if ((val & ro_mask) != (save_val & ro_mask)) {
5721 			goto reg_test_err;
5722 		}
5723 
5724 		writel(0xffffffff, bp->regview + offset);
5725 
5726 		val = readl(bp->regview + offset);
5727 		if ((val & rw_mask) != rw_mask) {
5728 			goto reg_test_err;
5729 		}
5730 
5731 		if ((val & ro_mask) != (save_val & ro_mask)) {
5732 			goto reg_test_err;
5733 		}
5734 
5735 		writel(save_val, bp->regview + offset);
5736 		continue;
5737 
5738 reg_test_err:
5739 		writel(save_val, bp->regview + offset);
5740 		ret = -ENODEV;
5741 		break;
5742 	}
5743 	return ret;
5744 }
5745 
5746 static int
5747 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5748 {
5749 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5750 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5751 	int i;
5752 
5753 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5754 		u32 offset;
5755 
5756 		for (offset = 0; offset < size; offset += 4) {
5757 
5758 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5759 
5760 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5761 				test_pattern[i]) {
5762 				return -ENODEV;
5763 			}
5764 		}
5765 	}
5766 	return 0;
5767 }
5768 
5769 static int
5770 bnx2_test_memory(struct bnx2 *bp)
5771 {
5772 	int ret = 0;
5773 	int i;
5774 	static struct mem_entry {
5775 		u32   offset;
5776 		u32   len;
5777 	} mem_tbl_5706[] = {
5778 		{ 0x60000,  0x4000 },
5779 		{ 0xa0000,  0x3000 },
5780 		{ 0xe0000,  0x4000 },
5781 		{ 0x120000, 0x4000 },
5782 		{ 0x1a0000, 0x4000 },
5783 		{ 0x160000, 0x4000 },
5784 		{ 0xffffffff, 0    },
5785 	},
5786 	mem_tbl_5709[] = {
5787 		{ 0x60000,  0x4000 },
5788 		{ 0xa0000,  0x3000 },
5789 		{ 0xe0000,  0x4000 },
5790 		{ 0x120000, 0x4000 },
5791 		{ 0x1a0000, 0x4000 },
5792 		{ 0xffffffff, 0    },
5793 	};
5794 	struct mem_entry *mem_tbl;
5795 
5796 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5797 		mem_tbl = mem_tbl_5709;
5798 	else
5799 		mem_tbl = mem_tbl_5706;
5800 
5801 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5802 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5803 			mem_tbl[i].len)) != 0) {
5804 			return ret;
5805 		}
5806 	}
5807 
5808 	return ret;
5809 }
5810 
5811 #define BNX2_MAC_LOOPBACK	0
5812 #define BNX2_PHY_LOOPBACK	1
5813 
5814 static int
5815 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5816 {
5817 	unsigned int pkt_size, num_pkts, i;
5818 	struct sk_buff *skb;
5819 	u8 *data;
5820 	unsigned char *packet;
5821 	u16 rx_start_idx, rx_idx;
5822 	dma_addr_t map;
5823 	struct bnx2_tx_bd *txbd;
5824 	struct bnx2_sw_bd *rx_buf;
5825 	struct l2_fhdr *rx_hdr;
5826 	int ret = -ENODEV;
5827 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5828 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5829 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5830 
5831 	tx_napi = bnapi;
5832 
5833 	txr = &tx_napi->tx_ring;
5834 	rxr = &bnapi->rx_ring;
5835 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5836 		bp->loopback = MAC_LOOPBACK;
5837 		bnx2_set_mac_loopback(bp);
5838 	}
5839 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5840 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5841 			return 0;
5842 
5843 		bp->loopback = PHY_LOOPBACK;
5844 		bnx2_set_phy_loopback(bp);
5845 	}
5846 	else
5847 		return -EINVAL;
5848 
5849 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5850 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5851 	if (!skb)
5852 		return -ENOMEM;
5853 	packet = skb_put(skb, pkt_size);
5854 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5855 	memset(packet + ETH_ALEN, 0x0, 8);
5856 	for (i = 14; i < pkt_size; i++)
5857 		packet[i] = (unsigned char) (i & 0xff);
5858 
5859 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5860 			     PCI_DMA_TODEVICE);
5861 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5862 		dev_kfree_skb(skb);
5863 		return -EIO;
5864 	}
5865 
5866 	BNX2_WR(bp, BNX2_HC_COMMAND,
5867 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5868 
5869 	BNX2_RD(bp, BNX2_HC_COMMAND);
5870 
5871 	udelay(5);
5872 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5873 
5874 	num_pkts = 0;
5875 
5876 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5877 
5878 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5879 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5880 	txbd->tx_bd_mss_nbytes = pkt_size;
5881 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5882 
5883 	num_pkts++;
5884 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5885 	txr->tx_prod_bseq += pkt_size;
5886 
5887 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5888 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5889 
5890 	udelay(100);
5891 
5892 	BNX2_WR(bp, BNX2_HC_COMMAND,
5893 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5894 
5895 	BNX2_RD(bp, BNX2_HC_COMMAND);
5896 
5897 	udelay(5);
5898 
5899 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5900 	dev_kfree_skb(skb);
5901 
5902 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5903 		goto loopback_test_done;
5904 
5905 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5906 	if (rx_idx != rx_start_idx + num_pkts) {
5907 		goto loopback_test_done;
5908 	}
5909 
5910 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5911 	data = rx_buf->data;
5912 
5913 	rx_hdr = get_l2_fhdr(data);
5914 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5915 
5916 	dma_sync_single_for_cpu(&bp->pdev->dev,
5917 		dma_unmap_addr(rx_buf, mapping),
5918 		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5919 
5920 	if (rx_hdr->l2_fhdr_status &
5921 		(L2_FHDR_ERRORS_BAD_CRC |
5922 		L2_FHDR_ERRORS_PHY_DECODE |
5923 		L2_FHDR_ERRORS_ALIGNMENT |
5924 		L2_FHDR_ERRORS_TOO_SHORT |
5925 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5926 
5927 		goto loopback_test_done;
5928 	}
5929 
5930 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5931 		goto loopback_test_done;
5932 	}
5933 
5934 	for (i = 14; i < pkt_size; i++) {
5935 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5936 			goto loopback_test_done;
5937 		}
5938 	}
5939 
5940 	ret = 0;
5941 
5942 loopback_test_done:
5943 	bp->loopback = 0;
5944 	return ret;
5945 }
5946 
5947 #define BNX2_MAC_LOOPBACK_FAILED	1
5948 #define BNX2_PHY_LOOPBACK_FAILED	2
5949 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5950 					 BNX2_PHY_LOOPBACK_FAILED)
5951 
5952 static int
5953 bnx2_test_loopback(struct bnx2 *bp)
5954 {
5955 	int rc = 0;
5956 
5957 	if (!netif_running(bp->dev))
5958 		return BNX2_LOOPBACK_FAILED;
5959 
5960 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5961 	spin_lock_bh(&bp->phy_lock);
5962 	bnx2_init_phy(bp, 1);
5963 	spin_unlock_bh(&bp->phy_lock);
5964 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5965 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5966 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5967 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5968 	return rc;
5969 }
5970 
5971 #define NVRAM_SIZE 0x200
5972 #define CRC32_RESIDUAL 0xdebb20e3
5973 
5974 static int
5975 bnx2_test_nvram(struct bnx2 *bp)
5976 {
5977 	__be32 buf[NVRAM_SIZE / 4];
5978 	u8 *data = (u8 *) buf;
5979 	int rc = 0;
5980 	u32 magic, csum;
5981 
5982 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5983 		goto test_nvram_done;
5984 
5985         magic = be32_to_cpu(buf[0]);
5986 	if (magic != 0x669955aa) {
5987 		rc = -ENODEV;
5988 		goto test_nvram_done;
5989 	}
5990 
5991 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5992 		goto test_nvram_done;
5993 
5994 	csum = ether_crc_le(0x100, data);
5995 	if (csum != CRC32_RESIDUAL) {
5996 		rc = -ENODEV;
5997 		goto test_nvram_done;
5998 	}
5999 
6000 	csum = ether_crc_le(0x100, data + 0x100);
6001 	if (csum != CRC32_RESIDUAL) {
6002 		rc = -ENODEV;
6003 	}
6004 
6005 test_nvram_done:
6006 	return rc;
6007 }
6008 
6009 static int
6010 bnx2_test_link(struct bnx2 *bp)
6011 {
6012 	u32 bmsr;
6013 
6014 	if (!netif_running(bp->dev))
6015 		return -ENODEV;
6016 
6017 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6018 		if (bp->link_up)
6019 			return 0;
6020 		return -ENODEV;
6021 	}
6022 	spin_lock_bh(&bp->phy_lock);
6023 	bnx2_enable_bmsr1(bp);
6024 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6025 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6026 	bnx2_disable_bmsr1(bp);
6027 	spin_unlock_bh(&bp->phy_lock);
6028 
6029 	if (bmsr & BMSR_LSTATUS) {
6030 		return 0;
6031 	}
6032 	return -ENODEV;
6033 }
6034 
6035 static int
6036 bnx2_test_intr(struct bnx2 *bp)
6037 {
6038 	int i;
6039 	u16 status_idx;
6040 
6041 	if (!netif_running(bp->dev))
6042 		return -ENODEV;
6043 
6044 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6045 
6046 	/* This register is not touched during run-time. */
6047 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6048 	BNX2_RD(bp, BNX2_HC_COMMAND);
6049 
6050 	for (i = 0; i < 10; i++) {
6051 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6052 			status_idx) {
6053 
6054 			break;
6055 		}
6056 
6057 		msleep_interruptible(10);
6058 	}
6059 	if (i < 10)
6060 		return 0;
6061 
6062 	return -ENODEV;
6063 }
6064 
6065 /* Determining link for parallel detection. */
6066 static int
6067 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6068 {
6069 	u32 mode_ctl, an_dbg, exp;
6070 
6071 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6072 		return 0;
6073 
6074 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6075 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6076 
6077 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6078 		return 0;
6079 
6080 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6081 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6082 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6083 
6084 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6085 		return 0;
6086 
6087 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6088 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6089 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6090 
6091 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6092 		return 0;
6093 
6094 	return 1;
6095 }
6096 
6097 static void
6098 bnx2_5706_serdes_timer(struct bnx2 *bp)
6099 {
6100 	int check_link = 1;
6101 
6102 	spin_lock(&bp->phy_lock);
6103 	if (bp->serdes_an_pending) {
6104 		bp->serdes_an_pending--;
6105 		check_link = 0;
6106 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6107 		u32 bmcr;
6108 
6109 		bp->current_interval = BNX2_TIMER_INTERVAL;
6110 
6111 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6112 
6113 		if (bmcr & BMCR_ANENABLE) {
6114 			if (bnx2_5706_serdes_has_link(bp)) {
6115 				bmcr &= ~BMCR_ANENABLE;
6116 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6117 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6118 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6119 			}
6120 		}
6121 	}
6122 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6123 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6124 		u32 phy2;
6125 
6126 		bnx2_write_phy(bp, 0x17, 0x0f01);
6127 		bnx2_read_phy(bp, 0x15, &phy2);
6128 		if (phy2 & 0x20) {
6129 			u32 bmcr;
6130 
6131 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6132 			bmcr |= BMCR_ANENABLE;
6133 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6134 
6135 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6136 		}
6137 	} else
6138 		bp->current_interval = BNX2_TIMER_INTERVAL;
6139 
6140 	if (check_link) {
6141 		u32 val;
6142 
6143 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6144 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6145 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6146 
6147 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6148 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6149 				bnx2_5706s_force_link_dn(bp, 1);
6150 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6151 			} else
6152 				bnx2_set_link(bp);
6153 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6154 			bnx2_set_link(bp);
6155 	}
6156 	spin_unlock(&bp->phy_lock);
6157 }
6158 
6159 static void
6160 bnx2_5708_serdes_timer(struct bnx2 *bp)
6161 {
6162 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6163 		return;
6164 
6165 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6166 		bp->serdes_an_pending = 0;
6167 		return;
6168 	}
6169 
6170 	spin_lock(&bp->phy_lock);
6171 	if (bp->serdes_an_pending)
6172 		bp->serdes_an_pending--;
6173 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6174 		u32 bmcr;
6175 
6176 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6177 		if (bmcr & BMCR_ANENABLE) {
6178 			bnx2_enable_forced_2g5(bp);
6179 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6180 		} else {
6181 			bnx2_disable_forced_2g5(bp);
6182 			bp->serdes_an_pending = 2;
6183 			bp->current_interval = BNX2_TIMER_INTERVAL;
6184 		}
6185 
6186 	} else
6187 		bp->current_interval = BNX2_TIMER_INTERVAL;
6188 
6189 	spin_unlock(&bp->phy_lock);
6190 }
6191 
6192 static void
6193 bnx2_timer(unsigned long data)
6194 {
6195 	struct bnx2 *bp = (struct bnx2 *) data;
6196 
6197 	if (!netif_running(bp->dev))
6198 		return;
6199 
6200 	if (atomic_read(&bp->intr_sem) != 0)
6201 		goto bnx2_restart_timer;
6202 
6203 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6204 	     BNX2_FLAG_USING_MSI)
6205 		bnx2_chk_missed_msi(bp);
6206 
6207 	bnx2_send_heart_beat(bp);
6208 
6209 	bp->stats_blk->stat_FwRxDrop =
6210 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6211 
6212 	/* workaround occasional corrupted counters */
6213 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6214 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6215 			BNX2_HC_COMMAND_STATS_NOW);
6216 
6217 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6218 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6219 			bnx2_5706_serdes_timer(bp);
6220 		else
6221 			bnx2_5708_serdes_timer(bp);
6222 	}
6223 
6224 bnx2_restart_timer:
6225 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6226 }
6227 
6228 static int
6229 bnx2_request_irq(struct bnx2 *bp)
6230 {
6231 	unsigned long flags;
6232 	struct bnx2_irq *irq;
6233 	int rc = 0, i;
6234 
6235 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6236 		flags = 0;
6237 	else
6238 		flags = IRQF_SHARED;
6239 
6240 	for (i = 0; i < bp->irq_nvecs; i++) {
6241 		irq = &bp->irq_tbl[i];
6242 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6243 				 &bp->bnx2_napi[i]);
6244 		if (rc)
6245 			break;
6246 		irq->requested = 1;
6247 	}
6248 	return rc;
6249 }
6250 
6251 static void
6252 __bnx2_free_irq(struct bnx2 *bp)
6253 {
6254 	struct bnx2_irq *irq;
6255 	int i;
6256 
6257 	for (i = 0; i < bp->irq_nvecs; i++) {
6258 		irq = &bp->irq_tbl[i];
6259 		if (irq->requested)
6260 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6261 		irq->requested = 0;
6262 	}
6263 }
6264 
6265 static void
6266 bnx2_free_irq(struct bnx2 *bp)
6267 {
6268 
6269 	__bnx2_free_irq(bp);
6270 	if (bp->flags & BNX2_FLAG_USING_MSI)
6271 		pci_disable_msi(bp->pdev);
6272 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6273 		pci_disable_msix(bp->pdev);
6274 
6275 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6276 }
6277 
6278 static void
6279 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6280 {
6281 	int i, total_vecs;
6282 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6283 	struct net_device *dev = bp->dev;
6284 	const int len = sizeof(bp->irq_tbl[0].name);
6285 
6286 	bnx2_setup_msix_tbl(bp);
6287 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6288 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6289 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6290 
6291 	/*  Need to flush the previous three writes to ensure MSI-X
6292 	 *  is setup properly */
6293 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6294 
6295 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6296 		msix_ent[i].entry = i;
6297 		msix_ent[i].vector = 0;
6298 	}
6299 
6300 	total_vecs = msix_vecs;
6301 #ifdef BCM_CNIC
6302 	total_vecs++;
6303 #endif
6304 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6305 					   BNX2_MIN_MSIX_VEC, total_vecs);
6306 	if (total_vecs < 0)
6307 		return;
6308 
6309 	msix_vecs = total_vecs;
6310 #ifdef BCM_CNIC
6311 	msix_vecs--;
6312 #endif
6313 	bp->irq_nvecs = msix_vecs;
6314 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6315 	for (i = 0; i < total_vecs; i++) {
6316 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6317 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6318 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6319 	}
6320 }
6321 
6322 static int
6323 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6324 {
6325 	int cpus = netif_get_num_default_rss_queues();
6326 	int msix_vecs;
6327 
6328 	if (!bp->num_req_rx_rings)
6329 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6330 	else if (!bp->num_req_tx_rings)
6331 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6332 	else
6333 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6334 
6335 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6336 
6337 	bp->irq_tbl[0].handler = bnx2_interrupt;
6338 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6339 	bp->irq_nvecs = 1;
6340 	bp->irq_tbl[0].vector = bp->pdev->irq;
6341 
6342 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6343 		bnx2_enable_msix(bp, msix_vecs);
6344 
6345 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6346 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6347 		if (pci_enable_msi(bp->pdev) == 0) {
6348 			bp->flags |= BNX2_FLAG_USING_MSI;
6349 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6350 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6351 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6352 			} else
6353 				bp->irq_tbl[0].handler = bnx2_msi;
6354 
6355 			bp->irq_tbl[0].vector = bp->pdev->irq;
6356 		}
6357 	}
6358 
6359 	if (!bp->num_req_tx_rings)
6360 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6361 	else
6362 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6363 
6364 	if (!bp->num_req_rx_rings)
6365 		bp->num_rx_rings = bp->irq_nvecs;
6366 	else
6367 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6368 
6369 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6370 
6371 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6372 }
6373 
6374 /* Called with rtnl_lock */
6375 static int
6376 bnx2_open(struct net_device *dev)
6377 {
6378 	struct bnx2 *bp = netdev_priv(dev);
6379 	int rc;
6380 
6381 	rc = bnx2_request_firmware(bp);
6382 	if (rc < 0)
6383 		goto out;
6384 
6385 	netif_carrier_off(dev);
6386 
6387 	bnx2_disable_int(bp);
6388 
6389 	rc = bnx2_setup_int_mode(bp, disable_msi);
6390 	if (rc)
6391 		goto open_err;
6392 	bnx2_init_napi(bp);
6393 	bnx2_napi_enable(bp);
6394 	rc = bnx2_alloc_mem(bp);
6395 	if (rc)
6396 		goto open_err;
6397 
6398 	rc = bnx2_request_irq(bp);
6399 	if (rc)
6400 		goto open_err;
6401 
6402 	rc = bnx2_init_nic(bp, 1);
6403 	if (rc)
6404 		goto open_err;
6405 
6406 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6407 
6408 	atomic_set(&bp->intr_sem, 0);
6409 
6410 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6411 
6412 	bnx2_enable_int(bp);
6413 
6414 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6415 		/* Test MSI to make sure it is working
6416 		 * If MSI test fails, go back to INTx mode
6417 		 */
6418 		if (bnx2_test_intr(bp) != 0) {
6419 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6420 
6421 			bnx2_disable_int(bp);
6422 			bnx2_free_irq(bp);
6423 
6424 			bnx2_setup_int_mode(bp, 1);
6425 
6426 			rc = bnx2_init_nic(bp, 0);
6427 
6428 			if (!rc)
6429 				rc = bnx2_request_irq(bp);
6430 
6431 			if (rc) {
6432 				del_timer_sync(&bp->timer);
6433 				goto open_err;
6434 			}
6435 			bnx2_enable_int(bp);
6436 		}
6437 	}
6438 	if (bp->flags & BNX2_FLAG_USING_MSI)
6439 		netdev_info(dev, "using MSI\n");
6440 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6441 		netdev_info(dev, "using MSIX\n");
6442 
6443 	netif_tx_start_all_queues(dev);
6444 out:
6445 	return rc;
6446 
6447 open_err:
6448 	bnx2_napi_disable(bp);
6449 	bnx2_free_skbs(bp);
6450 	bnx2_free_irq(bp);
6451 	bnx2_free_mem(bp);
6452 	bnx2_del_napi(bp);
6453 	bnx2_release_firmware(bp);
6454 	goto out;
6455 }
6456 
6457 static void
6458 bnx2_reset_task(struct work_struct *work)
6459 {
6460 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6461 	int rc;
6462 	u16 pcicmd;
6463 
6464 	rtnl_lock();
6465 	if (!netif_running(bp->dev)) {
6466 		rtnl_unlock();
6467 		return;
6468 	}
6469 
6470 	bnx2_netif_stop(bp, true);
6471 
6472 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6473 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6474 		/* in case PCI block has reset */
6475 		pci_restore_state(bp->pdev);
6476 		pci_save_state(bp->pdev);
6477 	}
6478 	rc = bnx2_init_nic(bp, 1);
6479 	if (rc) {
6480 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6481 		bnx2_napi_enable(bp);
6482 		dev_close(bp->dev);
6483 		rtnl_unlock();
6484 		return;
6485 	}
6486 
6487 	atomic_set(&bp->intr_sem, 1);
6488 	bnx2_netif_start(bp, true);
6489 	rtnl_unlock();
6490 }
6491 
6492 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6493 
6494 static void
6495 bnx2_dump_ftq(struct bnx2 *bp)
6496 {
6497 	int i;
6498 	u32 reg, bdidx, cid, valid;
6499 	struct net_device *dev = bp->dev;
6500 	static const struct ftq_reg {
6501 		char *name;
6502 		u32 off;
6503 	} ftq_arr[] = {
6504 		BNX2_FTQ_ENTRY(RV2P_P),
6505 		BNX2_FTQ_ENTRY(RV2P_T),
6506 		BNX2_FTQ_ENTRY(RV2P_M),
6507 		BNX2_FTQ_ENTRY(TBDR_),
6508 		BNX2_FTQ_ENTRY(TDMA_),
6509 		BNX2_FTQ_ENTRY(TXP_),
6510 		BNX2_FTQ_ENTRY(TXP_),
6511 		BNX2_FTQ_ENTRY(TPAT_),
6512 		BNX2_FTQ_ENTRY(RXP_C),
6513 		BNX2_FTQ_ENTRY(RXP_),
6514 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6515 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6516 		BNX2_FTQ_ENTRY(COM_COMQ_),
6517 		BNX2_FTQ_ENTRY(CP_CPQ_),
6518 	};
6519 
6520 	netdev_err(dev, "<--- start FTQ dump --->\n");
6521 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6522 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6523 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6524 
6525 	netdev_err(dev, "CPU states:\n");
6526 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6527 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6528 			   reg, bnx2_reg_rd_ind(bp, reg),
6529 			   bnx2_reg_rd_ind(bp, reg + 4),
6530 			   bnx2_reg_rd_ind(bp, reg + 8),
6531 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6532 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6533 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6534 
6535 	netdev_err(dev, "<--- end FTQ dump --->\n");
6536 	netdev_err(dev, "<--- start TBDC dump --->\n");
6537 	netdev_err(dev, "TBDC free cnt: %ld\n",
6538 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6539 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6540 	for (i = 0; i < 0x20; i++) {
6541 		int j = 0;
6542 
6543 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6544 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6545 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6546 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6547 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6548 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6549 			j++;
6550 
6551 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6552 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6553 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6554 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6555 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6556 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6557 	}
6558 	netdev_err(dev, "<--- end TBDC dump --->\n");
6559 }
6560 
6561 static void
6562 bnx2_dump_state(struct bnx2 *bp)
6563 {
6564 	struct net_device *dev = bp->dev;
6565 	u32 val1, val2;
6566 
6567 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6568 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6569 		   atomic_read(&bp->intr_sem), val1);
6570 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6571 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6572 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6573 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6574 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6575 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6576 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6577 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6578 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6579 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6580 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6581 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6582 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6583 }
6584 
6585 static void
6586 bnx2_tx_timeout(struct net_device *dev)
6587 {
6588 	struct bnx2 *bp = netdev_priv(dev);
6589 
6590 	bnx2_dump_ftq(bp);
6591 	bnx2_dump_state(bp);
6592 	bnx2_dump_mcp_state(bp);
6593 
6594 	/* This allows the netif to be shutdown gracefully before resetting */
6595 	schedule_work(&bp->reset_task);
6596 }
6597 
6598 /* Called with netif_tx_lock.
6599  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6600  * netif_wake_queue().
6601  */
6602 static netdev_tx_t
6603 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6604 {
6605 	struct bnx2 *bp = netdev_priv(dev);
6606 	dma_addr_t mapping;
6607 	struct bnx2_tx_bd *txbd;
6608 	struct bnx2_sw_tx_bd *tx_buf;
6609 	u32 len, vlan_tag_flags, last_frag, mss;
6610 	u16 prod, ring_prod;
6611 	int i;
6612 	struct bnx2_napi *bnapi;
6613 	struct bnx2_tx_ring_info *txr;
6614 	struct netdev_queue *txq;
6615 
6616 	/*  Determine which tx ring we will be placed on */
6617 	i = skb_get_queue_mapping(skb);
6618 	bnapi = &bp->bnx2_napi[i];
6619 	txr = &bnapi->tx_ring;
6620 	txq = netdev_get_tx_queue(dev, i);
6621 
6622 	if (unlikely(bnx2_tx_avail(bp, txr) <
6623 	    (skb_shinfo(skb)->nr_frags + 1))) {
6624 		netif_tx_stop_queue(txq);
6625 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6626 
6627 		return NETDEV_TX_BUSY;
6628 	}
6629 	len = skb_headlen(skb);
6630 	prod = txr->tx_prod;
6631 	ring_prod = BNX2_TX_RING_IDX(prod);
6632 
6633 	vlan_tag_flags = 0;
6634 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6635 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6636 	}
6637 
6638 	if (skb_vlan_tag_present(skb)) {
6639 		vlan_tag_flags |=
6640 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6641 	}
6642 
6643 	if ((mss = skb_shinfo(skb)->gso_size)) {
6644 		u32 tcp_opt_len;
6645 		struct iphdr *iph;
6646 
6647 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6648 
6649 		tcp_opt_len = tcp_optlen(skb);
6650 
6651 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6652 			u32 tcp_off = skb_transport_offset(skb) -
6653 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6654 
6655 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6656 					  TX_BD_FLAGS_SW_FLAGS;
6657 			if (likely(tcp_off == 0))
6658 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6659 			else {
6660 				tcp_off >>= 3;
6661 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6662 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6663 						  ((tcp_off & 0x10) <<
6664 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6665 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6666 			}
6667 		} else {
6668 			iph = ip_hdr(skb);
6669 			if (tcp_opt_len || (iph->ihl > 5)) {
6670 				vlan_tag_flags |= ((iph->ihl - 5) +
6671 						   (tcp_opt_len >> 2)) << 8;
6672 			}
6673 		}
6674 	} else
6675 		mss = 0;
6676 
6677 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6678 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6679 		dev_kfree_skb_any(skb);
6680 		return NETDEV_TX_OK;
6681 	}
6682 
6683 	tx_buf = &txr->tx_buf_ring[ring_prod];
6684 	tx_buf->skb = skb;
6685 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6686 
6687 	txbd = &txr->tx_desc_ring[ring_prod];
6688 
6689 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6690 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6691 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6692 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6693 
6694 	last_frag = skb_shinfo(skb)->nr_frags;
6695 	tx_buf->nr_frags = last_frag;
6696 	tx_buf->is_gso = skb_is_gso(skb);
6697 
6698 	for (i = 0; i < last_frag; i++) {
6699 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6700 
6701 		prod = BNX2_NEXT_TX_BD(prod);
6702 		ring_prod = BNX2_TX_RING_IDX(prod);
6703 		txbd = &txr->tx_desc_ring[ring_prod];
6704 
6705 		len = skb_frag_size(frag);
6706 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6707 					   DMA_TO_DEVICE);
6708 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6709 			goto dma_error;
6710 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6711 				   mapping);
6712 
6713 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6714 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6715 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6716 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6717 
6718 	}
6719 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6720 
6721 	/* Sync BD data before updating TX mailbox */
6722 	wmb();
6723 
6724 	netdev_tx_sent_queue(txq, skb->len);
6725 
6726 	prod = BNX2_NEXT_TX_BD(prod);
6727 	txr->tx_prod_bseq += skb->len;
6728 
6729 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6730 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6731 
6732 	mmiowb();
6733 
6734 	txr->tx_prod = prod;
6735 
6736 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6737 		netif_tx_stop_queue(txq);
6738 
6739 		/* netif_tx_stop_queue() must be done before checking
6740 		 * tx index in bnx2_tx_avail() below, because in
6741 		 * bnx2_tx_int(), we update tx index before checking for
6742 		 * netif_tx_queue_stopped().
6743 		 */
6744 		smp_mb();
6745 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6746 			netif_tx_wake_queue(txq);
6747 	}
6748 
6749 	return NETDEV_TX_OK;
6750 dma_error:
6751 	/* save value of frag that failed */
6752 	last_frag = i;
6753 
6754 	/* start back at beginning and unmap skb */
6755 	prod = txr->tx_prod;
6756 	ring_prod = BNX2_TX_RING_IDX(prod);
6757 	tx_buf = &txr->tx_buf_ring[ring_prod];
6758 	tx_buf->skb = NULL;
6759 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6760 			 skb_headlen(skb), PCI_DMA_TODEVICE);
6761 
6762 	/* unmap remaining mapped pages */
6763 	for (i = 0; i < last_frag; i++) {
6764 		prod = BNX2_NEXT_TX_BD(prod);
6765 		ring_prod = BNX2_TX_RING_IDX(prod);
6766 		tx_buf = &txr->tx_buf_ring[ring_prod];
6767 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6768 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6769 			       PCI_DMA_TODEVICE);
6770 	}
6771 
6772 	dev_kfree_skb_any(skb);
6773 	return NETDEV_TX_OK;
6774 }
6775 
6776 /* Called with rtnl_lock */
6777 static int
6778 bnx2_close(struct net_device *dev)
6779 {
6780 	struct bnx2 *bp = netdev_priv(dev);
6781 
6782 	bnx2_disable_int_sync(bp);
6783 	bnx2_napi_disable(bp);
6784 	netif_tx_disable(dev);
6785 	del_timer_sync(&bp->timer);
6786 	bnx2_shutdown_chip(bp);
6787 	bnx2_free_irq(bp);
6788 	bnx2_free_skbs(bp);
6789 	bnx2_free_mem(bp);
6790 	bnx2_del_napi(bp);
6791 	bp->link_up = 0;
6792 	netif_carrier_off(bp->dev);
6793 	return 0;
6794 }
6795 
6796 static void
6797 bnx2_save_stats(struct bnx2 *bp)
6798 {
6799 	u32 *hw_stats = (u32 *) bp->stats_blk;
6800 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6801 	int i;
6802 
6803 	/* The 1st 10 counters are 64-bit counters */
6804 	for (i = 0; i < 20; i += 2) {
6805 		u32 hi;
6806 		u64 lo;
6807 
6808 		hi = temp_stats[i] + hw_stats[i];
6809 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6810 		if (lo > 0xffffffff)
6811 			hi++;
6812 		temp_stats[i] = hi;
6813 		temp_stats[i + 1] = lo & 0xffffffff;
6814 	}
6815 
6816 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6817 		temp_stats[i] += hw_stats[i];
6818 }
6819 
6820 #define GET_64BIT_NET_STATS64(ctr)		\
6821 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6822 
6823 #define GET_64BIT_NET_STATS(ctr)				\
6824 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6825 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6826 
6827 #define GET_32BIT_NET_STATS(ctr)				\
6828 	(unsigned long) (bp->stats_blk->ctr +			\
6829 			 bp->temp_stats_blk->ctr)
6830 
6831 static struct rtnl_link_stats64 *
6832 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6833 {
6834 	struct bnx2 *bp = netdev_priv(dev);
6835 
6836 	if (bp->stats_blk == NULL)
6837 		return net_stats;
6838 
6839 	net_stats->rx_packets =
6840 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6841 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6842 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6843 
6844 	net_stats->tx_packets =
6845 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6846 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6847 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6848 
6849 	net_stats->rx_bytes =
6850 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6851 
6852 	net_stats->tx_bytes =
6853 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6854 
6855 	net_stats->multicast =
6856 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6857 
6858 	net_stats->collisions =
6859 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6860 
6861 	net_stats->rx_length_errors =
6862 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6863 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6864 
6865 	net_stats->rx_over_errors =
6866 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6867 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6868 
6869 	net_stats->rx_frame_errors =
6870 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6871 
6872 	net_stats->rx_crc_errors =
6873 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6874 
6875 	net_stats->rx_errors = net_stats->rx_length_errors +
6876 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6877 		net_stats->rx_crc_errors;
6878 
6879 	net_stats->tx_aborted_errors =
6880 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6881 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6882 
6883 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6884 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6885 		net_stats->tx_carrier_errors = 0;
6886 	else {
6887 		net_stats->tx_carrier_errors =
6888 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6889 	}
6890 
6891 	net_stats->tx_errors =
6892 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6893 		net_stats->tx_aborted_errors +
6894 		net_stats->tx_carrier_errors;
6895 
6896 	net_stats->rx_missed_errors =
6897 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6898 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6899 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6900 
6901 	return net_stats;
6902 }
6903 
6904 /* All ethtool functions called with rtnl_lock */
6905 
6906 static int
6907 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6908 {
6909 	struct bnx2 *bp = netdev_priv(dev);
6910 	int support_serdes = 0, support_copper = 0;
6911 
6912 	cmd->supported = SUPPORTED_Autoneg;
6913 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6914 		support_serdes = 1;
6915 		support_copper = 1;
6916 	} else if (bp->phy_port == PORT_FIBRE)
6917 		support_serdes = 1;
6918 	else
6919 		support_copper = 1;
6920 
6921 	if (support_serdes) {
6922 		cmd->supported |= SUPPORTED_1000baseT_Full |
6923 			SUPPORTED_FIBRE;
6924 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6925 			cmd->supported |= SUPPORTED_2500baseX_Full;
6926 
6927 	}
6928 	if (support_copper) {
6929 		cmd->supported |= SUPPORTED_10baseT_Half |
6930 			SUPPORTED_10baseT_Full |
6931 			SUPPORTED_100baseT_Half |
6932 			SUPPORTED_100baseT_Full |
6933 			SUPPORTED_1000baseT_Full |
6934 			SUPPORTED_TP;
6935 
6936 	}
6937 
6938 	spin_lock_bh(&bp->phy_lock);
6939 	cmd->port = bp->phy_port;
6940 	cmd->advertising = bp->advertising;
6941 
6942 	if (bp->autoneg & AUTONEG_SPEED) {
6943 		cmd->autoneg = AUTONEG_ENABLE;
6944 	} else {
6945 		cmd->autoneg = AUTONEG_DISABLE;
6946 	}
6947 
6948 	if (netif_carrier_ok(dev)) {
6949 		ethtool_cmd_speed_set(cmd, bp->line_speed);
6950 		cmd->duplex = bp->duplex;
6951 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6952 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6953 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
6954 			else
6955 				cmd->eth_tp_mdix = ETH_TP_MDI;
6956 		}
6957 	}
6958 	else {
6959 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6960 		cmd->duplex = DUPLEX_UNKNOWN;
6961 	}
6962 	spin_unlock_bh(&bp->phy_lock);
6963 
6964 	cmd->transceiver = XCVR_INTERNAL;
6965 	cmd->phy_address = bp->phy_addr;
6966 
6967 	return 0;
6968 }
6969 
6970 static int
6971 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6972 {
6973 	struct bnx2 *bp = netdev_priv(dev);
6974 	u8 autoneg = bp->autoneg;
6975 	u8 req_duplex = bp->req_duplex;
6976 	u16 req_line_speed = bp->req_line_speed;
6977 	u32 advertising = bp->advertising;
6978 	int err = -EINVAL;
6979 
6980 	spin_lock_bh(&bp->phy_lock);
6981 
6982 	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6983 		goto err_out_unlock;
6984 
6985 	if (cmd->port != bp->phy_port &&
6986 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6987 		goto err_out_unlock;
6988 
6989 	/* If device is down, we can store the settings only if the user
6990 	 * is setting the currently active port.
6991 	 */
6992 	if (!netif_running(dev) && cmd->port != bp->phy_port)
6993 		goto err_out_unlock;
6994 
6995 	if (cmd->autoneg == AUTONEG_ENABLE) {
6996 		autoneg |= AUTONEG_SPEED;
6997 
6998 		advertising = cmd->advertising;
6999 		if (cmd->port == PORT_TP) {
7000 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
7001 			if (!advertising)
7002 				advertising = ETHTOOL_ALL_COPPER_SPEED;
7003 		} else {
7004 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
7005 			if (!advertising)
7006 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
7007 		}
7008 		advertising |= ADVERTISED_Autoneg;
7009 	}
7010 	else {
7011 		u32 speed = ethtool_cmd_speed(cmd);
7012 		if (cmd->port == PORT_FIBRE) {
7013 			if ((speed != SPEED_1000 &&
7014 			     speed != SPEED_2500) ||
7015 			    (cmd->duplex != DUPLEX_FULL))
7016 				goto err_out_unlock;
7017 
7018 			if (speed == SPEED_2500 &&
7019 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7020 				goto err_out_unlock;
7021 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7022 			goto err_out_unlock;
7023 
7024 		autoneg &= ~AUTONEG_SPEED;
7025 		req_line_speed = speed;
7026 		req_duplex = cmd->duplex;
7027 		advertising = 0;
7028 	}
7029 
7030 	bp->autoneg = autoneg;
7031 	bp->advertising = advertising;
7032 	bp->req_line_speed = req_line_speed;
7033 	bp->req_duplex = req_duplex;
7034 
7035 	err = 0;
7036 	/* If device is down, the new settings will be picked up when it is
7037 	 * brought up.
7038 	 */
7039 	if (netif_running(dev))
7040 		err = bnx2_setup_phy(bp, cmd->port);
7041 
7042 err_out_unlock:
7043 	spin_unlock_bh(&bp->phy_lock);
7044 
7045 	return err;
7046 }
7047 
7048 static void
7049 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7050 {
7051 	struct bnx2 *bp = netdev_priv(dev);
7052 
7053 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7054 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7055 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7056 	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7057 }
7058 
7059 #define BNX2_REGDUMP_LEN		(32 * 1024)
7060 
7061 static int
7062 bnx2_get_regs_len(struct net_device *dev)
7063 {
7064 	return BNX2_REGDUMP_LEN;
7065 }
7066 
7067 static void
7068 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7069 {
7070 	u32 *p = _p, i, offset;
7071 	u8 *orig_p = _p;
7072 	struct bnx2 *bp = netdev_priv(dev);
7073 	static const u32 reg_boundaries[] = {
7074 		0x0000, 0x0098, 0x0400, 0x045c,
7075 		0x0800, 0x0880, 0x0c00, 0x0c10,
7076 		0x0c30, 0x0d08, 0x1000, 0x101c,
7077 		0x1040, 0x1048, 0x1080, 0x10a4,
7078 		0x1400, 0x1490, 0x1498, 0x14f0,
7079 		0x1500, 0x155c, 0x1580, 0x15dc,
7080 		0x1600, 0x1658, 0x1680, 0x16d8,
7081 		0x1800, 0x1820, 0x1840, 0x1854,
7082 		0x1880, 0x1894, 0x1900, 0x1984,
7083 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7084 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7085 		0x2000, 0x2030, 0x23c0, 0x2400,
7086 		0x2800, 0x2820, 0x2830, 0x2850,
7087 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7088 		0x3c00, 0x3c94, 0x4000, 0x4010,
7089 		0x4080, 0x4090, 0x43c0, 0x4458,
7090 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7091 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7092 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7093 		0x5fc0, 0x6000, 0x6400, 0x6428,
7094 		0x6800, 0x6848, 0x684c, 0x6860,
7095 		0x6888, 0x6910, 0x8000
7096 	};
7097 
7098 	regs->version = 0;
7099 
7100 	memset(p, 0, BNX2_REGDUMP_LEN);
7101 
7102 	if (!netif_running(bp->dev))
7103 		return;
7104 
7105 	i = 0;
7106 	offset = reg_boundaries[0];
7107 	p += offset;
7108 	while (offset < BNX2_REGDUMP_LEN) {
7109 		*p++ = BNX2_RD(bp, offset);
7110 		offset += 4;
7111 		if (offset == reg_boundaries[i + 1]) {
7112 			offset = reg_boundaries[i + 2];
7113 			p = (u32 *) (orig_p + offset);
7114 			i += 2;
7115 		}
7116 	}
7117 }
7118 
7119 static void
7120 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7121 {
7122 	struct bnx2 *bp = netdev_priv(dev);
7123 
7124 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7125 		wol->supported = 0;
7126 		wol->wolopts = 0;
7127 	}
7128 	else {
7129 		wol->supported = WAKE_MAGIC;
7130 		if (bp->wol)
7131 			wol->wolopts = WAKE_MAGIC;
7132 		else
7133 			wol->wolopts = 0;
7134 	}
7135 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7136 }
7137 
7138 static int
7139 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7140 {
7141 	struct bnx2 *bp = netdev_priv(dev);
7142 
7143 	if (wol->wolopts & ~WAKE_MAGIC)
7144 		return -EINVAL;
7145 
7146 	if (wol->wolopts & WAKE_MAGIC) {
7147 		if (bp->flags & BNX2_FLAG_NO_WOL)
7148 			return -EINVAL;
7149 
7150 		bp->wol = 1;
7151 	}
7152 	else {
7153 		bp->wol = 0;
7154 	}
7155 
7156 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7157 
7158 	return 0;
7159 }
7160 
7161 static int
7162 bnx2_nway_reset(struct net_device *dev)
7163 {
7164 	struct bnx2 *bp = netdev_priv(dev);
7165 	u32 bmcr;
7166 
7167 	if (!netif_running(dev))
7168 		return -EAGAIN;
7169 
7170 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7171 		return -EINVAL;
7172 	}
7173 
7174 	spin_lock_bh(&bp->phy_lock);
7175 
7176 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7177 		int rc;
7178 
7179 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7180 		spin_unlock_bh(&bp->phy_lock);
7181 		return rc;
7182 	}
7183 
7184 	/* Force a link down visible on the other side */
7185 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7186 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7187 		spin_unlock_bh(&bp->phy_lock);
7188 
7189 		msleep(20);
7190 
7191 		spin_lock_bh(&bp->phy_lock);
7192 
7193 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7194 		bp->serdes_an_pending = 1;
7195 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7196 	}
7197 
7198 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7199 	bmcr &= ~BMCR_LOOPBACK;
7200 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7201 
7202 	spin_unlock_bh(&bp->phy_lock);
7203 
7204 	return 0;
7205 }
7206 
7207 static u32
7208 bnx2_get_link(struct net_device *dev)
7209 {
7210 	struct bnx2 *bp = netdev_priv(dev);
7211 
7212 	return bp->link_up;
7213 }
7214 
7215 static int
7216 bnx2_get_eeprom_len(struct net_device *dev)
7217 {
7218 	struct bnx2 *bp = netdev_priv(dev);
7219 
7220 	if (bp->flash_info == NULL)
7221 		return 0;
7222 
7223 	return (int) bp->flash_size;
7224 }
7225 
7226 static int
7227 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7228 		u8 *eebuf)
7229 {
7230 	struct bnx2 *bp = netdev_priv(dev);
7231 	int rc;
7232 
7233 	/* parameters already validated in ethtool_get_eeprom */
7234 
7235 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7236 
7237 	return rc;
7238 }
7239 
7240 static int
7241 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7242 		u8 *eebuf)
7243 {
7244 	struct bnx2 *bp = netdev_priv(dev);
7245 	int rc;
7246 
7247 	/* parameters already validated in ethtool_set_eeprom */
7248 
7249 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7250 
7251 	return rc;
7252 }
7253 
7254 static int
7255 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7256 {
7257 	struct bnx2 *bp = netdev_priv(dev);
7258 
7259 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7260 
7261 	coal->rx_coalesce_usecs = bp->rx_ticks;
7262 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7263 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7264 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7265 
7266 	coal->tx_coalesce_usecs = bp->tx_ticks;
7267 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7268 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7269 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7270 
7271 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7272 
7273 	return 0;
7274 }
7275 
7276 static int
7277 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7278 {
7279 	struct bnx2 *bp = netdev_priv(dev);
7280 
7281 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7282 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7283 
7284 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7285 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7286 
7287 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7288 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7289 
7290 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7291 	if (bp->rx_quick_cons_trip_int > 0xff)
7292 		bp->rx_quick_cons_trip_int = 0xff;
7293 
7294 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7295 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7296 
7297 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7298 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7299 
7300 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7301 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7302 
7303 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7304 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7305 		0xff;
7306 
7307 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7308 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7309 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7310 			bp->stats_ticks = USEC_PER_SEC;
7311 	}
7312 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7313 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7314 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7315 
7316 	if (netif_running(bp->dev)) {
7317 		bnx2_netif_stop(bp, true);
7318 		bnx2_init_nic(bp, 0);
7319 		bnx2_netif_start(bp, true);
7320 	}
7321 
7322 	return 0;
7323 }
7324 
7325 static void
7326 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7327 {
7328 	struct bnx2 *bp = netdev_priv(dev);
7329 
7330 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7331 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7332 
7333 	ering->rx_pending = bp->rx_ring_size;
7334 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7335 
7336 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7337 	ering->tx_pending = bp->tx_ring_size;
7338 }
7339 
7340 static int
7341 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7342 {
7343 	if (netif_running(bp->dev)) {
7344 		/* Reset will erase chipset stats; save them */
7345 		bnx2_save_stats(bp);
7346 
7347 		bnx2_netif_stop(bp, true);
7348 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7349 		if (reset_irq) {
7350 			bnx2_free_irq(bp);
7351 			bnx2_del_napi(bp);
7352 		} else {
7353 			__bnx2_free_irq(bp);
7354 		}
7355 		bnx2_free_skbs(bp);
7356 		bnx2_free_mem(bp);
7357 	}
7358 
7359 	bnx2_set_rx_ring_size(bp, rx);
7360 	bp->tx_ring_size = tx;
7361 
7362 	if (netif_running(bp->dev)) {
7363 		int rc = 0;
7364 
7365 		if (reset_irq) {
7366 			rc = bnx2_setup_int_mode(bp, disable_msi);
7367 			bnx2_init_napi(bp);
7368 		}
7369 
7370 		if (!rc)
7371 			rc = bnx2_alloc_mem(bp);
7372 
7373 		if (!rc)
7374 			rc = bnx2_request_irq(bp);
7375 
7376 		if (!rc)
7377 			rc = bnx2_init_nic(bp, 0);
7378 
7379 		if (rc) {
7380 			bnx2_napi_enable(bp);
7381 			dev_close(bp->dev);
7382 			return rc;
7383 		}
7384 #ifdef BCM_CNIC
7385 		mutex_lock(&bp->cnic_lock);
7386 		/* Let cnic know about the new status block. */
7387 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7388 			bnx2_setup_cnic_irq_info(bp);
7389 		mutex_unlock(&bp->cnic_lock);
7390 #endif
7391 		bnx2_netif_start(bp, true);
7392 	}
7393 	return 0;
7394 }
7395 
7396 static int
7397 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7398 {
7399 	struct bnx2 *bp = netdev_priv(dev);
7400 	int rc;
7401 
7402 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7403 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7404 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7405 
7406 		return -EINVAL;
7407 	}
7408 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7409 				   false);
7410 	return rc;
7411 }
7412 
7413 static void
7414 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7415 {
7416 	struct bnx2 *bp = netdev_priv(dev);
7417 
7418 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7419 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7420 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7421 }
7422 
7423 static int
7424 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7425 {
7426 	struct bnx2 *bp = netdev_priv(dev);
7427 
7428 	bp->req_flow_ctrl = 0;
7429 	if (epause->rx_pause)
7430 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7431 	if (epause->tx_pause)
7432 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7433 
7434 	if (epause->autoneg) {
7435 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7436 	}
7437 	else {
7438 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7439 	}
7440 
7441 	if (netif_running(dev)) {
7442 		spin_lock_bh(&bp->phy_lock);
7443 		bnx2_setup_phy(bp, bp->phy_port);
7444 		spin_unlock_bh(&bp->phy_lock);
7445 	}
7446 
7447 	return 0;
7448 }
7449 
7450 static struct {
7451 	char string[ETH_GSTRING_LEN];
7452 } bnx2_stats_str_arr[] = {
7453 	{ "rx_bytes" },
7454 	{ "rx_error_bytes" },
7455 	{ "tx_bytes" },
7456 	{ "tx_error_bytes" },
7457 	{ "rx_ucast_packets" },
7458 	{ "rx_mcast_packets" },
7459 	{ "rx_bcast_packets" },
7460 	{ "tx_ucast_packets" },
7461 	{ "tx_mcast_packets" },
7462 	{ "tx_bcast_packets" },
7463 	{ "tx_mac_errors" },
7464 	{ "tx_carrier_errors" },
7465 	{ "rx_crc_errors" },
7466 	{ "rx_align_errors" },
7467 	{ "tx_single_collisions" },
7468 	{ "tx_multi_collisions" },
7469 	{ "tx_deferred" },
7470 	{ "tx_excess_collisions" },
7471 	{ "tx_late_collisions" },
7472 	{ "tx_total_collisions" },
7473 	{ "rx_fragments" },
7474 	{ "rx_jabbers" },
7475 	{ "rx_undersize_packets" },
7476 	{ "rx_oversize_packets" },
7477 	{ "rx_64_byte_packets" },
7478 	{ "rx_65_to_127_byte_packets" },
7479 	{ "rx_128_to_255_byte_packets" },
7480 	{ "rx_256_to_511_byte_packets" },
7481 	{ "rx_512_to_1023_byte_packets" },
7482 	{ "rx_1024_to_1522_byte_packets" },
7483 	{ "rx_1523_to_9022_byte_packets" },
7484 	{ "tx_64_byte_packets" },
7485 	{ "tx_65_to_127_byte_packets" },
7486 	{ "tx_128_to_255_byte_packets" },
7487 	{ "tx_256_to_511_byte_packets" },
7488 	{ "tx_512_to_1023_byte_packets" },
7489 	{ "tx_1024_to_1522_byte_packets" },
7490 	{ "tx_1523_to_9022_byte_packets" },
7491 	{ "rx_xon_frames" },
7492 	{ "rx_xoff_frames" },
7493 	{ "tx_xon_frames" },
7494 	{ "tx_xoff_frames" },
7495 	{ "rx_mac_ctrl_frames" },
7496 	{ "rx_filtered_packets" },
7497 	{ "rx_ftq_discards" },
7498 	{ "rx_discards" },
7499 	{ "rx_fw_discards" },
7500 };
7501 
7502 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7503 
7504 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7505 
7506 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7507     STATS_OFFSET32(stat_IfHCInOctets_hi),
7508     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7509     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7510     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7511     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7512     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7513     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7514     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7515     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7516     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7517     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7518     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7519     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7520     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7521     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7522     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7523     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7524     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7525     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7526     STATS_OFFSET32(stat_EtherStatsCollisions),
7527     STATS_OFFSET32(stat_EtherStatsFragments),
7528     STATS_OFFSET32(stat_EtherStatsJabbers),
7529     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7530     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7531     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7532     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7533     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7534     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7535     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7536     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7537     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7538     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7539     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7540     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7541     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7542     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7543     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7544     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7545     STATS_OFFSET32(stat_XonPauseFramesReceived),
7546     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7547     STATS_OFFSET32(stat_OutXonSent),
7548     STATS_OFFSET32(stat_OutXoffSent),
7549     STATS_OFFSET32(stat_MacControlFramesReceived),
7550     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7551     STATS_OFFSET32(stat_IfInFTQDiscards),
7552     STATS_OFFSET32(stat_IfInMBUFDiscards),
7553     STATS_OFFSET32(stat_FwRxDrop),
7554 };
7555 
7556 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7557  * skipped because of errata.
7558  */
7559 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7560 	8,0,8,8,8,8,8,8,8,8,
7561 	4,0,4,4,4,4,4,4,4,4,
7562 	4,4,4,4,4,4,4,4,4,4,
7563 	4,4,4,4,4,4,4,4,4,4,
7564 	4,4,4,4,4,4,4,
7565 };
7566 
7567 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7568 	8,0,8,8,8,8,8,8,8,8,
7569 	4,4,4,4,4,4,4,4,4,4,
7570 	4,4,4,4,4,4,4,4,4,4,
7571 	4,4,4,4,4,4,4,4,4,4,
7572 	4,4,4,4,4,4,4,
7573 };
7574 
7575 #define BNX2_NUM_TESTS 6
7576 
7577 static struct {
7578 	char string[ETH_GSTRING_LEN];
7579 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7580 	{ "register_test (offline)" },
7581 	{ "memory_test (offline)" },
7582 	{ "loopback_test (offline)" },
7583 	{ "nvram_test (online)" },
7584 	{ "interrupt_test (online)" },
7585 	{ "link_test (online)" },
7586 };
7587 
7588 static int
7589 bnx2_get_sset_count(struct net_device *dev, int sset)
7590 {
7591 	switch (sset) {
7592 	case ETH_SS_TEST:
7593 		return BNX2_NUM_TESTS;
7594 	case ETH_SS_STATS:
7595 		return BNX2_NUM_STATS;
7596 	default:
7597 		return -EOPNOTSUPP;
7598 	}
7599 }
7600 
7601 static void
7602 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7603 {
7604 	struct bnx2 *bp = netdev_priv(dev);
7605 
7606 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7607 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7608 		int i;
7609 
7610 		bnx2_netif_stop(bp, true);
7611 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7612 		bnx2_free_skbs(bp);
7613 
7614 		if (bnx2_test_registers(bp) != 0) {
7615 			buf[0] = 1;
7616 			etest->flags |= ETH_TEST_FL_FAILED;
7617 		}
7618 		if (bnx2_test_memory(bp) != 0) {
7619 			buf[1] = 1;
7620 			etest->flags |= ETH_TEST_FL_FAILED;
7621 		}
7622 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7623 			etest->flags |= ETH_TEST_FL_FAILED;
7624 
7625 		if (!netif_running(bp->dev))
7626 			bnx2_shutdown_chip(bp);
7627 		else {
7628 			bnx2_init_nic(bp, 1);
7629 			bnx2_netif_start(bp, true);
7630 		}
7631 
7632 		/* wait for link up */
7633 		for (i = 0; i < 7; i++) {
7634 			if (bp->link_up)
7635 				break;
7636 			msleep_interruptible(1000);
7637 		}
7638 	}
7639 
7640 	if (bnx2_test_nvram(bp) != 0) {
7641 		buf[3] = 1;
7642 		etest->flags |= ETH_TEST_FL_FAILED;
7643 	}
7644 	if (bnx2_test_intr(bp) != 0) {
7645 		buf[4] = 1;
7646 		etest->flags |= ETH_TEST_FL_FAILED;
7647 	}
7648 
7649 	if (bnx2_test_link(bp) != 0) {
7650 		buf[5] = 1;
7651 		etest->flags |= ETH_TEST_FL_FAILED;
7652 
7653 	}
7654 }
7655 
7656 static void
7657 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7658 {
7659 	switch (stringset) {
7660 	case ETH_SS_STATS:
7661 		memcpy(buf, bnx2_stats_str_arr,
7662 			sizeof(bnx2_stats_str_arr));
7663 		break;
7664 	case ETH_SS_TEST:
7665 		memcpy(buf, bnx2_tests_str_arr,
7666 			sizeof(bnx2_tests_str_arr));
7667 		break;
7668 	}
7669 }
7670 
7671 static void
7672 bnx2_get_ethtool_stats(struct net_device *dev,
7673 		struct ethtool_stats *stats, u64 *buf)
7674 {
7675 	struct bnx2 *bp = netdev_priv(dev);
7676 	int i;
7677 	u32 *hw_stats = (u32 *) bp->stats_blk;
7678 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7679 	u8 *stats_len_arr = NULL;
7680 
7681 	if (hw_stats == NULL) {
7682 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7683 		return;
7684 	}
7685 
7686 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7687 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7688 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7689 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7690 		stats_len_arr = bnx2_5706_stats_len_arr;
7691 	else
7692 		stats_len_arr = bnx2_5708_stats_len_arr;
7693 
7694 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7695 		unsigned long offset;
7696 
7697 		if (stats_len_arr[i] == 0) {
7698 			/* skip this counter */
7699 			buf[i] = 0;
7700 			continue;
7701 		}
7702 
7703 		offset = bnx2_stats_offset_arr[i];
7704 		if (stats_len_arr[i] == 4) {
7705 			/* 4-byte counter */
7706 			buf[i] = (u64) *(hw_stats + offset) +
7707 				 *(temp_stats + offset);
7708 			continue;
7709 		}
7710 		/* 8-byte counter */
7711 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7712 			 *(hw_stats + offset + 1) +
7713 			 (((u64) *(temp_stats + offset)) << 32) +
7714 			 *(temp_stats + offset + 1);
7715 	}
7716 }
7717 
7718 static int
7719 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7720 {
7721 	struct bnx2 *bp = netdev_priv(dev);
7722 
7723 	switch (state) {
7724 	case ETHTOOL_ID_ACTIVE:
7725 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7726 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7727 		return 1;	/* cycle on/off once per second */
7728 
7729 	case ETHTOOL_ID_ON:
7730 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7731 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7732 			BNX2_EMAC_LED_100MB_OVERRIDE |
7733 			BNX2_EMAC_LED_10MB_OVERRIDE |
7734 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7735 			BNX2_EMAC_LED_TRAFFIC);
7736 		break;
7737 
7738 	case ETHTOOL_ID_OFF:
7739 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7740 		break;
7741 
7742 	case ETHTOOL_ID_INACTIVE:
7743 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7744 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7745 		break;
7746 	}
7747 
7748 	return 0;
7749 }
7750 
7751 static int
7752 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7753 {
7754 	struct bnx2 *bp = netdev_priv(dev);
7755 
7756 	/* TSO with VLAN tag won't work with current firmware */
7757 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7758 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7759 	else
7760 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7761 
7762 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7763 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7764 	    netif_running(dev)) {
7765 		bnx2_netif_stop(bp, false);
7766 		dev->features = features;
7767 		bnx2_set_rx_mode(dev);
7768 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7769 		bnx2_netif_start(bp, false);
7770 		return 1;
7771 	}
7772 
7773 	return 0;
7774 }
7775 
7776 static void bnx2_get_channels(struct net_device *dev,
7777 			      struct ethtool_channels *channels)
7778 {
7779 	struct bnx2 *bp = netdev_priv(dev);
7780 	u32 max_rx_rings = 1;
7781 	u32 max_tx_rings = 1;
7782 
7783 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7784 		max_rx_rings = RX_MAX_RINGS;
7785 		max_tx_rings = TX_MAX_RINGS;
7786 	}
7787 
7788 	channels->max_rx = max_rx_rings;
7789 	channels->max_tx = max_tx_rings;
7790 	channels->max_other = 0;
7791 	channels->max_combined = 0;
7792 	channels->rx_count = bp->num_rx_rings;
7793 	channels->tx_count = bp->num_tx_rings;
7794 	channels->other_count = 0;
7795 	channels->combined_count = 0;
7796 }
7797 
7798 static int bnx2_set_channels(struct net_device *dev,
7799 			      struct ethtool_channels *channels)
7800 {
7801 	struct bnx2 *bp = netdev_priv(dev);
7802 	u32 max_rx_rings = 1;
7803 	u32 max_tx_rings = 1;
7804 	int rc = 0;
7805 
7806 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7807 		max_rx_rings = RX_MAX_RINGS;
7808 		max_tx_rings = TX_MAX_RINGS;
7809 	}
7810 	if (channels->rx_count > max_rx_rings ||
7811 	    channels->tx_count > max_tx_rings)
7812 		return -EINVAL;
7813 
7814 	bp->num_req_rx_rings = channels->rx_count;
7815 	bp->num_req_tx_rings = channels->tx_count;
7816 
7817 	if (netif_running(dev))
7818 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7819 					   bp->tx_ring_size, true);
7820 
7821 	return rc;
7822 }
7823 
7824 static const struct ethtool_ops bnx2_ethtool_ops = {
7825 	.get_settings		= bnx2_get_settings,
7826 	.set_settings		= bnx2_set_settings,
7827 	.get_drvinfo		= bnx2_get_drvinfo,
7828 	.get_regs_len		= bnx2_get_regs_len,
7829 	.get_regs		= bnx2_get_regs,
7830 	.get_wol		= bnx2_get_wol,
7831 	.set_wol		= bnx2_set_wol,
7832 	.nway_reset		= bnx2_nway_reset,
7833 	.get_link		= bnx2_get_link,
7834 	.get_eeprom_len		= bnx2_get_eeprom_len,
7835 	.get_eeprom		= bnx2_get_eeprom,
7836 	.set_eeprom		= bnx2_set_eeprom,
7837 	.get_coalesce		= bnx2_get_coalesce,
7838 	.set_coalesce		= bnx2_set_coalesce,
7839 	.get_ringparam		= bnx2_get_ringparam,
7840 	.set_ringparam		= bnx2_set_ringparam,
7841 	.get_pauseparam		= bnx2_get_pauseparam,
7842 	.set_pauseparam		= bnx2_set_pauseparam,
7843 	.self_test		= bnx2_self_test,
7844 	.get_strings		= bnx2_get_strings,
7845 	.set_phys_id		= bnx2_set_phys_id,
7846 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7847 	.get_sset_count		= bnx2_get_sset_count,
7848 	.get_channels		= bnx2_get_channels,
7849 	.set_channels		= bnx2_set_channels,
7850 };
7851 
7852 /* Called with rtnl_lock */
7853 static int
7854 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7855 {
7856 	struct mii_ioctl_data *data = if_mii(ifr);
7857 	struct bnx2 *bp = netdev_priv(dev);
7858 	int err;
7859 
7860 	switch(cmd) {
7861 	case SIOCGMIIPHY:
7862 		data->phy_id = bp->phy_addr;
7863 
7864 		/* fallthru */
7865 	case SIOCGMIIREG: {
7866 		u32 mii_regval;
7867 
7868 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7869 			return -EOPNOTSUPP;
7870 
7871 		if (!netif_running(dev))
7872 			return -EAGAIN;
7873 
7874 		spin_lock_bh(&bp->phy_lock);
7875 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7876 		spin_unlock_bh(&bp->phy_lock);
7877 
7878 		data->val_out = mii_regval;
7879 
7880 		return err;
7881 	}
7882 
7883 	case SIOCSMIIREG:
7884 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7885 			return -EOPNOTSUPP;
7886 
7887 		if (!netif_running(dev))
7888 			return -EAGAIN;
7889 
7890 		spin_lock_bh(&bp->phy_lock);
7891 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7892 		spin_unlock_bh(&bp->phy_lock);
7893 
7894 		return err;
7895 
7896 	default:
7897 		/* do nothing */
7898 		break;
7899 	}
7900 	return -EOPNOTSUPP;
7901 }
7902 
7903 /* Called with rtnl_lock */
7904 static int
7905 bnx2_change_mac_addr(struct net_device *dev, void *p)
7906 {
7907 	struct sockaddr *addr = p;
7908 	struct bnx2 *bp = netdev_priv(dev);
7909 
7910 	if (!is_valid_ether_addr(addr->sa_data))
7911 		return -EADDRNOTAVAIL;
7912 
7913 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7914 	if (netif_running(dev))
7915 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7916 
7917 	return 0;
7918 }
7919 
7920 /* Called with rtnl_lock */
7921 static int
7922 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7923 {
7924 	struct bnx2 *bp = netdev_priv(dev);
7925 
7926 	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7927 		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7928 		return -EINVAL;
7929 
7930 	dev->mtu = new_mtu;
7931 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7932 				     false);
7933 }
7934 
7935 #ifdef CONFIG_NET_POLL_CONTROLLER
7936 static void
7937 poll_bnx2(struct net_device *dev)
7938 {
7939 	struct bnx2 *bp = netdev_priv(dev);
7940 	int i;
7941 
7942 	for (i = 0; i < bp->irq_nvecs; i++) {
7943 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7944 
7945 		disable_irq(irq->vector);
7946 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7947 		enable_irq(irq->vector);
7948 	}
7949 }
7950 #endif
7951 
7952 static void
7953 bnx2_get_5709_media(struct bnx2 *bp)
7954 {
7955 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7956 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7957 	u32 strap;
7958 
7959 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7960 		return;
7961 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7962 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7963 		return;
7964 	}
7965 
7966 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7967 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7968 	else
7969 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7970 
7971 	if (bp->func == 0) {
7972 		switch (strap) {
7973 		case 0x4:
7974 		case 0x5:
7975 		case 0x6:
7976 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7977 			return;
7978 		}
7979 	} else {
7980 		switch (strap) {
7981 		case 0x1:
7982 		case 0x2:
7983 		case 0x4:
7984 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7985 			return;
7986 		}
7987 	}
7988 }
7989 
7990 static void
7991 bnx2_get_pci_speed(struct bnx2 *bp)
7992 {
7993 	u32 reg;
7994 
7995 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7996 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7997 		u32 clkreg;
7998 
7999 		bp->flags |= BNX2_FLAG_PCIX;
8000 
8001 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
8002 
8003 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
8004 		switch (clkreg) {
8005 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
8006 			bp->bus_speed_mhz = 133;
8007 			break;
8008 
8009 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8010 			bp->bus_speed_mhz = 100;
8011 			break;
8012 
8013 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8014 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8015 			bp->bus_speed_mhz = 66;
8016 			break;
8017 
8018 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8019 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8020 			bp->bus_speed_mhz = 50;
8021 			break;
8022 
8023 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8024 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8025 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8026 			bp->bus_speed_mhz = 33;
8027 			break;
8028 		}
8029 	}
8030 	else {
8031 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8032 			bp->bus_speed_mhz = 66;
8033 		else
8034 			bp->bus_speed_mhz = 33;
8035 	}
8036 
8037 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8038 		bp->flags |= BNX2_FLAG_PCI_32BIT;
8039 
8040 }
8041 
8042 static void
8043 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8044 {
8045 	int rc, i, j;
8046 	u8 *data;
8047 	unsigned int block_end, rosize, len;
8048 
8049 #define BNX2_VPD_NVRAM_OFFSET	0x300
8050 #define BNX2_VPD_LEN		128
8051 #define BNX2_MAX_VER_SLEN	30
8052 
8053 	data = kmalloc(256, GFP_KERNEL);
8054 	if (!data)
8055 		return;
8056 
8057 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8058 			     BNX2_VPD_LEN);
8059 	if (rc)
8060 		goto vpd_done;
8061 
8062 	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8063 		data[i] = data[i + BNX2_VPD_LEN + 3];
8064 		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8065 		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8066 		data[i + 3] = data[i + BNX2_VPD_LEN];
8067 	}
8068 
8069 	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8070 	if (i < 0)
8071 		goto vpd_done;
8072 
8073 	rosize = pci_vpd_lrdt_size(&data[i]);
8074 	i += PCI_VPD_LRDT_TAG_SIZE;
8075 	block_end = i + rosize;
8076 
8077 	if (block_end > BNX2_VPD_LEN)
8078 		goto vpd_done;
8079 
8080 	j = pci_vpd_find_info_keyword(data, i, rosize,
8081 				      PCI_VPD_RO_KEYWORD_MFR_ID);
8082 	if (j < 0)
8083 		goto vpd_done;
8084 
8085 	len = pci_vpd_info_field_size(&data[j]);
8086 
8087 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8088 	if (j + len > block_end || len != 4 ||
8089 	    memcmp(&data[j], "1028", 4))
8090 		goto vpd_done;
8091 
8092 	j = pci_vpd_find_info_keyword(data, i, rosize,
8093 				      PCI_VPD_RO_KEYWORD_VENDOR0);
8094 	if (j < 0)
8095 		goto vpd_done;
8096 
8097 	len = pci_vpd_info_field_size(&data[j]);
8098 
8099 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8100 	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8101 		goto vpd_done;
8102 
8103 	memcpy(bp->fw_version, &data[j], len);
8104 	bp->fw_version[len] = ' ';
8105 
8106 vpd_done:
8107 	kfree(data);
8108 }
8109 
8110 static int
8111 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8112 {
8113 	struct bnx2 *bp;
8114 	int rc, i, j;
8115 	u32 reg;
8116 	u64 dma_mask, persist_dma_mask;
8117 	int err;
8118 
8119 	SET_NETDEV_DEV(dev, &pdev->dev);
8120 	bp = netdev_priv(dev);
8121 
8122 	bp->flags = 0;
8123 	bp->phy_flags = 0;
8124 
8125 	bp->temp_stats_blk =
8126 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8127 
8128 	if (bp->temp_stats_blk == NULL) {
8129 		rc = -ENOMEM;
8130 		goto err_out;
8131 	}
8132 
8133 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8134 	rc = pci_enable_device(pdev);
8135 	if (rc) {
8136 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8137 		goto err_out;
8138 	}
8139 
8140 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8141 		dev_err(&pdev->dev,
8142 			"Cannot find PCI device base address, aborting\n");
8143 		rc = -ENODEV;
8144 		goto err_out_disable;
8145 	}
8146 
8147 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8148 	if (rc) {
8149 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8150 		goto err_out_disable;
8151 	}
8152 
8153 	pci_set_master(pdev);
8154 
8155 	bp->pm_cap = pdev->pm_cap;
8156 	if (bp->pm_cap == 0) {
8157 		dev_err(&pdev->dev,
8158 			"Cannot find power management capability, aborting\n");
8159 		rc = -EIO;
8160 		goto err_out_release;
8161 	}
8162 
8163 	bp->dev = dev;
8164 	bp->pdev = pdev;
8165 
8166 	spin_lock_init(&bp->phy_lock);
8167 	spin_lock_init(&bp->indirect_lock);
8168 #ifdef BCM_CNIC
8169 	mutex_init(&bp->cnic_lock);
8170 #endif
8171 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8172 
8173 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8174 							 TX_MAX_TSS_RINGS + 1));
8175 	if (!bp->regview) {
8176 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8177 		rc = -ENOMEM;
8178 		goto err_out_release;
8179 	}
8180 
8181 	/* Configure byte swap and enable write to the reg_window registers.
8182 	 * Rely on CPU to do target byte swapping on big endian systems
8183 	 * The chip's target access swapping will not swap all accesses
8184 	 */
8185 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8186 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8187 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8188 
8189 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8190 
8191 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8192 		if (!pci_is_pcie(pdev)) {
8193 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8194 			rc = -EIO;
8195 			goto err_out_unmap;
8196 		}
8197 		bp->flags |= BNX2_FLAG_PCIE;
8198 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8199 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8200 
8201 		/* AER (Advanced Error Reporting) hooks */
8202 		err = pci_enable_pcie_error_reporting(pdev);
8203 		if (!err)
8204 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8205 
8206 	} else {
8207 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8208 		if (bp->pcix_cap == 0) {
8209 			dev_err(&pdev->dev,
8210 				"Cannot find PCIX capability, aborting\n");
8211 			rc = -EIO;
8212 			goto err_out_unmap;
8213 		}
8214 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8215 	}
8216 
8217 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8218 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8219 		if (pdev->msix_cap)
8220 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8221 	}
8222 
8223 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8224 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8225 		if (pdev->msi_cap)
8226 			bp->flags |= BNX2_FLAG_MSI_CAP;
8227 	}
8228 
8229 	/* 5708 cannot support DMA addresses > 40-bit.  */
8230 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8231 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8232 	else
8233 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8234 
8235 	/* Configure DMA attributes. */
8236 	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8237 		dev->features |= NETIF_F_HIGHDMA;
8238 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8239 		if (rc) {
8240 			dev_err(&pdev->dev,
8241 				"pci_set_consistent_dma_mask failed, aborting\n");
8242 			goto err_out_unmap;
8243 		}
8244 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8245 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8246 		goto err_out_unmap;
8247 	}
8248 
8249 	if (!(bp->flags & BNX2_FLAG_PCIE))
8250 		bnx2_get_pci_speed(bp);
8251 
8252 	/* 5706A0 may falsely detect SERR and PERR. */
8253 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8254 		reg = BNX2_RD(bp, PCI_COMMAND);
8255 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8256 		BNX2_WR(bp, PCI_COMMAND, reg);
8257 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8258 		!(bp->flags & BNX2_FLAG_PCIX)) {
8259 
8260 		dev_err(&pdev->dev,
8261 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8262 		goto err_out_unmap;
8263 	}
8264 
8265 	bnx2_init_nvram(bp);
8266 
8267 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8268 
8269 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8270 		bp->func = 1;
8271 
8272 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8273 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8274 		u32 off = bp->func << 2;
8275 
8276 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8277 	} else
8278 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8279 
8280 	/* Get the permanent MAC address.  First we need to make sure the
8281 	 * firmware is actually running.
8282 	 */
8283 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8284 
8285 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8286 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8287 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8288 		rc = -ENODEV;
8289 		goto err_out_unmap;
8290 	}
8291 
8292 	bnx2_read_vpd_fw_ver(bp);
8293 
8294 	j = strlen(bp->fw_version);
8295 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8296 	for (i = 0; i < 3 && j < 24; i++) {
8297 		u8 num, k, skip0;
8298 
8299 		if (i == 0) {
8300 			bp->fw_version[j++] = 'b';
8301 			bp->fw_version[j++] = 'c';
8302 			bp->fw_version[j++] = ' ';
8303 		}
8304 		num = (u8) (reg >> (24 - (i * 8)));
8305 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8306 			if (num >= k || !skip0 || k == 1) {
8307 				bp->fw_version[j++] = (num / k) + '0';
8308 				skip0 = 0;
8309 			}
8310 		}
8311 		if (i != 2)
8312 			bp->fw_version[j++] = '.';
8313 	}
8314 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8315 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8316 		bp->wol = 1;
8317 
8318 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8319 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8320 
8321 		for (i = 0; i < 30; i++) {
8322 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8323 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8324 				break;
8325 			msleep(10);
8326 		}
8327 	}
8328 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8329 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8330 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8331 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8332 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8333 
8334 		if (j < 32)
8335 			bp->fw_version[j++] = ' ';
8336 		for (i = 0; i < 3 && j < 28; i++) {
8337 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8338 			reg = be32_to_cpu(reg);
8339 			memcpy(&bp->fw_version[j], &reg, 4);
8340 			j += 4;
8341 		}
8342 	}
8343 
8344 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8345 	bp->mac_addr[0] = (u8) (reg >> 8);
8346 	bp->mac_addr[1] = (u8) reg;
8347 
8348 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8349 	bp->mac_addr[2] = (u8) (reg >> 24);
8350 	bp->mac_addr[3] = (u8) (reg >> 16);
8351 	bp->mac_addr[4] = (u8) (reg >> 8);
8352 	bp->mac_addr[5] = (u8) reg;
8353 
8354 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8355 	bnx2_set_rx_ring_size(bp, 255);
8356 
8357 	bp->tx_quick_cons_trip_int = 2;
8358 	bp->tx_quick_cons_trip = 20;
8359 	bp->tx_ticks_int = 18;
8360 	bp->tx_ticks = 80;
8361 
8362 	bp->rx_quick_cons_trip_int = 2;
8363 	bp->rx_quick_cons_trip = 12;
8364 	bp->rx_ticks_int = 18;
8365 	bp->rx_ticks = 18;
8366 
8367 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8368 
8369 	bp->current_interval = BNX2_TIMER_INTERVAL;
8370 
8371 	bp->phy_addr = 1;
8372 
8373 	/* allocate stats_blk */
8374 	rc = bnx2_alloc_stats_blk(dev);
8375 	if (rc)
8376 		goto err_out_unmap;
8377 
8378 	/* Disable WOL support if we are running on a SERDES chip. */
8379 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8380 		bnx2_get_5709_media(bp);
8381 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8382 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8383 
8384 	bp->phy_port = PORT_TP;
8385 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8386 		bp->phy_port = PORT_FIBRE;
8387 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8388 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8389 			bp->flags |= BNX2_FLAG_NO_WOL;
8390 			bp->wol = 0;
8391 		}
8392 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8393 			/* Don't do parallel detect on this board because of
8394 			 * some board problems.  The link will not go down
8395 			 * if we do parallel detect.
8396 			 */
8397 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8398 			    pdev->subsystem_device == 0x310c)
8399 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8400 		} else {
8401 			bp->phy_addr = 2;
8402 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8403 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8404 		}
8405 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8406 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8407 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8408 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8409 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8410 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8411 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8412 
8413 	bnx2_init_fw_cap(bp);
8414 
8415 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8416 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8417 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8418 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8419 		bp->flags |= BNX2_FLAG_NO_WOL;
8420 		bp->wol = 0;
8421 	}
8422 
8423 	if (bp->flags & BNX2_FLAG_NO_WOL)
8424 		device_set_wakeup_capable(&bp->pdev->dev, false);
8425 	else
8426 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8427 
8428 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8429 		bp->tx_quick_cons_trip_int =
8430 			bp->tx_quick_cons_trip;
8431 		bp->tx_ticks_int = bp->tx_ticks;
8432 		bp->rx_quick_cons_trip_int =
8433 			bp->rx_quick_cons_trip;
8434 		bp->rx_ticks_int = bp->rx_ticks;
8435 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8436 		bp->com_ticks_int = bp->com_ticks;
8437 		bp->cmd_ticks_int = bp->cmd_ticks;
8438 	}
8439 
8440 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8441 	 *
8442 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8443 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8444 	 * but causes problems on the AMD 8132 which will eventually stop
8445 	 * responding after a while.
8446 	 *
8447 	 * AMD believes this incompatibility is unique to the 5706, and
8448 	 * prefers to locally disable MSI rather than globally disabling it.
8449 	 */
8450 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8451 		struct pci_dev *amd_8132 = NULL;
8452 
8453 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8454 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8455 						  amd_8132))) {
8456 
8457 			if (amd_8132->revision >= 0x10 &&
8458 			    amd_8132->revision <= 0x13) {
8459 				disable_msi = 1;
8460 				pci_dev_put(amd_8132);
8461 				break;
8462 			}
8463 		}
8464 	}
8465 
8466 	bnx2_set_default_link(bp);
8467 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8468 
8469 	init_timer(&bp->timer);
8470 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8471 	bp->timer.data = (unsigned long) bp;
8472 	bp->timer.function = bnx2_timer;
8473 
8474 #ifdef BCM_CNIC
8475 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8476 		bp->cnic_eth_dev.max_iscsi_conn =
8477 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8478 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8479 	bp->cnic_probe = bnx2_cnic_probe;
8480 #endif
8481 	pci_save_state(pdev);
8482 
8483 	return 0;
8484 
8485 err_out_unmap:
8486 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8487 		pci_disable_pcie_error_reporting(pdev);
8488 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8489 	}
8490 
8491 	pci_iounmap(pdev, bp->regview);
8492 	bp->regview = NULL;
8493 
8494 err_out_release:
8495 	pci_release_regions(pdev);
8496 
8497 err_out_disable:
8498 	pci_disable_device(pdev);
8499 
8500 err_out:
8501 	kfree(bp->temp_stats_blk);
8502 
8503 	return rc;
8504 }
8505 
8506 static char *
8507 bnx2_bus_string(struct bnx2 *bp, char *str)
8508 {
8509 	char *s = str;
8510 
8511 	if (bp->flags & BNX2_FLAG_PCIE) {
8512 		s += sprintf(s, "PCI Express");
8513 	} else {
8514 		s += sprintf(s, "PCI");
8515 		if (bp->flags & BNX2_FLAG_PCIX)
8516 			s += sprintf(s, "-X");
8517 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8518 			s += sprintf(s, " 32-bit");
8519 		else
8520 			s += sprintf(s, " 64-bit");
8521 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8522 	}
8523 	return str;
8524 }
8525 
8526 static void
8527 bnx2_del_napi(struct bnx2 *bp)
8528 {
8529 	int i;
8530 
8531 	for (i = 0; i < bp->irq_nvecs; i++)
8532 		netif_napi_del(&bp->bnx2_napi[i].napi);
8533 }
8534 
8535 static void
8536 bnx2_init_napi(struct bnx2 *bp)
8537 {
8538 	int i;
8539 
8540 	for (i = 0; i < bp->irq_nvecs; i++) {
8541 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8542 		int (*poll)(struct napi_struct *, int);
8543 
8544 		if (i == 0)
8545 			poll = bnx2_poll;
8546 		else
8547 			poll = bnx2_poll_msix;
8548 
8549 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8550 		bnapi->bp = bp;
8551 	}
8552 }
8553 
8554 static const struct net_device_ops bnx2_netdev_ops = {
8555 	.ndo_open		= bnx2_open,
8556 	.ndo_start_xmit		= bnx2_start_xmit,
8557 	.ndo_stop		= bnx2_close,
8558 	.ndo_get_stats64	= bnx2_get_stats64,
8559 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8560 	.ndo_do_ioctl		= bnx2_ioctl,
8561 	.ndo_validate_addr	= eth_validate_addr,
8562 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8563 	.ndo_change_mtu		= bnx2_change_mtu,
8564 	.ndo_set_features	= bnx2_set_features,
8565 	.ndo_tx_timeout		= bnx2_tx_timeout,
8566 #ifdef CONFIG_NET_POLL_CONTROLLER
8567 	.ndo_poll_controller	= poll_bnx2,
8568 #endif
8569 };
8570 
8571 static int
8572 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8573 {
8574 	static int version_printed = 0;
8575 	struct net_device *dev;
8576 	struct bnx2 *bp;
8577 	int rc;
8578 	char str[40];
8579 
8580 	if (version_printed++ == 0)
8581 		pr_info("%s", version);
8582 
8583 	/* dev zeroed in init_etherdev */
8584 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8585 	if (!dev)
8586 		return -ENOMEM;
8587 
8588 	rc = bnx2_init_board(pdev, dev);
8589 	if (rc < 0)
8590 		goto err_free;
8591 
8592 	dev->netdev_ops = &bnx2_netdev_ops;
8593 	dev->watchdog_timeo = TX_TIMEOUT;
8594 	dev->ethtool_ops = &bnx2_ethtool_ops;
8595 
8596 	bp = netdev_priv(dev);
8597 
8598 	pci_set_drvdata(pdev, dev);
8599 
8600 	/*
8601 	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
8602 	 * New io-page table has been created before bnx2 does reset at open stage.
8603 	 * We have to wait for the in-flight DMA to complete to avoid it look up
8604 	 * into the newly created io-page table.
8605 	 */
8606 	if (is_kdump_kernel())
8607 		bnx2_wait_dma_complete(bp);
8608 
8609 	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8610 
8611 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8612 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8613 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8614 
8615 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8616 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8617 
8618 	dev->vlan_features = dev->hw_features;
8619 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8620 	dev->features |= dev->hw_features;
8621 	dev->priv_flags |= IFF_UNICAST_FLT;
8622 
8623 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8624 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8625 
8626 	if ((rc = register_netdev(dev))) {
8627 		dev_err(&pdev->dev, "Cannot register net device\n");
8628 		goto error;
8629 	}
8630 
8631 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8632 		    "node addr %pM\n", board_info[ent->driver_data].name,
8633 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8634 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8635 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8636 		    pdev->irq, dev->dev_addr);
8637 
8638 	return 0;
8639 
8640 error:
8641 	pci_iounmap(pdev, bp->regview);
8642 	pci_release_regions(pdev);
8643 	pci_disable_device(pdev);
8644 err_free:
8645 	bnx2_free_stats_blk(dev);
8646 	free_netdev(dev);
8647 	return rc;
8648 }
8649 
8650 static void
8651 bnx2_remove_one(struct pci_dev *pdev)
8652 {
8653 	struct net_device *dev = pci_get_drvdata(pdev);
8654 	struct bnx2 *bp = netdev_priv(dev);
8655 
8656 	unregister_netdev(dev);
8657 
8658 	del_timer_sync(&bp->timer);
8659 	cancel_work_sync(&bp->reset_task);
8660 
8661 	pci_iounmap(bp->pdev, bp->regview);
8662 
8663 	bnx2_free_stats_blk(dev);
8664 	kfree(bp->temp_stats_blk);
8665 
8666 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8667 		pci_disable_pcie_error_reporting(pdev);
8668 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8669 	}
8670 
8671 	bnx2_release_firmware(bp);
8672 
8673 	free_netdev(dev);
8674 
8675 	pci_release_regions(pdev);
8676 	pci_disable_device(pdev);
8677 }
8678 
8679 #ifdef CONFIG_PM_SLEEP
8680 static int
8681 bnx2_suspend(struct device *device)
8682 {
8683 	struct pci_dev *pdev = to_pci_dev(device);
8684 	struct net_device *dev = pci_get_drvdata(pdev);
8685 	struct bnx2 *bp = netdev_priv(dev);
8686 
8687 	if (netif_running(dev)) {
8688 		cancel_work_sync(&bp->reset_task);
8689 		bnx2_netif_stop(bp, true);
8690 		netif_device_detach(dev);
8691 		del_timer_sync(&bp->timer);
8692 		bnx2_shutdown_chip(bp);
8693 		__bnx2_free_irq(bp);
8694 		bnx2_free_skbs(bp);
8695 	}
8696 	bnx2_setup_wol(bp);
8697 	return 0;
8698 }
8699 
8700 static int
8701 bnx2_resume(struct device *device)
8702 {
8703 	struct pci_dev *pdev = to_pci_dev(device);
8704 	struct net_device *dev = pci_get_drvdata(pdev);
8705 	struct bnx2 *bp = netdev_priv(dev);
8706 
8707 	if (!netif_running(dev))
8708 		return 0;
8709 
8710 	bnx2_set_power_state(bp, PCI_D0);
8711 	netif_device_attach(dev);
8712 	bnx2_request_irq(bp);
8713 	bnx2_init_nic(bp, 1);
8714 	bnx2_netif_start(bp, true);
8715 	return 0;
8716 }
8717 
8718 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8719 #define BNX2_PM_OPS (&bnx2_pm_ops)
8720 
8721 #else
8722 
8723 #define BNX2_PM_OPS NULL
8724 
8725 #endif /* CONFIG_PM_SLEEP */
8726 /**
8727  * bnx2_io_error_detected - called when PCI error is detected
8728  * @pdev: Pointer to PCI device
8729  * @state: The current pci connection state
8730  *
8731  * This function is called after a PCI bus error affecting
8732  * this device has been detected.
8733  */
8734 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8735 					       pci_channel_state_t state)
8736 {
8737 	struct net_device *dev = pci_get_drvdata(pdev);
8738 	struct bnx2 *bp = netdev_priv(dev);
8739 
8740 	rtnl_lock();
8741 	netif_device_detach(dev);
8742 
8743 	if (state == pci_channel_io_perm_failure) {
8744 		rtnl_unlock();
8745 		return PCI_ERS_RESULT_DISCONNECT;
8746 	}
8747 
8748 	if (netif_running(dev)) {
8749 		bnx2_netif_stop(bp, true);
8750 		del_timer_sync(&bp->timer);
8751 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8752 	}
8753 
8754 	pci_disable_device(pdev);
8755 	rtnl_unlock();
8756 
8757 	/* Request a slot slot reset. */
8758 	return PCI_ERS_RESULT_NEED_RESET;
8759 }
8760 
8761 /**
8762  * bnx2_io_slot_reset - called after the pci bus has been reset.
8763  * @pdev: Pointer to PCI device
8764  *
8765  * Restart the card from scratch, as if from a cold-boot.
8766  */
8767 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8768 {
8769 	struct net_device *dev = pci_get_drvdata(pdev);
8770 	struct bnx2 *bp = netdev_priv(dev);
8771 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8772 	int err = 0;
8773 
8774 	rtnl_lock();
8775 	if (pci_enable_device(pdev)) {
8776 		dev_err(&pdev->dev,
8777 			"Cannot re-enable PCI device after reset\n");
8778 	} else {
8779 		pci_set_master(pdev);
8780 		pci_restore_state(pdev);
8781 		pci_save_state(pdev);
8782 
8783 		if (netif_running(dev))
8784 			err = bnx2_init_nic(bp, 1);
8785 
8786 		if (!err)
8787 			result = PCI_ERS_RESULT_RECOVERED;
8788 	}
8789 
8790 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8791 		bnx2_napi_enable(bp);
8792 		dev_close(dev);
8793 	}
8794 	rtnl_unlock();
8795 
8796 	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8797 		return result;
8798 
8799 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8800 	if (err) {
8801 		dev_err(&pdev->dev,
8802 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8803 			 err); /* non-fatal, continue */
8804 	}
8805 
8806 	return result;
8807 }
8808 
8809 /**
8810  * bnx2_io_resume - called when traffic can start flowing again.
8811  * @pdev: Pointer to PCI device
8812  *
8813  * This callback is called when the error recovery driver tells us that
8814  * its OK to resume normal operation.
8815  */
8816 static void bnx2_io_resume(struct pci_dev *pdev)
8817 {
8818 	struct net_device *dev = pci_get_drvdata(pdev);
8819 	struct bnx2 *bp = netdev_priv(dev);
8820 
8821 	rtnl_lock();
8822 	if (netif_running(dev))
8823 		bnx2_netif_start(bp, true);
8824 
8825 	netif_device_attach(dev);
8826 	rtnl_unlock();
8827 }
8828 
8829 static void bnx2_shutdown(struct pci_dev *pdev)
8830 {
8831 	struct net_device *dev = pci_get_drvdata(pdev);
8832 	struct bnx2 *bp;
8833 
8834 	if (!dev)
8835 		return;
8836 
8837 	bp = netdev_priv(dev);
8838 	if (!bp)
8839 		return;
8840 
8841 	rtnl_lock();
8842 	if (netif_running(dev))
8843 		dev_close(bp->dev);
8844 
8845 	if (system_state == SYSTEM_POWER_OFF)
8846 		bnx2_set_power_state(bp, PCI_D3hot);
8847 
8848 	rtnl_unlock();
8849 }
8850 
8851 static const struct pci_error_handlers bnx2_err_handler = {
8852 	.error_detected	= bnx2_io_error_detected,
8853 	.slot_reset	= bnx2_io_slot_reset,
8854 	.resume		= bnx2_io_resume,
8855 };
8856 
8857 static struct pci_driver bnx2_pci_driver = {
8858 	.name		= DRV_MODULE_NAME,
8859 	.id_table	= bnx2_pci_tbl,
8860 	.probe		= bnx2_init_one,
8861 	.remove		= bnx2_remove_one,
8862 	.driver.pm	= BNX2_PM_OPS,
8863 	.err_handler	= &bnx2_err_handler,
8864 	.shutdown	= bnx2_shutdown,
8865 };
8866 
8867 module_pci_driver(bnx2_pci_driver);
8868