1 /* bnx2.c: QLogic bnx2 network driver.
2 *
3 * Copyright (c) 2004-2014 Broadcom Corporation
4 * Copyright (c) 2014-2015 QLogic Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Michael Chan (mchan@broadcom.com)
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/crash_dump.h>
52
53 #if IS_ENABLED(CONFIG_CNIC)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME "bnx2"
61 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
62 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
64 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
66
67 #define RUN_AT(x) (jiffies + (x))
68
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT (5*HZ)
71
72 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
73 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
74 MODULE_LICENSE("GPL");
75 MODULE_FIRMWARE(FW_MIPS_FILE_06);
76 MODULE_FIRMWARE(FW_RV2P_FILE_06);
77 MODULE_FIRMWARE(FW_MIPS_FILE_09);
78 MODULE_FIRMWARE(FW_RV2P_FILE_09);
79 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
80
81 static int disable_msi = 0;
82
83 module_param(disable_msi, int, 0444);
84 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
85
86 typedef enum {
87 BCM5706 = 0,
88 NC370T,
89 NC370I,
90 BCM5706S,
91 NC370F,
92 BCM5708,
93 BCM5708S,
94 BCM5709,
95 BCM5709S,
96 BCM5716,
97 BCM5716S,
98 } board_t;
99
100 /* indexed by board_t, above */
101 static struct {
102 char *name;
103 } board_info[] = {
104 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
105 { "HP NC370T Multifunction Gigabit Server Adapter" },
106 { "HP NC370i Multifunction Gigabit Server Adapter" },
107 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
108 { "HP NC370F Multifunction Gigabit Server Adapter" },
109 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
110 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
111 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
112 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
113 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
114 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
115 };
116
117 static const struct pci_device_id bnx2_pci_tbl[] = {
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
119 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
121 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
127 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
136 { PCI_VENDOR_ID_BROADCOM, 0x163b,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
138 { PCI_VENDOR_ID_BROADCOM, 0x163c,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
140 { 0, }
141 };
142
143 static const struct flash_spec flash_table[] =
144 {
145 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
146 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
147 /* Slow EEPROM */
148 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
149 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
150 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
151 "EEPROM - slow"},
152 /* Expansion entry 0001 */
153 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
154 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
155 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 "Entry 0001"},
157 /* Saifun SA25F010 (non-buffered flash) */
158 /* strap, cfg1, & write1 need updates */
159 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
162 "Non-buffered flash (128kB)"},
163 /* Saifun SA25F020 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
168 "Non-buffered flash (256kB)"},
169 /* Expansion entry 0100 */
170 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
173 "Entry 0100"},
174 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
175 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
176 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
177 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
178 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
179 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
180 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
181 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
183 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
184 /* Saifun SA25F005 (non-buffered flash) */
185 /* strap, cfg1, & write1 need updates */
186 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
189 "Non-buffered flash (64kB)"},
190 /* Fast EEPROM */
191 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
192 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
193 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
194 "EEPROM - fast"},
195 /* Expansion entry 1001 */
196 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1001"},
200 /* Expansion entry 1010 */
201 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 "Entry 1010"},
205 /* ATMEL AT45DB011B (buffered flash) */
206 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
207 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
208 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
209 "Buffered flash (128kB)"},
210 /* Expansion entry 1100 */
211 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
212 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
213 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1100"},
215 /* Expansion entry 1101 */
216 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1101"},
220 /* Atmel Expansion entry 1110 */
221 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
222 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
223 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
224 "Entry 1110 (Atmel)"},
225 /* ATMEL AT45DB021B (buffered flash) */
226 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
227 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
229 "Buffered flash (256kB)"},
230 };
231
232 static const struct flash_spec flash_5709 = {
233 .flags = BNX2_NV_BUFFERED,
234 .page_bits = BCM5709_FLASH_PAGE_BITS,
235 .page_size = BCM5709_FLASH_PAGE_SIZE,
236 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
237 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
238 .name = "5709 Buffered flash (256kB)",
239 };
240
241 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
242
243 static void bnx2_init_napi(struct bnx2 *bp);
244 static void bnx2_del_napi(struct bnx2 *bp);
245
246 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
247 {
248 u32 diff;
249
250 /* The ring uses 256 indices for 255 entries, one of them
251 * needs to be skipped.
252 */
253 diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
254 if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
255 diff &= 0xffff;
256 if (diff == BNX2_TX_DESC_CNT)
257 diff = BNX2_MAX_TX_DESC_CNT;
258 }
259 return bp->tx_ring_size - diff;
260 }
261
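/* Indirect register access: offsets outside the directly mapped BAR are
 * reached by writing the target offset to BNX2_PCICFG_REG_WINDOW_ADDRESS
 * and then reading or writing BNX2_PCICFG_REG_WINDOW.  indirect_lock
 * serializes the two-step sequence against other indirect accesses.
 */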
262 static u32
263 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
264 {
265 unsigned long flags;
266 u32 val;
267
268 spin_lock_irqsave(&bp->indirect_lock, flags);
269 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
270 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
271 spin_unlock_irqrestore(&bp->indirect_lock, flags);
272 return val;
273 }
274
275 static void
276 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
277 {
278 unsigned long flags;
279
280 spin_lock_irqsave(&bp->indirect_lock, flags);
281 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
282 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
283 spin_unlock_irqrestore(&bp->indirect_lock, flags);
284 }
285
286 static void
287 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
288 {
289 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
290 }
291
292 static u32
293 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
294 {
295 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
296 }
297
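/* Write a value into on-chip context memory for the given connection
 * (cid_addr + offset).  The 5709 posts the write through a request bit
 * in BNX2_CTX_CTX_CTRL and polls briefly for completion; older chips
 * take a plain address/data register pair.
 */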
298 static void
299 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
300 {
301 unsigned long flags;
302
303 offset += cid_addr;
304 spin_lock_irqsave(&bp->indirect_lock, flags);
305 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
306 int i;
307
308 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
309 BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
310 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
311 for (i = 0; i < 5; i++) {
312 val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
313 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
314 break;
315 udelay(5);
316 }
317 } else {
318 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
319 BNX2_WR(bp, BNX2_CTX_DATA, val);
320 }
321 spin_unlock_irqrestore(&bp->indirect_lock, flags);
322 }
323
324 #ifdef BCM_CNIC
325 static int
326 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
327 {
328 struct bnx2 *bp = netdev_priv(dev);
329 struct drv_ctl_io *io = &info->data.io;
330
331 switch (info->cmd) {
332 case DRV_CTL_IO_WR_CMD:
333 bnx2_reg_wr_ind(bp, io->offset, io->data);
334 break;
335 case DRV_CTL_IO_RD_CMD:
336 io->data = bnx2_reg_rd_ind(bp, io->offset);
337 break;
338 case DRV_CTL_CTX_WR_CMD:
339 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
340 break;
341 default:
342 return -EINVAL;
343 }
344 return 0;
345 }
346
347 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
348 {
349 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
350 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
351 int sb_id;
352
353 if (bp->flags & BNX2_FLAG_USING_MSIX) {
354 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
355 bnapi->cnic_present = 0;
356 sb_id = bp->irq_nvecs;
357 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
358 } else {
359 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
360 bnapi->cnic_tag = bnapi->last_status_idx;
361 bnapi->cnic_present = 1;
362 sb_id = 0;
363 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
364 }
365
366 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
367 cp->irq_arr[0].status_blk = (void *)
368 ((unsigned long) bnapi->status_blk.msi +
369 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
370 cp->irq_arr[0].status_blk_map = bp->status_blk_mapping;
371 cp->irq_arr[0].status_blk_num = sb_id;
372 cp->num_irq = 1;
373 }
374
375 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
376 void *data)
377 {
378 struct bnx2 *bp = netdev_priv(dev);
379 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
380
381 if (!ops)
382 return -EINVAL;
383
384 if (cp->drv_state & CNIC_DRV_STATE_REGD)
385 return -EBUSY;
386
387 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
388 return -ENODEV;
389
390 bp->cnic_data = data;
391 rcu_assign_pointer(bp->cnic_ops, ops);
392
393 cp->num_irq = 0;
394 cp->drv_state = CNIC_DRV_STATE_REGD;
395
396 bnx2_setup_cnic_irq_info(bp);
397
398 return 0;
399 }
400
401 static int bnx2_unregister_cnic(struct net_device *dev)
402 {
403 struct bnx2 *bp = netdev_priv(dev);
404 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
405 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
406
407 mutex_lock(&bp->cnic_lock);
408 cp->drv_state = 0;
409 bnapi->cnic_present = 0;
410 RCU_INIT_POINTER(bp->cnic_ops, NULL);
411 mutex_unlock(&bp->cnic_lock);
412 synchronize_rcu();
413 return 0;
414 }
415
416 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
417 {
418 struct bnx2 *bp = netdev_priv(dev);
419 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
420
421 if (!cp->max_iscsi_conn)
422 return NULL;
423
424 cp->drv_owner = THIS_MODULE;
425 cp->chip_id = bp->chip_id;
426 cp->pdev = bp->pdev;
427 cp->io_base = bp->regview;
428 cp->drv_ctl = bnx2_drv_ctl;
429 cp->drv_register_cnic = bnx2_register_cnic;
430 cp->drv_unregister_cnic = bnx2_unregister_cnic;
431
432 return cp;
433 }
434
435 static void
436 bnx2_cnic_stop(struct bnx2 *bp)
437 {
438 struct cnic_ops *c_ops;
439 struct cnic_ctl_info info;
440
441 mutex_lock(&bp->cnic_lock);
442 c_ops = rcu_dereference_protected(bp->cnic_ops,
443 lockdep_is_held(&bp->cnic_lock));
444 if (c_ops) {
445 info.cmd = CNIC_CTL_STOP_CMD;
446 c_ops->cnic_ctl(bp->cnic_data, &info);
447 }
448 mutex_unlock(&bp->cnic_lock);
449 }
450
451 static void
452 bnx2_cnic_start(struct bnx2 *bp)
453 {
454 struct cnic_ops *c_ops;
455 struct cnic_ctl_info info;
456
457 mutex_lock(&bp->cnic_lock);
458 c_ops = rcu_dereference_protected(bp->cnic_ops,
459 lockdep_is_held(&bp->cnic_lock));
460 if (c_ops) {
461 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
462 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
463
464 bnapi->cnic_tag = bnapi->last_status_idx;
465 }
466 info.cmd = CNIC_CTL_START_CMD;
467 c_ops->cnic_ctl(bp->cnic_data, &info);
468 }
469 mutex_unlock(&bp->cnic_lock);
470 }
471
472 #else
473
474 static void
475 bnx2_cnic_stop(struct bnx2 *bp)
476 {
477 }
478
479 static void
480 bnx2_cnic_start(struct bnx2 *bp)
481 {
482 }
483
484 #endif
485
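/* MII (MDIO) access to the PHY.  When the EMAC is auto-polling the PHY,
 * polling is paused around the transaction so it does not race with the
 * hardware; the command is then issued through BNX2_EMAC_MDIO_COMM and
 * polled for completion.
 */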
486 static int
487 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
488 {
489 u32 val1;
490 int i, ret;
491
492 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
493 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
494 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
495
496 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
497 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
498
499 udelay(40);
500 }
501
502 val1 = (bp->phy_addr << 21) | (reg << 16) |
503 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
504 BNX2_EMAC_MDIO_COMM_START_BUSY;
505 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
506
507 for (i = 0; i < 50; i++) {
508 udelay(10);
509
510 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
511 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
512 udelay(5);
513
514 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
515 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
516
517 break;
518 }
519 }
520
521 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
522 *val = 0x0;
523 ret = -EBUSY;
524 }
525 else {
526 *val = val1;
527 ret = 0;
528 }
529
530 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
531 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
532 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
533
534 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
535 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
536
537 udelay(40);
538 }
539
540 return ret;
541 }
542
543 static int
544 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
545 {
546 u32 val1;
547 int i, ret;
548
549 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
550 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
551 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
552
553 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
554 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
555
556 udelay(40);
557 }
558
559 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
560 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
561 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
562 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
563
564 for (i = 0; i < 50; i++) {
565 udelay(10);
566
567 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
568 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
569 udelay(5);
570 break;
571 }
572 }
573
574 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
575 ret = -EBUSY;
576 else
577 ret = 0;
578
579 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
580 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
581 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
582
583 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
584 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
585
586 udelay(40);
587 }
588
589 return ret;
590 }
591
592 static void
593 bnx2_disable_int(struct bnx2 *bp)
594 {
595 int i;
596 struct bnx2_napi *bnapi;
597
598 for (i = 0; i < bp->irq_nvecs; i++) {
599 bnapi = &bp->bnx2_napi[i];
600 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
601 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
602 }
603 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
604 }
605
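/* Re-arm interrupts for each vector: the first write acks events up to
 * last_status_idx with the interrupt still masked, the second unmasks it,
 * and the final COAL_NOW command forces the host coalescing block to fire
 * if new events arrived while masked.
 */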
606 static void
607 bnx2_enable_int(struct bnx2 *bp)
608 {
609 int i;
610 struct bnx2_napi *bnapi;
611
612 for (i = 0; i < bp->irq_nvecs; i++) {
613 bnapi = &bp->bnx2_napi[i];
614
615 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
616 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
617 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
618 bnapi->last_status_idx);
619
620 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
621 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
622 bnapi->last_status_idx);
623 }
624 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
625 }
626
627 static void
628 bnx2_disable_int_sync(struct bnx2 *bp)
629 {
630 int i;
631
632 atomic_inc(&bp->intr_sem);
633 if (!netif_running(bp->dev))
634 return;
635
636 bnx2_disable_int(bp);
637 for (i = 0; i < bp->irq_nvecs; i++)
638 synchronize_irq(bp->irq_tbl[i].vector);
639 }
640
641 static void
642 bnx2_napi_disable(struct bnx2 *bp)
643 {
644 int i;
645
646 for (i = 0; i < bp->irq_nvecs; i++)
647 napi_disable(&bp->bnx2_napi[i].napi);
648 }
649
650 static void
651 bnx2_napi_enable(struct bnx2 *bp)
652 {
653 int i;
654
655 for (i = 0; i < bp->irq_nvecs; i++)
656 napi_enable(&bp->bnx2_napi[i].napi);
657 }
658
659 static void
660 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
661 {
662 if (stop_cnic)
663 bnx2_cnic_stop(bp);
664 if (netif_running(bp->dev)) {
665 bnx2_napi_disable(bp);
666 netif_tx_disable(bp->dev);
667 }
668 bnx2_disable_int_sync(bp);
669 netif_carrier_off(bp->dev); /* prevent tx timeout */
670 }
671
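/* Restart the data path once intr_sem drops back to zero, i.e. when this
 * call balances the increment done in bnx2_disable_int_sync().
 */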
672 static void
673 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
674 {
675 if (atomic_dec_and_test(&bp->intr_sem)) {
676 if (netif_running(bp->dev)) {
677 netif_tx_wake_all_queues(bp->dev);
678 spin_lock_bh(&bp->phy_lock);
679 if (bp->link_up)
680 netif_carrier_on(bp->dev);
681 spin_unlock_bh(&bp->phy_lock);
682 bnx2_napi_enable(bp);
683 bnx2_enable_int(bp);
684 if (start_cnic)
685 bnx2_cnic_start(bp);
686 }
687 }
688 }
689
690 static void
691 bnx2_free_tx_mem(struct bnx2 *bp)
692 {
693 int i;
694
695 for (i = 0; i < bp->num_tx_rings; i++) {
696 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
697 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
698
699 if (txr->tx_desc_ring) {
700 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
701 txr->tx_desc_ring,
702 txr->tx_desc_mapping);
703 txr->tx_desc_ring = NULL;
704 }
705 kfree(txr->tx_buf_ring);
706 txr->tx_buf_ring = NULL;
707 }
708 }
709
710 static void
711 bnx2_free_rx_mem(struct bnx2 *bp)
712 {
713 int i;
714
715 for (i = 0; i < bp->num_rx_rings; i++) {
716 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
717 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
718 int j;
719
720 for (j = 0; j < bp->rx_max_ring; j++) {
721 if (rxr->rx_desc_ring[j])
722 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
723 rxr->rx_desc_ring[j],
724 rxr->rx_desc_mapping[j]);
725 rxr->rx_desc_ring[j] = NULL;
726 }
727 vfree(rxr->rx_buf_ring);
728 rxr->rx_buf_ring = NULL;
729
730 for (j = 0; j < bp->rx_max_pg_ring; j++) {
731 if (rxr->rx_pg_desc_ring[j])
732 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
733 rxr->rx_pg_desc_ring[j],
734 rxr->rx_pg_desc_mapping[j]);
735 rxr->rx_pg_desc_ring[j] = NULL;
736 }
737 vfree(rxr->rx_pg_ring);
738 rxr->rx_pg_ring = NULL;
739 }
740 }
741
742 static int
743 bnx2_alloc_tx_mem(struct bnx2 *bp)
744 {
745 int i;
746
747 for (i = 0; i < bp->num_tx_rings; i++) {
748 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
749 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
750
751 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
752 if (!txr->tx_buf_ring)
753 return -ENOMEM;
754
755 txr->tx_desc_ring =
756 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
757 &txr->tx_desc_mapping, GFP_KERNEL);
758 if (!txr->tx_desc_ring)
759 return -ENOMEM;
760 }
761 return 0;
762 }
763
764 static int
765 bnx2_alloc_rx_mem(struct bnx2 *bp)
766 {
767 int i;
768
769 for (i = 0; i < bp->num_rx_rings; i++) {
770 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
771 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
772 int j;
773
774 rxr->rx_buf_ring =
775 vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
776 if (!rxr->rx_buf_ring)
777 return -ENOMEM;
778
779 for (j = 0; j < bp->rx_max_ring; j++) {
780 rxr->rx_desc_ring[j] =
781 dma_alloc_coherent(&bp->pdev->dev,
782 RXBD_RING_SIZE,
783 &rxr->rx_desc_mapping[j],
784 GFP_KERNEL);
785 if (!rxr->rx_desc_ring[j])
786 return -ENOMEM;
787
788 }
789
790 if (bp->rx_pg_ring_size) {
791 rxr->rx_pg_ring =
792 vzalloc(array_size(SW_RXPG_RING_SIZE,
793 bp->rx_max_pg_ring));
794 if (!rxr->rx_pg_ring)
795 return -ENOMEM;
796
797 }
798
799 for (j = 0; j < bp->rx_max_pg_ring; j++) {
800 rxr->rx_pg_desc_ring[j] =
801 dma_alloc_coherent(&bp->pdev->dev,
802 RXBD_RING_SIZE,
803 &rxr->rx_pg_desc_mapping[j],
804 GFP_KERNEL);
805 if (!rxr->rx_pg_desc_ring[j])
806 return -ENOMEM;
807
808 }
809 }
810 return 0;
811 }
812
813 static void
814 bnx2_free_stats_blk(struct net_device *dev)
815 {
816 struct bnx2 *bp = netdev_priv(dev);
817
818 if (bp->status_blk) {
819 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
820 bp->status_blk,
821 bp->status_blk_mapping);
822 bp->status_blk = NULL;
823 bp->stats_blk = NULL;
824 }
825 }
826
827 static int
828 bnx2_alloc_stats_blk(struct net_device *dev)
829 {
830 int status_blk_size;
831 void *status_blk;
832 struct bnx2 *bp = netdev_priv(dev);
833
834 /* Combine status and statistics blocks into one allocation. */
835 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
836 if (bp->flags & BNX2_FLAG_MSIX_CAP)
837 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
838 BNX2_SBLK_MSIX_ALIGN_SIZE);
839 bp->status_stats_size = status_blk_size +
840 sizeof(struct statistics_block);
841 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
842 &bp->status_blk_mapping, GFP_KERNEL);
843 if (!status_blk)
844 return -ENOMEM;
845
846 bp->status_blk = status_blk;
847 bp->stats_blk = status_blk + status_blk_size;
848 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
849
850 return 0;
851 }
852
853 static void
854 bnx2_free_mem(struct bnx2 *bp)
855 {
856 int i;
857 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
858
859 bnx2_free_tx_mem(bp);
860 bnx2_free_rx_mem(bp);
861
862 for (i = 0; i < bp->ctx_pages; i++) {
863 if (bp->ctx_blk[i]) {
864 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
865 bp->ctx_blk[i],
866 bp->ctx_blk_mapping[i]);
867 bp->ctx_blk[i] = NULL;
868 }
869 }
870
871 if (bnapi->status_blk.msi)
872 bnapi->status_blk.msi = NULL;
873 }
874
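/* Set up per-vector status block pointers and, on the 5709, the host
 * memory pages used for context.  Vector 0 uses the base MSI/INTx status
 * block; with MSI-X each additional vector gets its own
 * BNX2_SBLK_MSIX_ALIGN_SIZE-aligned slice of the same DMA allocation.
 */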
875 static int
876 bnx2_alloc_mem(struct bnx2 *bp)
877 {
878 int i, err;
879 struct bnx2_napi *bnapi;
880
881 bnapi = &bp->bnx2_napi[0];
882 bnapi->status_blk.msi = bp->status_blk;
883 bnapi->hw_tx_cons_ptr =
884 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
885 bnapi->hw_rx_cons_ptr =
886 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
887 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
888 for (i = 1; i < bp->irq_nvecs; i++) {
889 struct status_block_msix *sblk;
890
891 bnapi = &bp->bnx2_napi[i];
892
893 sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
894 bnapi->status_blk.msix = sblk;
895 bnapi->hw_tx_cons_ptr =
896 &sblk->status_tx_quick_consumer_index;
897 bnapi->hw_rx_cons_ptr =
898 &sblk->status_rx_quick_consumer_index;
899 bnapi->int_num = i << 24;
900 }
901 }
902
903 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
904 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
905 if (bp->ctx_pages == 0)
906 bp->ctx_pages = 1;
907 for (i = 0; i < bp->ctx_pages; i++) {
908 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
909 BNX2_PAGE_SIZE,
910 &bp->ctx_blk_mapping[i],
911 GFP_KERNEL);
912 if (!bp->ctx_blk[i])
913 goto alloc_mem_err;
914 }
915 }
916
917 err = bnx2_alloc_rx_mem(bp);
918 if (err)
919 goto alloc_mem_err;
920
921 err = bnx2_alloc_tx_mem(bp);
922 if (err)
923 goto alloc_mem_err;
924
925 return 0;
926
927 alloc_mem_err:
928 bnx2_free_mem(bp);
929 return -ENOMEM;
930 }
931
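/* Mirror the driver's view of the link into the BNX2_LINK_STATUS
 * shared-memory word so the bootcode/management firmware sees the same
 * speed, duplex and autoneg state.
 */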
932 static void
933 bnx2_report_fw_link(struct bnx2 *bp)
934 {
935 u32 fw_link_status = 0;
936
937 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
938 return;
939
940 if (bp->link_up) {
941 u32 bmsr;
942
943 switch (bp->line_speed) {
944 case SPEED_10:
945 if (bp->duplex == DUPLEX_HALF)
946 fw_link_status = BNX2_LINK_STATUS_10HALF;
947 else
948 fw_link_status = BNX2_LINK_STATUS_10FULL;
949 break;
950 case SPEED_100:
951 if (bp->duplex == DUPLEX_HALF)
952 fw_link_status = BNX2_LINK_STATUS_100HALF;
953 else
954 fw_link_status = BNX2_LINK_STATUS_100FULL;
955 break;
956 case SPEED_1000:
957 if (bp->duplex == DUPLEX_HALF)
958 fw_link_status = BNX2_LINK_STATUS_1000HALF;
959 else
960 fw_link_status = BNX2_LINK_STATUS_1000FULL;
961 break;
962 case SPEED_2500:
963 if (bp->duplex == DUPLEX_HALF)
964 fw_link_status = BNX2_LINK_STATUS_2500HALF;
965 else
966 fw_link_status = BNX2_LINK_STATUS_2500FULL;
967 break;
968 }
969
970 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
971
972 if (bp->autoneg) {
973 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
974
975 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
976 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
977
978 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
979 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
980 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
981 else
982 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
983 }
984 }
985 else
986 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
987
988 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
989 }
990
991 static char *
992 bnx2_xceiver_str(struct bnx2 *bp)
993 {
994 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
995 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
996 "Copper");
997 }
998
999 static void
1000 bnx2_report_link(struct bnx2 *bp)
1001 {
1002 if (bp->link_up) {
1003 netif_carrier_on(bp->dev);
1004 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1005 bnx2_xceiver_str(bp),
1006 bp->line_speed,
1007 bp->duplex == DUPLEX_FULL ? "full" : "half");
1008
1009 if (bp->flow_ctrl) {
1010 if (bp->flow_ctrl & FLOW_CTRL_RX) {
1011 pr_cont(", receive ");
1012 if (bp->flow_ctrl & FLOW_CTRL_TX)
1013 pr_cont("& transmit ");
1014 }
1015 else {
1016 pr_cont(", transmit ");
1017 }
1018 pr_cont("flow control ON");
1019 }
1020 pr_cont("\n");
1021 } else {
1022 netif_carrier_off(bp->dev);
1023 netdev_err(bp->dev, "NIC %s Link is Down\n",
1024 bnx2_xceiver_str(bp));
1025 }
1026
1027 bnx2_report_fw_link(bp);
1028 }
1029
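/* Resolve flow control after link-up.  Without pause autonegotiation the
 * user-requested setting is applied directly (full duplex only);
 * otherwise the local and partner pause advertisements are compared per
 * IEEE 802.3 Annex 28B.
 */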
1030 static void
1031 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1032 {
1033 u32 local_adv, remote_adv;
1034
1035 bp->flow_ctrl = 0;
1036 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1037 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1038
1039 if (bp->duplex == DUPLEX_FULL) {
1040 bp->flow_ctrl = bp->req_flow_ctrl;
1041 }
1042 return;
1043 }
1044
1045 if (bp->duplex != DUPLEX_FULL) {
1046 return;
1047 }
1048
1049 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1050 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1051 u32 val;
1052
1053 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1054 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1055 bp->flow_ctrl |= FLOW_CTRL_TX;
1056 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1057 bp->flow_ctrl |= FLOW_CTRL_RX;
1058 return;
1059 }
1060
1061 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1062 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1063
1064 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1065 u32 new_local_adv = 0;
1066 u32 new_remote_adv = 0;
1067
1068 if (local_adv & ADVERTISE_1000XPAUSE)
1069 new_local_adv |= ADVERTISE_PAUSE_CAP;
1070 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1071 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1072 if (remote_adv & ADVERTISE_1000XPAUSE)
1073 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1074 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1075 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1076
1077 local_adv = new_local_adv;
1078 remote_adv = new_remote_adv;
1079 }
1080
1081 /* See Table 28B-3 of 802.3ab-1999 spec. */
1082 if (local_adv & ADVERTISE_PAUSE_CAP) {
1083 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1084 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1085 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1086 }
1087 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1088 bp->flow_ctrl = FLOW_CTRL_RX;
1089 }
1090 }
1091 else {
1092 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1093 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1094 }
1095 }
1096 }
1097 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1098 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1099 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1100
1101 bp->flow_ctrl = FLOW_CTRL_TX;
1102 }
1103 }
1104 }
1105
1106 static int
1107 bnx2_5709s_linkup(struct bnx2 *bp)
1108 {
1109 u32 val, speed;
1110
1111 bp->link_up = 1;
1112
1113 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1114 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1115 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1116
1117 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1118 bp->line_speed = bp->req_line_speed;
1119 bp->duplex = bp->req_duplex;
1120 return 0;
1121 }
1122 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1123 switch (speed) {
1124 case MII_BNX2_GP_TOP_AN_SPEED_10:
1125 bp->line_speed = SPEED_10;
1126 break;
1127 case MII_BNX2_GP_TOP_AN_SPEED_100:
1128 bp->line_speed = SPEED_100;
1129 break;
1130 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1131 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1132 bp->line_speed = SPEED_1000;
1133 break;
1134 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1135 bp->line_speed = SPEED_2500;
1136 break;
1137 }
1138 if (val & MII_BNX2_GP_TOP_AN_FD)
1139 bp->duplex = DUPLEX_FULL;
1140 else
1141 bp->duplex = DUPLEX_HALF;
1142 return 0;
1143 }
1144
1145 static int
1146 bnx2_5708s_linkup(struct bnx2 *bp)
1147 {
1148 u32 val;
1149
1150 bp->link_up = 1;
1151 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1152 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1153 case BCM5708S_1000X_STAT1_SPEED_10:
1154 bp->line_speed = SPEED_10;
1155 break;
1156 case BCM5708S_1000X_STAT1_SPEED_100:
1157 bp->line_speed = SPEED_100;
1158 break;
1159 case BCM5708S_1000X_STAT1_SPEED_1G:
1160 bp->line_speed = SPEED_1000;
1161 break;
1162 case BCM5708S_1000X_STAT1_SPEED_2G5:
1163 bp->line_speed = SPEED_2500;
1164 break;
1165 }
1166 if (val & BCM5708S_1000X_STAT1_FD)
1167 bp->duplex = DUPLEX_FULL;
1168 else
1169 bp->duplex = DUPLEX_HALF;
1170
1171 return 0;
1172 }
1173
1174 static int
1175 bnx2_5706s_linkup(struct bnx2 *bp)
1176 {
1177 u32 bmcr, local_adv, remote_adv, common;
1178
1179 bp->link_up = 1;
1180 bp->line_speed = SPEED_1000;
1181
1182 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1183 if (bmcr & BMCR_FULLDPLX) {
1184 bp->duplex = DUPLEX_FULL;
1185 }
1186 else {
1187 bp->duplex = DUPLEX_HALF;
1188 }
1189
1190 if (!(bmcr & BMCR_ANENABLE)) {
1191 return 0;
1192 }
1193
1194 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1195 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1196
1197 common = local_adv & remote_adv;
1198 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1199
1200 if (common & ADVERTISE_1000XFULL) {
1201 bp->duplex = DUPLEX_FULL;
1202 }
1203 else {
1204 bp->duplex = DUPLEX_HALF;
1205 }
1206 }
1207
1208 return 0;
1209 }
1210
1211 static int
1212 bnx2_copper_linkup(struct bnx2 *bp)
1213 {
1214 u32 bmcr;
1215
1216 bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1217
1218 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1219 if (bmcr & BMCR_ANENABLE) {
1220 u32 local_adv, remote_adv, common;
1221
1222 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1223 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1224
1225 common = local_adv & (remote_adv >> 2);
1226 if (common & ADVERTISE_1000FULL) {
1227 bp->line_speed = SPEED_1000;
1228 bp->duplex = DUPLEX_FULL;
1229 }
1230 else if (common & ADVERTISE_1000HALF) {
1231 bp->line_speed = SPEED_1000;
1232 bp->duplex = DUPLEX_HALF;
1233 }
1234 else {
1235 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1236 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1237
1238 common = local_adv & remote_adv;
1239 if (common & ADVERTISE_100FULL) {
1240 bp->line_speed = SPEED_100;
1241 bp->duplex = DUPLEX_FULL;
1242 }
1243 else if (common & ADVERTISE_100HALF) {
1244 bp->line_speed = SPEED_100;
1245 bp->duplex = DUPLEX_HALF;
1246 }
1247 else if (common & ADVERTISE_10FULL) {
1248 bp->line_speed = SPEED_10;
1249 bp->duplex = DUPLEX_FULL;
1250 }
1251 else if (common & ADVERTISE_10HALF) {
1252 bp->line_speed = SPEED_10;
1253 bp->duplex = DUPLEX_HALF;
1254 }
1255 else {
1256 bp->line_speed = 0;
1257 bp->link_up = 0;
1258 }
1259 }
1260 }
1261 else {
1262 if (bmcr & BMCR_SPEED100) {
1263 bp->line_speed = SPEED_100;
1264 }
1265 else {
1266 bp->line_speed = SPEED_10;
1267 }
1268 if (bmcr & BMCR_FULLDPLX) {
1269 bp->duplex = DUPLEX_FULL;
1270 }
1271 else {
1272 bp->duplex = DUPLEX_HALF;
1273 }
1274 }
1275
1276 if (bp->link_up) {
1277 u32 ext_status;
1278
1279 bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1280 if (ext_status & EXT_STATUS_MDIX)
1281 bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1282 }
1283
1284 return 0;
1285 }
1286
1287 static void
1288 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1289 {
1290 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1291
1292 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1293 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1294 val |= 0x02 << 8;
1295
1296 if (bp->flow_ctrl & FLOW_CTRL_TX)
1297 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1298
1299 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1300 }
1301
1302 static void
1303 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1304 {
1305 int i;
1306 u32 cid;
1307
1308 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1309 if (i == 1)
1310 cid = RX_RSS_CID;
1311 bnx2_init_rx_context(bp, cid);
1312 }
1313 }
1314
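/* Program the EMAC for the negotiated speed, duplex and pause settings,
 * then refresh the RX contexts so the TX flow control state is reflected
 * there as well.
 */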
1315 static void
1316 bnx2_set_mac_link(struct bnx2 *bp)
1317 {
1318 u32 val;
1319
1320 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1321 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1322 (bp->duplex == DUPLEX_HALF)) {
1323 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1324 }
1325
1326 /* Configure the EMAC mode register. */
1327 val = BNX2_RD(bp, BNX2_EMAC_MODE);
1328
1329 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1330 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1331 BNX2_EMAC_MODE_25G_MODE);
1332
1333 if (bp->link_up) {
1334 switch (bp->line_speed) {
1335 case SPEED_10:
1336 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1337 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1338 break;
1339 }
1340 fallthrough;
1341 case SPEED_100:
1342 val |= BNX2_EMAC_MODE_PORT_MII;
1343 break;
1344 case SPEED_2500:
1345 val |= BNX2_EMAC_MODE_25G_MODE;
1346 fallthrough;
1347 case SPEED_1000:
1348 val |= BNX2_EMAC_MODE_PORT_GMII;
1349 break;
1350 }
1351 }
1352 else {
1353 val |= BNX2_EMAC_MODE_PORT_GMII;
1354 }
1355
1356 /* Set the MAC to operate in the appropriate duplex mode. */
1357 if (bp->duplex == DUPLEX_HALF)
1358 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1359 BNX2_WR(bp, BNX2_EMAC_MODE, val);
1360
1361 /* Enable/disable rx PAUSE. */
1362 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1363
1364 if (bp->flow_ctrl & FLOW_CTRL_RX)
1365 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1366 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1367
1368 /* Enable/disable tx PAUSE. */
1369 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1370 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1371
1372 if (bp->flow_ctrl & FLOW_CTRL_TX)
1373 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1374 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1375
1376 /* Acknowledge the interrupt. */
1377 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1378
1379 bnx2_init_all_rx_contexts(bp);
1380 }
1381
1382 static void
1383 bnx2_enable_bmsr1(struct bnx2 *bp)
1384 {
1385 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1387 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388 MII_BNX2_BLK_ADDR_GP_STATUS);
1389 }
1390
1391 static void
1392 bnx2_disable_bmsr1(struct bnx2 *bp)
1393 {
1394 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1395 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1396 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398 }
1399
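/* Add 2.5G to the SerDes advertisement if the PHY supports it.  Returns
 * nonzero when 2.5G was already advertised, 0 when the bit had to be set
 * (or the PHY is not 2.5G capable).
 */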
1400 static int
1401 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1402 {
1403 u32 up1;
1404 int ret = 1;
1405
1406 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1407 return 0;
1408
1409 if (bp->autoneg & AUTONEG_SPEED)
1410 bp->advertising |= ADVERTISED_2500baseX_Full;
1411
1412 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1413 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1414
1415 bnx2_read_phy(bp, bp->mii_up1, &up1);
1416 if (!(up1 & BCM5708S_UP1_2G5)) {
1417 up1 |= BCM5708S_UP1_2G5;
1418 bnx2_write_phy(bp, bp->mii_up1, up1);
1419 ret = 0;
1420 }
1421
1422 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1423 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1424 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1425
1426 return ret;
1427 }
1428
1429 static int
1430 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1431 {
1432 u32 up1;
1433 int ret = 0;
1434
1435 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1436 return 0;
1437
1438 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1439 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1440
1441 bnx2_read_phy(bp, bp->mii_up1, &up1);
1442 if (up1 & BCM5708S_UP1_2G5) {
1443 up1 &= ~BCM5708S_UP1_2G5;
1444 bnx2_write_phy(bp, bp->mii_up1, up1);
1445 ret = 1;
1446 }
1447
1448 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1449 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1450 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1451
1452 return ret;
1453 }
1454
1455 static void
1456 bnx2_enable_forced_2g5(struct bnx2 *bp)
1457 {
1458 u32 bmcr;
1459 int err;
1460
1461 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1462 return;
1463
1464 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1465 u32 val;
1466
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468 MII_BNX2_BLK_ADDR_SERDES_DIG);
1469 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1470 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1471 val |= MII_BNX2_SD_MISC1_FORCE |
1472 MII_BNX2_SD_MISC1_FORCE_2_5G;
1473 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1474 }
1475
1476 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1477 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1478 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1479
1480 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1481 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1482 if (!err)
1483 bmcr |= BCM5708S_BMCR_FORCE_2500;
1484 } else {
1485 return;
1486 }
1487
1488 if (err)
1489 return;
1490
1491 if (bp->autoneg & AUTONEG_SPEED) {
1492 bmcr &= ~BMCR_ANENABLE;
1493 if (bp->req_duplex == DUPLEX_FULL)
1494 bmcr |= BMCR_FULLDPLX;
1495 }
1496 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1497 }
1498
1499 static void
1500 bnx2_disable_forced_2g5(struct bnx2 *bp)
1501 {
1502 u32 bmcr;
1503 int err;
1504
1505 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1506 return;
1507
1508 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1509 u32 val;
1510
1511 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1512 MII_BNX2_BLK_ADDR_SERDES_DIG);
1513 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1514 val &= ~MII_BNX2_SD_MISC1_FORCE;
1515 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1516 }
1517
1518 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1519 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1520 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1521
1522 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1523 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1524 if (!err)
1525 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1526 } else {
1527 return;
1528 }
1529
1530 if (err)
1531 return;
1532
1533 if (bp->autoneg & AUTONEG_SPEED)
1534 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1535 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1536 }
1537
1538 static void
1539 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1540 {
1541 u32 val;
1542
1543 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1544 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1545 if (start)
1546 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1547 else
1548 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1549 }
1550
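/* Re-read link status from the PHY, derive speed, duplex and flow
 * control, and reprogram the MAC.  Loopback and remote-PHY configurations
 * are handled up front and short-circuit the PHY polling.
 */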
1551 static int
1552 bnx2_set_link(struct bnx2 *bp)
1553 {
1554 u32 bmsr;
1555 u8 link_up;
1556
1557 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1558 bp->link_up = 1;
1559 return 0;
1560 }
1561
1562 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1563 return 0;
1564
1565 link_up = bp->link_up;
1566
1567 bnx2_enable_bmsr1(bp);
1568 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1569 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1570 bnx2_disable_bmsr1(bp);
1571
1572 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1573 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1574 u32 val, an_dbg;
1575
1576 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1577 bnx2_5706s_force_link_dn(bp, 0);
1578 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1579 }
1580 val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1581
1582 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1583 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1584 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1585
1586 if ((val & BNX2_EMAC_STATUS_LINK) &&
1587 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1588 bmsr |= BMSR_LSTATUS;
1589 else
1590 bmsr &= ~BMSR_LSTATUS;
1591 }
1592
1593 if (bmsr & BMSR_LSTATUS) {
1594 bp->link_up = 1;
1595
1596 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1597 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1598 bnx2_5706s_linkup(bp);
1599 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1600 bnx2_5708s_linkup(bp);
1601 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1602 bnx2_5709s_linkup(bp);
1603 }
1604 else {
1605 bnx2_copper_linkup(bp);
1606 }
1607 bnx2_resolve_flow_ctrl(bp);
1608 }
1609 else {
1610 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1611 (bp->autoneg & AUTONEG_SPEED))
1612 bnx2_disable_forced_2g5(bp);
1613
1614 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1615 u32 bmcr;
1616
1617 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1618 bmcr |= BMCR_ANENABLE;
1619 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1620
1621 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1622 }
1623 bp->link_up = 0;
1624 }
1625
1626 if (bp->link_up != link_up) {
1627 bnx2_report_link(bp);
1628 }
1629
1630 bnx2_set_mac_link(bp);
1631
1632 return 0;
1633 }
1634
1635 static int
1636 bnx2_reset_phy(struct bnx2 *bp)
1637 {
1638 int i;
1639 u32 reg;
1640
1641 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1642
1643 #define PHY_RESET_MAX_WAIT 100
1644 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1645 udelay(10);
1646
1647 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1648 if (!(reg & BMCR_RESET)) {
1649 udelay(20);
1650 break;
1651 }
1652 }
1653 if (i == PHY_RESET_MAX_WAIT) {
1654 return -EBUSY;
1655 }
1656 return 0;
1657 }
1658
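/* Translate the requested flow control mode into MII / 1000Base-X pause
 * advertisement bits (symmetric and/or asymmetric pause).
 */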
1659 static u32
1660 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1661 {
1662 u32 adv = 0;
1663
1664 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1665 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1666
1667 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668 adv = ADVERTISE_1000XPAUSE;
1669 }
1670 else {
1671 adv = ADVERTISE_PAUSE_CAP;
1672 }
1673 }
1674 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1675 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676 adv = ADVERTISE_1000XPSE_ASYM;
1677 }
1678 else {
1679 adv = ADVERTISE_PAUSE_ASYM;
1680 }
1681 }
1682 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1683 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1685 }
1686 else {
1687 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1688 }
1689 }
1690 return adv;
1691 }
1692
1693 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1694
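/* Remote-PHY link setup: encode the requested speed, duplex and pause
 * settings into the firmware mailbox argument and hand the request to the
 * bootcode; phy_lock is dropped around the bnx2_fw_sync() call.
 */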
1695 static int
1696 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1697 __releases(&bp->phy_lock)
1698 __acquires(&bp->phy_lock)
1699 {
1700 u32 speed_arg = 0, pause_adv;
1701
1702 pause_adv = bnx2_phy_get_pause_adv(bp);
1703
1704 if (bp->autoneg & AUTONEG_SPEED) {
1705 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1706 if (bp->advertising & ADVERTISED_10baseT_Half)
1707 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1708 if (bp->advertising & ADVERTISED_10baseT_Full)
1709 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1710 if (bp->advertising & ADVERTISED_100baseT_Half)
1711 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1712 if (bp->advertising & ADVERTISED_100baseT_Full)
1713 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1714 if (bp->advertising & ADVERTISED_1000baseT_Full)
1715 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1716 if (bp->advertising & ADVERTISED_2500baseX_Full)
1717 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1718 } else {
1719 if (bp->req_line_speed == SPEED_2500)
1720 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1721 else if (bp->req_line_speed == SPEED_1000)
1722 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1723 else if (bp->req_line_speed == SPEED_100) {
1724 if (bp->req_duplex == DUPLEX_FULL)
1725 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1726 else
1727 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1728 } else if (bp->req_line_speed == SPEED_10) {
1729 if (bp->req_duplex == DUPLEX_FULL)
1730 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1731 else
1732 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1733 }
1734 }
1735
1736 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1737 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1738 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1739 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1740
1741 if (port == PORT_TP)
1742 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1743 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1744
1745 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1746
1747 spin_unlock_bh(&bp->phy_lock);
1748 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1749 spin_lock_bh(&bp->phy_lock);
1750
1751 return 0;
1752 }
1753
1754 static int
1755 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1756 __releases(&bp->phy_lock)
1757 __acquires(&bp->phy_lock)
1758 {
1759 u32 adv, bmcr;
1760 u32 new_adv = 0;
1761
1762 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1763 return bnx2_setup_remote_phy(bp, port);
1764
1765 if (!(bp->autoneg & AUTONEG_SPEED)) {
1766 u32 new_bmcr;
1767 int force_link_down = 0;
1768
1769 if (bp->req_line_speed == SPEED_2500) {
1770 if (!bnx2_test_and_enable_2g5(bp))
1771 force_link_down = 1;
1772 } else if (bp->req_line_speed == SPEED_1000) {
1773 if (bnx2_test_and_disable_2g5(bp))
1774 force_link_down = 1;
1775 }
1776 bnx2_read_phy(bp, bp->mii_adv, &adv);
1777 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1778
1779 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1780 new_bmcr = bmcr & ~BMCR_ANENABLE;
1781 new_bmcr |= BMCR_SPEED1000;
1782
1783 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1784 if (bp->req_line_speed == SPEED_2500)
1785 bnx2_enable_forced_2g5(bp);
1786 else if (bp->req_line_speed == SPEED_1000) {
1787 bnx2_disable_forced_2g5(bp);
1788 new_bmcr &= ~0x2000;
1789 }
1790
1791 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1792 if (bp->req_line_speed == SPEED_2500)
1793 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1794 else
1795 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1796 }
1797
1798 if (bp->req_duplex == DUPLEX_FULL) {
1799 adv |= ADVERTISE_1000XFULL;
1800 new_bmcr |= BMCR_FULLDPLX;
1801 }
1802 else {
1803 adv |= ADVERTISE_1000XHALF;
1804 new_bmcr &= ~BMCR_FULLDPLX;
1805 }
1806 if ((new_bmcr != bmcr) || (force_link_down)) {
1807 /* Force a link down visible on the other side */
1808 if (bp->link_up) {
1809 bnx2_write_phy(bp, bp->mii_adv, adv &
1810 ~(ADVERTISE_1000XFULL |
1811 ADVERTISE_1000XHALF));
1812 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1813 BMCR_ANRESTART | BMCR_ANENABLE);
1814
1815 bp->link_up = 0;
1816 netif_carrier_off(bp->dev);
1817 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1818 bnx2_report_link(bp);
1819 }
1820 bnx2_write_phy(bp, bp->mii_adv, adv);
1821 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1822 } else {
1823 bnx2_resolve_flow_ctrl(bp);
1824 bnx2_set_mac_link(bp);
1825 }
1826 return 0;
1827 }
1828
1829 bnx2_test_and_enable_2g5(bp);
1830
1831 if (bp->advertising & ADVERTISED_1000baseT_Full)
1832 new_adv |= ADVERTISE_1000XFULL;
1833
1834 new_adv |= bnx2_phy_get_pause_adv(bp);
1835
1836 bnx2_read_phy(bp, bp->mii_adv, &adv);
1837 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1838
1839 bp->serdes_an_pending = 0;
1840 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1841 /* Force a link down visible on the other side */
1842 if (bp->link_up) {
1843 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1844 spin_unlock_bh(&bp->phy_lock);
1845 msleep(20);
1846 spin_lock_bh(&bp->phy_lock);
1847 }
1848
1849 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1850 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1851 BMCR_ANENABLE);
1852 /* Speed up link-up time when the link partner
1853 * does not autonegotiate which is very common
1854 * in blade servers. Some blade servers use
1855 * IPMI for keyboard input and it's important
1856 * to minimize link disruptions. Autoneg. involves
1857 * exchanging base pages plus 3 next pages and
1858 * normally completes in about 120 msec.
1859 */
1860 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1861 bp->serdes_an_pending = 1;
1862 mod_timer(&bp->timer, jiffies + bp->current_interval);
1863 } else {
1864 bnx2_resolve_flow_ctrl(bp);
1865 bnx2_set_mac_link(bp);
1866 }
1867
1868 return 0;
1869 }
1870
1871 #define ETHTOOL_ALL_FIBRE_SPEED \
1872 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1873 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1874 (ADVERTISED_1000baseT_Full)
1875
1876 #define ETHTOOL_ALL_COPPER_SPEED \
1877 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1878 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1879 ADVERTISED_1000baseT_Full)
1880
1881 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1882 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1883
1884 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885
1886 static void
1887 bnx2_set_default_remote_link(struct bnx2 *bp)
1888 {
1889 u32 link;
1890
1891 if (bp->phy_port == PORT_TP)
1892 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893 else
1894 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1895
1896 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1897 bp->req_line_speed = 0;
1898 bp->autoneg |= AUTONEG_SPEED;
1899 bp->advertising = ADVERTISED_Autoneg;
1900 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1901 bp->advertising |= ADVERTISED_10baseT_Half;
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1903 bp->advertising |= ADVERTISED_10baseT_Full;
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905 bp->advertising |= ADVERTISED_100baseT_Half;
1906 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1907 bp->advertising |= ADVERTISED_100baseT_Full;
1908 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1909 bp->advertising |= ADVERTISED_1000baseT_Full;
1910 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1911 bp->advertising |= ADVERTISED_2500baseX_Full;
1912 } else {
1913 bp->autoneg = 0;
1914 bp->advertising = 0;
1915 bp->req_duplex = DUPLEX_FULL;
1916 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1917 bp->req_line_speed = SPEED_10;
1918 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1919 bp->req_duplex = DUPLEX_HALF;
1920 }
1921 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1922 bp->req_line_speed = SPEED_100;
1923 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1924 bp->req_duplex = DUPLEX_HALF;
1925 }
1926 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1927 bp->req_line_speed = SPEED_1000;
1928 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1929 bp->req_line_speed = SPEED_2500;
1930 }
1931 }
1932
1933 static void
1934 bnx2_set_default_link(struct bnx2 *bp)
1935 {
1936 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1937 bnx2_set_default_remote_link(bp);
1938 return;
1939 }
1940
1941 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1942 bp->req_line_speed = 0;
1943 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1944 u32 reg;
1945
1946 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947
1948 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1949 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1950 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1951 bp->autoneg = 0;
1952 bp->req_line_speed = bp->line_speed = SPEED_1000;
1953 bp->req_duplex = DUPLEX_FULL;
1954 }
1955 } else
1956 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1957 }
1958
1959 static void
1960 bnx2_send_heart_beat(struct bnx2 *bp)
1961 {
1962 u32 msg;
1963 u32 addr;
1964
1965 spin_lock(&bp->indirect_lock);
1966 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1967 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1968 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1969 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1970 spin_unlock(&bp->indirect_lock);
1971 }
1972
1973 static void
1974 bnx2_remote_phy_event(struct bnx2 *bp)
1975 {
1976 u32 msg;
1977 u8 link_up = bp->link_up;
1978 u8 old_port;
1979
1980 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1981
1982 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1983 bnx2_send_heart_beat(bp);
1984
1985 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1986
1987 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1988 bp->link_up = 0;
1989 else {
1990 u32 speed;
1991
1992 bp->link_up = 1;
1993 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1994 bp->duplex = DUPLEX_FULL;
1995 switch (speed) {
1996 case BNX2_LINK_STATUS_10HALF:
1997 bp->duplex = DUPLEX_HALF;
1998 fallthrough;
1999 case BNX2_LINK_STATUS_10FULL:
2000 bp->line_speed = SPEED_10;
2001 break;
2002 case BNX2_LINK_STATUS_100HALF:
2003 bp->duplex = DUPLEX_HALF;
2004 fallthrough;
2005 case BNX2_LINK_STATUS_100BASE_T4:
2006 case BNX2_LINK_STATUS_100FULL:
2007 bp->line_speed = SPEED_100;
2008 break;
2009 case BNX2_LINK_STATUS_1000HALF:
2010 bp->duplex = DUPLEX_HALF;
2011 fallthrough;
2012 case BNX2_LINK_STATUS_1000FULL:
2013 bp->line_speed = SPEED_1000;
2014 break;
2015 case BNX2_LINK_STATUS_2500HALF:
2016 bp->duplex = DUPLEX_HALF;
2017 fallthrough;
2018 case BNX2_LINK_STATUS_2500FULL:
2019 bp->line_speed = SPEED_2500;
2020 break;
2021 default:
2022 bp->line_speed = 0;
2023 break;
2024 }
2025
2026 bp->flow_ctrl = 0;
2027 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2028 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2029 if (bp->duplex == DUPLEX_FULL)
2030 bp->flow_ctrl = bp->req_flow_ctrl;
2031 } else {
2032 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2033 bp->flow_ctrl |= FLOW_CTRL_TX;
2034 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2035 bp->flow_ctrl |= FLOW_CTRL_RX;
2036 }
2037
2038 old_port = bp->phy_port;
2039 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2040 bp->phy_port = PORT_FIBRE;
2041 else
2042 bp->phy_port = PORT_TP;
2043
2044 if (old_port != bp->phy_port)
2045 bnx2_set_default_link(bp);
2046
2047 }
2048 if (bp->link_up != link_up)
2049 bnx2_report_link(bp);
2050
2051 bnx2_set_mac_link(bp);
2052 }
2053
2054 static int
2055 bnx2_set_remote_link(struct bnx2 *bp)
2056 {
2057 u32 evt_code;
2058
2059 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060 switch (evt_code) {
2061 case BNX2_FW_EVT_CODE_LINK_EVENT:
2062 bnx2_remote_phy_event(bp);
2063 break;
2064 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065 default:
2066 bnx2_send_heart_beat(bp);
2067 break;
2068 }
2069 return 0;
2070 }
2071
2072 static int
2073 bnx2_setup_copper_phy(struct bnx2 *bp)
2074 __releases(&bp->phy_lock)
2075 __acquires(&bp->phy_lock)
2076 {
2077 u32 bmcr, adv_reg, new_adv = 0;
2078 u32 new_bmcr;
2079
2080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2081
2082 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2083 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2084 ADVERTISE_PAUSE_ASYM);
2085
2086 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2087
2088 if (bp->autoneg & AUTONEG_SPEED) {
2089 u32 adv1000_reg;
2090 u32 new_adv1000 = 0;
2091
2092 new_adv |= bnx2_phy_get_pause_adv(bp);
2093
2094 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2095 adv1000_reg &= PHY_ALL_1000_SPEED;
2096
2097 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2098 if ((adv1000_reg != new_adv1000) ||
2099 (adv_reg != new_adv) ||
2100 ((bmcr & BMCR_ANENABLE) == 0)) {
2101
2102 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2103 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2104 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2105 BMCR_ANENABLE);
2106 }
2107 else if (bp->link_up) {
2108 /* Flow ctrl may have changed from auto to forced
2109  * or vice-versa. */
2110
2111 bnx2_resolve_flow_ctrl(bp);
2112 bnx2_set_mac_link(bp);
2113 }
2114 return 0;
2115 }
2116
2117 /* advertise nothing when forcing speed */
2118 if (adv_reg != new_adv)
2119 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2120
2121 new_bmcr = 0;
2122 if (bp->req_line_speed == SPEED_100) {
2123 new_bmcr |= BMCR_SPEED100;
2124 }
2125 if (bp->req_duplex == DUPLEX_FULL) {
2126 new_bmcr |= BMCR_FULLDPLX;
2127 }
2128 if (new_bmcr != bmcr) {
2129 u32 bmsr;
2130
2131 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2133
2134 if (bmsr & BMSR_LSTATUS) {
2135 /* Force link down */
2136 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2137 spin_unlock_bh(&bp->phy_lock);
2138 msleep(50);
2139 spin_lock_bh(&bp->phy_lock);
2140
2141 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2143 }
2144
2145 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2146
2147 /* Normally, the new speed is set up after the link has
2148 * gone down and up again. In some cases, link will not go
2149 * down so we need to set up the new speed here.
2150 */
2151 if (bmsr & BMSR_LSTATUS) {
2152 bp->line_speed = bp->req_line_speed;
2153 bp->duplex = bp->req_duplex;
2154 bnx2_resolve_flow_ctrl(bp);
2155 bnx2_set_mac_link(bp);
2156 }
2157 } else {
2158 bnx2_resolve_flow_ctrl(bp);
2159 bnx2_set_mac_link(bp);
2160 }
2161 return 0;
2162 }
2163
2164 static int
2165 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166 __releases(&bp->phy_lock)
2167 __acquires(&bp->phy_lock)
2168 {
2169 if (bp->loopback == MAC_LOOPBACK)
2170 return 0;
2171
2172 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173 return bnx2_setup_serdes_phy(bp, port);
2174 }
2175 else {
2176 return bnx2_setup_copper_phy(bp);
2177 }
2178 }
2179
2180 static int
2181 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2182 {
2183 u32 val;
2184
2185 bp->mii_bmcr = MII_BMCR + 0x10;
2186 bp->mii_bmsr = MII_BMSR + 0x10;
2187 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2188 bp->mii_adv = MII_ADVERTISE + 0x10;
2189 bp->mii_lpa = MII_LPA + 0x10;
2190 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2191
2192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2193 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2194
2195 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2196 if (reset_phy)
2197 bnx2_reset_phy(bp);
2198
2199 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2200
2201 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2202 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2203 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2204 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2205
2206 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2207 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2208 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2209 val |= BCM5708S_UP1_2G5;
2210 else
2211 val &= ~BCM5708S_UP1_2G5;
2212 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2213
2214 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2215 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2216 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2217 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2218
2219 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2220
2221 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2222 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2223 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2224
2225 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2226
2227 return 0;
2228 }
2229
2230 static int
2231 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2232 {
2233 u32 val;
2234
2235 if (reset_phy)
2236 bnx2_reset_phy(bp);
2237
2238 bp->mii_up1 = BCM5708S_UP1;
2239
2240 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2241 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2242 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243
2244 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2245 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2246 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2247
2248 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2249 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2250 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2251
2252 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2253 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2254 val |= BCM5708S_UP1_2G5;
2255 bnx2_write_phy(bp, BCM5708S_UP1, val);
2256 }
2257
2258 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2259 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2260 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2261 /* increase tx signal amplitude */
2262 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 BCM5708S_BLK_ADDR_TX_MISC);
2264 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2265 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2266 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2267 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2268 }
2269
2270 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2271 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2272
2273 if (val) {
2274 u32 is_backplane;
2275
2276 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2277 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2278 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2279 BCM5708S_BLK_ADDR_TX_MISC);
2280 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2281 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2282 BCM5708S_BLK_ADDR_DIG);
2283 }
2284 }
2285 return 0;
2286 }
2287
2288 static int
2289 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2290 {
2291 if (reset_phy)
2292 bnx2_reset_phy(bp);
2293
2294 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2295
2296 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2297 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2298
2299 if (bp->dev->mtu > ETH_DATA_LEN) {
2300 u32 val;
2301
2302 /* Set extended packet length bit */
2303 bnx2_write_phy(bp, 0x18, 0x7);
2304 bnx2_read_phy(bp, 0x18, &val);
2305 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2306
2307 bnx2_write_phy(bp, 0x1c, 0x6c00);
2308 bnx2_read_phy(bp, 0x1c, &val);
2309 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2310 }
2311 else {
2312 u32 val;
2313
2314 bnx2_write_phy(bp, 0x18, 0x7);
2315 bnx2_read_phy(bp, 0x18, &val);
2316 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2317
2318 bnx2_write_phy(bp, 0x1c, 0x6c00);
2319 bnx2_read_phy(bp, 0x1c, &val);
2320 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2321 }
2322
2323 return 0;
2324 }
2325
2326 static int
2327 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2328 {
2329 u32 val;
2330
2331 if (reset_phy)
2332 bnx2_reset_phy(bp);
2333
2334 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2335 bnx2_write_phy(bp, 0x18, 0x0c00);
2336 bnx2_write_phy(bp, 0x17, 0x000a);
2337 bnx2_write_phy(bp, 0x15, 0x310b);
2338 bnx2_write_phy(bp, 0x17, 0x201f);
2339 bnx2_write_phy(bp, 0x15, 0x9506);
2340 bnx2_write_phy(bp, 0x17, 0x401f);
2341 bnx2_write_phy(bp, 0x15, 0x14e2);
2342 bnx2_write_phy(bp, 0x18, 0x0400);
2343 }
2344
2345 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2346 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2347 MII_BNX2_DSP_EXPAND_REG | 0x8);
2348 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2349 val &= ~(1 << 8);
2350 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2351 }
2352
2353 if (bp->dev->mtu > ETH_DATA_LEN) {
2354 /* Set extended packet length bit */
2355 bnx2_write_phy(bp, 0x18, 0x7);
2356 bnx2_read_phy(bp, 0x18, &val);
2357 bnx2_write_phy(bp, 0x18, val | 0x4000);
2358
2359 bnx2_read_phy(bp, 0x10, &val);
2360 bnx2_write_phy(bp, 0x10, val | 0x1);
2361 }
2362 else {
2363 bnx2_write_phy(bp, 0x18, 0x7);
2364 bnx2_read_phy(bp, 0x18, &val);
2365 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2366
2367 bnx2_read_phy(bp, 0x10, &val);
2368 bnx2_write_phy(bp, 0x10, val & ~0x1);
2369 }
2370
2371 /* ethernet@wirespeed */
2372 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2373 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2374 val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2375
2376 /* auto-mdix */
2377 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2378 val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2379
2380 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2381 return 0;
2382 }
2383
2384
2385 static int
2386 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2387 __releases(&bp->phy_lock)
2388 __acquires(&bp->phy_lock)
2389 {
2390 u32 val;
2391 int rc = 0;
2392
2393 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2394 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2395
2396 bp->mii_bmcr = MII_BMCR;
2397 bp->mii_bmsr = MII_BMSR;
2398 bp->mii_bmsr1 = MII_BMSR;
2399 bp->mii_adv = MII_ADVERTISE;
2400 bp->mii_lpa = MII_LPA;
2401
2402 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2403
2404 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2405 goto setup_phy;
2406
2407 bnx2_read_phy(bp, MII_PHYSID1, &val);
2408 bp->phy_id = val << 16;
2409 bnx2_read_phy(bp, MII_PHYSID2, &val);
2410 bp->phy_id |= val & 0xffff;
2411
2412 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2413 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2414 rc = bnx2_init_5706s_phy(bp, reset_phy);
2415 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2416 rc = bnx2_init_5708s_phy(bp, reset_phy);
2417 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2418 rc = bnx2_init_5709s_phy(bp, reset_phy);
2419 }
2420 else {
2421 rc = bnx2_init_copper_phy(bp, reset_phy);
2422 }
2423
2424 setup_phy:
2425 if (!rc)
2426 rc = bnx2_setup_phy(bp, bp->phy_port);
2427
2428 return rc;
2429 }
2430
2431 static int
2432 bnx2_set_mac_loopback(struct bnx2 *bp)
2433 {
2434 u32 mac_mode;
2435
2436 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440 bp->link_up = 1;
2441 return 0;
2442 }
2443
2444 static int bnx2_test_link(struct bnx2 *);
2445
2446 static int
2447 bnx2_set_phy_loopback(struct bnx2 *bp)
2448 {
2449 u32 mac_mode;
2450 int rc, i;
2451
2452 spin_lock_bh(&bp->phy_lock);
2453 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2454 BMCR_SPEED1000);
2455 spin_unlock_bh(&bp->phy_lock);
2456 if (rc)
2457 return rc;
2458
2459 for (i = 0; i < 10; i++) {
2460 if (bnx2_test_link(bp) == 0)
2461 break;
2462 msleep(100);
2463 }
2464
2465 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2466 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2467 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2468 BNX2_EMAC_MODE_25G_MODE);
2469
2470 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2471 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2472 bp->link_up = 1;
2473 return 0;
2474 }
2475
2476 static void
2477 bnx2_dump_mcp_state(struct bnx2 *bp)
2478 {
2479 struct net_device *dev = bp->dev;
2480 u32 mcp_p0, mcp_p1;
2481
2482 netdev_err(dev, "<--- start MCP states dump --->\n");
2483 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2484 mcp_p0 = BNX2_MCP_STATE_P0;
2485 mcp_p1 = BNX2_MCP_STATE_P1;
2486 } else {
2487 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2488 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2489 }
2490 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2491 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2492 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2493 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2494 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2495 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2496 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2497 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2499 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2500 netdev_err(dev, "DEBUG: shmem states:\n");
2501 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2502 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2503 bnx2_shmem_rd(bp, BNX2_FW_MB),
2504 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2505 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2506 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2507 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2508 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2509 pr_cont(" condition[%08x]\n",
2510 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2511 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2512 DP_SHMEM_LINE(bp, 0x3cc);
2513 DP_SHMEM_LINE(bp, 0x3dc);
2514 DP_SHMEM_LINE(bp, 0x3ec);
2515 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2516 netdev_err(dev, "<--- end MCP states dump --->\n");
2517 }
2518
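/* Post a command to the firmware mailbox and, when requested, poll for an
 * acknowledgement carrying the same sequence number.  On timeout a
 * FW_TIMEOUT code is written back so the bootcode knows the handshake
 * failed.
 */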
2519 static int
2520 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2521 {
2522 int i;
2523 u32 val;
2524
2525 bp->fw_wr_seq++;
2526 msg_data |= bp->fw_wr_seq;
2527 bp->fw_last_msg = msg_data;
2528
2529 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2530
2531 if (!ack)
2532 return 0;
2533
2534 /* wait for an acknowledgement. */
2535 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2536 msleep(10);
2537
2538 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2539
2540 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2541 break;
2542 }
2543 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2544 return 0;
2545
2546 /* If we timed out, inform the firmware that this is the case. */
2547 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2548 msg_data &= ~BNX2_DRV_MSG_CODE;
2549 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2550
2551 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2552 if (!silent) {
2553 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2554 bnx2_dump_mcp_state(bp);
2555 }
2556
2557 return -EBUSY;
2558 }
2559
2560 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2561 return -EIO;
2562
2563 return 0;
2564 }
2565
2566 static int
2567 bnx2_init_5709_context(struct bnx2 *bp)
2568 {
2569 int i, ret = 0;
2570 u32 val;
2571
2572 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2573 val |= (BNX2_PAGE_BITS - 8) << 16;
2574 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2575 for (i = 0; i < 10; i++) {
2576 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2577 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2578 break;
2579 udelay(2);
2580 }
2581 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2582 return -EBUSY;
2583
2584 for (i = 0; i < bp->ctx_pages; i++) {
2585 int j;
2586
2587 if (bp->ctx_blk[i])
2588 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2589 else
2590 return -ENOMEM;
2591
2592 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2593 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2594 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2595 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2596 (u64) bp->ctx_blk_mapping[i] >> 32);
2597 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2598 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2599 for (j = 0; j < 10; j++) {
2600
2601 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2602 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2603 break;
2604 udelay(5);
2605 }
2606 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2607 ret = -EBUSY;
2608 break;
2609 }
2610 }
2611 return ret;
2612 }
2613
2614 static void
2615 bnx2_init_context(struct bnx2 *bp)
2616 {
2617 u32 vcid;
2618
2619 vcid = 96;
2620 while (vcid) {
2621 u32 vcid_addr, pcid_addr, offset;
2622 int i;
2623
2624 vcid--;
2625
2626 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2627 u32 new_vcid;
2628
2629 vcid_addr = GET_PCID_ADDR(vcid);
2630 if (vcid & 0x8) {
2631 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2632 }
2633 else {
2634 new_vcid = vcid;
2635 }
2636 pcid_addr = GET_PCID_ADDR(new_vcid);
2637 }
2638 else {
2639 vcid_addr = GET_CID_ADDR(vcid);
2640 pcid_addr = vcid_addr;
2641 }
2642
2643 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2644 vcid_addr += (i << PHY_CTX_SHIFT);
2645 pcid_addr += (i << PHY_CTX_SHIFT);
2646
2647 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2648 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2649
2650 /* Zero out the context. */
2651 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2652 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2653 }
2654 }
2655 }
2656
2657 static int
2658 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2659 {
2660 u16 *good_mbuf;
2661 u32 good_mbuf_cnt;
2662 u32 val;
2663
2664 good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2665 if (!good_mbuf)
2666 return -ENOMEM;
2667
2668 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2669 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2670
2671 good_mbuf_cnt = 0;
2672
2673 /* Allocate a bunch of mbufs and save the good ones in an array. */
2674 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2675 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2676 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2677 BNX2_RBUF_COMMAND_ALLOC_REQ);
2678
2679 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2680
2681 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2682
2683 /* The addresses with Bit 9 set are bad memory blocks. */
2684 if (!(val & (1 << 9))) {
2685 good_mbuf[good_mbuf_cnt] = (u16) val;
2686 good_mbuf_cnt++;
2687 }
2688
2689 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2690 }
2691
2692 /* Free the good ones back to the mbuf pool, thus discarding
2693 * all the bad ones. */
2694 while (good_mbuf_cnt) {
2695 good_mbuf_cnt--;
2696
2697 val = good_mbuf[good_mbuf_cnt];
2698 val = (val << 9) | val | 1;
2699
2700 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2701 }
2702 kfree(good_mbuf);
2703 return 0;
2704 }
2705
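/* Program a MAC address into perfect-match slot "pos": the two high-order
 * bytes go into the EMAC_MAC_MATCH0 register of the pair and the remaining
 * four bytes into EMAC_MAC_MATCH1.
 */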
2706 static void
2707 bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2708 {
2709 u32 val;
2710
2711 val = (mac_addr[0] << 8) | mac_addr[1];
2712
2713 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714
2715 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716 (mac_addr[4] << 8) | mac_addr[5];
2717
2718 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719 }
2720
2721 static inline int
2722 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2723 {
2724 dma_addr_t mapping;
2725 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2726 struct bnx2_rx_bd *rxbd =
2727 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2728 struct page *page = alloc_page(gfp);
2729
2730 if (!page)
2731 return -ENOMEM;
2732 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2733 DMA_FROM_DEVICE);
2734 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2735 __free_page(page);
2736 return -EIO;
2737 }
2738
2739 rx_pg->page = page;
2740 dma_unmap_addr_set(rx_pg, mapping, mapping);
2741 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2742 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2743 return 0;
2744 }
2745
2746 static void
2747 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748 {
2749 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750 struct page *page = rx_pg->page;
2751
2752 if (!page)
2753 return;
2754
2755 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2756 PAGE_SIZE, DMA_FROM_DEVICE);
2757
2758 __free_page(page);
2759 rx_pg->page = NULL;
2760 }
2761
2762 static inline int
2763 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2764 {
2765 u8 *data;
2766 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2767 dma_addr_t mapping;
2768 struct bnx2_rx_bd *rxbd =
2769 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2770
2771 data = kmalloc(bp->rx_buf_size, gfp);
2772 if (!data)
2773 return -ENOMEM;
2774
2775 mapping = dma_map_single(&bp->pdev->dev,
2776 get_l2_fhdr(data),
2777 bp->rx_buf_use_size,
2778 DMA_FROM_DEVICE);
2779 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2780 kfree(data);
2781 return -EIO;
2782 }
2783
2784 rx_buf->data = data;
2785 dma_unmap_addr_set(rx_buf, mapping, mapping);
2786
2787 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2788 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2789
2790 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2791
2792 return 0;
2793 }
2794
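/* An attention event is pending when the raw attention bit differs from its
 * acknowledged copy; writing the set/clear command brings the ack bit back
 * in line, which consumes the event.
 */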
2795 static int
2796 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797 {
2798 struct status_block *sblk = bnapi->status_blk.msi;
2799 u32 new_link_state, old_link_state;
2800 int is_set = 1;
2801
2802 new_link_state = sblk->status_attn_bits & event;
2803 old_link_state = sblk->status_attn_bits_ack & event;
2804 if (new_link_state != old_link_state) {
2805 if (new_link_state)
2806 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807 else
2808 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809 } else
2810 is_set = 0;
2811
2812 return is_set;
2813 }
2814
2815 static void
2816 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2817 {
2818 spin_lock(&bp->phy_lock);
2819
2820 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2821 bnx2_set_link(bp);
2822 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2823 bnx2_set_remote_link(bp);
2824
2825 spin_unlock(&bp->phy_lock);
2826
2827 }
2828
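/* A hardware consumer index that lands on the last slot of a ring page is
 * advanced past it; that slot holds the chain pointer to the next page
 * rather than a packet descriptor.
 */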
2829 static inline u16
2830 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2831 {
2832 u16 cons;
2833
2834 cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2835
2836 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2837 cons++;
2838 return cons;
2839 }
2840
2841 static int
2842 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2843 {
2844 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2845 u16 hw_cons, sw_cons, sw_ring_cons;
2846 int tx_pkt = 0, index;
2847 unsigned int tx_bytes = 0;
2848 struct netdev_queue *txq;
2849
2850 index = (bnapi - bp->bnx2_napi);
2851 txq = netdev_get_tx_queue(bp->dev, index);
2852
2853 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2854 sw_cons = txr->tx_cons;
2855
2856 while (sw_cons != hw_cons) {
2857 struct bnx2_sw_tx_bd *tx_buf;
2858 struct sk_buff *skb;
2859 int i, last;
2860
2861 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2862
2863 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2864 skb = tx_buf->skb;
2865
2866 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2867 prefetch(&skb->end);
2868
2869 /* partial BD completions possible with TSO packets */
2870 if (tx_buf->is_gso) {
2871 u16 last_idx, last_ring_idx;
2872
2873 last_idx = sw_cons + tx_buf->nr_frags + 1;
2874 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2875 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2876 last_idx++;
2877 }
2878 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2879 break;
2880 }
2881 }
2882
2883 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2884 skb_headlen(skb), DMA_TO_DEVICE);
2885
2886 tx_buf->skb = NULL;
2887 last = tx_buf->nr_frags;
2888
2889 for (i = 0; i < last; i++) {
2890 struct bnx2_sw_tx_bd *tx_buf;
2891
2892 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2893
2894 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2895 dma_unmap_page(&bp->pdev->dev,
2896 dma_unmap_addr(tx_buf, mapping),
2897 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2898 DMA_TO_DEVICE);
2899 }
2900
2901 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2902
2903 tx_bytes += skb->len;
2904 dev_kfree_skb_any(skb);
2905 tx_pkt++;
2906 if (tx_pkt == budget)
2907 break;
2908
2909 if (hw_cons == sw_cons)
2910 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2911 }
2912
2913 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2914 txr->hw_tx_cons = hw_cons;
2915 txr->tx_cons = sw_cons;
2916
2917 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2918 * before checking for netif_tx_queue_stopped(). Without the
2919 * memory barrier, there is a small possibility that bnx2_start_xmit()
2920 * will miss it and cause the queue to be stopped forever.
2921 */
2922 smp_mb();
2923
2924 if (unlikely(netif_tx_queue_stopped(txq)) &&
2925 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2926 __netif_tx_lock(txq, smp_processor_id());
2927 if ((netif_tx_queue_stopped(txq)) &&
2928 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2929 netif_tx_wake_queue(txq);
2930 __netif_tx_unlock(txq);
2931 }
2932
2933 return tx_pkt;
2934 }
2935
2936 static void
2937 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2938 struct sk_buff *skb, int count)
2939 {
2940 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2941 struct bnx2_rx_bd *cons_bd, *prod_bd;
2942 int i;
2943 u16 hw_prod, prod;
2944 u16 cons = rxr->rx_pg_cons;
2945
2946 cons_rx_pg = &rxr->rx_pg_ring[cons];
2947
2948 /* The caller was unable to allocate a new page to replace the
2949 * last one in the frags array, so we need to recycle that page
2950 * and then free the skb.
2951 */
2952 if (skb) {
2953 struct page *page;
2954 struct skb_shared_info *shinfo;
2955
2956 shinfo = skb_shinfo(skb);
2957 shinfo->nr_frags--;
2958 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2959
2960 cons_rx_pg->page = page;
2961 dev_kfree_skb(skb);
2962 }
2963
2964 hw_prod = rxr->rx_pg_prod;
2965
2966 for (i = 0; i < count; i++) {
2967 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2968
2969 prod_rx_pg = &rxr->rx_pg_ring[prod];
2970 cons_rx_pg = &rxr->rx_pg_ring[cons];
2971 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2972 [BNX2_RX_IDX(cons)];
2973 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2974 [BNX2_RX_IDX(prod)];
2975
2976 if (prod != cons) {
2977 prod_rx_pg->page = cons_rx_pg->page;
2978 cons_rx_pg->page = NULL;
2979 dma_unmap_addr_set(prod_rx_pg, mapping,
2980 dma_unmap_addr(cons_rx_pg, mapping));
2981
2982 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2983 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2984
2985 }
2986 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2987 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2988 }
2989 rxr->rx_pg_prod = hw_prod;
2990 rxr->rx_pg_cons = cons;
2991 }
2992
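/* Recycle an rx data buffer into the producer slot.  Only the small header
 * area that was synced to the CPU is handed back to the device; when the
 * consumer and producer slots differ, the DMA address and buffer descriptor
 * are copied over as well.
 */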
2993 static inline void
2994 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2995 u8 *data, u16 cons, u16 prod)
2996 {
2997 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2998 struct bnx2_rx_bd *cons_bd, *prod_bd;
2999
3000 cons_rx_buf = &rxr->rx_buf_ring[cons];
3001 prod_rx_buf = &rxr->rx_buf_ring[prod];
3002
3003 dma_sync_single_for_device(&bp->pdev->dev,
3004 dma_unmap_addr(cons_rx_buf, mapping),
3005 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3006
3007 rxr->rx_prod_bseq += bp->rx_buf_use_size;
3008
3009 prod_rx_buf->data = data;
3010
3011 if (cons == prod)
3012 return;
3013
3014 dma_unmap_addr_set(prod_rx_buf, mapping,
3015 dma_unmap_addr(cons_rx_buf, mapping));
3016
3017 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3018 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3019 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3020 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3021 }
3022
3023 static struct sk_buff *
3024 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3025 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3026 u32 ring_idx)
3027 {
3028 int err;
3029 u16 prod = ring_idx & 0xffff;
3030 struct sk_buff *skb;
3031
3032 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3033 if (unlikely(err)) {
3034 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3035 error:
3036 if (hdr_len) {
3037 unsigned int raw_len = len + 4;
3038 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3039
3040 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3041 }
3042 return NULL;
3043 }
3044
3045 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3046 DMA_FROM_DEVICE);
3047 skb = slab_build_skb(data);
3048 if (!skb) {
3049 kfree(data);
3050 goto error;
3051 }
3052 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3053 if (hdr_len == 0) {
3054 skb_put(skb, len);
3055 return skb;
3056 } else {
3057 unsigned int i, frag_len, frag_size, pages;
3058 struct bnx2_sw_pg *rx_pg;
3059 u16 pg_cons = rxr->rx_pg_cons;
3060 u16 pg_prod = rxr->rx_pg_prod;
3061
3062 frag_size = len + 4 - hdr_len;
3063 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3064 skb_put(skb, hdr_len);
3065
3066 for (i = 0; i < pages; i++) {
3067 dma_addr_t mapping_old;
3068
3069 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3070 if (unlikely(frag_len <= 4)) {
3071 unsigned int tail = 4 - frag_len;
3072
3073 rxr->rx_pg_cons = pg_cons;
3074 rxr->rx_pg_prod = pg_prod;
3075 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3076 pages - i);
3077 skb->len -= tail;
3078 if (i == 0) {
3079 skb->tail -= tail;
3080 } else {
3081 skb_frag_t *frag =
3082 &skb_shinfo(skb)->frags[i - 1];
3083 skb_frag_size_sub(frag, tail);
3084 skb->data_len -= tail;
3085 }
3086 return skb;
3087 }
3088 rx_pg = &rxr->rx_pg_ring[pg_cons];
3089
3090 /* Don't unmap yet. If we're unable to allocate a new
3091 * page, we need to recycle the page and the DMA addr.
3092 */
3093 mapping_old = dma_unmap_addr(rx_pg, mapping);
3094 if (i == pages - 1)
3095 frag_len -= 4;
3096
3097 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3098 rx_pg->page = NULL;
3099
3100 err = bnx2_alloc_rx_page(bp, rxr,
3101 BNX2_RX_PG_RING_IDX(pg_prod),
3102 GFP_ATOMIC);
3103 if (unlikely(err)) {
3104 rxr->rx_pg_cons = pg_cons;
3105 rxr->rx_pg_prod = pg_prod;
3106 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3107 pages - i);
3108 return NULL;
3109 }
3110
3111 dma_unmap_page(&bp->pdev->dev, mapping_old,
3112 PAGE_SIZE, DMA_FROM_DEVICE);
3113
3114 frag_size -= frag_len;
3115 skb->data_len += frag_len;
3116 skb->truesize += PAGE_SIZE;
3117 skb->len += frag_len;
3118
3119 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3120 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3121 }
3122 rxr->rx_pg_prod = pg_prod;
3123 rxr->rx_pg_cons = pg_cons;
3124 }
3125 return skb;
3126 }
3127
3128 static inline u16
3129 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3130 {
3131 u16 cons;
3132
3133 cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3134
3135 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3136 cons++;
3137 return cons;
3138 }
3139
3140 static int
3141 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3142 {
3143 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3144 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3145 struct l2_fhdr *rx_hdr;
3146 int rx_pkt = 0, pg_ring_used = 0;
3147
3148 if (budget <= 0)
3149 return rx_pkt;
3150
3151 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3152 sw_cons = rxr->rx_cons;
3153 sw_prod = rxr->rx_prod;
3154
3155 /* Memory barrier necessary as speculative reads of the rx
3156 * buffer can be ahead of the index in the status block
3157 */
3158 rmb();
3159 while (sw_cons != hw_cons) {
3160 unsigned int len, hdr_len;
3161 u32 status;
3162 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3163 struct sk_buff *skb;
3164 dma_addr_t dma_addr;
3165 u8 *data;
3166 u16 next_ring_idx;
3167
3168 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3169 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3170
3171 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3172 data = rx_buf->data;
3173 rx_buf->data = NULL;
3174
3175 rx_hdr = get_l2_fhdr(data);
3176 prefetch(rx_hdr);
3177
3178 dma_addr = dma_unmap_addr(rx_buf, mapping);
3179
3180 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3181 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3182 DMA_FROM_DEVICE);
3183
3184 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3185 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3186 prefetch(get_l2_fhdr(next_rx_buf->data));
3187
3188 len = rx_hdr->l2_fhdr_pkt_len;
3189 status = rx_hdr->l2_fhdr_status;
3190
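/* For split packets, or packets larger than the jumbo threshold, only
 * hdr_len bytes live in the data buffer; the rest is carried in the
 * page ring.
 */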
3191 hdr_len = 0;
3192 if (status & L2_FHDR_STATUS_SPLIT) {
3193 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3194 pg_ring_used = 1;
3195 } else if (len > bp->rx_jumbo_thresh) {
3196 hdr_len = bp->rx_jumbo_thresh;
3197 pg_ring_used = 1;
3198 }
3199
3200 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3201 L2_FHDR_ERRORS_PHY_DECODE |
3202 L2_FHDR_ERRORS_ALIGNMENT |
3203 L2_FHDR_ERRORS_TOO_SHORT |
3204 L2_FHDR_ERRORS_GIANT_FRAME))) {
3205
3206 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3207 sw_ring_prod);
3208 if (pg_ring_used) {
3209 int pages;
3210
3211 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3212
3213 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3214 }
3215 goto next_rx;
3216 }
3217
3218 len -= 4;
3219
3220 if (len <= bp->rx_copy_thresh) {
3221 skb = netdev_alloc_skb(bp->dev, len + 6);
3222 if (!skb) {
3223 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3224 sw_ring_prod);
3225 goto next_rx;
3226 }
3227
3228 /* aligned copy */
3229 memcpy(skb->data,
3230 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3231 len + 6);
3232 skb_reserve(skb, 6);
3233 skb_put(skb, len);
3234
3235 bnx2_reuse_rx_data(bp, rxr, data,
3236 sw_ring_cons, sw_ring_prod);
3237
3238 } else {
3239 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3240 (sw_ring_cons << 16) | sw_ring_prod);
3241 if (!skb)
3242 goto next_rx;
3243 }
3244 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3245 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3246 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3247
3248 skb->protocol = eth_type_trans(skb, bp->dev);
3249
3250 if (len > (bp->dev->mtu + ETH_HLEN) &&
3251 skb->protocol != htons(0x8100) &&
3252 skb->protocol != htons(ETH_P_8021AD)) {
3253
3254 dev_kfree_skb(skb);
3255 goto next_rx;
3256
3257 }
3258
3259 skb_checksum_none_assert(skb);
3260 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3261 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3262 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3263
3264 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3265 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3266 skb->ip_summed = CHECKSUM_UNNECESSARY;
3267 }
3268 if ((bp->dev->features & NETIF_F_RXHASH) &&
3269 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3270 L2_FHDR_STATUS_USE_RXHASH))
3271 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3272 PKT_HASH_TYPE_L3);
3273
3274 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3275 napi_gro_receive(&bnapi->napi, skb);
3276 rx_pkt++;
3277
3278 next_rx:
3279 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3280 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3281
3282 if (rx_pkt == budget)
3283 break;
3284
3285 /* Refresh hw_cons to see if there is new work */
3286 if (sw_cons == hw_cons) {
3287 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3288 rmb();
3289 }
3290 }
3291 rxr->rx_cons = sw_cons;
3292 rxr->rx_prod = sw_prod;
3293
3294 if (pg_ring_used)
3295 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3296
3297 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3298
3299 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3300
3301 return rx_pkt;
3302
3303 }
3304
3305 /* MSI ISR - The only difference between this and the INTx ISR
3306 * is that the MSI interrupt is always serviced.
3307 */
3308 static irqreturn_t
3309 bnx2_msi(int irq, void *dev_instance)
3310 {
3311 struct bnx2_napi *bnapi = dev_instance;
3312 struct bnx2 *bp = bnapi->bp;
3313
3314 prefetch(bnapi->status_blk.msi);
3315 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3316 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3317 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3318
3319 /* Return here if interrupt is disabled. */
3320 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3321 return IRQ_HANDLED;
3322
3323 napi_schedule(&bnapi->napi);
3324
3325 return IRQ_HANDLED;
3326 }
3327
3328 static irqreturn_t
3329 bnx2_msi_1shot(int irq, void *dev_instance)
3330 {
3331 struct bnx2_napi *bnapi = dev_instance;
3332 struct bnx2 *bp = bnapi->bp;
3333
3334 prefetch(bnapi->status_blk.msi);
3335
3336 /* Return here if interrupt is disabled. */
3337 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3338 return IRQ_HANDLED;
3339
3340 napi_schedule(&bnapi->napi);
3341
3342 return IRQ_HANDLED;
3343 }
3344
3345 static irqreturn_t
3346 bnx2_interrupt(int irq, void *dev_instance)
3347 {
3348 struct bnx2_napi *bnapi = dev_instance;
3349 struct bnx2 *bp = bnapi->bp;
3350 struct status_block *sblk = bnapi->status_blk.msi;
3351
3352 /* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block that was posted prior to the
3354 * interrupt. Reading a register will flush the status block.
3355 * When using MSI, the MSI message will always complete after
3356 * the status block write.
3357 */
3358 if ((sblk->status_idx == bnapi->last_status_idx) &&
3359 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3360 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3361 return IRQ_NONE;
3362
3363 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3364 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3365 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3366
3367 /* Read back to deassert IRQ immediately to avoid too many
3368 * spurious interrupts.
3369 */
3370 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3371
3372 /* Return here if interrupt is shared and is disabled. */
3373 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3374 return IRQ_HANDLED;
3375
3376 if (napi_schedule_prep(&bnapi->napi)) {
3377 bnapi->last_status_idx = sblk->status_idx;
3378 __napi_schedule(&bnapi->napi);
3379 }
3380
3381 return IRQ_HANDLED;
3382 }
3383
3384 static inline int
3385 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3386 {
3387 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3388 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3389
3390 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3391 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3392 return 1;
3393 return 0;
3394 }
3395
3396 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3397 STATUS_ATTN_BITS_TIMER_ABORT)
3398
3399 static inline int
3400 bnx2_has_work(struct bnx2_napi *bnapi)
3401 {
3402 struct status_block *sblk = bnapi->status_blk.msi;
3403
3404 if (bnx2_has_fast_work(bnapi))
3405 return 1;
3406
3407 #ifdef BCM_CNIC
3408 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3409 return 1;
3410 #endif
3411
3412 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3413 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3414 return 1;
3415
3416 return 0;
3417 }
3418
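/* Workaround for a missed MSI: if work is pending but the status index has
 * not moved since the previous idle check, assume the interrupt was lost,
 * briefly toggle the MSI enable bit and invoke the handler directly.
 */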
3419 static void
3420 bnx2_chk_missed_msi(struct bnx2 *bp)
3421 {
3422 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3423 u32 msi_ctrl;
3424
3425 if (bnx2_has_work(bnapi)) {
3426 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3427 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3428 return;
3429
3430 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3431 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3432 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3433 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3434 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3435 }
3436 }
3437
3438 bp->idle_chk_status_idx = bnapi->last_status_idx;
3439 }
3440
3441 #ifdef BCM_CNIC
3442 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3443 {
3444 struct cnic_ops *c_ops;
3445
3446 if (!bnapi->cnic_present)
3447 return;
3448
3449 rcu_read_lock();
3450 c_ops = rcu_dereference(bp->cnic_ops);
3451 if (c_ops)
3452 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3453 bnapi->status_blk.msi);
3454 rcu_read_unlock();
3455 }
3456 #endif
3457
3458 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3459 {
3460 struct status_block *sblk = bnapi->status_blk.msi;
3461 u32 status_attn_bits = sblk->status_attn_bits;
3462 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3463
3464 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3465 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3466
3467 bnx2_phy_int(bp, bnapi);
3468
3469 /* This is needed to take care of transient status
3470 * during link changes.
3471 */
3472 BNX2_WR(bp, BNX2_HC_COMMAND,
3473 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3474 BNX2_RD(bp, BNX2_HC_COMMAND);
3475 }
3476 }
3477
3478 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3479 int work_done, int budget)
3480 {
3481 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3482 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3483
3484 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3485 bnx2_tx_int(bp, bnapi, 0);
3486
3487 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3488 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3489
3490 return work_done;
3491 }
3492
3493 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3494 {
3495 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3496 struct bnx2 *bp = bnapi->bp;
3497 int work_done = 0;
3498 struct status_block_msix *sblk = bnapi->status_blk.msix;
3499
3500 while (1) {
3501 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3502 if (unlikely(work_done >= budget))
3503 break;
3504
3505 bnapi->last_status_idx = sblk->status_idx;
3506 /* status idx must be read before checking for more work. */
3507 rmb();
3508 if (likely(!bnx2_has_fast_work(bnapi))) {
3509
3510 napi_complete_done(napi, work_done);
3511 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3512 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3513 bnapi->last_status_idx);
3514 break;
3515 }
3516 }
3517 return work_done;
3518 }
3519
3520 static int bnx2_poll(struct napi_struct *napi, int budget)
3521 {
3522 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3523 struct bnx2 *bp = bnapi->bp;
3524 int work_done = 0;
3525 struct status_block *sblk = bnapi->status_blk.msi;
3526
3527 while (1) {
3528 bnx2_poll_link(bp, bnapi);
3529
3530 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3531
3532 #ifdef BCM_CNIC
3533 bnx2_poll_cnic(bp, bnapi);
3534 #endif
3535
3536 /* bnapi->last_status_idx is used below to tell the hw how
3537 * much work has been processed, so we must read it before
3538 * checking for more work.
3539 */
3540 bnapi->last_status_idx = sblk->status_idx;
3541
3542 if (unlikely(work_done >= budget))
3543 break;
3544
3545 rmb();
3546 if (likely(!bnx2_has_work(bnapi))) {
3547 napi_complete_done(napi, work_done);
3548 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3549 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3550 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3551 bnapi->last_status_idx);
3552 break;
3553 }
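/* INTx: write the new index with interrupts still masked first, then
 * re-arm with a second write that clears the mask bit.
 */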
3554 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3555 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3556 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3557 bnapi->last_status_idx);
3558
3559 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3560 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3561 bnapi->last_status_idx);
3562 break;
3563 }
3564 }
3565
3566 return work_done;
3567 }
3568
3569 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3570 * from set_multicast.
3571 */
3572 static void
3573 bnx2_set_rx_mode(struct net_device *dev)
3574 {
3575 struct bnx2 *bp = netdev_priv(dev);
3576 u32 rx_mode, sort_mode;
3577 struct netdev_hw_addr *ha;
3578 int i;
3579
3580 if (!netif_running(dev))
3581 return;
3582
3583 spin_lock_bh(&bp->phy_lock);
3584
3585 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3586 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3587 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3588 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3589 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3590 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3591 if (dev->flags & IFF_PROMISC) {
3592 /* Promiscuous mode. */
3593 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3594 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3595 BNX2_RPM_SORT_USER0_PROM_VLAN;
3596 }
3597 else if (dev->flags & IFF_ALLMULTI) {
3598 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3599 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3600 0xffffffff);
3601 }
3602 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3603 }
3604 else {
3605 /* Accept one or more multicast addresses. */
3606 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3607 u32 regidx;
3608 u32 bit;
3609 u32 crc;
3610
3611 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3612
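/* Hash each multicast address into one of 256 filter bits: the low byte of
 * the little-endian CRC selects the bit, with bits 7:5 choosing one of the
 * eight hash registers and bits 4:0 the bit within it.
 */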
3613 netdev_for_each_mc_addr(ha, dev) {
3614 crc = ether_crc_le(ETH_ALEN, ha->addr);
3615 bit = crc & 0xff;
3616 regidx = (bit & 0xe0) >> 5;
3617 bit &= 0x1f;
3618 mc_filter[regidx] |= (1 << bit);
3619 }
3620
3621 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3622 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3623 mc_filter[i]);
3624 }
3625
3626 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3627 }
3628
3629 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3630 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3631 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3632 BNX2_RPM_SORT_USER0_PROM_VLAN;
3633 } else if (!(dev->flags & IFF_PROMISC)) {
3634 /* Add all entries into the match filter list */
3635 i = 0;
3636 netdev_for_each_uc_addr(ha, dev) {
3637 bnx2_set_mac_addr(bp, ha->addr,
3638 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3639 sort_mode |= (1 <<
3640 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3641 i++;
3642 }
3643
3644 }
3645
3646 if (rx_mode != bp->rx_mode) {
3647 bp->rx_mode = rx_mode;
3648 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3649 }
3650
3651 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3652 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3653 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3654
3655 spin_unlock_bh(&bp->phy_lock);
3656 }
3657
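/* Sanity-check one firmware file section: the offset must be 32-bit aligned
 * and inside the file, and the length must respect the caller's alignment
 * and must not run past the end of the image.
 */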
3658 static int
3659 check_fw_section(const struct firmware *fw,
3660 const struct bnx2_fw_file_section *section,
3661 u32 alignment, bool non_empty)
3662 {
3663 u32 offset = be32_to_cpu(section->offset);
3664 u32 len = be32_to_cpu(section->len);
3665
3666 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3667 return -EINVAL;
3668 if ((non_empty && len == 0) || len > fw->size - offset ||
3669 len & (alignment - 1))
3670 return -EINVAL;
3671 return 0;
3672 }
3673
3674 static int
3675 check_mips_fw_entry(const struct firmware *fw,
3676 const struct bnx2_mips_fw_file_entry *entry)
3677 {
3678 if (check_fw_section(fw, &entry->text, 4, true) ||
3679 check_fw_section(fw, &entry->data, 4, false) ||
3680 check_fw_section(fw, &entry->rodata, 4, false))
3681 return -EINVAL;
3682 return 0;
3683 }
3684
3685 static void bnx2_release_firmware(struct bnx2 *bp)
3686 {
3687 if (bp->rv2p_firmware) {
3688 release_firmware(bp->mips_firmware);
3689 release_firmware(bp->rv2p_firmware);
3690 bp->rv2p_firmware = NULL;
3691 }
3692 }
3693
3694 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3695 {
3696 const char *mips_fw_file, *rv2p_fw_file;
3697 const struct bnx2_mips_fw_file *mips_fw;
3698 const struct bnx2_rv2p_fw_file *rv2p_fw;
3699 int rc;
3700
3701 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3702 mips_fw_file = FW_MIPS_FILE_09;
3703 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3704 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3705 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3706 else
3707 rv2p_fw_file = FW_RV2P_FILE_09;
3708 } else {
3709 mips_fw_file = FW_MIPS_FILE_06;
3710 rv2p_fw_file = FW_RV2P_FILE_06;
3711 }
3712
3713 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3714 if (rc) {
3715 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3716 goto out;
3717 }
3718
3719 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3720 if (rc) {
3721 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3722 goto err_release_mips_firmware;
3723 }
3724 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3725 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3726 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3727 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3728 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3729 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3730 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3731 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3732 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3733 rc = -EINVAL;
3734 goto err_release_firmware;
3735 }
3736 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3737 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3738 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3739 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3740 rc = -EINVAL;
3741 goto err_release_firmware;
3742 }
3743 out:
3744 return rc;
3745
3746 err_release_firmware:
3747 release_firmware(bp->rv2p_firmware);
3748 bp->rv2p_firmware = NULL;
3749 err_release_mips_firmware:
3750 release_firmware(bp->mips_firmware);
3751 goto out;
3752 }
3753
3754 static int bnx2_request_firmware(struct bnx2 *bp)
3755 {
3756 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3757 }
3758
3759 static u32
3760 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3761 {
3762 switch (idx) {
3763 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3764 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3765 rv2p_code |= RV2P_BD_PAGE_SIZE;
3766 break;
3767 }
3768 return rv2p_code;
3769 }
3770
3771 static int
3772 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3773 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3774 {
3775 u32 rv2p_code_len, file_offset;
3776 __be32 *rv2p_code;
3777 int i;
3778 u32 val, cmd, addr;
3779
3780 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3781 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3782
3783 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3784
3785 if (rv2p_proc == RV2P_PROC1) {
3786 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3787 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3788 } else {
3789 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3790 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3791 }
3792
3793 for (i = 0; i < rv2p_code_len; i += 8) {
3794 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3795 rv2p_code++;
3796 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3797 rv2p_code++;
3798
3799 val = (i / 8) | cmd;
3800 BNX2_WR(bp, addr, val);
3801 }
3802
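/* Apply the fixup table: each non-zero entry indexes the low 32-bit word of
 * a 64-bit RV2P instruction, so the word pair at (loc - 1, loc) is patched
 * and written back at instruction slot loc / 2.
 */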
3803 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3804 for (i = 0; i < 8; i++) {
3805 u32 loc, code;
3806
3807 loc = be32_to_cpu(fw_entry->fixup[i]);
3808 if (loc && ((loc * 4) < rv2p_code_len)) {
3809 code = be32_to_cpu(*(rv2p_code + loc - 1));
3810 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3811 code = be32_to_cpu(*(rv2p_code + loc));
3812 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3813 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3814
3815 val = (loc / 2) | cmd;
3816 BNX2_WR(bp, addr, val);
3817 }
3818 }
3819
3820 /* Reset the processor; un-stalling is done later. */
3821 if (rv2p_proc == RV2P_PROC1) {
3822 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3823 }
3824 else {
3825 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3826 }
3827
3828 return 0;
3829 }
3830
3831 static void
3832 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3833 const struct bnx2_mips_fw_file_entry *fw_entry)
3834 {
3835 u32 addr, len, file_offset;
3836 __be32 *data;
3837 u32 offset;
3838 u32 val;
3839
3840 /* Halt the CPU. */
3841 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3842 val |= cpu_reg->mode_value_halt;
3843 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3844 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3845
3846 /* Load the Text area. */
3847 addr = be32_to_cpu(fw_entry->text.addr);
3848 len = be32_to_cpu(fw_entry->text.len);
3849 file_offset = be32_to_cpu(fw_entry->text.offset);
3850 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3851
3852 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3853 if (len) {
3854 int j;
3855
3856 for (j = 0; j < (len / 4); j++, offset += 4)
3857 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3858 }
3859
3860 /* Load the Data area. */
3861 addr = be32_to_cpu(fw_entry->data.addr);
3862 len = be32_to_cpu(fw_entry->data.len);
3863 file_offset = be32_to_cpu(fw_entry->data.offset);
3864 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3865
3866 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3867 if (len) {
3868 int j;
3869
3870 for (j = 0; j < (len / 4); j++, offset += 4)
3871 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3872 }
3873
3874 /* Load the Read-Only area. */
3875 addr = be32_to_cpu(fw_entry->rodata.addr);
3876 len = be32_to_cpu(fw_entry->rodata.len);
3877 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3878 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3879
3880 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3881 if (len) {
3882 int j;
3883
3884 for (j = 0; j < (len / 4); j++, offset += 4)
3885 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3886 }
3887
3888 /* Clear the pre-fetch instruction. */
3889 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3890
3891 val = be32_to_cpu(fw_entry->start_addr);
3892 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3893
3894 /* Start the CPU. */
3895 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3896 val &= ~cpu_reg->mode_value_halt;
3897 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3898 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3899 }
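
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * the three section loops above all translate a firmware address from
 * the MIPS CPU's own view into a scratchpad offset reachable through
 * indirect register writes, by rebasing it on the per-CPU spad_base.
 * Parameter names simply mirror the cpu_reg fields used above.
 */
static unsigned int cpu_fw_spad_offset(unsigned int spad_base,
				       unsigned int mips_view_base,
				       unsigned int section_addr)
{
	return spad_base + (section_addr - mips_view_base);
}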
3900
3901 static void
3902 bnx2_init_cpus(struct bnx2 *bp)
3903 {
3904 const struct bnx2_mips_fw_file *mips_fw =
3905 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3906 const struct bnx2_rv2p_fw_file *rv2p_fw =
3907 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3908
3909 /* Initialize the RV2P processor. */
3910 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3911 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3912
3913 /* Initialize the RX Processor. */
3914 load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3915
3916 /* Initialize the TX Processor. */
3917 load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3918
3919 /* Initialize the TX Patch-up Processor. */
3920 load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3921
3922 /* Initialize the Completion Processor. */
3923 load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3924
3925 /* Initialize the Command Processor. */
3926 load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3927 }
3928
3929 static void
3930 bnx2_setup_wol(struct bnx2 *bp)
3931 {
3932 int i;
3933 u32 val, wol_msg;
3934
3935 if (bp->wol) {
3936 u32 advertising;
3937 u8 autoneg;
3938
3939 autoneg = bp->autoneg;
3940 advertising = bp->advertising;
3941
3942 if (bp->phy_port == PORT_TP) {
3943 bp->autoneg = AUTONEG_SPEED;
3944 bp->advertising = ADVERTISED_10baseT_Half |
3945 ADVERTISED_10baseT_Full |
3946 ADVERTISED_100baseT_Half |
3947 ADVERTISED_100baseT_Full |
3948 ADVERTISED_Autoneg;
3949 }
3950
3951 spin_lock_bh(&bp->phy_lock);
3952 bnx2_setup_phy(bp, bp->phy_port);
3953 spin_unlock_bh(&bp->phy_lock);
3954
3955 bp->autoneg = autoneg;
3956 bp->advertising = advertising;
3957
3958 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3959
3960 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3961
3962 /* Enable port mode. */
3963 val &= ~BNX2_EMAC_MODE_PORT;
3964 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3965 BNX2_EMAC_MODE_ACPI_RCVD |
3966 BNX2_EMAC_MODE_MPKT;
3967 if (bp->phy_port == PORT_TP) {
3968 val |= BNX2_EMAC_MODE_PORT_MII;
3969 } else {
3970 val |= BNX2_EMAC_MODE_PORT_GMII;
3971 if (bp->line_speed == SPEED_2500)
3972 val |= BNX2_EMAC_MODE_25G_MODE;
3973 }
3974
3975 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3976
3977 /* receive all multicast */
3978 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3979 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3980 0xffffffff);
3981 }
3982 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3983
3984 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3985 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3986 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3987 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3988
3989 /* Need to enable EMAC and RPM for WOL. */
3990 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3991 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3992 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3993 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3994
3995 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3996 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3997 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3998
3999 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4000 } else {
4001 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4002 }
4003
4004 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4005 u32 val;
4006
4007 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4008 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4009 bnx2_fw_sync(bp, wol_msg, 1, 0);
4010 return;
4011 }
4012 /* Tell firmware not to power down the PHY yet, otherwise
4013 * the chip will take a long time to respond to MMIO reads.
4014 */
4015 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4016 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4017 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4018 bnx2_fw_sync(bp, wol_msg, 1, 0);
4019 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4020 }
4021
4022 }
4023
4024 static int
4025 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4026 {
4027 switch (state) {
4028 case PCI_D0: {
4029 u32 val;
4030
4031 pci_enable_wake(bp->pdev, PCI_D0, false);
4032 pci_set_power_state(bp->pdev, PCI_D0);
4033
4034 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4035 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4036 val &= ~BNX2_EMAC_MODE_MPKT;
4037 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4038
4039 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4040 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4041 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4042 break;
4043 }
4044 case PCI_D3hot: {
4045 bnx2_setup_wol(bp);
4046 pci_wake_from_d3(bp->pdev, bp->wol);
4047 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4048 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4049
4050 if (bp->wol)
4051 pci_set_power_state(bp->pdev, PCI_D3hot);
4052 break;
4053
4054 }
4055 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4056 u32 val;
4057
4058 /* Tell firmware not to power down the PHY yet,
4059 * otherwise the other port may not respond to
4060 * MMIO reads.
4061 */
4062 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4063 val &= ~BNX2_CONDITION_PM_STATE_MASK;
4064 val |= BNX2_CONDITION_PM_STATE_UNPREP;
4065 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4066 }
4067 pci_set_power_state(bp->pdev, PCI_D3hot);
4068
4069 /* No more memory access after this point until
4070 * device is brought back to D0.
4071 */
4072 break;
4073 }
4074 default:
4075 return -EINVAL;
4076 }
4077 return 0;
4078 }
4079
4080 static int
4081 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4082 {
4083 u32 val;
4084 int j;
4085
4086 /* Request access to the flash interface. */
4087 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4088 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4089 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4090 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4091 break;
4092
4093 udelay(5);
4094 }
4095
4096 if (j >= NVRAM_TIMEOUT_COUNT)
4097 return -EBUSY;
4098
4099 return 0;
4100 }
4101
4102 static int
4103 bnx2_release_nvram_lock(struct bnx2 *bp)
4104 {
4105 int j;
4106 u32 val;
4107
4108 /* Relinquish nvram interface. */
4109 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4110
4111 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4112 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4113 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4114 break;
4115
4116 udelay(5);
4117 }
4118
4119 if (j >= NVRAM_TIMEOUT_COUNT)
4120 return -EBUSY;
4121
4122 return 0;
4123 }
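
/*
 * Editor's note (illustrative sketch, not part of the driver): the two
 * arbitration helpers above share the same bounded-poll idiom -- write
 * a request or clear bit, then poll the arbitration status up to
 * NVRAM_TIMEOUT_COUNT times with a 5 us delay, returning -EBUSY if the
 * expected state is never reached.  A stripped-down sketch of that
 * idiom, with the register read replaced by a caller-supplied
 * predicate:
 */
static int bounded_poll(int (*done)(void *ctx), void *ctx,
			unsigned int max_tries)
{
	unsigned int j;

	for (j = 0; j < max_tries; j++) {
		if (done(ctx))
			return 0;
		/* the driver inserts udelay(5) between polls */
	}
	return -1;	/* the driver returns -EBUSY here */
}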
4124
4125
4126 static int
4127 bnx2_enable_nvram_write(struct bnx2 *bp)
4128 {
4129 u32 val;
4130
4131 val = BNX2_RD(bp, BNX2_MISC_CFG);
4132 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4133
4134 if (bp->flash_info->flags & BNX2_NV_WREN) {
4135 int j;
4136
4137 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4138 BNX2_WR(bp, BNX2_NVM_COMMAND,
4139 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4140
4141 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4142 udelay(5);
4143
4144 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4145 if (val & BNX2_NVM_COMMAND_DONE)
4146 break;
4147 }
4148
4149 if (j >= NVRAM_TIMEOUT_COUNT)
4150 return -EBUSY;
4151 }
4152 return 0;
4153 }
4154
4155 static void
4156 bnx2_disable_nvram_write(struct bnx2 *bp)
4157 {
4158 u32 val;
4159
4160 val = BNX2_RD(bp, BNX2_MISC_CFG);
4161 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4162 }
4163
4164
4165 static void
4166 bnx2_enable_nvram_access(struct bnx2 *bp)
4167 {
4168 u32 val;
4169
4170 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4171 /* Enable both bits, even on read. */
4172 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4173 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4174 }
4175
4176 static void
4177 bnx2_disable_nvram_access(struct bnx2 *bp)
4178 {
4179 u32 val;
4180
4181 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4182 /* Disable both bits, even after read. */
4183 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4184 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4185 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4186 }
4187
4188 static int
4189 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4190 {
4191 u32 cmd;
4192 int j;
4193
4194 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4195 /* Buffered flash, no erase needed */
4196 return 0;
4197
4198 /* Build an erase command */
4199 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4200 BNX2_NVM_COMMAND_DOIT;
4201
4202 /* Need to clear DONE bit separately. */
4203 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4204
4205 /* Address of the NVRAM page to erase. */
4206 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4207
4208 /* Issue an erase command. */
4209 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4210
4211 /* Wait for completion. */
4212 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4213 u32 val;
4214
4215 udelay(5);
4216
4217 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4218 if (val & BNX2_NVM_COMMAND_DONE)
4219 break;
4220 }
4221
4222 if (j >= NVRAM_TIMEOUT_COUNT)
4223 return -EBUSY;
4224
4225 return 0;
4226 }
4227
4228 static int
4229 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4230 {
4231 u32 cmd;
4232 int j;
4233
4234 /* Build the command word. */
4235 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4236
4237 /* Calculate an offset of a buffered flash, not needed for 5709. */
4238 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4239 offset = ((offset / bp->flash_info->page_size) <<
4240 bp->flash_info->page_bits) +
4241 (offset % bp->flash_info->page_size);
4242 }
4243
4244 /* Need to clear DONE bit separately. */
4245 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4246
4247 /* Address of the NVRAM to read from. */
4248 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4249
4250 /* Issue a read command. */
4251 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4252
4253 /* Wait for completion. */
4254 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4255 u32 val;
4256
4257 udelay(5);
4258
4259 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4260 if (val & BNX2_NVM_COMMAND_DONE) {
4261 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4262 memcpy(ret_val, &v, 4);
4263 break;
4264 }
4265 }
4266 if (j >= NVRAM_TIMEOUT_COUNT)
4267 return -EBUSY;
4268
4269 return 0;
4270 }
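
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * on flash parts flagged BNX2_NV_TRANSLATE, the linear NVRAM offset is
 * split into a page number and an intra-page offset, and the page
 * number is shifted up by page_bits to form the device address.  Both
 * the read and write paths use the same expression, mirrored here with
 * illustrative parameter names.
 */
static unsigned int nvram_translate_offset(unsigned int offset,
					   unsigned int page_size,
					   unsigned int page_bits)
{
	return ((offset / page_size) << page_bits) + (offset % page_size);
}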
4271
4272
4273 static int
4274 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4275 {
4276 u32 cmd;
4277 __be32 val32;
4278 int j;
4279
4280 /* Build the command word. */
4281 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4282
4283 /* Calculate an offset of a buffered flash, not needed for 5709. */
4284 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4285 offset = ((offset / bp->flash_info->page_size) <<
4286 bp->flash_info->page_bits) +
4287 (offset % bp->flash_info->page_size);
4288 }
4289
4290 /* Need to clear DONE bit separately. */
4291 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4292
4293 memcpy(&val32, val, 4);
4294
4295 /* Write the data. */
4296 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4297
4298 /* Address of the NVRAM to write to. */
4299 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4300
4301 /* Issue the write command. */
4302 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4303
4304 /* Wait for completion. */
4305 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4306 udelay(5);
4307
4308 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4309 break;
4310 }
4311 if (j >= NVRAM_TIMEOUT_COUNT)
4312 return -EBUSY;
4313
4314 return 0;
4315 }
4316
4317 static int
4318 bnx2_init_nvram(struct bnx2 *bp)
4319 {
4320 u32 val;
4321 int j, entry_count, rc = 0;
4322 const struct flash_spec *flash;
4323
4324 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4325 bp->flash_info = &flash_5709;
4326 goto get_flash_size;
4327 }
4328
4329 /* Determine the selected interface. */
4330 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4331
4332 entry_count = ARRAY_SIZE(flash_table);
4333
4334 if (val & 0x40000000) {
4335
4336 /* Flash interface has been reconfigured */
4337 for (j = 0, flash = &flash_table[0]; j < entry_count;
4338 j++, flash++) {
4339 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4340 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4341 bp->flash_info = flash;
4342 break;
4343 }
4344 }
4345 }
4346 else {
4347 u32 mask;
4348 /* Not yet reconfigured */
4349
4350 if (val & (1 << 23))
4351 mask = FLASH_BACKUP_STRAP_MASK;
4352 else
4353 mask = FLASH_STRAP_MASK;
4354
4355 for (j = 0, flash = &flash_table[0]; j < entry_count;
4356 j++, flash++) {
4357
4358 if ((val & mask) == (flash->strapping & mask)) {
4359 bp->flash_info = flash;
4360
4361 /* Request access to the flash interface. */
4362 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4363 return rc;
4364
4365 /* Enable access to flash interface */
4366 bnx2_enable_nvram_access(bp);
4367
4368 /* Reconfigure the flash interface */
4369 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4370 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4371 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4372 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4373
4374 /* Disable access to flash interface */
4375 bnx2_disable_nvram_access(bp);
4376 bnx2_release_nvram_lock(bp);
4377
4378 break;
4379 }
4380 }
4381 } /* if (val & 0x40000000) */
4382
4383 if (j == entry_count) {
4384 bp->flash_info = NULL;
4385 pr_alert("Unknown flash/EEPROM type\n");
4386 return -ENODEV;
4387 }
4388
4389 get_flash_size:
4390 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4391 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4392 if (val)
4393 bp->flash_size = val;
4394 else
4395 bp->flash_size = bp->flash_info->total_size;
4396
4397 return rc;
4398 }
4399
4400 static int
4401 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4402 int buf_size)
4403 {
4404 int rc = 0;
4405 u32 cmd_flags, offset32, len32, extra;
4406
4407 if (buf_size == 0)
4408 return 0;
4409
4410 /* Request access to the flash interface. */
4411 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4412 return rc;
4413
4414 /* Enable access to flash interface */
4415 bnx2_enable_nvram_access(bp);
4416
4417 len32 = buf_size;
4418 offset32 = offset;
4419 extra = 0;
4420
4421 cmd_flags = 0;
4422
4423 if (offset32 & 3) {
4424 u8 buf[4];
4425 u32 pre_len;
4426
4427 offset32 &= ~3;
4428 pre_len = 4 - (offset & 3);
4429
4430 if (pre_len >= len32) {
4431 pre_len = len32;
4432 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4433 BNX2_NVM_COMMAND_LAST;
4434 }
4435 else {
4436 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4437 }
4438
4439 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4440
4441 if (rc)
4442 return rc;
4443
4444 memcpy(ret_buf, buf + (offset & 3), pre_len);
4445
4446 offset32 += 4;
4447 ret_buf += pre_len;
4448 len32 -= pre_len;
4449 }
4450 if (len32 & 3) {
4451 extra = 4 - (len32 & 3);
4452 len32 = (len32 + 4) & ~3;
4453 }
4454
4455 if (len32 == 4) {
4456 u8 buf[4];
4457
4458 if (cmd_flags)
4459 cmd_flags = BNX2_NVM_COMMAND_LAST;
4460 else
4461 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4462 BNX2_NVM_COMMAND_LAST;
4463
4464 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4465
4466 memcpy(ret_buf, buf, 4 - extra);
4467 }
4468 else if (len32 > 0) {
4469 u8 buf[4];
4470
4471 /* Read the first word. */
4472 if (cmd_flags)
4473 cmd_flags = 0;
4474 else
4475 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4476
4477 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4478
4479 /* Advance to the next dword. */
4480 offset32 += 4;
4481 ret_buf += 4;
4482 len32 -= 4;
4483
4484 while (len32 > 4 && rc == 0) {
4485 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4486
4487 /* Advance to the next dword. */
4488 offset32 += 4;
4489 ret_buf += 4;
4490 len32 -= 4;
4491 }
4492
4493 if (rc)
4494 return rc;
4495
4496 cmd_flags = BNX2_NVM_COMMAND_LAST;
4497 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4498
4499 memcpy(ret_buf, buf, 4 - extra);
4500 }
4501
4502 /* Disable access to flash interface */
4503 bnx2_disable_nvram_access(bp);
4504
4505 bnx2_release_nvram_lock(bp);
4506
4507 return rc;
4508 }
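
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * bnx2_nvram_read() above widens an arbitrary (offset, length) request
 * to dword-aligned hardware accesses.  The number of bytes copied out
 * of the first dword and the number of pad bytes read past the end of
 * the request follow the arithmetic below; struct and field names are
 * illustrative only.
 */
struct nvram_align {
	unsigned int aligned_offset;	/* request offset rounded down to 4 */
	unsigned int head_copy;		/* bytes used from the first dword */
	unsigned int tail_extra;	/* pad bytes read past the request */
};

static struct nvram_align nvram_align_request(unsigned int offset,
					      unsigned int len)
{
	struct nvram_align a;
	unsigned int head = offset & 3;
	unsigned int rest;

	a.aligned_offset = offset & ~3u;
	a.head_copy = head ? 4 - head : 0;
	if (a.head_copy > len)
		a.head_copy = len;
	rest = len - a.head_copy;
	a.tail_extra = (rest & 3) ? 4 - (rest & 3) : 0;
	return a;
}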
4509
4510 static int
4511 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4512 int buf_size)
4513 {
4514 u32 written, offset32, len32;
4515 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4516 int rc = 0;
4517 int align_start, align_end;
4518
4519 buf = data_buf;
4520 offset32 = offset;
4521 len32 = buf_size;
4522 align_start = align_end = 0;
4523
4524 if ((align_start = (offset32 & 3))) {
4525 offset32 &= ~3;
4526 len32 += align_start;
4527 if (len32 < 4)
4528 len32 = 4;
4529 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4530 return rc;
4531 }
4532
4533 if (len32 & 3) {
4534 align_end = 4 - (len32 & 3);
4535 len32 += align_end;
4536 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4537 return rc;
4538 }
4539
4540 if (align_start || align_end) {
4541 align_buf = kmalloc(len32, GFP_KERNEL);
4542 if (!align_buf)
4543 return -ENOMEM;
4544 if (align_start) {
4545 memcpy(align_buf, start, 4);
4546 }
4547 if (align_end) {
4548 memcpy(align_buf + len32 - 4, end, 4);
4549 }
4550 memcpy(align_buf + align_start, data_buf, buf_size);
4551 buf = align_buf;
4552 }
4553
4554 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4555 flash_buffer = kmalloc(264, GFP_KERNEL);
4556 if (!flash_buffer) {
4557 rc = -ENOMEM;
4558 goto nvram_write_end;
4559 }
4560 }
4561
4562 written = 0;
4563 while ((written < len32) && (rc == 0)) {
4564 u32 page_start, page_end, data_start, data_end;
4565 u32 addr, cmd_flags;
4566 int i;
4567
4568 /* Find the page_start addr */
4569 page_start = offset32 + written;
4570 page_start -= (page_start % bp->flash_info->page_size);
4571 /* Find the page_end addr */
4572 page_end = page_start + bp->flash_info->page_size;
4573 /* Find the data_start addr */
4574 data_start = (written == 0) ? offset32 : page_start;
4575 /* Find the data_end addr */
4576 data_end = (page_end > offset32 + len32) ?
4577 (offset32 + len32) : page_end;
4578
4579 /* Request access to the flash interface. */
4580 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4581 goto nvram_write_end;
4582
4583 /* Enable access to flash interface */
4584 bnx2_enable_nvram_access(bp);
4585
4586 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4587 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4588 int j;
4589
4590 /* Read the whole page into the buffer
4591 * (non-buffered flash only) */
4592 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4593 if (j == (bp->flash_info->page_size - 4)) {
4594 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4595 }
4596 rc = bnx2_nvram_read_dword(bp,
4597 page_start + j,
4598 &flash_buffer[j],
4599 cmd_flags);
4600
4601 if (rc)
4602 goto nvram_write_end;
4603
4604 cmd_flags = 0;
4605 }
4606 }
4607
4608 /* Enable writes to flash interface (unlock write-protect) */
4609 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4610 goto nvram_write_end;
4611
4612 /* Loop to write back the buffer data from page_start to
4613 * data_start */
4614 i = 0;
4615 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4616 /* Erase the page */
4617 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4618 goto nvram_write_end;
4619
4620 /* Re-enable the write again for the actual write */
4621 bnx2_enable_nvram_write(bp);
4622
4623 for (addr = page_start; addr < data_start;
4624 addr += 4, i += 4) {
4625
4626 rc = bnx2_nvram_write_dword(bp, addr,
4627 &flash_buffer[i], cmd_flags);
4628
4629 if (rc != 0)
4630 goto nvram_write_end;
4631
4632 cmd_flags = 0;
4633 }
4634 }
4635
4636 /* Loop to write the new data from data_start to data_end */
4637 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4638 if ((addr == page_end - 4) ||
4639 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4640 (addr == data_end - 4))) {
4641
4642 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4643 }
4644 rc = bnx2_nvram_write_dword(bp, addr, buf,
4645 cmd_flags);
4646
4647 if (rc != 0)
4648 goto nvram_write_end;
4649
4650 cmd_flags = 0;
4651 buf += 4;
4652 }
4653
4654 /* Loop to write back the buffer data from data_end
4655 * to page_end */
4656 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4657 for (addr = data_end; addr < page_end;
4658 addr += 4, i += 4) {
4659
4660 if (addr == page_end-4) {
4661 cmd_flags = BNX2_NVM_COMMAND_LAST;
4662 }
4663 rc = bnx2_nvram_write_dword(bp, addr,
4664 &flash_buffer[i], cmd_flags);
4665
4666 if (rc != 0)
4667 goto nvram_write_end;
4668
4669 cmd_flags = 0;
4670 }
4671 }
4672
4673 /* Disable writes to flash interface (lock write-protect) */
4674 bnx2_disable_nvram_write(bp);
4675
4676 /* Disable access to flash interface */
4677 bnx2_disable_nvram_access(bp);
4678 bnx2_release_nvram_lock(bp);
4679
4680 /* Increment written */
4681 written += data_end - data_start;
4682 }
4683
4684 nvram_write_end:
4685 kfree(flash_buffer);
4686 kfree(align_buf);
4687 return rc;
4688 }
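
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * each pass of the write loop above covers the intersection of the
 * current flash page with the caller's buffer.  The window boundaries
 * reduce to the arithmetic below (parameter names are illustrative);
 * "written" is the running total of bytes already committed.
 */
static void nvram_write_window(unsigned int offset, unsigned int len,
			       unsigned int written, unsigned int page_size,
			       unsigned int *data_start,
			       unsigned int *data_end)
{
	unsigned int page_start = offset + written;
	unsigned int page_end;

	page_start -= page_start % page_size;
	page_end = page_start + page_size;

	*data_start = (written == 0) ? offset : page_start;
	*data_end = (page_end > offset + len) ? offset + len : page_end;
}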
4689
4690 static void
4691 bnx2_init_fw_cap(struct bnx2 *bp)
4692 {
4693 u32 val, sig = 0;
4694
4695 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4696 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4697
4698 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4699 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4700
4701 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4702 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4703 return;
4704
4705 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4706 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4707 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4708 }
4709
4710 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4711 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4712 u32 link;
4713
4714 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4715
4716 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4717 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4718 bp->phy_port = PORT_FIBRE;
4719 else
4720 bp->phy_port = PORT_TP;
4721
4722 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4723 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4724 }
4725
4726 if (netif_running(bp->dev) && sig)
4727 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4728 }
4729
4730 static void
4731 bnx2_setup_msix_tbl(struct bnx2 *bp)
4732 {
4733 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4734
4735 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4736 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4737 }
4738
4739 static void
4740 bnx2_wait_dma_complete(struct bnx2 *bp)
4741 {
4742 u32 val;
4743 int i;
4744
4745 /*
4746 * Wait for the current PCI transaction to complete before
4747 * issuing a reset.
4748 */
4749 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4750 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4751 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4752 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4753 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4754 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4755 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4756 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4757 udelay(5);
4758 } else { /* 5709 */
4759 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4760 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4761 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4762 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4763
4764 for (i = 0; i < 100; i++) {
4765 msleep(1);
4766 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4767 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4768 break;
4769 }
4770 }
4771
4772 return;
4773 }
4774
4775
4776 static int
4777 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4778 {
4779 u32 val;
4780 int i, rc = 0;
4781 u8 old_port;
4782
4783 /* Wait for the current PCI transaction to complete before
4784 * issuing a reset. */
4785 bnx2_wait_dma_complete(bp);
4786
4787 /* Wait for the firmware to tell us it is ok to issue a reset. */
4788 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4789
4790 /* Deposit a driver reset signature so the firmware knows that
4791 * this is a soft reset. */
4792 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4793 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4794
4795 /* Do a dummy read to force the chip to complete all current transactions
4796 * before we issue a reset. */
4797 val = BNX2_RD(bp, BNX2_MISC_ID);
4798
4799 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4800 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4801 BNX2_RD(bp, BNX2_MISC_COMMAND);
4802 udelay(5);
4803
4804 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4805 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4806
4807 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4808
4809 } else {
4810 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4811 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4812 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4813
4814 /* Chip reset. */
4815 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4816
4817 /* Reading back any register after chip reset will hang the
4818 * bus on 5706 A0 and A1. The msleep below provides plenty
4819 * of margin for write posting.
4820 */
4821 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4822 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4823 msleep(20);
4824
4825 /* Reset takes approximately 30 usec */
4826 for (i = 0; i < 10; i++) {
4827 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4828 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4829 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4830 break;
4831 udelay(10);
4832 }
4833
4834 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4835 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4836 pr_err("Chip reset did not complete\n");
4837 return -EBUSY;
4838 }
4839 }
4840
4841 /* Make sure byte swapping is properly configured. */
4842 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4843 if (val != 0x01020304) {
4844 pr_err("Chip not in correct endian mode\n");
4845 return -ENODEV;
4846 }
4847
4848 /* Wait for the firmware to finish its initialization. */
4849 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4850 if (rc)
4851 return rc;
4852
4853 spin_lock_bh(&bp->phy_lock);
4854 old_port = bp->phy_port;
4855 bnx2_init_fw_cap(bp);
4856 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4857 old_port != bp->phy_port)
4858 bnx2_set_default_remote_link(bp);
4859 spin_unlock_bh(&bp->phy_lock);
4860
4861 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4862 /* Adjust the voltage regulator to two steps lower. The default
4863 * of this register is 0x0000000e. */
4864 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4865
4866 /* Remove bad rbuf memory from the free pool. */
4867 rc = bnx2_alloc_bad_rbuf(bp);
4868 }
4869
4870 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4871 bnx2_setup_msix_tbl(bp);
4872 /* Prevent MSIX table reads and writes from timing out */
4873 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4874 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4875 }
4876
4877 return rc;
4878 }
4879
4880 static int
4881 bnx2_init_chip(struct bnx2 *bp)
4882 {
4883 u32 val, mtu;
4884 int rc, i;
4885
4886 /* Make sure the interrupt is not active. */
4887 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4888
4889 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4890 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4891 #ifdef __BIG_ENDIAN
4892 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4893 #endif
4894 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4895 DMA_READ_CHANS << 12 |
4896 DMA_WRITE_CHANS << 16;
4897
4898 val |= (0x2 << 20) | (1 << 11);
4899
4900 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4901 val |= (1 << 23);
4902
4903 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4904 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4905 !(bp->flags & BNX2_FLAG_PCIX))
4906 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4907
4908 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4909
4910 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4911 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4912 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4913 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4914 }
4915
4916 if (bp->flags & BNX2_FLAG_PCIX) {
4917 u16 val16;
4918
4919 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4920 &val16);
4921 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4922 val16 & ~PCI_X_CMD_ERO);
4923 }
4924
4925 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4926 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4927 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4928 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4929
4930 /* Initialize context mapping and zero out the quick contexts. The
4931 * context block must have already been enabled. */
4932 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4933 rc = bnx2_init_5709_context(bp);
4934 if (rc)
4935 return rc;
4936 } else
4937 bnx2_init_context(bp);
4938
4939 bnx2_init_cpus(bp);
4940
4941 bnx2_init_nvram(bp);
4942
4943 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4944
4945 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4946 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4947 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4948 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4949 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4950 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4951 val |= BNX2_MQ_CONFIG_HALT_DIS;
4952 }
4953
4954 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4955
4956 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4957 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4958 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4959
4960 val = (BNX2_PAGE_BITS - 8) << 24;
4961 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4962
4963 /* Configure page size. */
4964 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4965 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4966 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4967 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4968
4969 val = bp->mac_addr[0] +
4970 (bp->mac_addr[1] << 8) +
4971 (bp->mac_addr[2] << 16) +
4972 bp->mac_addr[3] +
4973 (bp->mac_addr[4] << 8) +
4974 (bp->mac_addr[5] << 16);
4975 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4976
4977 /* Program the MTU. Also include 4 bytes for CRC32. */
4978 mtu = bp->dev->mtu;
4979 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4980 if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4981 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4982 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4983
4984 if (mtu < ETH_DATA_LEN)
4985 mtu = ETH_DATA_LEN;
4986
4987 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4988 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4989 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4990
4991 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4992 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4993 bp->bnx2_napi[i].last_status_idx = 0;
4994
4995 bp->idle_chk_status_idx = 0xffff;
4996
4997 /* Set up how to generate a link change interrupt. */
4998 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4999
5000 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5001 (u64) bp->status_blk_mapping & 0xffffffff);
5002 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5003
5004 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5005 (u64) bp->stats_blk_mapping & 0xffffffff);
5006 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5007 (u64) bp->stats_blk_mapping >> 32);
5008
5009 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5010 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5011
5012 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5013 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5014
5015 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5016 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5017
5018 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5019
5020 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5021
5022 BNX2_WR(bp, BNX2_HC_COM_TICKS,
5023 (bp->com_ticks_int << 16) | bp->com_ticks);
5024
5025 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5026 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5027
5028 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5029 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5030 else
5031 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5032 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
5033
5034 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5035 val = BNX2_HC_CONFIG_COLLECT_STATS;
5036 else {
5037 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5038 BNX2_HC_CONFIG_COLLECT_STATS;
5039 }
5040
5041 if (bp->flags & BNX2_FLAG_USING_MSIX) {
5042 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5043 BNX2_HC_MSIX_BIT_VECTOR_VAL);
5044
5045 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5046 }
5047
5048 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5049 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5050
5051 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5052
5053 if (bp->rx_ticks < 25)
5054 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5055 else
5056 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5057
5058 for (i = 1; i < bp->irq_nvecs; i++) {
5059 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5060 BNX2_HC_SB_CONFIG_1;
5061
5062 BNX2_WR(bp, base,
5063 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5064 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5065 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5066
5067 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5068 (bp->tx_quick_cons_trip_int << 16) |
5069 bp->tx_quick_cons_trip);
5070
5071 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5072 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5073
5074 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5075 (bp->rx_quick_cons_trip_int << 16) |
5076 bp->rx_quick_cons_trip);
5077
5078 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5079 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5080 }
5081
5082 /* Clear internal stats counters. */
5083 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5084
5085 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5086
5087 /* Initialize the receive filter. */
5088 bnx2_set_rx_mode(bp->dev);
5089
5090 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5091 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5092 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5093 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5094 }
5095 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5096 1, 0);
5097
5098 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5099 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5100
5101 udelay(20);
5102
5103 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5104
5105 return rc;
5106 }
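
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * the EMAC RX MTU register programmed above holds the maximum frame
 * size (MTU + Ethernet header + FCS) and gains a jumbo-enable flag
 * once that exceeds the standard maximum frame the chip accepts.  The
 * threshold and flag bit are passed in here rather than hard-coded,
 * since their exact values come from the driver's headers.
 */
static unsigned int emac_rx_mtu_val(unsigned int mtu, unsigned int std_max,
				    unsigned int jumbo_flag)
{
	unsigned int val = mtu + 14 + 4;	/* ETH_HLEN + ETH_FCS_LEN */

	if (val > std_max)
		val |= jumbo_flag;
	return val;
}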
5107
5108 static void
5109 bnx2_clear_ring_states(struct bnx2 *bp)
5110 {
5111 struct bnx2_napi *bnapi;
5112 struct bnx2_tx_ring_info *txr;
5113 struct bnx2_rx_ring_info *rxr;
5114 int i;
5115
5116 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5117 bnapi = &bp->bnx2_napi[i];
5118 txr = &bnapi->tx_ring;
5119 rxr = &bnapi->rx_ring;
5120
5121 txr->tx_cons = 0;
5122 txr->hw_tx_cons = 0;
5123 rxr->rx_prod_bseq = 0;
5124 rxr->rx_prod = 0;
5125 rxr->rx_cons = 0;
5126 rxr->rx_pg_prod = 0;
5127 rxr->rx_pg_cons = 0;
5128 }
5129 }
5130
5131 static void
5132 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5133 {
5134 u32 val, offset0, offset1, offset2, offset3;
5135 u32 cid_addr = GET_CID_ADDR(cid);
5136
5137 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5138 offset0 = BNX2_L2CTX_TYPE_XI;
5139 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5140 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5141 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5142 } else {
5143 offset0 = BNX2_L2CTX_TYPE;
5144 offset1 = BNX2_L2CTX_CMD_TYPE;
5145 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5146 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5147 }
5148 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5149 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5150
5151 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5152 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5153
5154 val = (u64) txr->tx_desc_mapping >> 32;
5155 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5156
5157 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5158 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5159 }
5160
5161 static void
5162 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5163 {
5164 struct bnx2_tx_bd *txbd;
5165 u32 cid = TX_CID;
5166 struct bnx2_napi *bnapi;
5167 struct bnx2_tx_ring_info *txr;
5168
5169 bnapi = &bp->bnx2_napi[ring_num];
5170 txr = &bnapi->tx_ring;
5171
5172 if (ring_num == 0)
5173 cid = TX_CID;
5174 else
5175 cid = TX_TSS_CID + ring_num - 1;
5176
5177 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5178
5179 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5180
5181 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5182 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5183
5184 txr->tx_prod = 0;
5185 txr->tx_prod_bseq = 0;
5186
5187 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5188 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5189
5190 bnx2_init_tx_context(bp, cid, txr);
5191 }
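
/*
 * Editor's note: the descriptor written above sits at the very end of
 * the single-page TX ring and appears to serve as a chain entry whose
 * address points back at the ring's own base, so the hardware wraps
 * from the last usable descriptor to the first without driver help.
 */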
5192
5193 static void
5194 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5195 u32 buf_size, int num_rings)
5196 {
5197 int i;
5198 struct bnx2_rx_bd *rxbd;
5199
5200 for (i = 0; i < num_rings; i++) {
5201 int j;
5202
5203 rxbd = &rx_ring[i][0];
5204 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5205 rxbd->rx_bd_len = buf_size;
5206 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5207 }
5208 if (i == (num_rings - 1))
5209 j = 0;
5210 else
5211 j = i + 1;
5212 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5213 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5214 }
5215 }
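
/*
 * Editor's note: the descriptor left over after the inner loop is the
 * last entry of each RX ring page; it is written as a chain entry
 * pointing at the next page's DMA address, with the final page
 * chaining back to page 0 so the pages form one circular ring.
 */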
5216
5217 static void
5218 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5219 {
5220 int i;
5221 u16 prod, ring_prod;
5222 u32 cid, rx_cid_addr, val;
5223 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5224 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5225
5226 if (ring_num == 0)
5227 cid = RX_CID;
5228 else
5229 cid = RX_RSS_CID + ring_num - 1;
5230
5231 rx_cid_addr = GET_CID_ADDR(cid);
5232
5233 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5234 bp->rx_buf_use_size, bp->rx_max_ring);
5235
5236 bnx2_init_rx_context(bp, cid);
5237
5238 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5239 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5240 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5241 }
5242
5243 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5244 if (bp->rx_pg_ring_size) {
5245 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5246 rxr->rx_pg_desc_mapping,
5247 PAGE_SIZE, bp->rx_max_pg_ring);
5248 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5249 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5250 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5251 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5252
5253 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5254 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5255
5256 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5257 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5258
5259 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5260 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5261 }
5262
5263 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5264 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5265
5266 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5267 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5268
5269 ring_prod = prod = rxr->rx_pg_prod;
5270 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5271 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5272 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5273 ring_num, i, bp->rx_pg_ring_size);
5274 break;
5275 }
5276 prod = BNX2_NEXT_RX_BD(prod);
5277 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5278 }
5279 rxr->rx_pg_prod = prod;
5280
5281 ring_prod = prod = rxr->rx_prod;
5282 for (i = 0; i < bp->rx_ring_size; i++) {
5283 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5284 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5285 ring_num, i, bp->rx_ring_size);
5286 break;
5287 }
5288 prod = BNX2_NEXT_RX_BD(prod);
5289 ring_prod = BNX2_RX_RING_IDX(prod);
5290 }
5291 rxr->rx_prod = prod;
5292
5293 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5294 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5295 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5296
5297 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5298 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5299
5300 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5301 }
5302
5303 static void
5304 bnx2_init_all_rings(struct bnx2 *bp)
5305 {
5306 int i;
5307 u32 val;
5308
5309 bnx2_clear_ring_states(bp);
5310
5311 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5312 for (i = 0; i < bp->num_tx_rings; i++)
5313 bnx2_init_tx_ring(bp, i);
5314
5315 if (bp->num_tx_rings > 1)
5316 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5317 (TX_TSS_CID << 7));
5318
5319 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5320 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5321
5322 for (i = 0; i < bp->num_rx_rings; i++)
5323 bnx2_init_rx_ring(bp, i);
5324
5325 if (bp->num_rx_rings > 1) {
5326 u32 tbl_32 = 0;
5327
5328 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5329 int shift = (i % 8) << 2;
5330
5331 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5332 if ((i % 8) == 7) {
5333 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5334 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5335 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5336 BNX2_RLUP_RSS_COMMAND_WRITE |
5337 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5338 tbl_32 = 0;
5339 }
5340 }
5341
5342 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5343 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5344
5345 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5346
5347 }
5348 }
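
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * the RSS loop above packs eight 4-bit ring indices into each 32-bit
 * word (entry i gets value i % (num_rx_rings - 1), shifted by 4 bits
 * per slot) and issues one register write per full word.  The packing
 * alone is sketched here, with the register write replaced by a
 * caller-supplied output array; "entries" is assumed to be a multiple
 * of eight, as in the hardware table.
 */
static void rss_pack_indirection(unsigned int num_rx_rings,
				 unsigned int entries, unsigned int *out)
{
	unsigned int i, tbl_32 = 0;

	for (i = 0; i < entries; i++) {
		unsigned int shift = (i % 8) << 2;

		tbl_32 |= (i % (num_rx_rings - 1)) << shift;
		if ((i % 8) == 7) {
			out[i >> 3] = tbl_32;
			tbl_32 = 0;
		}
	}
}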
5349
5350 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5351 {
5352 u32 max, num_rings = 1;
5353
5354 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5355 ring_size -= BNX2_MAX_RX_DESC_CNT;
5356 num_rings++;
5357 }
5358 /* round to next power of 2 */
5359 max = max_size;
5360 while ((max & num_rings) == 0)
5361 max >>= 1;
5362
5363 if (num_rings != max)
5364 max <<= 1;
5365
5366 return max;
5367 }
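
/*
 * Editor's note: assuming the request fits within max_size,
 * bnx2_find_max_ring() above returns the number of descriptor pages
 * needed for ring_size entries (each page holding BNX2_MAX_RX_DESC_CNT
 * usable descriptors), rounded up to the next power of two.  For
 * example, a size needing three pages is allocated four.
 */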
5368
5369 static void
5370 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5371 {
5372 u32 rx_size, rx_space, jumbo_size;
5373
5374 /* 8 for CRC and VLAN */
5375 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5376
5377 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5378 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5379
5380 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5381 bp->rx_pg_ring_size = 0;
5382 bp->rx_max_pg_ring = 0;
5383 bp->rx_max_pg_ring_idx = 0;
5384 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5385 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5386
5387 jumbo_size = size * pages;
5388 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5389 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5390
5391 bp->rx_pg_ring_size = jumbo_size;
5392 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5393 BNX2_MAX_RX_PG_RINGS);
5394 bp->rx_max_pg_ring_idx =
5395 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5396 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5397 bp->rx_copy_thresh = 0;
5398 }
5399
5400 bp->rx_buf_use_size = rx_size;
5401 /* hw alignment + build_skb() overhead */
5402 bp->rx_buf_size = kmalloc_size_roundup(
5403 SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5404 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
5405 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5406 bp->rx_ring_size = size;
5407 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5408 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5409 }
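
/*
 * Editor's illustration (standalone sketch, assuming 4 KiB pages and
 * not part of the driver): when a frame no longer fits in one buffer,
 * the payload beyond the headers is spread across full pages.  The
 * per-frame page count mirrors the PAGE_ALIGN(mtu - 40) >> PAGE_SHIFT
 * expression above; the 40 bytes appear to account for a typical
 * IPv4 + TCP header kept in the linear buffer.
 */
static unsigned int rx_pages_per_frame(unsigned int mtu)
{
	unsigned int page_size = 4096;	/* assumed PAGE_SIZE */

	return (mtu - 40 + page_size - 1) / page_size;
}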
5410
5411 static void
5412 bnx2_free_tx_skbs(struct bnx2 *bp)
5413 {
5414 int i;
5415
5416 for (i = 0; i < bp->num_tx_rings; i++) {
5417 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5418 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5419 int j;
5420
5421 if (!txr->tx_buf_ring)
5422 continue;
5423
5424 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5425 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5426 struct sk_buff *skb = tx_buf->skb;
5427 int k, last;
5428
5429 if (!skb) {
5430 j = BNX2_NEXT_TX_BD(j);
5431 continue;
5432 }
5433
5434 dma_unmap_single(&bp->pdev->dev,
5435 dma_unmap_addr(tx_buf, mapping),
5436 skb_headlen(skb),
5437 DMA_TO_DEVICE);
5438
5439 tx_buf->skb = NULL;
5440
5441 last = tx_buf->nr_frags;
5442 j = BNX2_NEXT_TX_BD(j);
5443 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5444 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5445 dma_unmap_page(&bp->pdev->dev,
5446 dma_unmap_addr(tx_buf, mapping),
5447 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5448 DMA_TO_DEVICE);
5449 }
5450 dev_kfree_skb(skb);
5451 }
5452 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5453 }
5454 }
5455
5456 static void
5457 bnx2_free_rx_skbs(struct bnx2 *bp)
5458 {
5459 int i;
5460
5461 for (i = 0; i < bp->num_rx_rings; i++) {
5462 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5463 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5464 int j;
5465
5466 if (!rxr->rx_buf_ring)
5467 return;
5468
5469 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5470 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5471 u8 *data = rx_buf->data;
5472
5473 if (!data)
5474 continue;
5475
5476 dma_unmap_single(&bp->pdev->dev,
5477 dma_unmap_addr(rx_buf, mapping),
5478 bp->rx_buf_use_size,
5479 DMA_FROM_DEVICE);
5480
5481 rx_buf->data = NULL;
5482
5483 kfree(data);
5484 }
5485 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5486 bnx2_free_rx_page(bp, rxr, j);
5487 }
5488 }
5489
5490 static void
5491 bnx2_free_skbs(struct bnx2 *bp)
5492 {
5493 bnx2_free_tx_skbs(bp);
5494 bnx2_free_rx_skbs(bp);
5495 }
5496
5497 static int
5498 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5499 {
5500 int rc;
5501
5502 rc = bnx2_reset_chip(bp, reset_code);
5503 bnx2_free_skbs(bp);
5504 if (rc)
5505 return rc;
5506
5507 if ((rc = bnx2_init_chip(bp)) != 0)
5508 return rc;
5509
5510 bnx2_init_all_rings(bp);
5511 return 0;
5512 }
5513
5514 static int
5515 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5516 {
5517 int rc;
5518
5519 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5520 return rc;
5521
5522 spin_lock_bh(&bp->phy_lock);
5523 bnx2_init_phy(bp, reset_phy);
5524 bnx2_set_link(bp);
5525 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5526 bnx2_remote_phy_event(bp);
5527 spin_unlock_bh(&bp->phy_lock);
5528 return 0;
5529 }
5530
5531 static int
5532 bnx2_shutdown_chip(struct bnx2 *bp)
5533 {
5534 u32 reset_code;
5535
5536 if (bp->flags & BNX2_FLAG_NO_WOL)
5537 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5538 else if (bp->wol)
5539 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5540 else
5541 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5542
5543 return bnx2_reset_chip(bp, reset_code);
5544 }
5545
5546 static int
5547 bnx2_test_registers(struct bnx2 *bp)
5548 {
5549 int ret;
5550 int i, is_5709;
5551 static const struct {
5552 u16 offset;
5553 u16 flags;
5554 #define BNX2_FL_NOT_5709 1
5555 u32 rw_mask;
5556 u32 ro_mask;
5557 } reg_tbl[] = {
5558 { 0x006c, 0, 0x00000000, 0x0000003f },
5559 { 0x0090, 0, 0xffffffff, 0x00000000 },
5560 { 0x0094, 0, 0x00000000, 0x00000000 },
5561
5562 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5563 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5565 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5566 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5567 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5568 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5569 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5570 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5571
5572 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5573 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5574 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5575 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5576 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5577 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5578
5579 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5580 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5581 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5582
5583 { 0x1000, 0, 0x00000000, 0x00000001 },
5584 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5585
5586 { 0x1408, 0, 0x01c00800, 0x00000000 },
5587 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5588 { 0x14a8, 0, 0x00000000, 0x000001ff },
5589 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5590 { 0x14b0, 0, 0x00000002, 0x00000001 },
5591 { 0x14b8, 0, 0x00000000, 0x00000000 },
5592 { 0x14c0, 0, 0x00000000, 0x00000009 },
5593 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5594 { 0x14cc, 0, 0x00000000, 0x00000001 },
5595 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5596
5597 { 0x1800, 0, 0x00000000, 0x00000001 },
5598 { 0x1804, 0, 0x00000000, 0x00000003 },
5599
5600 { 0x2800, 0, 0x00000000, 0x00000001 },
5601 { 0x2804, 0, 0x00000000, 0x00003f01 },
5602 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5603 { 0x2810, 0, 0xffff0000, 0x00000000 },
5604 { 0x2814, 0, 0xffff0000, 0x00000000 },
5605 { 0x2818, 0, 0xffff0000, 0x00000000 },
5606 { 0x281c, 0, 0xffff0000, 0x00000000 },
5607 { 0x2834, 0, 0xffffffff, 0x00000000 },
5608 { 0x2840, 0, 0x00000000, 0xffffffff },
5609 { 0x2844, 0, 0x00000000, 0xffffffff },
5610 { 0x2848, 0, 0xffffffff, 0x00000000 },
5611 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5612
5613 { 0x2c00, 0, 0x00000000, 0x00000011 },
5614 { 0x2c04, 0, 0x00000000, 0x00030007 },
5615
5616 { 0x3c00, 0, 0x00000000, 0x00000001 },
5617 { 0x3c04, 0, 0x00000000, 0x00070000 },
5618 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5619 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5620 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5621 { 0x3c14, 0, 0x00000000, 0xffffffff },
5622 { 0x3c18, 0, 0x00000000, 0xffffffff },
5623 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5624 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5625
5626 { 0x5004, 0, 0x00000000, 0x0000007f },
5627 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5628
5629 { 0x5c00, 0, 0x00000000, 0x00000001 },
5630 { 0x5c04, 0, 0x00000000, 0x0003000f },
5631 { 0x5c08, 0, 0x00000003, 0x00000000 },
5632 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5633 { 0x5c10, 0, 0x00000000, 0xffffffff },
5634 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5635 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5636 { 0x5c88, 0, 0x00000000, 0x00077373 },
5637 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5638
5639 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5640 { 0x680c, 0, 0xffffffff, 0x00000000 },
5641 { 0x6810, 0, 0xffffffff, 0x00000000 },
5642 { 0x6814, 0, 0xffffffff, 0x00000000 },
5643 { 0x6818, 0, 0xffffffff, 0x00000000 },
5644 { 0x681c, 0, 0xffffffff, 0x00000000 },
5645 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5646 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5647 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5648 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5649 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5650 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5651 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5652 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5653 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5654 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5655 { 0x684c, 0, 0xffffffff, 0x00000000 },
5656 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5657 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5658 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5659 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5660 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5661 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5662
5663 { 0xffff, 0, 0x00000000, 0x00000000 },
5664 };
5665
5666 ret = 0;
5667 is_5709 = 0;
5668 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5669 is_5709 = 1;
5670
5671 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5672 u32 offset, rw_mask, ro_mask, save_val, val;
5673 u16 flags = reg_tbl[i].flags;
5674
5675 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5676 continue;
5677
5678 offset = (u32) reg_tbl[i].offset;
5679 rw_mask = reg_tbl[i].rw_mask;
5680 ro_mask = reg_tbl[i].ro_mask;
5681
5682 save_val = readl(bp->regview + offset);
5683
5684 writel(0, bp->regview + offset);
5685
5686 val = readl(bp->regview + offset);
5687 if ((val & rw_mask) != 0) {
5688 goto reg_test_err;
5689 }
5690
5691 if ((val & ro_mask) != (save_val & ro_mask)) {
5692 goto reg_test_err;
5693 }
5694
5695 writel(0xffffffff, bp->regview + offset);
5696
5697 val = readl(bp->regview + offset);
5698 if ((val & rw_mask) != rw_mask) {
5699 goto reg_test_err;
5700 }
5701
5702 if ((val & ro_mask) != (save_val & ro_mask)) {
5703 goto reg_test_err;
5704 }
5705
5706 writel(save_val, bp->regview + offset);
5707 continue;
5708
5709 reg_test_err:
5710 writel(save_val, bp->regview + offset);
5711 ret = -ENODEV;
5712 break;
5713 }
5714 return ret;
5715 }
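
/*
 * Editor's illustration (standalone sketch, not part of the driver):
 * each table entry above is exercised with a write of all zeros and a
 * write of all ones.  Read/write bits (rw_mask) must read back the
 * written value, while read-only bits (ro_mask) must keep the value
 * saved before the test.  Both checks in the loop reduce to:
 */
static int reg_bits_ok(unsigned int readback, unsigned int written,
		       unsigned int saved, unsigned int rw_mask,
		       unsigned int ro_mask)
{
	if ((readback & rw_mask) != (written & rw_mask))
		return 0;
	if ((readback & ro_mask) != (saved & ro_mask))
		return 0;
	return 1;
}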
5716
5717 static int
5718 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5719 {
5720 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5721 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5722 int i;
5723
5724 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5725 u32 offset;
5726
5727 for (offset = 0; offset < size; offset += 4) {
5728
5729 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5730
5731 if (bnx2_reg_rd_ind(bp, start + offset) !=
5732 test_pattern[i]) {
5733 return -ENODEV;
5734 }
5735 }
5736 }
5737 return 0;
5738 }
5739
5740 static int
5741 bnx2_test_memory(struct bnx2 *bp)
5742 {
5743 int ret = 0;
5744 int i;
5745 static struct mem_entry {
5746 u32 offset;
5747 u32 len;
5748 } mem_tbl_5706[] = {
5749 { 0x60000, 0x4000 },
5750 { 0xa0000, 0x3000 },
5751 { 0xe0000, 0x4000 },
5752 { 0x120000, 0x4000 },
5753 { 0x1a0000, 0x4000 },
5754 { 0x160000, 0x4000 },
5755 { 0xffffffff, 0 },
5756 },
5757 mem_tbl_5709[] = {
5758 { 0x60000, 0x4000 },
5759 { 0xa0000, 0x3000 },
5760 { 0xe0000, 0x4000 },
5761 { 0x120000, 0x4000 },
5762 { 0x1a0000, 0x4000 },
5763 { 0xffffffff, 0 },
5764 };
5765 struct mem_entry *mem_tbl;
5766
5767 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5768 mem_tbl = mem_tbl_5709;
5769 else
5770 mem_tbl = mem_tbl_5706;
5771
5772 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5773 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5774 mem_tbl[i].len)) != 0) {
5775 return ret;
5776 }
5777 }
5778
5779 return ret;
5780 }
5781
5782 #define BNX2_MAC_LOOPBACK 0
5783 #define BNX2_PHY_LOOPBACK 1
5784
5785 static int
5786 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5787 {
5788 unsigned int pkt_size, num_pkts, i;
5789 struct sk_buff *skb;
5790 u8 *data;
5791 unsigned char *packet;
5792 u16 rx_start_idx, rx_idx;
5793 dma_addr_t map;
5794 struct bnx2_tx_bd *txbd;
5795 struct bnx2_sw_bd *rx_buf;
5796 struct l2_fhdr *rx_hdr;
5797 int ret = -ENODEV;
5798 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5799 struct bnx2_tx_ring_info *txr;
5800 struct bnx2_rx_ring_info *rxr;
5801
5802 tx_napi = bnapi;
5803
5804 txr = &tx_napi->tx_ring;
5805 rxr = &bnapi->rx_ring;
5806 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5807 bp->loopback = MAC_LOOPBACK;
5808 bnx2_set_mac_loopback(bp);
5809 }
5810 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5811 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5812 return 0;
5813
5814 bp->loopback = PHY_LOOPBACK;
5815 bnx2_set_phy_loopback(bp);
5816 }
5817 else
5818 return -EINVAL;
5819
5820 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5821 skb = netdev_alloc_skb(bp->dev, pkt_size);
5822 if (!skb)
5823 return -ENOMEM;
5824 packet = skb_put(skb, pkt_size);
5825 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5826 memset(packet + ETH_ALEN, 0x0, 8);
5827 for (i = 14; i < pkt_size; i++)
5828 packet[i] = (unsigned char) (i & 0xff);
5829
5830 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5831 DMA_TO_DEVICE);
5832 if (dma_mapping_error(&bp->pdev->dev, map)) {
5833 dev_kfree_skb(skb);
5834 return -EIO;
5835 }
5836
5837 BNX2_WR(bp, BNX2_HC_COMMAND,
5838 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5839
5840 BNX2_RD(bp, BNX2_HC_COMMAND);
5841
5842 udelay(5);
5843 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5844
5845 num_pkts = 0;
5846
5847 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5848
5849 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5850 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5851 txbd->tx_bd_mss_nbytes = pkt_size;
5852 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5853
5854 num_pkts++;
5855 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5856 txr->tx_prod_bseq += pkt_size;
5857
5858 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5859 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5860
5861 udelay(100);
5862
5863 BNX2_WR(bp, BNX2_HC_COMMAND,
5864 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5865
5866 BNX2_RD(bp, BNX2_HC_COMMAND);
5867
5868 udelay(5);
5869
5870 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5871 dev_kfree_skb(skb);
5872
5873 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5874 goto loopback_test_done;
5875
5876 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5877 if (rx_idx != rx_start_idx + num_pkts) {
5878 goto loopback_test_done;
5879 }
5880
5881 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5882 data = rx_buf->data;
5883
5884 rx_hdr = get_l2_fhdr(data);
5885 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5886
5887 dma_sync_single_for_cpu(&bp->pdev->dev,
5888 dma_unmap_addr(rx_buf, mapping),
5889 bp->rx_buf_use_size, DMA_FROM_DEVICE);
5890
5891 if (rx_hdr->l2_fhdr_status &
5892 (L2_FHDR_ERRORS_BAD_CRC |
5893 L2_FHDR_ERRORS_PHY_DECODE |
5894 L2_FHDR_ERRORS_ALIGNMENT |
5895 L2_FHDR_ERRORS_TOO_SHORT |
5896 L2_FHDR_ERRORS_GIANT_FRAME)) {
5897
5898 goto loopback_test_done;
5899 }
5900
5901 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5902 goto loopback_test_done;
5903 }
5904
5905 for (i = 14; i < pkt_size; i++) {
5906 if (*(data + i) != (unsigned char) (i & 0xff)) {
5907 goto loopback_test_done;
5908 }
5909 }
5910
5911 ret = 0;
5912
5913 loopback_test_done:
5914 bp->loopback = 0;
5915 return ret;
5916 }
5917
5918 #define BNX2_MAC_LOOPBACK_FAILED 1
5919 #define BNX2_PHY_LOOPBACK_FAILED 2
5920 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5921 BNX2_PHY_LOOPBACK_FAILED)
5922
5923 static int
5924 bnx2_test_loopback(struct bnx2 *bp)
5925 {
5926 int rc = 0;
5927
5928 if (!netif_running(bp->dev))
5929 return BNX2_LOOPBACK_FAILED;
5930
5931 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5932 spin_lock_bh(&bp->phy_lock);
5933 bnx2_init_phy(bp, 1);
5934 spin_unlock_bh(&bp->phy_lock);
5935 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5936 rc |= BNX2_MAC_LOOPBACK_FAILED;
5937 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5938 rc |= BNX2_PHY_LOOPBACK_FAILED;
5939 return rc;
5940 }
5941
5942 #define NVRAM_SIZE 0x200
5943 #define CRC32_RESIDUAL 0xdebb20e3
5944
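/* Sanity-check the NVRAM contents: the manufacturing magic at offset 0 must
 * read 0x669955aa, and each 256-byte half of the block at offset 0x100 must
 * carry a valid CRC32, i.e. ether_crc_le() over data plus CRC yields the
 * standard residual.
 */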
5945 static int
5946 bnx2_test_nvram(struct bnx2 *bp)
5947 {
5948 __be32 buf[NVRAM_SIZE / 4];
5949 u8 *data = (u8 *) buf;
5950 int rc = 0;
5951 u32 magic, csum;
5952
5953 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5954 goto test_nvram_done;
5955
5956 magic = be32_to_cpu(buf[0]);
5957 if (magic != 0x669955aa) {
5958 rc = -ENODEV;
5959 goto test_nvram_done;
5960 }
5961
5962 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5963 goto test_nvram_done;
5964
5965 csum = ether_crc_le(0x100, data);
5966 if (csum != CRC32_RESIDUAL) {
5967 rc = -ENODEV;
5968 goto test_nvram_done;
5969 }
5970
5971 csum = ether_crc_le(0x100, data + 0x100);
5972 if (csum != CRC32_RESIDUAL) {
5973 rc = -ENODEV;
5974 }
5975
5976 test_nvram_done:
5977 return rc;
5978 }
5979
5980 static int
5981 bnx2_test_link(struct bnx2 *bp)
5982 {
5983 u32 bmsr;
5984
5985 if (!netif_running(bp->dev))
5986 return -ENODEV;
5987
5988 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5989 if (bp->link_up)
5990 return 0;
5991 return -ENODEV;
5992 }
5993 spin_lock_bh(&bp->phy_lock);
5994 bnx2_enable_bmsr1(bp);
5995 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5996 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5997 bnx2_disable_bmsr1(bp);
5998 spin_unlock_bh(&bp->phy_lock);
5999
6000 if (bmsr & BMSR_LSTATUS) {
6001 return 0;
6002 }
6003 return -ENODEV;
6004 }
6005
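/* Verify that the host coalescing block can actually raise an interrupt:
 * latch the current status index, force a COAL_NOW event and poll for up to
 * ~100 ms (10 x 10 ms) for the index to move.
 */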
6006 static int
6007 bnx2_test_intr(struct bnx2 *bp)
6008 {
6009 int i;
6010 u16 status_idx;
6011
6012 if (!netif_running(bp->dev))
6013 return -ENODEV;
6014
6015 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6016
6017 /* This register is not touched during run-time. */
6018 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6019 BNX2_RD(bp, BNX2_HC_COMMAND);
6020
6021 for (i = 0; i < 10; i++) {
6022 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6023 status_idx) {
6024
6025 break;
6026 }
6027
6028 msleep_interruptible(10);
6029 }
6030 if (i < 10)
6031 return 0;
6032
6033 return -ENODEV;
6034 }
6035
6036 /* Determine link state for parallel detection. */
6037 static int
6038 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6039 {
6040 u32 mode_ctl, an_dbg, exp;
6041
6042 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6043 return 0;
6044
6045 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6046 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6047
6048 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6049 return 0;
6050
6051 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6052 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6053 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6054
6055 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6056 return 0;
6057
6058 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6059 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6060 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6061
6062 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6063 return 0;
6064
6065 return 1;
6066 }
6067
6068 static void
6069 bnx2_5706_serdes_timer(struct bnx2 *bp)
6070 {
6071 int check_link = 1;
6072
6073 spin_lock(&bp->phy_lock);
6074 if (bp->serdes_an_pending) {
6075 bp->serdes_an_pending--;
6076 check_link = 0;
6077 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6078 u32 bmcr;
6079
6080 bp->current_interval = BNX2_TIMER_INTERVAL;
6081
6082 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6083
6084 if (bmcr & BMCR_ANENABLE) {
6085 if (bnx2_5706_serdes_has_link(bp)) {
6086 bmcr &= ~BMCR_ANENABLE;
6087 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6088 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6089 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6090 }
6091 }
6092 }
6093 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6094 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6095 u32 phy2;
6096
6097 bnx2_write_phy(bp, 0x17, 0x0f01);
6098 bnx2_read_phy(bp, 0x15, &phy2);
6099 if (phy2 & 0x20) {
6100 u32 bmcr;
6101
6102 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6103 bmcr |= BMCR_ANENABLE;
6104 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6105
6106 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6107 }
6108 } else
6109 bp->current_interval = BNX2_TIMER_INTERVAL;
6110
6111 if (check_link) {
6112 u32 val;
6113
6114 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6115 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6116 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6117
6118 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6119 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6120 bnx2_5706s_force_link_dn(bp, 1);
6121 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6122 } else
6123 bnx2_set_link(bp);
6124 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6125 bnx2_set_link(bp);
6126 }
6127 spin_unlock(&bp->phy_lock);
6128 }
6129
6130 static void
6131 bnx2_5708_serdes_timer(struct bnx2 *bp)
6132 {
6133 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6134 return;
6135
6136 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6137 bp->serdes_an_pending = 0;
6138 return;
6139 }
6140
6141 spin_lock(&bp->phy_lock);
6142 if (bp->serdes_an_pending)
6143 bp->serdes_an_pending--;
6144 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6145 u32 bmcr;
6146
6147 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6148 if (bmcr & BMCR_ANENABLE) {
6149 bnx2_enable_forced_2g5(bp);
6150 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6151 } else {
6152 bnx2_disable_forced_2g5(bp);
6153 bp->serdes_an_pending = 2;
6154 bp->current_interval = BNX2_TIMER_INTERVAL;
6155 }
6156
6157 } else
6158 bp->current_interval = BNX2_TIMER_INTERVAL;
6159
6160 spin_unlock(&bp->phy_lock);
6161 }
6162
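/* Periodic (current_interval) housekeeping: check for missed MSI events,
 * send the management firmware heartbeat, refresh the firmware RX drop
 * counter, kick the statistics block on chips with broken stats, and run the
 * SerDes link state machines before re-arming the timer.
 */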
6163 static void
6164 bnx2_timer(struct timer_list *t)
6165 {
6166 struct bnx2 *bp = timer_container_of(bp, t, timer);
6167
6168 if (!netif_running(bp->dev))
6169 return;
6170
6171 if (atomic_read(&bp->intr_sem) != 0)
6172 goto bnx2_restart_timer;
6173
6174 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6175 BNX2_FLAG_USING_MSI)
6176 bnx2_chk_missed_msi(bp);
6177
6178 bnx2_send_heart_beat(bp);
6179
6180 bp->stats_blk->stat_FwRxDrop =
6181 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6182
6183 	/* work around occasional corrupted counters */
6184 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6185 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6186 BNX2_HC_COMMAND_STATS_NOW);
6187
6188 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6189 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6190 bnx2_5706_serdes_timer(bp);
6191 else
6192 bnx2_5708_serdes_timer(bp);
6193 }
6194
6195 bnx2_restart_timer:
6196 mod_timer(&bp->timer, jiffies + bp->current_interval);
6197 }
6198
6199 static int
6200 bnx2_request_irq(struct bnx2 *bp)
6201 {
6202 unsigned long flags;
6203 struct bnx2_irq *irq;
6204 int rc = 0, i;
6205
6206 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6207 flags = 0;
6208 else
6209 flags = IRQF_SHARED;
6210
6211 for (i = 0; i < bp->irq_nvecs; i++) {
6212 irq = &bp->irq_tbl[i];
6213 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6214 &bp->bnx2_napi[i]);
6215 if (rc)
6216 break;
6217 irq->requested = 1;
6218 }
6219 return rc;
6220 }
6221
6222 static void
6223 __bnx2_free_irq(struct bnx2 *bp)
6224 {
6225 struct bnx2_irq *irq;
6226 int i;
6227
6228 for (i = 0; i < bp->irq_nvecs; i++) {
6229 irq = &bp->irq_tbl[i];
6230 if (irq->requested)
6231 free_irq(irq->vector, &bp->bnx2_napi[i]);
6232 irq->requested = 0;
6233 }
6234 }
6235
6236 static void
6237 bnx2_free_irq(struct bnx2 *bp)
6238 {
6239
6240 __bnx2_free_irq(bp);
6241 if (bp->flags & BNX2_FLAG_USING_MSI)
6242 pci_disable_msi(bp->pdev);
6243 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6244 pci_disable_msix(bp->pdev);
6245
6246 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6247 }
6248
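/* Program the MSI-X table/PBA windows and try to allocate up to msix_vecs
 * vectors (plus one for CNIC when it is built in).  On success the irq_tbl
 * entries are filled in with the one-shot MSI handler; on failure the caller
 * quietly falls back to MSI or INTx.
 */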
6249 static void
6250 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6251 {
6252 int i, total_vecs;
6253 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6254 struct net_device *dev = bp->dev;
6255 const int len = sizeof(bp->irq_tbl[0].name);
6256
6257 bnx2_setup_msix_tbl(bp);
6258 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6259 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6260 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6261
6262 	/* Need to flush the previous three writes to ensure MSI-X
6263 	 * is set up properly */
6264 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6265
6266 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6267 msix_ent[i].entry = i;
6268 msix_ent[i].vector = 0;
6269 }
6270
6271 total_vecs = msix_vecs;
6272 #ifdef BCM_CNIC
6273 total_vecs++;
6274 #endif
6275 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6276 BNX2_MIN_MSIX_VEC, total_vecs);
6277 if (total_vecs < 0)
6278 return;
6279
6280 msix_vecs = total_vecs;
6281 #ifdef BCM_CNIC
6282 msix_vecs--;
6283 #endif
6284 bp->irq_nvecs = msix_vecs;
6285 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6286 for (i = 0; i < total_vecs; i++) {
6287 bp->irq_tbl[i].vector = msix_ent[i].vector;
6288 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6289 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6290 }
6291 }
6292
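/* Pick the interrupt mode (MSI-X, then MSI, then legacy INTx as fallbacks)
 * and derive the number of TX/RX rings from the vectors obtained and any
 * user-requested ring counts.
 */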
6293 static int
6294 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6295 {
6296 int cpus = netif_get_num_default_rss_queues();
6297 int msix_vecs;
6298
6299 if (!bp->num_req_rx_rings)
6300 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6301 else if (!bp->num_req_tx_rings)
6302 msix_vecs = max(cpus, bp->num_req_rx_rings);
6303 else
6304 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6305
6306 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6307
6308 bp->irq_tbl[0].handler = bnx2_interrupt;
6309 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6310 bp->irq_nvecs = 1;
6311 bp->irq_tbl[0].vector = bp->pdev->irq;
6312
6313 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6314 bnx2_enable_msix(bp, msix_vecs);
6315
6316 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6317 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6318 if (pci_enable_msi(bp->pdev) == 0) {
6319 bp->flags |= BNX2_FLAG_USING_MSI;
6320 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6321 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6322 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6323 } else
6324 bp->irq_tbl[0].handler = bnx2_msi;
6325
6326 bp->irq_tbl[0].vector = bp->pdev->irq;
6327 }
6328 }
6329
6330 if (!bp->num_req_tx_rings)
6331 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6332 else
6333 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6334
6335 if (!bp->num_req_rx_rings)
6336 bp->num_rx_rings = bp->irq_nvecs;
6337 else
6338 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6339
6340 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6341
6342 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6343 }
6344
6345 /* Called with rtnl_lock */
6346 static int
6347 bnx2_open(struct net_device *dev)
6348 {
6349 struct bnx2 *bp = netdev_priv(dev);
6350 int rc;
6351
6352 rc = bnx2_request_firmware(bp);
6353 if (rc < 0)
6354 goto out;
6355
6356 netif_carrier_off(dev);
6357
6358 bnx2_disable_int(bp);
6359
6360 rc = bnx2_setup_int_mode(bp, disable_msi);
6361 if (rc)
6362 goto open_err;
6363 bnx2_init_napi(bp);
6364 bnx2_napi_enable(bp);
6365 rc = bnx2_alloc_mem(bp);
6366 if (rc)
6367 goto open_err;
6368
6369 rc = bnx2_request_irq(bp);
6370 if (rc)
6371 goto open_err;
6372
6373 rc = bnx2_init_nic(bp, 1);
6374 if (rc)
6375 goto open_err;
6376
6377 mod_timer(&bp->timer, jiffies + bp->current_interval);
6378
6379 atomic_set(&bp->intr_sem, 0);
6380
6381 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6382
6383 bnx2_enable_int(bp);
6384
6385 if (bp->flags & BNX2_FLAG_USING_MSI) {
6386 		/* Test MSI to make sure it is working.
6387 		 * If the MSI test fails, go back to INTx mode.
6388 		 */
6389 if (bnx2_test_intr(bp) != 0) {
6390 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6391
6392 bnx2_disable_int(bp);
6393 bnx2_free_irq(bp);
6394
6395 bnx2_setup_int_mode(bp, 1);
6396
6397 rc = bnx2_init_nic(bp, 0);
6398
6399 if (!rc)
6400 rc = bnx2_request_irq(bp);
6401
6402 if (rc) {
6403 timer_delete_sync(&bp->timer);
6404 goto open_err;
6405 }
6406 bnx2_enable_int(bp);
6407 }
6408 }
6409 if (bp->flags & BNX2_FLAG_USING_MSI)
6410 netdev_info(dev, "using MSI\n");
6411 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6412 netdev_info(dev, "using MSIX\n");
6413
6414 netif_tx_start_all_queues(dev);
6415 out:
6416 return rc;
6417
6418 open_err:
6419 bnx2_napi_disable(bp);
6420 bnx2_free_skbs(bp);
6421 bnx2_free_irq(bp);
6422 bnx2_free_mem(bp);
6423 bnx2_del_napi(bp);
6424 bnx2_release_firmware(bp);
6425 goto out;
6426 }
6427
6428 static void
6429 bnx2_reset_task(struct work_struct *work)
6430 {
6431 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6432 int rc;
6433 u16 pcicmd;
6434
6435 rtnl_lock();
6436 if (!netif_running(bp->dev)) {
6437 rtnl_unlock();
6438 return;
6439 }
6440
6441 bnx2_netif_stop(bp, true);
6442
6443 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6444 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6445 /* in case PCI block has reset */
6446 pci_restore_state(bp->pdev);
6447 }
6448 rc = bnx2_init_nic(bp, 1);
6449 if (rc) {
6450 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6451 bnx2_napi_enable(bp);
6452 dev_close(bp->dev);
6453 rtnl_unlock();
6454 return;
6455 }
6456
6457 atomic_set(&bp->intr_sem, 1);
6458 bnx2_netif_start(bp, true);
6459 rtnl_unlock();
6460 }
6461
6462 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6463
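/* Debug dump used on TX timeout: print the flow-through queue control
 * registers, the per-CPU mode/state/PC registers and the TBDC CAM contents.
 */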
6464 static void
6465 bnx2_dump_ftq(struct bnx2 *bp)
6466 {
6467 int i;
6468 u32 reg, bdidx, cid, valid;
6469 struct net_device *dev = bp->dev;
6470 static const struct ftq_reg {
6471 char *name;
6472 u32 off;
6473 } ftq_arr[] = {
6474 BNX2_FTQ_ENTRY(RV2P_P),
6475 BNX2_FTQ_ENTRY(RV2P_T),
6476 BNX2_FTQ_ENTRY(RV2P_M),
6477 BNX2_FTQ_ENTRY(TBDR_),
6478 BNX2_FTQ_ENTRY(TDMA_),
6479 BNX2_FTQ_ENTRY(TXP_),
6480 BNX2_FTQ_ENTRY(TXP_),
6481 BNX2_FTQ_ENTRY(TPAT_),
6482 BNX2_FTQ_ENTRY(RXP_C),
6483 BNX2_FTQ_ENTRY(RXP_),
6484 BNX2_FTQ_ENTRY(COM_COMXQ_),
6485 BNX2_FTQ_ENTRY(COM_COMTQ_),
6486 BNX2_FTQ_ENTRY(COM_COMQ_),
6487 BNX2_FTQ_ENTRY(CP_CPQ_),
6488 };
6489
6490 netdev_err(dev, "<--- start FTQ dump --->\n");
6491 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6492 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6493 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6494
6495 netdev_err(dev, "CPU states:\n");
6496 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6497 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6498 reg, bnx2_reg_rd_ind(bp, reg),
6499 bnx2_reg_rd_ind(bp, reg + 4),
6500 bnx2_reg_rd_ind(bp, reg + 8),
6501 bnx2_reg_rd_ind(bp, reg + 0x1c),
6502 bnx2_reg_rd_ind(bp, reg + 0x1c),
6503 bnx2_reg_rd_ind(bp, reg + 0x20));
6504
6505 netdev_err(dev, "<--- end FTQ dump --->\n");
6506 netdev_err(dev, "<--- start TBDC dump --->\n");
6507 netdev_err(dev, "TBDC free cnt: %ld\n",
6508 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6509 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6510 for (i = 0; i < 0x20; i++) {
6511 int j = 0;
6512
6513 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6514 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6515 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6516 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6517 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6518 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6519 j++;
6520
6521 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6522 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6523 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6524 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6525 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6526 bdidx >> 24, (valid >> 8) & 0x0ff);
6527 }
6528 netdev_err(dev, "<--- end TBDC dump --->\n");
6529 }
6530
6531 static void
6532 bnx2_dump_state(struct bnx2 *bp)
6533 {
6534 struct net_device *dev = bp->dev;
6535 u32 val1, val2;
6536
6537 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6538 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6539 atomic_read(&bp->intr_sem), val1);
6540 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6541 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6542 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6543 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6544 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6545 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6546 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6547 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6548 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6549 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6550 if (bp->flags & BNX2_FLAG_USING_MSIX)
6551 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6552 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6553 }
6554
6555 static void
6556 bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6557 {
6558 struct bnx2 *bp = netdev_priv(dev);
6559
6560 bnx2_dump_ftq(bp);
6561 bnx2_dump_state(bp);
6562 bnx2_dump_mcp_state(bp);
6563
6564 	/* This allows the netif to be shut down gracefully before resetting */
6565 schedule_work(&bp->reset_task);
6566 }
6567
6568 /* Called with netif_tx_lock.
6569 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6570 * netif_wake_queue().
6571 */
6572 static netdev_tx_t
6573 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6574 {
6575 struct bnx2 *bp = netdev_priv(dev);
6576 dma_addr_t mapping;
6577 struct bnx2_tx_bd *txbd;
6578 struct bnx2_sw_tx_bd *tx_buf;
6579 u32 len, vlan_tag_flags, last_frag, mss;
6580 u16 prod, ring_prod;
6581 int i;
6582 struct bnx2_napi *bnapi;
6583 struct bnx2_tx_ring_info *txr;
6584 struct netdev_queue *txq;
6585
6586 /* Determine which tx ring we will be placed on */
6587 i = skb_get_queue_mapping(skb);
6588 bnapi = &bp->bnx2_napi[i];
6589 txr = &bnapi->tx_ring;
6590 txq = netdev_get_tx_queue(dev, i);
6591
6592 if (unlikely(bnx2_tx_avail(bp, txr) <
6593 (skb_shinfo(skb)->nr_frags + 1))) {
6594 netif_tx_stop_queue(txq);
6595 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6596
6597 return NETDEV_TX_BUSY;
6598 }
6599 len = skb_headlen(skb);
6600 prod = txr->tx_prod;
6601 ring_prod = BNX2_TX_RING_IDX(prod);
6602
6603 vlan_tag_flags = 0;
6604 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6605 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6606 }
6607
6608 if (skb_vlan_tag_present(skb)) {
6609 vlan_tag_flags |=
6610 (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6611 }
6612
6613 if ((mss = skb_shinfo(skb)->gso_size)) {
6614 u32 tcp_opt_len;
6615 struct iphdr *iph;
6616
6617 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6618
6619 tcp_opt_len = tcp_optlen(skb);
6620
6621 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6622 u32 tcp_off = skb_transport_offset(skb) -
6623 sizeof(struct ipv6hdr) - ETH_HLEN;
6624
6625 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6626 TX_BD_FLAGS_SW_FLAGS;
6627 if (likely(tcp_off == 0))
6628 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6629 else {
6630 tcp_off >>= 3;
6631 vlan_tag_flags |= ((tcp_off & 0x3) <<
6632 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6633 ((tcp_off & 0x10) <<
6634 TX_BD_FLAGS_TCP6_OFF4_SHL);
6635 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6636 }
6637 } else {
6638 iph = ip_hdr(skb);
6639 if (tcp_opt_len || (iph->ihl > 5)) {
6640 vlan_tag_flags |= ((iph->ihl - 5) +
6641 (tcp_opt_len >> 2)) << 8;
6642 }
6643 }
6644 } else
6645 mss = 0;
6646
6647 mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
6648 DMA_TO_DEVICE);
6649 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6650 dev_kfree_skb_any(skb);
6651 return NETDEV_TX_OK;
6652 }
6653
6654 tx_buf = &txr->tx_buf_ring[ring_prod];
6655 tx_buf->skb = skb;
6656 dma_unmap_addr_set(tx_buf, mapping, mapping);
6657
6658 txbd = &txr->tx_desc_ring[ring_prod];
6659
6660 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6661 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6662 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6663 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6664
6665 last_frag = skb_shinfo(skb)->nr_frags;
6666 tx_buf->nr_frags = last_frag;
6667 tx_buf->is_gso = skb_is_gso(skb);
6668
6669 for (i = 0; i < last_frag; i++) {
6670 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6671
6672 prod = BNX2_NEXT_TX_BD(prod);
6673 ring_prod = BNX2_TX_RING_IDX(prod);
6674 txbd = &txr->tx_desc_ring[ring_prod];
6675
6676 len = skb_frag_size(frag);
6677 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6678 DMA_TO_DEVICE);
6679 if (dma_mapping_error(&bp->pdev->dev, mapping))
6680 goto dma_error;
6681 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6682 mapping);
6683
6684 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6685 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6686 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6687 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6688
6689 }
6690 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6691
6692 /* Sync BD data before updating TX mailbox */
6693 wmb();
6694
6695 netdev_tx_sent_queue(txq, skb->len);
6696
6697 prod = BNX2_NEXT_TX_BD(prod);
6698 txr->tx_prod_bseq += skb->len;
6699
6700 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6701 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6702
6703 txr->tx_prod = prod;
6704
6705 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6706 netif_tx_stop_queue(txq);
6707
6708 /* netif_tx_stop_queue() must be done before checking
6709 * tx index in bnx2_tx_avail() below, because in
6710 * bnx2_tx_int(), we update tx index before checking for
6711 * netif_tx_queue_stopped().
6712 */
6713 smp_mb();
6714 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6715 netif_tx_wake_queue(txq);
6716 }
6717
6718 return NETDEV_TX_OK;
6719 dma_error:
6720 /* save value of frag that failed */
6721 last_frag = i;
6722
6723 /* start back at beginning and unmap skb */
6724 prod = txr->tx_prod;
6725 ring_prod = BNX2_TX_RING_IDX(prod);
6726 tx_buf = &txr->tx_buf_ring[ring_prod];
6727 tx_buf->skb = NULL;
6728 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6729 skb_headlen(skb), DMA_TO_DEVICE);
6730
6731 /* unmap remaining mapped pages */
6732 for (i = 0; i < last_frag; i++) {
6733 prod = BNX2_NEXT_TX_BD(prod);
6734 ring_prod = BNX2_TX_RING_IDX(prod);
6735 tx_buf = &txr->tx_buf_ring[ring_prod];
6736 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6737 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6738 DMA_TO_DEVICE);
6739 }
6740
6741 dev_kfree_skb_any(skb);
6742 return NETDEV_TX_OK;
6743 }
6744
6745 /* Called with rtnl_lock */
6746 static int
6747 bnx2_close(struct net_device *dev)
6748 {
6749 struct bnx2 *bp = netdev_priv(dev);
6750
6751 bnx2_disable_int_sync(bp);
6752 bnx2_napi_disable(bp);
6753 netif_tx_disable(dev);
6754 timer_delete_sync(&bp->timer);
6755 bnx2_shutdown_chip(bp);
6756 bnx2_free_irq(bp);
6757 bnx2_free_skbs(bp);
6758 bnx2_free_mem(bp);
6759 bnx2_del_napi(bp);
6760 bp->link_up = 0;
6761 netif_carrier_off(bp->dev);
6762 return 0;
6763 }
6764
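/* Accumulate the hardware statistics block into temp_stats_blk before a chip
 * reset wipes it.  The first ten counters are 64-bit hi/lo pairs, so carries
 * from the low word are propagated by hand; the rest are plain 32-bit adds.
 */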
6765 static void
6766 bnx2_save_stats(struct bnx2 *bp)
6767 {
6768 u32 *hw_stats = (u32 *) bp->stats_blk;
6769 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6770 int i;
6771
6772 /* The 1st 10 counters are 64-bit counters */
6773 for (i = 0; i < 20; i += 2) {
6774 u32 hi;
6775 u64 lo;
6776
6777 hi = temp_stats[i] + hw_stats[i];
6778 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6779 if (lo > 0xffffffff)
6780 hi++;
6781 temp_stats[i] = hi;
6782 temp_stats[i + 1] = lo & 0xffffffff;
6783 }
6784
6785 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6786 temp_stats[i] += hw_stats[i];
6787 }
6788
6789 #define GET_64BIT_NET_STATS64(ctr) \
6790 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6791
6792 #define GET_64BIT_NET_STATS(ctr) \
6793 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6794 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6795
6796 #define GET_32BIT_NET_STATS(ctr) \
6797 (unsigned long) (bp->stats_blk->ctr + \
6798 bp->temp_stats_blk->ctr)
6799
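/* The GET_*_NET_STATS() helpers above fold together the live hardware block
 * and the counters saved across resets in temp_stats_blk.
 */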
6800 static void
6801 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6802 {
6803 struct bnx2 *bp = netdev_priv(dev);
6804
6805 if (!bp->stats_blk)
6806 return;
6807
6808 net_stats->rx_packets =
6809 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6810 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6811 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6812
6813 net_stats->tx_packets =
6814 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6815 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6816 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6817
6818 net_stats->rx_bytes =
6819 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6820
6821 net_stats->tx_bytes =
6822 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6823
6824 net_stats->multicast =
6825 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6826
6827 net_stats->collisions =
6828 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6829
6830 net_stats->rx_length_errors =
6831 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6832 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6833
6834 net_stats->rx_over_errors =
6835 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6836 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6837
6838 net_stats->rx_frame_errors =
6839 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6840
6841 net_stats->rx_crc_errors =
6842 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6843
6844 net_stats->rx_errors = net_stats->rx_length_errors +
6845 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6846 net_stats->rx_crc_errors;
6847
6848 net_stats->tx_aborted_errors =
6849 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6850 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6851
6852 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6853 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6854 net_stats->tx_carrier_errors = 0;
6855 else {
6856 net_stats->tx_carrier_errors =
6857 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6858 }
6859
6860 net_stats->tx_errors =
6861 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6862 net_stats->tx_aborted_errors +
6863 net_stats->tx_carrier_errors;
6864
6865 net_stats->rx_missed_errors =
6866 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6867 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6868 GET_32BIT_NET_STATS(stat_FwRxDrop);
6869
6870 }
6871
6872 /* All ethtool functions called with rtnl_lock */
6873
6874 static int
6875 bnx2_get_link_ksettings(struct net_device *dev,
6876 struct ethtool_link_ksettings *cmd)
6877 {
6878 struct bnx2 *bp = netdev_priv(dev);
6879 int support_serdes = 0, support_copper = 0;
6880 u32 supported, advertising;
6881
6882 supported = SUPPORTED_Autoneg;
6883 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6884 support_serdes = 1;
6885 support_copper = 1;
6886 } else if (bp->phy_port == PORT_FIBRE)
6887 support_serdes = 1;
6888 else
6889 support_copper = 1;
6890
6891 if (support_serdes) {
6892 supported |= SUPPORTED_1000baseT_Full |
6893 SUPPORTED_FIBRE;
6894 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6895 supported |= SUPPORTED_2500baseX_Full;
6896 }
6897 if (support_copper) {
6898 supported |= SUPPORTED_10baseT_Half |
6899 SUPPORTED_10baseT_Full |
6900 SUPPORTED_100baseT_Half |
6901 SUPPORTED_100baseT_Full |
6902 SUPPORTED_1000baseT_Full |
6903 SUPPORTED_TP;
6904 }
6905
6906 spin_lock_bh(&bp->phy_lock);
6907 cmd->base.port = bp->phy_port;
6908 advertising = bp->advertising;
6909
6910 if (bp->autoneg & AUTONEG_SPEED) {
6911 cmd->base.autoneg = AUTONEG_ENABLE;
6912 } else {
6913 cmd->base.autoneg = AUTONEG_DISABLE;
6914 }
6915
6916 if (netif_carrier_ok(dev)) {
6917 cmd->base.speed = bp->line_speed;
6918 cmd->base.duplex = bp->duplex;
6919 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6920 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6921 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6922 else
6923 cmd->base.eth_tp_mdix = ETH_TP_MDI;
6924 }
6925 }
6926 else {
6927 cmd->base.speed = SPEED_UNKNOWN;
6928 cmd->base.duplex = DUPLEX_UNKNOWN;
6929 }
6930 spin_unlock_bh(&bp->phy_lock);
6931
6932 cmd->base.phy_address = bp->phy_addr;
6933
6934 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6935 supported);
6936 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6937 advertising);
6938
6939 return 0;
6940 }
6941
6942 static int
6943 bnx2_set_link_ksettings(struct net_device *dev,
6944 const struct ethtool_link_ksettings *cmd)
6945 {
6946 struct bnx2 *bp = netdev_priv(dev);
6947 u8 autoneg = bp->autoneg;
6948 u8 req_duplex = bp->req_duplex;
6949 u16 req_line_speed = bp->req_line_speed;
6950 u32 advertising = bp->advertising;
6951 int err = -EINVAL;
6952
6953 spin_lock_bh(&bp->phy_lock);
6954
6955 if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6956 goto err_out_unlock;
6957
6958 if (cmd->base.port != bp->phy_port &&
6959 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6960 goto err_out_unlock;
6961
6962 /* If device is down, we can store the settings only if the user
6963 * is setting the currently active port.
6964 */
6965 if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6966 goto err_out_unlock;
6967
6968 if (cmd->base.autoneg == AUTONEG_ENABLE) {
6969 autoneg |= AUTONEG_SPEED;
6970
6971 ethtool_convert_link_mode_to_legacy_u32(
6972 &advertising, cmd->link_modes.advertising);
6973
6974 if (cmd->base.port == PORT_TP) {
6975 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6976 if (!advertising)
6977 advertising = ETHTOOL_ALL_COPPER_SPEED;
6978 } else {
6979 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6980 if (!advertising)
6981 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6982 }
6983 advertising |= ADVERTISED_Autoneg;
6984 }
6985 else {
6986 u32 speed = cmd->base.speed;
6987
6988 if (cmd->base.port == PORT_FIBRE) {
6989 if ((speed != SPEED_1000 &&
6990 speed != SPEED_2500) ||
6991 (cmd->base.duplex != DUPLEX_FULL))
6992 goto err_out_unlock;
6993
6994 if (speed == SPEED_2500 &&
6995 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6996 goto err_out_unlock;
6997 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6998 goto err_out_unlock;
6999
7000 autoneg &= ~AUTONEG_SPEED;
7001 req_line_speed = speed;
7002 req_duplex = cmd->base.duplex;
7003 advertising = 0;
7004 }
7005
7006 bp->autoneg = autoneg;
7007 bp->advertising = advertising;
7008 bp->req_line_speed = req_line_speed;
7009 bp->req_duplex = req_duplex;
7010
7011 err = 0;
7012 /* If device is down, the new settings will be picked up when it is
7013 * brought up.
7014 */
7015 if (netif_running(dev))
7016 err = bnx2_setup_phy(bp, cmd->base.port);
7017
7018 err_out_unlock:
7019 spin_unlock_bh(&bp->phy_lock);
7020
7021 return err;
7022 }
7023
7024 static void
7025 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7026 {
7027 struct bnx2 *bp = netdev_priv(dev);
7028
7029 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7030 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7031 strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7032 }
7033
7034 #define BNX2_REGDUMP_LEN (32 * 1024)
7035
7036 static int
7037 bnx2_get_regs_len(struct net_device *dev)
7038 {
7039 return BNX2_REGDUMP_LEN;
7040 }
7041
7042 static void
7043 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7044 {
7045 u32 *p = _p, i, offset;
7046 u8 *orig_p = _p;
7047 struct bnx2 *bp = netdev_priv(dev);
7048 static const u32 reg_boundaries[] = {
7049 0x0000, 0x0098, 0x0400, 0x045c,
7050 0x0800, 0x0880, 0x0c00, 0x0c10,
7051 0x0c30, 0x0d08, 0x1000, 0x101c,
7052 0x1040, 0x1048, 0x1080, 0x10a4,
7053 0x1400, 0x1490, 0x1498, 0x14f0,
7054 0x1500, 0x155c, 0x1580, 0x15dc,
7055 0x1600, 0x1658, 0x1680, 0x16d8,
7056 0x1800, 0x1820, 0x1840, 0x1854,
7057 0x1880, 0x1894, 0x1900, 0x1984,
7058 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7059 0x1c80, 0x1c94, 0x1d00, 0x1d84,
7060 0x2000, 0x2030, 0x23c0, 0x2400,
7061 0x2800, 0x2820, 0x2830, 0x2850,
7062 0x2b40, 0x2c10, 0x2fc0, 0x3058,
7063 0x3c00, 0x3c94, 0x4000, 0x4010,
7064 0x4080, 0x4090, 0x43c0, 0x4458,
7065 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7066 0x4fc0, 0x5010, 0x53c0, 0x5444,
7067 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7068 0x5fc0, 0x6000, 0x6400, 0x6428,
7069 0x6800, 0x6848, 0x684c, 0x6860,
7070 0x6888, 0x6910, 0x8000
7071 };
7072
7073 regs->version = 0;
7074
7075 memset(p, 0, BNX2_REGDUMP_LEN);
7076
7077 if (!netif_running(bp->dev))
7078 return;
7079
7080 i = 0;
7081 offset = reg_boundaries[0];
7082 p += offset;
7083 while (offset < BNX2_REGDUMP_LEN) {
7084 *p++ = BNX2_RD(bp, offset);
7085 offset += 4;
7086 if (offset == reg_boundaries[i + 1]) {
7087 offset = reg_boundaries[i + 2];
7088 p = (u32 *) (orig_p + offset);
7089 i += 2;
7090 }
7091 }
7092 }
7093
7094 static void
7095 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7096 {
7097 struct bnx2 *bp = netdev_priv(dev);
7098
7099 if (bp->flags & BNX2_FLAG_NO_WOL) {
7100 wol->supported = 0;
7101 wol->wolopts = 0;
7102 }
7103 else {
7104 wol->supported = WAKE_MAGIC;
7105 if (bp->wol)
7106 wol->wolopts = WAKE_MAGIC;
7107 else
7108 wol->wolopts = 0;
7109 }
7110 memset(&wol->sopass, 0, sizeof(wol->sopass));
7111 }
7112
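/* Only magic-packet wake is supported; e.g. "ethtool -s ethX wol g" enables
 * it and "ethtool -s ethX wol d" disables it, assuming WoL has not been
 * disabled by the NVRAM configuration.
 */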
7113 static int
7114 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7115 {
7116 struct bnx2 *bp = netdev_priv(dev);
7117
7118 if (wol->wolopts & ~WAKE_MAGIC)
7119 return -EINVAL;
7120
7121 if (wol->wolopts & WAKE_MAGIC) {
7122 if (bp->flags & BNX2_FLAG_NO_WOL)
7123 return -EINVAL;
7124
7125 bp->wol = 1;
7126 }
7127 else {
7128 bp->wol = 0;
7129 }
7130
7131 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7132
7133 return 0;
7134 }
7135
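/* Restart autonegotiation, e.g. via "ethtool -r ethX".  Only valid when
 * autoneg is enabled; on SerDes PHYs the link is briefly forced down first so
 * that the peer notices the renegotiation.
 */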
7136 static int
7137 bnx2_nway_reset(struct net_device *dev)
7138 {
7139 struct bnx2 *bp = netdev_priv(dev);
7140 u32 bmcr;
7141
7142 if (!netif_running(dev))
7143 return -EAGAIN;
7144
7145 if (!(bp->autoneg & AUTONEG_SPEED)) {
7146 return -EINVAL;
7147 }
7148
7149 spin_lock_bh(&bp->phy_lock);
7150
7151 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7152 int rc;
7153
7154 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7155 spin_unlock_bh(&bp->phy_lock);
7156 return rc;
7157 }
7158
7159 	/* Force the link down so the change is visible on the other side */
7160 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7162 spin_unlock_bh(&bp->phy_lock);
7163
7164 msleep(20);
7165
7166 spin_lock_bh(&bp->phy_lock);
7167
7168 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7169 bp->serdes_an_pending = 1;
7170 mod_timer(&bp->timer, jiffies + bp->current_interval);
7171 }
7172
7173 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7174 bmcr &= ~BMCR_LOOPBACK;
7175 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7176
7177 spin_unlock_bh(&bp->phy_lock);
7178
7179 return 0;
7180 }
7181
7182 static u32
7183 bnx2_get_link(struct net_device *dev)
7184 {
7185 struct bnx2 *bp = netdev_priv(dev);
7186
7187 return bp->link_up;
7188 }
7189
7190 static int
7191 bnx2_get_eeprom_len(struct net_device *dev)
7192 {
7193 struct bnx2 *bp = netdev_priv(dev);
7194
7195 if (!bp->flash_info)
7196 return 0;
7197
7198 return (int) bp->flash_size;
7199 }
7200
7201 static int
7202 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7203 u8 *eebuf)
7204 {
7205 struct bnx2 *bp = netdev_priv(dev);
7206 int rc;
7207
7208 /* parameters already validated in ethtool_get_eeprom */
7209
7210 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7211
7212 return rc;
7213 }
7214
7215 static int
7216 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7217 u8 *eebuf)
7218 {
7219 struct bnx2 *bp = netdev_priv(dev);
7220 int rc;
7221
7222 /* parameters already validated in ethtool_set_eeprom */
7223
7224 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7225
7226 return rc;
7227 }
7228
7229 static int bnx2_get_coalesce(struct net_device *dev,
7230 struct ethtool_coalesce *coal,
7231 struct kernel_ethtool_coalesce *kernel_coal,
7232 struct netlink_ext_ack *extack)
7233 {
7234 struct bnx2 *bp = netdev_priv(dev);
7235
7236 memset(coal, 0, sizeof(struct ethtool_coalesce));
7237
7238 coal->rx_coalesce_usecs = bp->rx_ticks;
7239 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7240 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7241 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7242
7243 coal->tx_coalesce_usecs = bp->tx_ticks;
7244 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7245 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7246 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7247
7248 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7249
7250 return 0;
7251 }
7252
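/* Apply interrupt coalescing parameters, e.g. from
 * "ethtool -C ethX rx-usecs 18 rx-frames 12".  Each value is clamped to the
 * width of its host-coalescing register (10 bits for tick counts, 8 bits for
 * frame counts), and the NIC is re-initialized if it is running.
 */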
7253 static int bnx2_set_coalesce(struct net_device *dev,
7254 struct ethtool_coalesce *coal,
7255 struct kernel_ethtool_coalesce *kernel_coal,
7256 struct netlink_ext_ack *extack)
7257 {
7258 struct bnx2 *bp = netdev_priv(dev);
7259
7260 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7261 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7262
7263 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7264 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7265
7266 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7267 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7268
7269 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7270 if (bp->rx_quick_cons_trip_int > 0xff)
7271 bp->rx_quick_cons_trip_int = 0xff;
7272
7273 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7274 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7275
7276 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7277 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7278
7279 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7280 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7281
7282 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7283 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7284 0xff;
7285
7286 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7287 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7288 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7289 bp->stats_ticks = USEC_PER_SEC;
7290 }
7291 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7292 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7293 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7294
7295 if (netif_running(bp->dev)) {
7296 bnx2_netif_stop(bp, true);
7297 bnx2_init_nic(bp, 0);
7298 bnx2_netif_start(bp, true);
7299 }
7300
7301 return 0;
7302 }
7303
7304 static void
7305 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7306 struct kernel_ethtool_ringparam *kernel_ering,
7307 struct netlink_ext_ack *extack)
7308 {
7309 struct bnx2 *bp = netdev_priv(dev);
7310
7311 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7312 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7313
7314 ering->rx_pending = bp->rx_ring_size;
7315 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7316
7317 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7318 ering->tx_pending = bp->tx_ring_size;
7319 }
7320
7321 static int
7322 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7323 {
7324 if (netif_running(bp->dev)) {
7325 /* Reset will erase chipset stats; save them */
7326 bnx2_save_stats(bp);
7327
7328 bnx2_netif_stop(bp, true);
7329 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7330 if (reset_irq) {
7331 bnx2_free_irq(bp);
7332 bnx2_del_napi(bp);
7333 } else {
7334 __bnx2_free_irq(bp);
7335 }
7336 bnx2_free_skbs(bp);
7337 bnx2_free_mem(bp);
7338 }
7339
7340 bnx2_set_rx_ring_size(bp, rx);
7341 bp->tx_ring_size = tx;
7342
7343 if (netif_running(bp->dev)) {
7344 int rc = 0;
7345
7346 if (reset_irq) {
7347 rc = bnx2_setup_int_mode(bp, disable_msi);
7348 bnx2_init_napi(bp);
7349 }
7350
7351 if (!rc)
7352 rc = bnx2_alloc_mem(bp);
7353
7354 if (!rc)
7355 rc = bnx2_request_irq(bp);
7356
7357 if (!rc)
7358 rc = bnx2_init_nic(bp, 0);
7359
7360 if (rc) {
7361 bnx2_napi_enable(bp);
7362 dev_close(bp->dev);
7363 return rc;
7364 }
7365 #ifdef BCM_CNIC
7366 mutex_lock(&bp->cnic_lock);
7367 /* Let cnic know about the new status block. */
7368 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7369 bnx2_setup_cnic_irq_info(bp);
7370 mutex_unlock(&bp->cnic_lock);
7371 #endif
7372 bnx2_netif_start(bp, true);
7373 }
7374 return 0;
7375 }
7376
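/* Resize the rings, e.g. via "ethtool -G ethX rx N tx M".  tx must be larger
 * than MAX_SKB_FRAGS and neither value may exceed the hardware descriptor
 * limits; the NIC is torn down and rebuilt with the new sizes if it is up.
 */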
7377 static int
7378 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7379 struct kernel_ethtool_ringparam *kernel_ering,
7380 struct netlink_ext_ack *extack)
7381 {
7382 struct bnx2 *bp = netdev_priv(dev);
7383 int rc;
7384
7385 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7386 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7387 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7388
7389 return -EINVAL;
7390 }
7391 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7392 false);
7393 return rc;
7394 }
7395
7396 static void
7397 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7398 {
7399 struct bnx2 *bp = netdev_priv(dev);
7400
7401 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7402 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7403 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7404 }
7405
7406 static int
7407 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7408 {
7409 struct bnx2 *bp = netdev_priv(dev);
7410
7411 bp->req_flow_ctrl = 0;
7412 if (epause->rx_pause)
7413 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7414 if (epause->tx_pause)
7415 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7416
7417 if (epause->autoneg) {
7418 bp->autoneg |= AUTONEG_FLOW_CTRL;
7419 }
7420 else {
7421 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7422 }
7423
7424 if (netif_running(dev)) {
7425 spin_lock_bh(&bp->phy_lock);
7426 bnx2_setup_phy(bp, bp->phy_port);
7427 spin_unlock_bh(&bp->phy_lock);
7428 }
7429
7430 return 0;
7431 }
7432
7433 static struct {
7434 char string[ETH_GSTRING_LEN];
7435 } bnx2_stats_str_arr[] = {
7436 { "rx_bytes" },
7437 { "rx_error_bytes" },
7438 { "tx_bytes" },
7439 { "tx_error_bytes" },
7440 { "rx_ucast_packets" },
7441 { "rx_mcast_packets" },
7442 { "rx_bcast_packets" },
7443 { "tx_ucast_packets" },
7444 { "tx_mcast_packets" },
7445 { "tx_bcast_packets" },
7446 { "tx_mac_errors" },
7447 { "tx_carrier_errors" },
7448 { "rx_crc_errors" },
7449 { "rx_align_errors" },
7450 { "tx_single_collisions" },
7451 { "tx_multi_collisions" },
7452 { "tx_deferred" },
7453 { "tx_excess_collisions" },
7454 { "tx_late_collisions" },
7455 { "tx_total_collisions" },
7456 { "rx_fragments" },
7457 { "rx_jabbers" },
7458 { "rx_undersize_packets" },
7459 { "rx_oversize_packets" },
7460 { "rx_64_byte_packets" },
7461 { "rx_65_to_127_byte_packets" },
7462 { "rx_128_to_255_byte_packets" },
7463 { "rx_256_to_511_byte_packets" },
7464 { "rx_512_to_1023_byte_packets" },
7465 { "rx_1024_to_1522_byte_packets" },
7466 { "rx_1523_to_9022_byte_packets" },
7467 { "tx_64_byte_packets" },
7468 { "tx_65_to_127_byte_packets" },
7469 { "tx_128_to_255_byte_packets" },
7470 { "tx_256_to_511_byte_packets" },
7471 { "tx_512_to_1023_byte_packets" },
7472 { "tx_1024_to_1522_byte_packets" },
7473 { "tx_1523_to_9022_byte_packets" },
7474 { "rx_xon_frames" },
7475 { "rx_xoff_frames" },
7476 { "tx_xon_frames" },
7477 { "tx_xoff_frames" },
7478 { "rx_mac_ctrl_frames" },
7479 { "rx_filtered_packets" },
7480 { "rx_ftq_discards" },
7481 { "rx_discards" },
7482 { "rx_fw_discards" },
7483 };
7484
7485 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7486
7487 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7488
7489 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7490 STATS_OFFSET32(stat_IfHCInOctets_hi),
7491 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7492 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7493 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7494 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7495 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7496 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7497 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7498 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7499 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7500 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7501 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7502 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7503 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7504 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7505 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7506 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7507 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7508 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7509 STATS_OFFSET32(stat_EtherStatsCollisions),
7510 STATS_OFFSET32(stat_EtherStatsFragments),
7511 STATS_OFFSET32(stat_EtherStatsJabbers),
7512 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7513 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7514 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7515 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7516 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7517 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7518 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7519 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7520 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7521 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7522 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7523 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7524 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7525 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7526 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7527 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7528 STATS_OFFSET32(stat_XonPauseFramesReceived),
7529 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7530 STATS_OFFSET32(stat_OutXonSent),
7531 STATS_OFFSET32(stat_OutXoffSent),
7532 STATS_OFFSET32(stat_MacControlFramesReceived),
7533 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7534 STATS_OFFSET32(stat_IfInFTQDiscards),
7535 STATS_OFFSET32(stat_IfInMBUFDiscards),
7536 STATS_OFFSET32(stat_FwRxDrop),
7537 };
7538
7539 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7540 * skipped because of errata.
7541 */
7542 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7543 8,0,8,8,8,8,8,8,8,8,
7544 4,0,4,4,4,4,4,4,4,4,
7545 4,4,4,4,4,4,4,4,4,4,
7546 4,4,4,4,4,4,4,4,4,4,
7547 4,4,4,4,4,4,4,
7548 };
7549
7550 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7551 8,0,8,8,8,8,8,8,8,8,
7552 4,4,4,4,4,4,4,4,4,4,
7553 4,4,4,4,4,4,4,4,4,4,
7554 4,4,4,4,4,4,4,4,4,4,
7555 4,4,4,4,4,4,4,
7556 };
7557
7558 #define BNX2_NUM_TESTS 6
7559
7560 static struct {
7561 char string[ETH_GSTRING_LEN];
7562 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7563 { "register_test (offline)" },
7564 { "memory_test (offline)" },
7565 { "loopback_test (offline)" },
7566 { "nvram_test (online)" },
7567 { "interrupt_test (online)" },
7568 { "link_test (online)" },
7569 };
7570
7571 static int
7572 bnx2_get_sset_count(struct net_device *dev, int sset)
7573 {
7574 switch (sset) {
7575 case ETH_SS_TEST:
7576 return BNX2_NUM_TESTS;
7577 case ETH_SS_STATS:
7578 return BNX2_NUM_STATS;
7579 default:
7580 return -EOPNOTSUPP;
7581 }
7582 }
7583
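/* ethtool self-test entry point (e.g. "ethtool -t ethX offline").  The buf[]
 * slots map to bnx2_tests_str_arr above: 0 register, 1 memory, 2 loopback
 * (offline only), 3 NVRAM, 4 interrupt, 5 link.
 */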
7584 static void
7585 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7586 {
7587 struct bnx2 *bp = netdev_priv(dev);
7588
7589 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7590 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7591 int i;
7592
7593 bnx2_netif_stop(bp, true);
7594 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7595 bnx2_free_skbs(bp);
7596
7597 if (bnx2_test_registers(bp) != 0) {
7598 buf[0] = 1;
7599 etest->flags |= ETH_TEST_FL_FAILED;
7600 }
7601 if (bnx2_test_memory(bp) != 0) {
7602 buf[1] = 1;
7603 etest->flags |= ETH_TEST_FL_FAILED;
7604 }
7605 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7606 etest->flags |= ETH_TEST_FL_FAILED;
7607
7608 if (!netif_running(bp->dev))
7609 bnx2_shutdown_chip(bp);
7610 else {
7611 bnx2_init_nic(bp, 1);
7612 bnx2_netif_start(bp, true);
7613 }
7614
7615 /* wait for link up */
7616 for (i = 0; i < 7; i++) {
7617 if (bp->link_up)
7618 break;
7619 msleep_interruptible(1000);
7620 }
7621 }
7622
7623 if (bnx2_test_nvram(bp) != 0) {
7624 buf[3] = 1;
7625 etest->flags |= ETH_TEST_FL_FAILED;
7626 }
7627 if (bnx2_test_intr(bp) != 0) {
7628 buf[4] = 1;
7629 etest->flags |= ETH_TEST_FL_FAILED;
7630 }
7631
7632 if (bnx2_test_link(bp) != 0) {
7633 buf[5] = 1;
7634 etest->flags |= ETH_TEST_FL_FAILED;
7635
7636 }
7637 }
7638
7639 static void
7640 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7641 {
7642 switch (stringset) {
7643 case ETH_SS_STATS:
7644 memcpy(buf, bnx2_stats_str_arr,
7645 sizeof(bnx2_stats_str_arr));
7646 break;
7647 case ETH_SS_TEST:
7648 memcpy(buf, bnx2_tests_str_arr,
7649 sizeof(bnx2_tests_str_arr));
7650 break;
7651 }
7652 }
7653
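/* Fill the "ethtool -S ethX" counters.  Each entry combines the live hardware
 * counter with the value saved in temp_stats_blk; counters listed with length
 * 0 in the per-chip length arrays are skipped because of hardware errata.
 */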
7654 static void
7655 bnx2_get_ethtool_stats(struct net_device *dev,
7656 struct ethtool_stats *stats, u64 *buf)
7657 {
7658 struct bnx2 *bp = netdev_priv(dev);
7659 int i;
7660 u32 *hw_stats = (u32 *) bp->stats_blk;
7661 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7662 u8 *stats_len_arr = NULL;
7663
7664 if (!hw_stats) {
7665 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7666 return;
7667 }
7668
7669 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7670 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7671 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7672 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7673 stats_len_arr = bnx2_5706_stats_len_arr;
7674 else
7675 stats_len_arr = bnx2_5708_stats_len_arr;
7676
7677 for (i = 0; i < BNX2_NUM_STATS; i++) {
7678 unsigned long offset;
7679
7680 if (stats_len_arr[i] == 0) {
7681 /* skip this counter */
7682 buf[i] = 0;
7683 continue;
7684 }
7685
7686 offset = bnx2_stats_offset_arr[i];
7687 if (stats_len_arr[i] == 4) {
7688 /* 4-byte counter */
7689 buf[i] = (u64) *(hw_stats + offset) +
7690 *(temp_stats + offset);
7691 continue;
7692 }
7693 /* 8-byte counter */
7694 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7695 *(hw_stats + offset + 1) +
7696 (((u64) *(temp_stats + offset)) << 32) +
7697 *(temp_stats + offset + 1);
7698 }
7699 }
7700
7701 static int
7702 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7703 {
7704 struct bnx2 *bp = netdev_priv(dev);
7705
7706 switch (state) {
7707 case ETHTOOL_ID_ACTIVE:
7708 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7709 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7710 return 1; /* cycle on/off once per second */
7711
7712 case ETHTOOL_ID_ON:
7713 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7714 BNX2_EMAC_LED_1000MB_OVERRIDE |
7715 BNX2_EMAC_LED_100MB_OVERRIDE |
7716 BNX2_EMAC_LED_10MB_OVERRIDE |
7717 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7718 BNX2_EMAC_LED_TRAFFIC);
7719 break;
7720
7721 case ETHTOOL_ID_OFF:
7722 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7723 break;
7724
7725 case ETHTOOL_ID_INACTIVE:
7726 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7727 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7728 break;
7729 }
7730
7731 return 0;
7732 }
7733
7734 static int
7735 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7736 {
7737 struct bnx2 *bp = netdev_priv(dev);
7738
7739 /* TSO with VLAN tag won't work with current firmware */
7740 if (features & NETIF_F_HW_VLAN_CTAG_TX)
7741 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7742 else
7743 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7744
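	/* A change to RX VLAN tag stripping must be programmed into the
	 * chip's RX mode, so restart the NIC and return 1 to indicate that
	 * dev->features has already been updated here.
	 */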
7745 if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7746 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7747 netif_running(dev)) {
7748 bnx2_netif_stop(bp, false);
7749 dev->features = features;
7750 bnx2_set_rx_mode(dev);
7751 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7752 bnx2_netif_start(bp, false);
7753 return 1;
7754 }
7755
7756 return 0;
7757 }
7758
7759 static void bnx2_get_channels(struct net_device *dev,
7760 struct ethtool_channels *channels)
7761 {
7762 struct bnx2 *bp = netdev_priv(dev);
7763 u32 max_rx_rings = 1;
7764 u32 max_tx_rings = 1;
7765
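	/* More than one RX/TX ring is only possible on MSI-X capable chips
	 * with the disable_msi module parameter left at 0.
	 */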
7766 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7767 max_rx_rings = RX_MAX_RINGS;
7768 max_tx_rings = TX_MAX_RINGS;
7769 }
7770
7771 channels->max_rx = max_rx_rings;
7772 channels->max_tx = max_tx_rings;
7773 channels->max_other = 0;
7774 channels->max_combined = 0;
7775 channels->rx_count = bp->num_rx_rings;
7776 channels->tx_count = bp->num_tx_rings;
7777 channels->other_count = 0;
7778 channels->combined_count = 0;
7779 }
7780
7781 static int bnx2_set_channels(struct net_device *dev,
7782 struct ethtool_channels *channels)
7783 {
7784 struct bnx2 *bp = netdev_priv(dev);
7785 u32 max_rx_rings = 1;
7786 u32 max_tx_rings = 1;
7787 int rc = 0;
7788
7789 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7790 max_rx_rings = RX_MAX_RINGS;
7791 max_tx_rings = TX_MAX_RINGS;
7792 }
7793 if (channels->rx_count > max_rx_rings ||
7794 channels->tx_count > max_tx_rings)
7795 return -EINVAL;
7796
7797 bp->num_req_rx_rings = channels->rx_count;
7798 bp->num_req_tx_rings = channels->tx_count;
7799
7800 if (netif_running(dev))
7801 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7802 bp->tx_ring_size, true);
7803
7804 return rc;
7805 }
7806
7807 static const struct ethtool_ops bnx2_ethtool_ops = {
7808 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
7809 ETHTOOL_COALESCE_MAX_FRAMES |
7810 ETHTOOL_COALESCE_USECS_IRQ |
7811 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
7812 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
7813 .get_drvinfo = bnx2_get_drvinfo,
7814 .get_regs_len = bnx2_get_regs_len,
7815 .get_regs = bnx2_get_regs,
7816 .get_wol = bnx2_get_wol,
7817 .set_wol = bnx2_set_wol,
7818 .nway_reset = bnx2_nway_reset,
7819 .get_link = bnx2_get_link,
7820 .get_eeprom_len = bnx2_get_eeprom_len,
7821 .get_eeprom = bnx2_get_eeprom,
7822 .set_eeprom = bnx2_set_eeprom,
7823 .get_coalesce = bnx2_get_coalesce,
7824 .set_coalesce = bnx2_set_coalesce,
7825 .get_ringparam = bnx2_get_ringparam,
7826 .set_ringparam = bnx2_set_ringparam,
7827 .get_pauseparam = bnx2_get_pauseparam,
7828 .set_pauseparam = bnx2_set_pauseparam,
7829 .self_test = bnx2_self_test,
7830 .get_strings = bnx2_get_strings,
7831 .set_phys_id = bnx2_set_phys_id,
7832 .get_ethtool_stats = bnx2_get_ethtool_stats,
7833 .get_sset_count = bnx2_get_sset_count,
7834 .get_channels = bnx2_get_channels,
7835 .set_channels = bnx2_set_channels,
7836 .get_link_ksettings = bnx2_get_link_ksettings,
7837 .set_link_ksettings = bnx2_set_link_ksettings,
7838 };
7839
7840 /* Called with rtnl_lock */
7841 static int
7842 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7843 {
7844 struct mii_ioctl_data *data = if_mii(ifr);
7845 struct bnx2 *bp = netdev_priv(dev);
7846 int err;
7847
7848 switch(cmd) {
7849 case SIOCGMIIPHY:
7850 data->phy_id = bp->phy_addr;
7851
7852 fallthrough;
7853 case SIOCGMIIREG: {
7854 u32 mii_regval;
7855
7856 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7857 return -EOPNOTSUPP;
7858
7859 if (!netif_running(dev))
7860 return -EAGAIN;
7861
7862 spin_lock_bh(&bp->phy_lock);
7863 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7864 spin_unlock_bh(&bp->phy_lock);
7865
7866 data->val_out = mii_regval;
7867
7868 return err;
7869 }
7870
7871 case SIOCSMIIREG:
7872 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7873 return -EOPNOTSUPP;
7874
7875 if (!netif_running(dev))
7876 return -EAGAIN;
7877
7878 spin_lock_bh(&bp->phy_lock);
7879 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7880 spin_unlock_bh(&bp->phy_lock);
7881
7882 return err;
7883
7884 default:
7885 /* do nothing */
7886 break;
7887 }
7888 return -EOPNOTSUPP;
7889 }
7890
7891 /* Called with rtnl_lock */
7892 static int
7893 bnx2_change_mac_addr(struct net_device *dev, void *p)
7894 {
7895 struct sockaddr *addr = p;
7896 struct bnx2 *bp = netdev_priv(dev);
7897
7898 if (!is_valid_ether_addr(addr->sa_data))
7899 return -EADDRNOTAVAIL;
7900
7901 eth_hw_addr_set(dev, addr->sa_data);
7902 if (netif_running(dev))
7903 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7904
7905 return 0;
7906 }
7907
7908 /* Called with rtnl_lock */
7909 static int
7910 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7911 {
7912 struct bnx2 *bp = netdev_priv(dev);
7913
7914 WRITE_ONCE(dev->mtu, new_mtu);
7915 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7916 false);
7917 }
7918
7919 #ifdef CONFIG_NET_POLL_CONTROLLER
7920 static void
7921 poll_bnx2(struct net_device *dev)
7922 {
7923 struct bnx2 *bp = netdev_priv(dev);
7924 int i;
7925
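	/* netpoll path: invoke each vector's handler directly with its IRQ
	 * masked so packets can be serviced without normal interrupts.
	 */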
7926 for (i = 0; i < bp->irq_nvecs; i++) {
7927 struct bnx2_irq *irq = &bp->irq_tbl[i];
7928
7929 disable_irq(irq->vector);
7930 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7931 enable_irq(irq->vector);
7932 }
7933 }
7934 #endif
7935
7936 static void
7937 bnx2_get_5709_media(struct bnx2 *bp)
7938 {
7939 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7940 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7941 u32 strap;
7942
7943 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7944 return;
7945 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7946 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7947 return;
7948 }
7949
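	/* The bond ID did not identify the media (copper vs. SerDes), so
	 * decode it from the PHY strap value; the strap-to-media mapping
	 * differs between PCI function 0 and function 1.
	 */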
7950 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7951 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7952 else
7953 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7954
7955 if (bp->func == 0) {
7956 switch (strap) {
7957 case 0x4:
7958 case 0x5:
7959 case 0x6:
7960 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7961 return;
7962 }
7963 } else {
7964 switch (strap) {
7965 case 0x1:
7966 case 0x2:
7967 case 0x4:
7968 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7969 return;
7970 }
7971 }
7972 }
7973
7974 static void
7975 bnx2_get_pci_speed(struct bnx2 *bp)
7976 {
7977 u32 reg;
7978
7979 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7980 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7981 u32 clkreg;
7982
7983 bp->flags |= BNX2_FLAG_PCIX;
7984
7985 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7986
7987 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7988 switch (clkreg) {
7989 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7990 bp->bus_speed_mhz = 133;
7991 break;
7992
7993 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7994 bp->bus_speed_mhz = 100;
7995 break;
7996
7997 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7998 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7999 bp->bus_speed_mhz = 66;
8000 break;
8001
8002 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8003 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8004 bp->bus_speed_mhz = 50;
8005 break;
8006
8007 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8008 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8009 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8010 bp->bus_speed_mhz = 33;
8011 break;
8012 }
8013 }
8014 else {
8015 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8016 bp->bus_speed_mhz = 66;
8017 else
8018 bp->bus_speed_mhz = 33;
8019 }
8020
8021 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8022 bp->flags |= BNX2_FLAG_PCI_32BIT;
8023
8024 }
8025
8026 static void
8027 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8028 {
8029 unsigned int len;
8030 int rc, i, j;
8031 u8 *data;
8032
8033 #define BNX2_VPD_NVRAM_OFFSET 0x300
8034 #define BNX2_VPD_LEN 128
8035 #define BNX2_MAX_VER_SLEN 30
8036
8037 data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
8038 if (!data)
8039 return;
8040
8041 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
8042 if (rc)
8043 goto vpd_done;
8044
8045 for (i = 0; i < BNX2_VPD_LEN; i += 4)
8046 swab32s((u32 *)&data[i]);
8047
8048 j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8049 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
8050 if (j < 0)
8051 goto vpd_done;
8052
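	/* Only boards whose VPD manufacturer ID reads "1028" (Dell's PCI
	 * vendor ID) carry a vendor-specific firmware version string here.
	 */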
8053 if (len != 4 || memcmp(&data[j], "1028", 4))
8054 goto vpd_done;
8055
8056 j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8057 PCI_VPD_RO_KEYWORD_VENDOR0,
8058 &len);
8059 if (j < 0)
8060 goto vpd_done;
8061
8062 if (len > BNX2_MAX_VER_SLEN)
8063 goto vpd_done;
8064
8065 memcpy(bp->fw_version, &data[j], len);
8066 bp->fw_version[len] = ' ';
8067
8068 vpd_done:
8069 kfree(data);
8070 }
8071
8072 static int
8073 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8074 {
8075 struct bnx2 *bp;
8076 int rc, i, j;
8077 u32 reg;
8078 u64 dma_mask, persist_dma_mask;
8079
8080 SET_NETDEV_DEV(dev, &pdev->dev);
8081 bp = netdev_priv(dev);
8082
8083 bp->flags = 0;
8084 bp->phy_flags = 0;
8085
8086 bp->temp_stats_blk =
8087 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8088
8089 if (!bp->temp_stats_blk) {
8090 rc = -ENOMEM;
8091 goto err_out;
8092 }
8093
8094 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8095 rc = pci_enable_device(pdev);
8096 if (rc) {
8097 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8098 goto err_out;
8099 }
8100
8101 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8102 dev_err(&pdev->dev,
8103 "Cannot find PCI device base address, aborting\n");
8104 rc = -ENODEV;
8105 goto err_out_disable;
8106 }
8107
8108 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8109 if (rc) {
8110 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8111 goto err_out_disable;
8112 }
8113
8114 pci_set_master(pdev);
8115
8116 bp->pm_cap = pdev->pm_cap;
8117 if (bp->pm_cap == 0) {
8118 dev_err(&pdev->dev,
8119 "Cannot find power management capability, aborting\n");
8120 rc = -EIO;
8121 goto err_out_release;
8122 }
8123
8124 bp->dev = dev;
8125 bp->pdev = pdev;
8126
8127 spin_lock_init(&bp->phy_lock);
8128 spin_lock_init(&bp->indirect_lock);
8129 #ifdef BCM_CNIC
8130 mutex_init(&bp->cnic_lock);
8131 #endif
8132 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8133
8134 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8135 TX_MAX_TSS_RINGS + 1));
8136 if (!bp->regview) {
8137 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8138 rc = -ENOMEM;
8139 goto err_out_release;
8140 }
8141
8142 /* Configure byte swap and enable write to the reg_window registers.
8143  * Rely on the CPU to do target byte swapping on big endian systems.
8144 * The chip's target access swapping will not swap all accesses
8145 */
8146 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8147 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8148 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8149
8150 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8151
8152 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8153 if (!pci_is_pcie(pdev)) {
8154 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8155 rc = -EIO;
8156 goto err_out_unmap;
8157 }
8158 bp->flags |= BNX2_FLAG_PCIE;
8159 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8160 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8161 } else {
8162 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8163 if (bp->pcix_cap == 0) {
8164 dev_err(&pdev->dev,
8165 "Cannot find PCIX capability, aborting\n");
8166 rc = -EIO;
8167 goto err_out_unmap;
8168 }
8169 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8170 }
8171
8172 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8173 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8174 if (pdev->msix_cap)
8175 bp->flags |= BNX2_FLAG_MSIX_CAP;
8176 }
8177
8178 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8179 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8180 if (pdev->msi_cap)
8181 bp->flags |= BNX2_FLAG_MSI_CAP;
8182 }
8183
8184 /* 5708 cannot support DMA addresses > 40-bit. */
8185 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8186 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8187 else
8188 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8189
8190 /* Configure DMA attributes. */
8191 if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
8192 dev->features |= NETIF_F_HIGHDMA;
8193 rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
8194 if (rc) {
8195 dev_err(&pdev->dev,
8196 "dma_set_coherent_mask failed, aborting\n");
8197 goto err_out_unmap;
8198 }
8199 } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
8200 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8201 goto err_out_unmap;
8202 }
8203
8204 if (!(bp->flags & BNX2_FLAG_PCIE))
8205 bnx2_get_pci_speed(bp);
8206
8207 /* 5706A0 may falsely detect SERR and PERR. */
8208 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8209 reg = BNX2_RD(bp, PCI_COMMAND);
8210 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8211 BNX2_WR(bp, PCI_COMMAND, reg);
8212 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8213 !(bp->flags & BNX2_FLAG_PCIX)) {
8214 dev_err(&pdev->dev,
8215 "5706 A1 can only be used in a PCIX bus, aborting\n");
8216 rc = -EPERM;
8217 goto err_out_unmap;
8218 }
8219
8220 bnx2_init_nvram(bp);
8221
8222 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8223
8224 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8225 bp->func = 1;
8226
8227 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8228 BNX2_SHM_HDR_SIGNATURE_SIG) {
8229 u32 off = bp->func << 2;
8230
8231 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8232 } else
8233 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8234
8235 /* Get the permanent MAC address. First we need to make sure the
8236 * firmware is actually running.
8237 */
8238 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8239
8240 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8241 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8242 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8243 rc = -ENODEV;
8244 goto err_out_unmap;
8245 }
8246
8247 bnx2_read_vpd_fw_ver(bp);
8248
8249 j = strlen(bp->fw_version);
8250 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
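	/* Append the bootcode revision as "bc x.y.z", one decimal field per
	 * byte taken from the top three bytes of the BC_REV word.
	 */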
8251 for (i = 0; i < 3 && j < 24; i++) {
8252 u8 num, k, skip0;
8253
8254 if (i == 0) {
8255 bp->fw_version[j++] = 'b';
8256 bp->fw_version[j++] = 'c';
8257 bp->fw_version[j++] = ' ';
8258 }
8259 num = (u8) (reg >> (24 - (i * 8)));
8260 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8261 if (num >= k || !skip0 || k == 1) {
8262 bp->fw_version[j++] = (num / k) + '0';
8263 skip0 = 0;
8264 }
8265 }
8266 if (i != 2)
8267 bp->fw_version[j++] = '.';
8268 }
8269 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8270 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8271 bp->wol = 1;
8272
8273 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8274 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8275
8276 for (i = 0; i < 30; i++) {
8277 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8278 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8279 break;
8280 msleep(10);
8281 }
8282 }
8283 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8284 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8285 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8286 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8287 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8288
8289 if (j < 32)
8290 bp->fw_version[j++] = ' ';
8291 for (i = 0; i < 3 && j < 28; i++) {
8292 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8293 reg = be32_to_cpu(reg);
8294 memcpy(&bp->fw_version[j], &reg, 4);
8295 j += 4;
8296 }
8297 }
8298
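	/* The permanent MAC address is split across two shmem words:
	 * the upper 2 bytes and the lower 4 bytes.
	 */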
8299 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8300 bp->mac_addr[0] = (u8) (reg >> 8);
8301 bp->mac_addr[1] = (u8) reg;
8302
8303 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8304 bp->mac_addr[2] = (u8) (reg >> 24);
8305 bp->mac_addr[3] = (u8) (reg >> 16);
8306 bp->mac_addr[4] = (u8) (reg >> 8);
8307 bp->mac_addr[5] = (u8) reg;
8308
8309 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8310 bnx2_set_rx_ring_size(bp, 255);
8311
8312 bp->tx_quick_cons_trip_int = 2;
8313 bp->tx_quick_cons_trip = 20;
8314 bp->tx_ticks_int = 18;
8315 bp->tx_ticks = 80;
8316
8317 bp->rx_quick_cons_trip_int = 2;
8318 bp->rx_quick_cons_trip = 12;
8319 bp->rx_ticks_int = 18;
8320 bp->rx_ticks = 18;
8321
8322 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8323
8324 bp->current_interval = BNX2_TIMER_INTERVAL;
8325
8326 bp->phy_addr = 1;
8327
8328 /* allocate stats_blk */
8329 rc = bnx2_alloc_stats_blk(dev);
8330 if (rc)
8331 goto err_out_unmap;
8332
8333 /* Disable WOL support if we are running on a SERDES chip. */
8334 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8335 bnx2_get_5709_media(bp);
8336 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8337 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8338
8339 bp->phy_port = PORT_TP;
8340 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8341 bp->phy_port = PORT_FIBRE;
8342 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8343 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8344 bp->flags |= BNX2_FLAG_NO_WOL;
8345 bp->wol = 0;
8346 }
8347 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8348 /* Don't do parallel detect on this board because of
8349 * some board problems. The link will not go down
8350 * if we do parallel detect.
8351 */
8352 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8353 pdev->subsystem_device == 0x310c)
8354 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8355 } else {
8356 bp->phy_addr = 2;
8357 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8358 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8359 }
8360 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8361 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8362 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8363 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8364 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8365 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8366 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8367
8368 bnx2_init_fw_cap(bp);
8369
8370 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8371 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8372 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8373 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8374 bp->flags |= BNX2_FLAG_NO_WOL;
8375 bp->wol = 0;
8376 }
8377
8378 if (bp->flags & BNX2_FLAG_NO_WOL)
8379 device_set_wakeup_capable(&bp->pdev->dev, false);
8380 else
8381 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8382
8383 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8384 bp->tx_quick_cons_trip_int =
8385 bp->tx_quick_cons_trip;
8386 bp->tx_ticks_int = bp->tx_ticks;
8387 bp->rx_quick_cons_trip_int =
8388 bp->rx_quick_cons_trip;
8389 bp->rx_ticks_int = bp->rx_ticks;
8390 bp->comp_prod_trip_int = bp->comp_prod_trip;
8391 bp->com_ticks_int = bp->com_ticks;
8392 bp->cmd_ticks_int = bp->cmd_ticks;
8393 }
8394
8395 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8396 *
8397 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8398 * with byte enables disabled on the unused 32-bit word. This is legal
8399 * but causes problems on the AMD 8132 which will eventually stop
8400 * responding after a while.
8401 *
8402 * AMD believes this incompatibility is unique to the 5706, and
8403 * prefers to locally disable MSI rather than globally disabling it.
8404 */
8405 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8406 struct pci_dev *amd_8132 = NULL;
8407
8408 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8409 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8410 amd_8132))) {
8411
8412 if (amd_8132->revision >= 0x10 &&
8413 amd_8132->revision <= 0x13) {
8414 disable_msi = 1;
8415 pci_dev_put(amd_8132);
8416 break;
8417 }
8418 }
8419 }
8420
8421 bnx2_set_default_link(bp);
8422 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8423
8424 timer_setup(&bp->timer, bnx2_timer, 0);
8425 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8426
8427 #ifdef BCM_CNIC
8428 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8429 bp->cnic_eth_dev.max_iscsi_conn =
8430 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8431 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8432 bp->cnic_probe = bnx2_cnic_probe;
8433 #endif
8434 pci_save_state(pdev);
8435
8436 return 0;
8437
8438 err_out_unmap:
8439 pci_iounmap(pdev, bp->regview);
8440 bp->regview = NULL;
8441
8442 err_out_release:
8443 pci_release_regions(pdev);
8444
8445 err_out_disable:
8446 pci_disable_device(pdev);
8447
8448 err_out:
8449 kfree(bp->temp_stats_blk);
8450
8451 return rc;
8452 }
8453
8454 static char *
8455 bnx2_bus_string(struct bnx2 *bp, char *str)
8456 {
8457 char *s = str;
8458
8459 if (bp->flags & BNX2_FLAG_PCIE) {
8460 s += sprintf(s, "PCI Express");
8461 } else {
8462 s += sprintf(s, "PCI");
8463 if (bp->flags & BNX2_FLAG_PCIX)
8464 s += sprintf(s, "-X");
8465 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8466 s += sprintf(s, " 32-bit");
8467 else
8468 s += sprintf(s, " 64-bit");
8469 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8470 }
8471 return str;
8472 }
8473
8474 static void
8475 bnx2_del_napi(struct bnx2 *bp)
8476 {
8477 int i;
8478
8479 for (i = 0; i < bp->irq_nvecs; i++)
8480 netif_napi_del(&bp->bnx2_napi[i].napi);
8481 }
8482
8483 static void
8484 bnx2_init_napi(struct bnx2 *bp)
8485 {
8486 int i;
8487
8488 for (i = 0; i < bp->irq_nvecs; i++) {
8489 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8490 int (*poll)(struct napi_struct *, int);
8491
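		/* Vector 0 uses bnx2_poll, which also services slow-path
		 * (link/status block) events; any additional MSI-X vectors
		 * use bnx2_poll_msix for ring work only.
		 */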
8492 if (i == 0)
8493 poll = bnx2_poll;
8494 else
8495 poll = bnx2_poll_msix;
8496
8497 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
8498 bnapi->bp = bp;
8499 }
8500 }
8501
8502 static const struct net_device_ops bnx2_netdev_ops = {
8503 .ndo_open = bnx2_open,
8504 .ndo_start_xmit = bnx2_start_xmit,
8505 .ndo_stop = bnx2_close,
8506 .ndo_get_stats64 = bnx2_get_stats64,
8507 .ndo_set_rx_mode = bnx2_set_rx_mode,
8508 .ndo_eth_ioctl = bnx2_ioctl,
8509 .ndo_validate_addr = eth_validate_addr,
8510 .ndo_set_mac_address = bnx2_change_mac_addr,
8511 .ndo_change_mtu = bnx2_change_mtu,
8512 .ndo_set_features = bnx2_set_features,
8513 .ndo_tx_timeout = bnx2_tx_timeout,
8514 #ifdef CONFIG_NET_POLL_CONTROLLER
8515 .ndo_poll_controller = poll_bnx2,
8516 #endif
8517 };
8518
8519 static int
8520 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8521 {
8522 struct net_device *dev;
8523 struct bnx2 *bp;
8524 int rc;
8525 char str[40];
8526
8527 /* dev zeroed in init_etherdev */
8528 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8529 if (!dev)
8530 return -ENOMEM;
8531
8532 rc = bnx2_init_board(pdev, dev);
8533 if (rc < 0)
8534 goto err_free;
8535
8536 dev->netdev_ops = &bnx2_netdev_ops;
8537 dev->watchdog_timeo = TX_TIMEOUT;
8538 dev->ethtool_ops = &bnx2_ethtool_ops;
8539
8540 bp = netdev_priv(dev);
8541
8542 pci_set_drvdata(pdev, dev);
8543
8544 /*
8545  * In-flight DMA from the first kernel could still be running in the kdump kernel.
8546  * A new io-page table has already been created before bnx2 resets the chip at open time.
8547  * We have to wait for the in-flight DMA to complete so that it does not look up
8548  * entries in the newly created io-page table.
8549 */
8550 if (is_kdump_kernel())
8551 bnx2_wait_dma_complete(bp);
8552
8553 eth_hw_addr_set(dev, bp->mac_addr);
8554
8555 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8556 NETIF_F_TSO | NETIF_F_TSO_ECN |
8557 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8558
8559 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8560 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8561
8562 dev->vlan_features = dev->hw_features;
8563 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8564 dev->features |= dev->hw_features;
8565 dev->priv_flags |= IFF_UNICAST_FLT;
8566 dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8567 dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8568
8569 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8570 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8571
8572 if ((rc = register_netdev(dev))) {
8573 dev_err(&pdev->dev, "Cannot register net device\n");
8574 goto error;
8575 }
8576
8577 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8578 "node addr %pM\n", board_info[ent->driver_data].name,
8579 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8580 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8581 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8582 pdev->irq, dev->dev_addr);
8583
8584 return 0;
8585
8586 error:
8587 pci_iounmap(pdev, bp->regview);
8588 pci_release_regions(pdev);
8589 pci_disable_device(pdev);
8590 err_free:
8591 bnx2_free_stats_blk(dev);
8592 free_netdev(dev);
8593 return rc;
8594 }
8595
8596 static void
8597 bnx2_remove_one(struct pci_dev *pdev)
8598 {
8599 struct net_device *dev = pci_get_drvdata(pdev);
8600 struct bnx2 *bp = netdev_priv(dev);
8601
8602 unregister_netdev(dev);
8603
8604 timer_delete_sync(&bp->timer);
8605 cancel_work_sync(&bp->reset_task);
8606
8607 pci_iounmap(bp->pdev, bp->regview);
8608
8609 bnx2_free_stats_blk(dev);
8610 kfree(bp->temp_stats_blk);
8611
8612 bnx2_release_firmware(bp);
8613
8614 free_netdev(dev);
8615
8616 pci_release_regions(pdev);
8617 pci_disable_device(pdev);
8618 }
8619
8620 #ifdef CONFIG_PM_SLEEP
8621 static int
8622 bnx2_suspend(struct device *device)
8623 {
8624 struct net_device *dev = dev_get_drvdata(device);
8625 struct bnx2 *bp = netdev_priv(dev);
8626
8627 if (netif_running(dev)) {
8628 cancel_work_sync(&bp->reset_task);
8629 bnx2_netif_stop(bp, true);
8630 netif_device_detach(dev);
8631 timer_delete_sync(&bp->timer);
8632 bnx2_shutdown_chip(bp);
8633 __bnx2_free_irq(bp);
8634 bnx2_free_skbs(bp);
8635 }
8636 bnx2_setup_wol(bp);
8637 return 0;
8638 }
8639
8640 static int
8641 bnx2_resume(struct device *device)
8642 {
8643 struct net_device *dev = dev_get_drvdata(device);
8644 struct bnx2 *bp = netdev_priv(dev);
8645
8646 if (!netif_running(dev))
8647 return 0;
8648
8649 bnx2_set_power_state(bp, PCI_D0);
8650 netif_device_attach(dev);
8651 bnx2_request_irq(bp);
8652 bnx2_init_nic(bp, 1);
8653 bnx2_netif_start(bp, true);
8654 return 0;
8655 }
8656
8657 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8658 #define BNX2_PM_OPS (&bnx2_pm_ops)
8659
8660 #else
8661
8662 #define BNX2_PM_OPS NULL
8663
8664 #endif /* CONFIG_PM_SLEEP */
8665 /**
8666 * bnx2_io_error_detected - called when PCI error is detected
8667 * @pdev: Pointer to PCI device
8668 * @state: The current pci connection state
8669 *
8670 * This function is called after a PCI bus error affecting
8671 * this device has been detected.
8672 */
8673 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8674 pci_channel_state_t state)
8675 {
8676 struct net_device *dev = pci_get_drvdata(pdev);
8677 struct bnx2 *bp = netdev_priv(dev);
8678
8679 rtnl_lock();
8680 netif_device_detach(dev);
8681
8682 if (state == pci_channel_io_perm_failure) {
8683 rtnl_unlock();
8684 return PCI_ERS_RESULT_DISCONNECT;
8685 }
8686
8687 if (netif_running(dev)) {
8688 bnx2_netif_stop(bp, true);
8689 timer_delete_sync(&bp->timer);
8690 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8691 }
8692
8693 pci_disable_device(pdev);
8694 rtnl_unlock();
8695
8696 /* Request a slot reset. */
8697 return PCI_ERS_RESULT_NEED_RESET;
8698 }
8699
8700 /**
8701 * bnx2_io_slot_reset - called after the pci bus has been reset.
8702 * @pdev: Pointer to PCI device
8703 *
8704 * Restart the card from scratch, as if from a cold-boot.
8705 */
8706 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8707 {
8708 struct net_device *dev = pci_get_drvdata(pdev);
8709 struct bnx2 *bp = netdev_priv(dev);
8710 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8711 int err = 0;
8712
8713 rtnl_lock();
8714 if (pci_enable_device(pdev)) {
8715 dev_err(&pdev->dev,
8716 "Cannot re-enable PCI device after reset\n");
8717 } else {
8718 pci_set_master(pdev);
8719 pci_restore_state(pdev);
8720
8721 if (netif_running(dev))
8722 err = bnx2_init_nic(bp, 1);
8723
8724 if (!err)
8725 result = PCI_ERS_RESULT_RECOVERED;
8726 }
8727
8728 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8729 bnx2_napi_enable(bp);
8730 dev_close(dev);
8731 }
8732 rtnl_unlock();
8733
8734 return result;
8735 }
8736
8737 /**
8738 * bnx2_io_resume - called when traffic can start flowing again.
8739 * @pdev: Pointer to PCI device
8740 *
8741 * This callback is called when the error recovery driver tells us that
8742  * it's OK to resume normal operation.
8743 */
8744 static void bnx2_io_resume(struct pci_dev *pdev)
8745 {
8746 struct net_device *dev = pci_get_drvdata(pdev);
8747 struct bnx2 *bp = netdev_priv(dev);
8748
8749 rtnl_lock();
8750 if (netif_running(dev))
8751 bnx2_netif_start(bp, true);
8752
8753 netif_device_attach(dev);
8754 rtnl_unlock();
8755 }
8756
8757 static void bnx2_shutdown(struct pci_dev *pdev)
8758 {
8759 struct net_device *dev = pci_get_drvdata(pdev);
8760 struct bnx2 *bp;
8761
8762 if (!dev)
8763 return;
8764
8765 bp = netdev_priv(dev);
8766 if (!bp)
8767 return;
8768
8769 rtnl_lock();
8770 if (netif_running(dev))
8771 dev_close(bp->dev);
8772
8773 if (system_state == SYSTEM_POWER_OFF)
8774 bnx2_set_power_state(bp, PCI_D3hot);
8775
8776 rtnl_unlock();
8777 }
8778
8779 static const struct pci_error_handlers bnx2_err_handler = {
8780 .error_detected = bnx2_io_error_detected,
8781 .slot_reset = bnx2_io_slot_reset,
8782 .resume = bnx2_io_resume,
8783 };
8784
8785 static struct pci_driver bnx2_pci_driver = {
8786 .name = DRV_MODULE_NAME,
8787 .id_table = bnx2_pci_tbl,
8788 .probe = bnx2_init_one,
8789 .remove = bnx2_remove_one,
8790 .driver.pm = BNX2_PM_OPS,
8791 .err_handler = &bnx2_err_handler,
8792 .shutdown = bnx2_shutdown,
8793 };
8794
8795 module_pci_driver(bnx2_pci_driver);
8796