xref: /linux/drivers/net/ethernet/emulex/benet/be_main.c (revision a508da6cc0093171833efb8376b00473f24221b9)
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29 
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33 
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37 
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46 	{ 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51 	"CEV",
52 	"CTX",
53 	"DBUF",
54 	"ERX",
55 	"Host",
56 	"MPU",
57 	"NDMA",
58 	"PTC ",
59 	"RDMA ",
60 	"RXF ",
61 	"RXIPS ",
62 	"RXULP0 ",
63 	"RXULP1 ",
64 	"RXULP2 ",
65 	"TIM ",
66 	"TPOST ",
67 	"TPRE ",
68 	"TXIPS ",
69 	"TXULP0 ",
70 	"TXULP1 ",
71 	"UC ",
72 	"WDMA ",
73 	"TXULP2 ",
74 	"HOST1 ",
75 	"P0_OB_LINK ",
76 	"P1_OB_LINK ",
77 	"HOST_GPIO ",
78 	"MBOX ",
79 	"AXGMAC0",
80 	"AXGMAC1",
81 	"JTAG",
82 	"MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86 	"LPCMEMHOST",
87 	"MGMT_MAC",
88 	"PCS0ONLINE",
89 	"MPU_IRAM",
90 	"PCS1ONLINE",
91 	"PCTL0",
92 	"PCTL1",
93 	"PMEM",
94 	"RR",
95 	"TXPB",
96 	"RXPP",
97 	"XAUI",
98 	"TXP",
99 	"ARM",
100 	"IPC",
101 	"HOST2",
102 	"HOST3",
103 	"HOST4",
104 	"HOST5",
105 	"HOST6",
106 	"HOST7",
107 	"HOST8",
108 	"HOST9",
109 	"NETC",
110 	"Unknown",
111 	"Unknown",
112 	"Unknown",
113 	"Unknown",
114 	"Unknown",
115 	"Unknown",
116 	"Unknown",
117 	"Unknown"
118 };
119 
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122 	return (adapter->function_mode & FLEX10_MODE ||
123 		adapter->function_mode & VNIC_MODE ||
124 		adapter->function_mode & UMC_ENABLED);
125 }
126 
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129 	struct be_dma_mem *mem = &q->dma_mem;
130 	if (mem->va) {
131 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 				  mem->dma);
133 		mem->va = NULL;
134 	}
135 }
136 
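/* Allocate DMA-coherent memory for a queue of 'len' entries of
 * 'entry_size' bytes each and initialize the queue bookkeeping.
 */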
137 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 		u16 len, u16 entry_size)
139 {
140 	struct be_dma_mem *mem = &q->dma_mem;
141 
142 	memset(q, 0, sizeof(*q));
143 	q->len = len;
144 	q->entry_size = entry_size;
145 	mem->size = len * entry_size;
146 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 				     GFP_KERNEL);
148 	if (!mem->va)
149 		return -ENOMEM;
150 	memset(mem->va, 0, mem->size);
151 	return 0;
152 }
153 
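/* Enable or disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register via PCI config space. Does nothing if
 * the bit is already in the requested state or an EEH error is pending.
 */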
154 static void be_intr_set(struct be_adapter *adapter, bool enable)
155 {
156 	u32 reg, enabled;
157 
158 	if (adapter->eeh_err)
159 		return;
160 
161 	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 				&reg);
163 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164 
165 	if (!enabled && enable)
166 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 	else if (enabled && !enable)
168 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 	else
170 		return;
171 
172 	pci_write_config_dword(adapter->pdev,
173 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175 
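/* Ring the RX queue doorbell to tell the HW how many rx buffers were posted */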
176 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177 {
178 	u32 val = 0;
179 	val |= qid & DB_RQ_RING_ID_MASK;
180 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181 
182 	wmb();
183 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
184 }
185 
186 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188 	u32 val = 0;
189 	val |= qid & DB_TXULP_RING_ID_MASK;
190 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191 
192 	wmb();
193 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194 }
195 
196 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197 		bool arm, bool clear_int, u16 num_popped)
198 {
199 	u32 val = 0;
200 	val |= qid & DB_EQ_RING_ID_MASK;
201 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 			DB_EQ_RING_ID_EXT_MASK_SHIFT);
203 
204 	if (adapter->eeh_err)
205 		return;
206 
207 	if (arm)
208 		val |= 1 << DB_EQ_REARM_SHIFT;
209 	if (clear_int)
210 		val |= 1 << DB_EQ_CLR_SHIFT;
211 	val |= 1 << DB_EQ_EVNT_SHIFT;
212 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
214 }
215 
216 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
217 {
218 	u32 val = 0;
219 	val |= qid & DB_CQ_RING_ID_MASK;
220 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
222 
223 	if (adapter->eeh_err)
224 		return;
225 
226 	if (arm)
227 		val |= 1 << DB_CQ_REARM_SHIFT;
228 	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229 	iowrite32(val, adapter->db + DB_CQ_OFFSET);
230 }
231 
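/* ndo_set_mac_address handler: programs a new primary MAC by adding the new
 * pmac entry first and deleting the old one only after the add succeeds.
 */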
232 static int be_mac_addr_set(struct net_device *netdev, void *p)
233 {
234 	struct be_adapter *adapter = netdev_priv(netdev);
235 	struct sockaddr *addr = p;
236 	int status = 0;
237 	u8 current_mac[ETH_ALEN];
238 	u32 pmac_id = adapter->pmac_id[0];
239 
240 	if (!is_valid_ether_addr(addr->sa_data))
241 		return -EADDRNOTAVAIL;
242 
243 	status = be_cmd_mac_addr_query(adapter, current_mac,
244 				MAC_ADDRESS_TYPE_NETWORK, false,
245 				adapter->if_handle, 0);
246 	if (status)
247 		goto err;
248 
249 	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251 				adapter->if_handle, &adapter->pmac_id[0], 0);
252 		if (status)
253 			goto err;
254 
255 		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 	}
257 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 	return 0;
259 err:
260 	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
261 	return status;
262 }
263 
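/* Copy the BE2 (v0 format) HW stats returned by FW into the driver's generic
 * be_drv_stats; populate_be3_stats() and populate_lancer_stats() do the same
 * for the v1 and Lancer stat formats.
 */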
264 static void populate_be2_stats(struct be_adapter *adapter)
265 {
266 	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269 	struct be_port_rxf_stats_v0 *port_stats =
270 					&rxf_stats->port[adapter->port_num];
271 	struct be_drv_stats *drvs = &adapter->drv_stats;
272 
273 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 	drvs->rx_control_frames = port_stats->rx_control_frames;
277 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288 	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289 	drvs->rx_dropped_header_too_small =
290 		port_stats->rx_dropped_header_too_small;
291 	drvs->rx_address_mismatch_drops =
292 					port_stats->rx_address_mismatch_drops +
293 					port_stats->rx_vlan_mismatch_drops;
294 	drvs->rx_alignment_symbol_errors =
295 		port_stats->rx_alignment_symbol_errors;
296 
297 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 	drvs->tx_controlframes = port_stats->tx_controlframes;
299 
300 	if (adapter->port_num)
301 		drvs->jabber_events = rxf_stats->port1_jabber_events;
302 	else
303 		drvs->jabber_events = rxf_stats->port0_jabber_events;
304 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311 }
312 
313 static void populate_be3_stats(struct be_adapter *adapter)
314 {
315 	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318 	struct be_port_rxf_stats_v1 *port_stats =
319 					&rxf_stats->port[adapter->port_num];
320 	struct be_drv_stats *drvs = &adapter->drv_stats;
321 
322 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323 	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 	drvs->rx_control_frames = port_stats->rx_control_frames;
328 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 	drvs->rx_dropped_header_too_small =
339 		port_stats->rx_dropped_header_too_small;
340 	drvs->rx_input_fifo_overflow_drop =
341 		port_stats->rx_input_fifo_overflow_drop;
342 	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343 	drvs->rx_alignment_symbol_errors =
344 		port_stats->rx_alignment_symbol_errors;
345 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 	drvs->tx_controlframes = port_stats->tx_controlframes;
348 	drvs->jabber_events = port_stats->jabber_events;
349 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357 
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
361 	struct be_drv_stats *drvs = &adapter->drv_stats;
362 	struct lancer_pport_stats *pport_stats =
363 					pport_stats_from_cmd(adapter);
364 
365 	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 	drvs->rx_dropped_tcp_length =
376 				pport_stats->rx_dropped_invalid_tcp_length;
377 	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 	drvs->rx_dropped_header_too_small =
381 				pport_stats->rx_dropped_header_too_small;
382 	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 	drvs->rx_address_mismatch_drops =
384 					pport_stats->rx_address_mismatch_drops +
385 					pport_stats->rx_vlan_mismatch_drops;
386 	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387 	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388 	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390 	drvs->jabber_events = pport_stats->rx_jabbers;
391 	drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393 	drvs->rx_drops_too_many_frags =
394 				pport_stats->rx_drops_too_many_frags_lo;
395 }
396 
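/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit
 * software counter. The low 16 bits of *acc mirror the HW value; a new
 * value smaller than the previous one means the HW counter wrapped, so
 * another 65536 is added to the high part.
 * E.g. *acc = 0x0001FFF0 and val = 5: wrapped, so the new value is
 * 0x00010000 + 5 + 65536 = 0x00020005.
 */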
397 static void accumulate_16bit_val(u32 *acc, u16 val)
398 {
399 #define lo(x)			((x) & 0xFFFF)
400 #define hi(x)			((x) & 0xFFFF0000)
401 	bool wrapped = val < lo(*acc);
402 	u32 newacc = hi(*acc) + val;
403 
404 	if (wrapped)
405 		newacc += 65536;
406 	ACCESS_ONCE(*acc) = newacc;
407 }
408 
409 void be_parse_stats(struct be_adapter *adapter)
410 {
411 	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 	struct be_rx_obj *rxo;
413 	int i;
414 
415 	if (adapter->generation == BE_GEN3) {
416 		if (lancer_chip(adapter))
417 			populate_lancer_stats(adapter);
418 		else
419 			populate_be3_stats(adapter);
420 	} else {
421 		populate_be2_stats(adapter);
422 	}
423 
424 	if (lancer_chip(adapter))
425 		goto done;
426 
427 	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
428 	for_all_rx_queues(adapter, rxo, i) {
429 		/* The erx HW counter below can wrap around after 65535;
430 		 * the driver accumulates it into a 32-bit value.
431 		 */
432 		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 	}
435 done:
436 	return;
437 }
438 
439 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440 					struct rtnl_link_stats64 *stats)
441 {
442 	struct be_adapter *adapter = netdev_priv(netdev);
443 	struct be_drv_stats *drvs = &adapter->drv_stats;
444 	struct be_rx_obj *rxo;
445 	struct be_tx_obj *txo;
446 	u64 pkts, bytes;
447 	unsigned int start;
448 	int i;
449 
450 	for_all_rx_queues(adapter, rxo, i) {
451 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
452 		do {
453 			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454 			pkts = rx_stats(rxo)->rx_pkts;
455 			bytes = rx_stats(rxo)->rx_bytes;
456 		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
457 		stats->rx_packets += pkts;
458 		stats->rx_bytes += bytes;
459 		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460 		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461 					rx_stats(rxo)->rx_drops_no_frags;
462 	}
463 
464 	for_all_tx_queues(adapter, txo, i) {
465 		const struct be_tx_stats *tx_stats = tx_stats(txo);
466 		do {
467 			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468 			pkts = tx_stats(txo)->tx_pkts;
469 			bytes = tx_stats(txo)->tx_bytes;
470 		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471 		stats->tx_packets += pkts;
472 		stats->tx_bytes += bytes;
473 	}
474 
475 	/* bad pkts received */
476 	stats->rx_errors = drvs->rx_crc_errors +
477 		drvs->rx_alignment_symbol_errors +
478 		drvs->rx_in_range_errors +
479 		drvs->rx_out_range_errors +
480 		drvs->rx_frame_too_long +
481 		drvs->rx_dropped_too_small +
482 		drvs->rx_dropped_too_short +
483 		drvs->rx_dropped_header_too_small +
484 		drvs->rx_dropped_tcp_length +
485 		drvs->rx_dropped_runt;
486 
487 	/* detailed rx errors */
488 	stats->rx_length_errors = drvs->rx_in_range_errors +
489 		drvs->rx_out_range_errors +
490 		drvs->rx_frame_too_long;
491 
492 	stats->rx_crc_errors = drvs->rx_crc_errors;
493 
494 	/* frame alignment errors */
495 	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
496 
497 	/* receiver fifo overrun */
498 	/* drops_no_pbuf is not per i/f, it's per BE card */
499 	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
500 				drvs->rx_input_fifo_overflow_drop +
501 				drvs->rx_drops_no_pbuf;
502 	return stats;
503 }
504 
505 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
506 {
507 	struct net_device *netdev = adapter->netdev;
508 
509 	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
510 		netif_carrier_off(netdev);
511 		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
512 	}
513 
514 	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 		netif_carrier_on(netdev);
516 	else
517 		netif_carrier_off(netdev);
518 }
519 
520 static void be_tx_stats_update(struct be_tx_obj *txo,
521 			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
522 {
523 	struct be_tx_stats *stats = tx_stats(txo);
524 
525 	u64_stats_update_begin(&stats->sync);
526 	stats->tx_reqs++;
527 	stats->tx_wrbs += wrb_cnt;
528 	stats->tx_bytes += copied;
529 	stats->tx_pkts += (gso_segs ? gso_segs : 1);
530 	if (stopped)
531 		stats->tx_stops++;
532 	u64_stats_update_end(&stats->sync);
533 }
534 
535 /* Determine number of WRB entries needed to xmit data in an skb */
536 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 								bool *dummy)
538 {
539 	int cnt = (skb->len > skb->data_len);
540 
541 	cnt += skb_shinfo(skb)->nr_frags;
542 
543 	/* to account for hdr wrb */
544 	cnt++;
545 	if (lancer_chip(adapter) || !(cnt & 1)) {
546 		*dummy = false;
547 	} else {
548 		/* add a dummy to make it an even num */
549 		cnt++;
550 		*dummy = true;
551 	}
552 	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 	return cnt;
554 }
555 
556 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557 {
558 	wrb->frag_pa_hi = upper_32_bits(addr);
559 	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561 }
562 
563 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 					struct sk_buff *skb)
565 {
566 	u8 vlan_prio;
567 	u16 vlan_tag;
568 
569 	vlan_tag = vlan_tx_tag_get(skb);
570 	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 	/* If vlan priority provided by OS is NOT in available bmap */
572 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 				adapter->recommended_prio;
575 
576 	return vlan_tag;
577 }
578 
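/* Fill the header WRB that precedes the data WRBs: LSO/checksum offload
 * flags, VLAN tag, total WRB count and total frame length.
 */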
579 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
580 		struct sk_buff *skb, u32 wrb_cnt, u32 len)
581 {
582 	u16 vlan_tag;
583 
584 	memset(hdr, 0, sizeof(*hdr));
585 
586 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
587 
588 	if (skb_is_gso(skb)) {
589 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
590 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
591 			hdr, skb_shinfo(skb)->gso_size);
592 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
593 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
594 		if (lancer_chip(adapter) && adapter->sli_family  ==
595 							LANCER_A0_SLI_FAMILY) {
596 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
597 			if (is_tcp_pkt(skb))
598 				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
599 								tcpcs, hdr, 1);
600 			else if (is_udp_pkt(skb))
601 				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
602 								udpcs, hdr, 1);
603 		}
604 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
605 		if (is_tcp_pkt(skb))
606 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
607 		else if (is_udp_pkt(skb))
608 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
609 	}
610 
611 	if (vlan_tx_tag_present(skb)) {
612 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
613 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
614 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
615 	}
616 
617 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
618 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
619 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
620 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
621 }
622 
623 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
624 		bool unmap_single)
625 {
626 	dma_addr_t dma;
627 
628 	be_dws_le_to_cpu(wrb, sizeof(*wrb));
629 
630 	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
631 	if (wrb->frag_len) {
632 		if (unmap_single)
633 			dma_unmap_single(dev, dma, wrb->frag_len,
634 					 DMA_TO_DEVICE);
635 		else
636 			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
637 	}
638 }
639 
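/* DMA-map the skb header and frags and fill one WRB per mapping (plus an
 * optional dummy WRB for even alignment). Returns the number of bytes
 * queued, or 0 after unwinding the mappings on a DMA mapping error.
 */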
640 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
641 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
642 {
643 	dma_addr_t busaddr;
644 	int i, copied = 0;
645 	struct device *dev = &adapter->pdev->dev;
646 	struct sk_buff *first_skb = skb;
647 	struct be_eth_wrb *wrb;
648 	struct be_eth_hdr_wrb *hdr;
649 	bool map_single = false;
650 	u16 map_head;
651 
652 	hdr = queue_head_node(txq);
653 	queue_head_inc(txq);
654 	map_head = txq->head;
655 
656 	if (skb->len > skb->data_len) {
657 		int len = skb_headlen(skb);
658 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
659 		if (dma_mapping_error(dev, busaddr))
660 			goto dma_err;
661 		map_single = true;
662 		wrb = queue_head_node(txq);
663 		wrb_fill(wrb, busaddr, len);
664 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
665 		queue_head_inc(txq);
666 		copied += len;
667 	}
668 
669 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
670 		const struct skb_frag_struct *frag =
671 			&skb_shinfo(skb)->frags[i];
672 		busaddr = skb_frag_dma_map(dev, frag, 0,
673 					   skb_frag_size(frag), DMA_TO_DEVICE);
674 		if (dma_mapping_error(dev, busaddr))
675 			goto dma_err;
676 		wrb = queue_head_node(txq);
677 		wrb_fill(wrb, busaddr, skb_frag_size(frag));
678 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
679 		queue_head_inc(txq);
680 		copied += skb_frag_size(frag);
681 	}
682 
683 	if (dummy_wrb) {
684 		wrb = queue_head_node(txq);
685 		wrb_fill(wrb, 0, 0);
686 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
687 		queue_head_inc(txq);
688 	}
689 
690 	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
691 	be_dws_cpu_to_le(hdr, sizeof(*hdr));
692 
693 	return copied;
694 dma_err:
695 	txq->head = map_head;
696 	while (copied) {
697 		wrb = queue_head_node(txq);
698 		unmap_tx_frag(dev, wrb, map_single);
699 		map_single = false;
700 		copied -= wrb->frag_len;
701 		queue_head_inc(txq);
702 	}
703 	return 0;
704 }
705 
706 static netdev_tx_t be_xmit(struct sk_buff *skb,
707 			struct net_device *netdev)
708 {
709 	struct be_adapter *adapter = netdev_priv(netdev);
710 	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711 	struct be_queue_info *txq = &txo->q;
712 	u32 wrb_cnt = 0, copied = 0;
713 	u32 start = txq->head;
714 	bool dummy_wrb, stopped = false;
715 
716 	/* For vlan tagged pkts, BE
717 	 * 1) calculates checksum even when CSO is not requested
718 	 * 2) calculates checksum wrongly for padded pkt less than
719 	 * 60 bytes long.
720 	 * As a workaround disable TX vlan offloading in such cases.
721 	 */
722 	if (unlikely(vlan_tx_tag_present(skb) &&
723 		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
724 		skb = skb_share_check(skb, GFP_ATOMIC);
725 		if (unlikely(!skb))
726 			goto tx_drop;
727 
728 		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
729 		if (unlikely(!skb))
730 			goto tx_drop;
731 
732 		skb->vlan_tci = 0;
733 	}
734 
735 	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
736 
737 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
738 	if (copied) {
739 		/* record the sent skb in the sent_skb table */
740 		BUG_ON(txo->sent_skb_list[start]);
741 		txo->sent_skb_list[start] = skb;
742 
743 		/* Ensure txq has space for the next skb; Else stop the queue
744 		 * *BEFORE* ringing the tx doorbell, so that we serialize the
745 		 * tx compls of the current transmit which'll wake up the queue
746 		 */
747 		atomic_add(wrb_cnt, &txq->used);
748 		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
749 								txq->len) {
750 			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
751 			stopped = true;
752 		}
753 
754 		be_txq_notify(adapter, txq->id, wrb_cnt);
755 
756 		be_tx_stats_update(txo, wrb_cnt, copied,
757 				skb_shinfo(skb)->gso_segs, stopped);
758 	} else {
759 		txq->head = start;
760 		dev_kfree_skb_any(skb);
761 	}
762 tx_drop:
763 	return NETDEV_TX_OK;
764 }
765 
766 static int be_change_mtu(struct net_device *netdev, int new_mtu)
767 {
768 	struct be_adapter *adapter = netdev_priv(netdev);
769 	if (new_mtu < BE_MIN_MTU ||
770 			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 					(ETH_HLEN + ETH_FCS_LEN))) {
772 		dev_info(&adapter->pdev->dev,
773 			"MTU must be between %d and %d bytes\n",
774 			BE_MIN_MTU,
775 			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
776 		return -EINVAL;
777 	}
778 	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 			netdev->mtu, new_mtu);
780 	netdev->mtu = new_mtu;
781 	return 0;
782 }
783 
784 /*
785  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
786  * If the user configures more, place BE in vlan promiscuous mode.
787  */
788 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
789 {
790 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
791 	u16 vtag[BE_NUM_VLANS_SUPPORTED];
792 	u16 ntags = 0, i;
793 	int status = 0;
794 
795 	if (vf) {
796 		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
797 		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
798 					    1, 1, 0);
799 	}
800 
801 	/* No need to further configure vids if in promiscuous mode */
802 	if (adapter->promiscuous)
803 		return 0;
804 
805 	if (adapter->vlans_added > adapter->max_vlans)
806 		goto set_vlan_promisc;
807 
808 	/* Construct VLAN Table to give to HW */
809 	for (i = 0; i < VLAN_N_VID; i++)
810 		if (adapter->vlan_tag[i])
811 			vtag[ntags++] = cpu_to_le16(i);
812 
813 	status = be_cmd_vlan_config(adapter, adapter->if_handle,
814 				    vtag, ntags, 1, 0);
815 
816 	/* Set to VLAN promisc mode as setting VLAN filter failed */
817 	if (status) {
818 		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819 		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820 		goto set_vlan_promisc;
821 	}
822 
823 	return status;
824 
825 set_vlan_promisc:
826 	status = be_cmd_vlan_config(adapter, adapter->if_handle,
827 				    NULL, 0, 1, 1);
828 	return status;
829 }
830 
831 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
832 {
833 	struct be_adapter *adapter = netdev_priv(netdev);
834 	int status = 0;
835 
836 	if (!be_physfn(adapter)) {
837 		status = -EINVAL;
838 		goto ret;
839 	}
840 
841 	adapter->vlan_tag[vid] = 1;
842 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
843 		status = be_vid_config(adapter, false, 0);
844 
845 	if (!status)
846 		adapter->vlans_added++;
847 	else
848 		adapter->vlan_tag[vid] = 0;
849 ret:
850 	return status;
851 }
852 
853 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
854 {
855 	struct be_adapter *adapter = netdev_priv(netdev);
856 	int status = 0;
857 
858 	if (!be_physfn(adapter)) {
859 		status = -EINVAL;
860 		goto ret;
861 	}
862 
863 	adapter->vlan_tag[vid] = 0;
864 	if (adapter->vlans_added <= adapter->max_vlans)
865 		status = be_vid_config(adapter, false, 0);
866 
867 	if (!status)
868 		adapter->vlans_added--;
869 	else
870 		adapter->vlan_tag[vid] = 1;
871 ret:
872 	return status;
873 }
874 
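/* ndo_set_rx_mode handler: programs promiscuous, multicast and unicast
 * filters, falling back to (multicast) promiscuous mode when the HW filter
 * table is exhausted.
 */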
875 static void be_set_rx_mode(struct net_device *netdev)
876 {
877 	struct be_adapter *adapter = netdev_priv(netdev);
878 	int status;
879 
880 	if (netdev->flags & IFF_PROMISC) {
881 		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
882 		adapter->promiscuous = true;
883 		goto done;
884 	}
885 
886 	/* BE was previously in promiscuous mode; disable it */
887 	if (adapter->promiscuous) {
888 		adapter->promiscuous = false;
889 		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
890 
891 		if (adapter->vlans_added)
892 			be_vid_config(adapter, false, 0);
893 	}
894 
895 	/* Enable multicast promisc if num configured exceeds what we support */
896 	if (netdev->flags & IFF_ALLMULTI ||
897 			netdev_mc_count(netdev) > BE_MAX_MC) {
898 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
899 		goto done;
900 	}
901 
902 	if (netdev_uc_count(netdev) != adapter->uc_macs) {
903 		struct netdev_hw_addr *ha;
904 		int i = 1; /* First slot is claimed by the Primary MAC */
905 
906 		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
907 			be_cmd_pmac_del(adapter, adapter->if_handle,
908 					adapter->pmac_id[i], 0);
909 		}
910 
911 		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
912 			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
913 			adapter->promiscuous = true;
914 			goto done;
915 		}
916 
917 		netdev_for_each_uc_addr(ha, adapter->netdev) {
918 			adapter->uc_macs++; /* First slot is for Primary MAC */
919 			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
920 					adapter->if_handle,
921 					&adapter->pmac_id[adapter->uc_macs], 0);
922 		}
923 	}
924 
925 	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926 
927 	/* Set to MCAST promisc mode if setting MULTICAST address fails */
928 	if (status) {
929 		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930 		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932 	}
933 done:
934 	return;
935 }
936 
937 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
938 {
939 	struct be_adapter *adapter = netdev_priv(netdev);
940 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
941 	int status;
942 
943 	if (!sriov_enabled(adapter))
944 		return -EPERM;
945 
946 	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
947 		return -EINVAL;
948 
949 	if (lancer_chip(adapter)) {
950 		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
951 	} else {
952 		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
953 					 vf_cfg->pmac_id, vf + 1);
954 
955 		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
956 					 &vf_cfg->pmac_id, vf + 1);
957 	}
958 
959 	if (status)
960 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
961 				mac, vf);
962 	else
963 		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
964 
965 	return status;
966 }
967 
968 static int be_get_vf_config(struct net_device *netdev, int vf,
969 			struct ifla_vf_info *vi)
970 {
971 	struct be_adapter *adapter = netdev_priv(netdev);
972 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
973 
974 	if (!sriov_enabled(adapter))
975 		return -EPERM;
976 
977 	if (vf >= adapter->num_vfs)
978 		return -EINVAL;
979 
980 	vi->vf = vf;
981 	vi->tx_rate = vf_cfg->tx_rate;
982 	vi->vlan = vf_cfg->vlan_tag;
983 	vi->qos = 0;
984 	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
985 
986 	return 0;
987 }
988 
989 static int be_set_vf_vlan(struct net_device *netdev,
990 			int vf, u16 vlan, u8 qos)
991 {
992 	struct be_adapter *adapter = netdev_priv(netdev);
993 	int status = 0;
994 
995 	if (!sriov_enabled(adapter))
996 		return -EPERM;
997 
998 	if (vf >= adapter->num_vfs || vlan > 4095)
999 		return -EINVAL;
1000 
1001 	if (vlan) {
1002 		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1003 			/* If this is new value, program it. Else skip. */
1004 			adapter->vf_cfg[vf].vlan_tag = vlan;
1005 
1006 			status = be_cmd_set_hsw_config(adapter, vlan,
1007 				vf + 1, adapter->vf_cfg[vf].if_handle);
1008 		}
1009 	} else {
1010 		/* Reset Transparent Vlan Tagging. */
1011 		adapter->vf_cfg[vf].vlan_tag = 0;
1012 		vlan = adapter->vf_cfg[vf].def_vid;
1013 		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1014 			adapter->vf_cfg[vf].if_handle);
1015 	}
1016 
1018 	if (status)
1019 		dev_info(&adapter->pdev->dev,
1020 				"VLAN %d config on VF %d failed\n", vlan, vf);
1021 	return status;
1022 }
1023 
1024 static int be_set_vf_tx_rate(struct net_device *netdev,
1025 			int vf, int rate)
1026 {
1027 	struct be_adapter *adapter = netdev_priv(netdev);
1028 	int status = 0;
1029 
1030 	if (!sriov_enabled(adapter))
1031 		return -EPERM;
1032 
1033 	if (vf >= adapter->num_vfs)
1034 		return -EINVAL;
1035 
1036 	if (rate < 100 || rate > 10000) {
1037 		dev_err(&adapter->pdev->dev,
1038 			"tx rate must be between 100 and 10000 Mbps\n");
1039 		return -EINVAL;
1040 	}
1041 
1042 	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1043 
1044 	if (status)
1045 		dev_err(&adapter->pdev->dev,
1046 				"tx rate %d on VF %d failed\n", rate, vf);
1047 	else
1048 		adapter->vf_cfg[vf].tx_rate = rate;
1049 	return status;
1050 }
1051 
1052 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1053 {
1054 	struct pci_dev *dev, *pdev = adapter->pdev;
1055 	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1056 	u16 offset, stride;
1057 
1058 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1059 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1060 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1061 
1062 	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1063 	while (dev) {
1064 		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1065 		if (dev->is_virtfn && dev->devfn == vf_fn) {
1066 			vfs++;
1067 			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1068 				assigned_vfs++;
1069 		}
1070 		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1071 	}
1072 	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1073 }
1074 
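/* Adaptive interrupt coalescing: recompute the EQ delay once a second from
 * the observed rx packet rate and program it via be_cmd_modify_eqd() when
 * it changes.
 */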
1075 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1076 {
1077 	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1078 	ulong now = jiffies;
1079 	ulong delta = now - stats->rx_jiffies;
1080 	u64 pkts;
1081 	unsigned int start, eqd;
1082 
1083 	if (!eqo->enable_aic) {
1084 		eqd = eqo->eqd;
1085 		goto modify_eqd;
1086 	}
1087 
1088 	if (eqo->idx >= adapter->num_rx_qs)
1089 		return;
1090 
1091 	stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1092 
1093 	/* Wrapped around */
1094 	if (time_before(now, stats->rx_jiffies)) {
1095 		stats->rx_jiffies = now;
1096 		return;
1097 	}
1098 
1099 	/* Update once a second */
1100 	if (delta < HZ)
1101 		return;
1102 
1103 	do {
1104 		start = u64_stats_fetch_begin_bh(&stats->sync);
1105 		pkts = stats->rx_pkts;
1106 	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
1107 
1108 	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1109 	stats->rx_pkts_prev = pkts;
1110 	stats->rx_jiffies = now;
1111 	eqd = (stats->rx_pps / 110000) << 3;
1112 	eqd = min(eqd, eqo->max_eqd);
1113 	eqd = max(eqd, eqo->min_eqd);
1114 	if (eqd < 10)
1115 		eqd = 0;
1116 
1117 modify_eqd:
1118 	if (eqd != eqo->cur_eqd) {
1119 		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1120 		eqo->cur_eqd = eqd;
1121 	}
1122 }
1123 
1124 static void be_rx_stats_update(struct be_rx_obj *rxo,
1125 		struct be_rx_compl_info *rxcp)
1126 {
1127 	struct be_rx_stats *stats = rx_stats(rxo);
1128 
1129 	u64_stats_update_begin(&stats->sync);
1130 	stats->rx_compl++;
1131 	stats->rx_bytes += rxcp->pkt_size;
1132 	stats->rx_pkts++;
1133 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1134 		stats->rx_mcast_pkts++;
1135 	if (rxcp->err)
1136 		stats->rx_compl_err++;
1137 	u64_stats_update_end(&stats->sync);
1138 }
1139 
1140 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1141 {
1142 	/* L4 checksum is not reliable for non TCP/UDP packets.
1143 	 * Also ignore ipcksm for ipv6 pkts */
1144 	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1145 				(rxcp->ip_csum || rxcp->ipv6);
1146 }
1147 
1148 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1149 						u16 frag_idx)
1150 {
1151 	struct be_adapter *adapter = rxo->adapter;
1152 	struct be_rx_page_info *rx_page_info;
1153 	struct be_queue_info *rxq = &rxo->q;
1154 
1155 	rx_page_info = &rxo->page_info_tbl[frag_idx];
1156 	BUG_ON(!rx_page_info->page);
1157 
1158 	if (rx_page_info->last_page_user) {
1159 		dma_unmap_page(&adapter->pdev->dev,
1160 			       dma_unmap_addr(rx_page_info, bus),
1161 			       adapter->big_page_size, DMA_FROM_DEVICE);
1162 		rx_page_info->last_page_user = false;
1163 	}
1164 
1165 	atomic_dec(&rxq->used);
1166 	return rx_page_info;
1167 }
1168 
1169 /* Throw away the data in the Rx completion */
1170 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1171 				struct be_rx_compl_info *rxcp)
1172 {
1173 	struct be_queue_info *rxq = &rxo->q;
1174 	struct be_rx_page_info *page_info;
1175 	u16 i, num_rcvd = rxcp->num_rcvd;
1176 
1177 	for (i = 0; i < num_rcvd; i++) {
1178 		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1179 		put_page(page_info->page);
1180 		memset(page_info, 0, sizeof(*page_info));
1181 		index_inc(&rxcp->rxq_idx, rxq->len);
1182 	}
1183 }
1184 
1185 /*
1186  * skb_fill_rx_data forms a complete skb for an ether frame
1187  * indicated by rxcp.
1188  */
1189 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1190 			     struct be_rx_compl_info *rxcp)
1191 {
1192 	struct be_queue_info *rxq = &rxo->q;
1193 	struct be_rx_page_info *page_info;
1194 	u16 i, j;
1195 	u16 hdr_len, curr_frag_len, remaining;
1196 	u8 *start;
1197 
1198 	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1199 	start = page_address(page_info->page) + page_info->page_offset;
1200 	prefetch(start);
1201 
1202 	/* Copy data in the first descriptor of this completion */
1203 	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1204 
1205 	/* Copy the header portion into skb_data */
1206 	hdr_len = min(BE_HDR_LEN, curr_frag_len);
1207 	memcpy(skb->data, start, hdr_len);
1208 	skb->len = curr_frag_len;
1209 	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1210 		/* Complete packet has now been moved to data */
1211 		put_page(page_info->page);
1212 		skb->data_len = 0;
1213 		skb->tail += curr_frag_len;
1214 	} else {
1215 		skb_shinfo(skb)->nr_frags = 1;
1216 		skb_frag_set_page(skb, 0, page_info->page);
1217 		skb_shinfo(skb)->frags[0].page_offset =
1218 					page_info->page_offset + hdr_len;
1219 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1220 		skb->data_len = curr_frag_len - hdr_len;
1221 		skb->truesize += rx_frag_size;
1222 		skb->tail += hdr_len;
1223 	}
1224 	page_info->page = NULL;
1225 
1226 	if (rxcp->pkt_size <= rx_frag_size) {
1227 		BUG_ON(rxcp->num_rcvd != 1);
1228 		return;
1229 	}
1230 
1231 	/* More frags present for this completion */
1232 	index_inc(&rxcp->rxq_idx, rxq->len);
1233 	remaining = rxcp->pkt_size - curr_frag_len;
1234 	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1235 		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1236 		curr_frag_len = min(remaining, rx_frag_size);
1237 
1238 		/* Coalesce all frags from the same physical page in one slot */
1239 		if (page_info->page_offset == 0) {
1240 			/* Fresh page */
1241 			j++;
1242 			skb_frag_set_page(skb, j, page_info->page);
1243 			skb_shinfo(skb)->frags[j].page_offset =
1244 							page_info->page_offset;
1245 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1246 			skb_shinfo(skb)->nr_frags++;
1247 		} else {
1248 			put_page(page_info->page);
1249 		}
1250 
1251 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1252 		skb->len += curr_frag_len;
1253 		skb->data_len += curr_frag_len;
1254 		skb->truesize += rx_frag_size;
1255 		remaining -= curr_frag_len;
1256 		index_inc(&rxcp->rxq_idx, rxq->len);
1257 		page_info->page = NULL;
1258 	}
1259 	BUG_ON(j > MAX_SKB_FRAGS);
1260 }
1261 
1262 /* Process the RX completion indicated by rxcp when GRO is disabled */
1263 static void be_rx_compl_process(struct be_rx_obj *rxo,
1264 				struct be_rx_compl_info *rxcp)
1265 {
1266 	struct be_adapter *adapter = rxo->adapter;
1267 	struct net_device *netdev = adapter->netdev;
1268 	struct sk_buff *skb;
1269 
1270 	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1271 	if (unlikely(!skb)) {
1272 		rx_stats(rxo)->rx_drops_no_skbs++;
1273 		be_rx_compl_discard(rxo, rxcp);
1274 		return;
1275 	}
1276 
1277 	skb_fill_rx_data(rxo, skb, rxcp);
1278 
1279 	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1280 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1281 	else
1282 		skb_checksum_none_assert(skb);
1283 
1284 	skb->protocol = eth_type_trans(skb, netdev);
1285 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1286 	if (netdev->features & NETIF_F_RXHASH)
1287 		skb->rxhash = rxcp->rss_hash;
1288 
1290 	if (rxcp->vlanf)
1291 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1292 
1293 	netif_receive_skb(skb);
1294 }
1295 
1296 /* Process the RX completion indicated by rxcp when GRO is enabled */
1297 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1298 			     struct be_rx_compl_info *rxcp)
1299 {
1300 	struct be_adapter *adapter = rxo->adapter;
1301 	struct be_rx_page_info *page_info;
1302 	struct sk_buff *skb = NULL;
1303 	struct be_queue_info *rxq = &rxo->q;
1304 	u16 remaining, curr_frag_len;
1305 	u16 i, j;
1306 
1307 	skb = napi_get_frags(napi);
1308 	if (!skb) {
1309 		be_rx_compl_discard(rxo, rxcp);
1310 		return;
1311 	}
1312 
1313 	remaining = rxcp->pkt_size;
1314 	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1315 		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1316 
1317 		curr_frag_len = min(remaining, rx_frag_size);
1318 
1319 		/* Coalesce all frags from the same physical page in one slot */
1320 		if (i == 0 || page_info->page_offset == 0) {
1321 			/* First frag or Fresh page */
1322 			j++;
1323 			skb_frag_set_page(skb, j, page_info->page);
1324 			skb_shinfo(skb)->frags[j].page_offset =
1325 							page_info->page_offset;
1326 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1327 		} else {
1328 			put_page(page_info->page);
1329 		}
1330 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1331 		skb->truesize += rx_frag_size;
1332 		remaining -= curr_frag_len;
1333 		index_inc(&rxcp->rxq_idx, rxq->len);
1334 		memset(page_info, 0, sizeof(*page_info));
1335 	}
1336 	BUG_ON(j > MAX_SKB_FRAGS);
1337 
1338 	skb_shinfo(skb)->nr_frags = j + 1;
1339 	skb->len = rxcp->pkt_size;
1340 	skb->data_len = rxcp->pkt_size;
1341 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1342 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1343 	if (adapter->netdev->features & NETIF_F_RXHASH)
1344 		skb->rxhash = rxcp->rss_hash;
1345 
1346 	if (rxcp->vlanf)
1347 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1348 
1349 	napi_gro_frags(napi);
1350 }
1351 
1352 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1353 				 struct be_rx_compl_info *rxcp)
1354 {
1355 	rxcp->pkt_size =
1356 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1357 	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1358 	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1359 	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1360 	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1361 	rxcp->ip_csum =
1362 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1363 	rxcp->l4_csum =
1364 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1365 	rxcp->ipv6 =
1366 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1367 	rxcp->rxq_idx =
1368 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1369 	rxcp->num_rcvd =
1370 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1371 	rxcp->pkt_type =
1372 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1373 	rxcp->rss_hash =
1374 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1375 	if (rxcp->vlanf) {
1376 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1377 					  compl);
1378 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1379 					       compl);
1380 	}
1381 	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1382 }
1383 
1384 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1385 				 struct be_rx_compl_info *rxcp)
1386 {
1387 	rxcp->pkt_size =
1388 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1389 	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1390 	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1391 	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1392 	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1393 	rxcp->ip_csum =
1394 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1395 	rxcp->l4_csum =
1396 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1397 	rxcp->ipv6 =
1398 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1399 	rxcp->rxq_idx =
1400 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1401 	rxcp->num_rcvd =
1402 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1403 	rxcp->pkt_type =
1404 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1405 	rxcp->rss_hash =
1406 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1407 	if (rxcp->vlanf) {
1408 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1409 					  compl);
1410 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1411 					       compl);
1412 	}
1413 	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1414 }
1415 
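/* Return the RX completion at the CQ tail parsed into rxo->rxcp, or NULL if
 * no valid completion is pending. The compl is consumed (valid bit cleared
 * and CQ tail advanced) before returning.
 */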
1416 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1417 {
1418 	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1419 	struct be_rx_compl_info *rxcp = &rxo->rxcp;
1420 	struct be_adapter *adapter = rxo->adapter;
1421 
1422 	/* For checking the valid bit it is Ok to use either definition as the
1423 	 * valid bit is at the same position in both v0 and v1 Rx compl */
1424 	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1425 		return NULL;
1426 
1427 	rmb();
1428 	be_dws_le_to_cpu(compl, sizeof(*compl));
1429 
1430 	if (adapter->be3_native)
1431 		be_parse_rx_compl_v1(compl, rxcp);
1432 	else
1433 		be_parse_rx_compl_v0(compl, rxcp);
1434 
1435 	if (rxcp->vlanf) {
1436 		/* vlanf could be wrongly set in some cards.
1437 		 * ignore if vtm is not set */
1438 		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1439 			rxcp->vlanf = 0;
1440 
1441 		if (!lancer_chip(adapter))
1442 			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1443 
1444 		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1445 		    !adapter->vlan_tag[rxcp->vlan_tag])
1446 			rxcp->vlanf = 0;
1447 	}
1448 
1449 	/* As the compl has been parsed, reset it; we won't touch it again */
1450 	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1451 
1452 	queue_tail_inc(&rxo->cq);
1453 	return rxcp;
1454 }
1455 
1456 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1457 {
1458 	u32 order = get_order(size);
1459 
1460 	if (order > 0)
1461 		gfp |= __GFP_COMP;
1462 	return  alloc_pages(gfp, order);
1463 }
1464 
1465 /*
1466  * Allocate a page, split it to fragments of size rx_frag_size and post as
1467  * receive buffers to BE
1468  */
1469 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1470 {
1471 	struct be_adapter *adapter = rxo->adapter;
1472 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1473 	struct be_queue_info *rxq = &rxo->q;
1474 	struct page *pagep = NULL;
1475 	struct be_eth_rx_d *rxd;
1476 	u64 page_dmaaddr = 0, frag_dmaaddr;
1477 	u32 posted, page_offset = 0;
1478 
1479 	page_info = &rxo->page_info_tbl[rxq->head];
1480 	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1481 		if (!pagep) {
1482 			pagep = be_alloc_pages(adapter->big_page_size, gfp);
1483 			if (unlikely(!pagep)) {
1484 				rx_stats(rxo)->rx_post_fail++;
1485 				break;
1486 			}
1487 			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1488 						    0, adapter->big_page_size,
1489 						    DMA_FROM_DEVICE);
1490 			page_info->page_offset = 0;
1491 		} else {
1492 			get_page(pagep);
1493 			page_info->page_offset = page_offset + rx_frag_size;
1494 		}
1495 		page_offset = page_info->page_offset;
1496 		page_info->page = pagep;
1497 		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1498 		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1499 
1500 		rxd = queue_head_node(rxq);
1501 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1502 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1503 
1504 		/* Any space left in the current big page for another frag? */
1505 		if ((page_offset + rx_frag_size + rx_frag_size) >
1506 					adapter->big_page_size) {
1507 			pagep = NULL;
1508 			page_info->last_page_user = true;
1509 		}
1510 
1511 		prev_page_info = page_info;
1512 		queue_head_inc(rxq);
1513 		page_info = &rxo->page_info_tbl[rxq->head];
1514 	}
1515 	if (pagep)
1516 		prev_page_info->last_page_user = true;
1517 
1518 	if (posted) {
1519 		atomic_add(posted, &rxq->used);
1520 		be_rxq_notify(adapter, rxq->id, posted);
1521 	} else if (atomic_read(&rxq->used) == 0) {
1522 		/* Let be_worker replenish when memory is available */
1523 		rxo->rx_post_starved = true;
1524 	}
1525 }
1526 
1527 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1528 {
1529 	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1530 
1531 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1532 		return NULL;
1533 
1534 	rmb();
1535 	be_dws_le_to_cpu(txcp, sizeof(*txcp));
1536 
1537 	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1538 
1539 	queue_tail_inc(tx_cq);
1540 	return txcp;
1541 }
1542 
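/* Unmap and free the skb whose WRBs start at the TX queue tail and end at
 * last_index; returns the number of WRBs (including the header WRB) that
 * can be reclaimed.
 */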
1543 static u16 be_tx_compl_process(struct be_adapter *adapter,
1544 		struct be_tx_obj *txo, u16 last_index)
1545 {
1546 	struct be_queue_info *txq = &txo->q;
1547 	struct be_eth_wrb *wrb;
1548 	struct sk_buff **sent_skbs = txo->sent_skb_list;
1549 	struct sk_buff *sent_skb;
1550 	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1551 	bool unmap_skb_hdr = true;
1552 
1553 	sent_skb = sent_skbs[txq->tail];
1554 	BUG_ON(!sent_skb);
1555 	sent_skbs[txq->tail] = NULL;
1556 
1557 	/* skip header wrb */
1558 	queue_tail_inc(txq);
1559 
1560 	do {
1561 		cur_index = txq->tail;
1562 		wrb = queue_tail_node(txq);
1563 		unmap_tx_frag(&adapter->pdev->dev, wrb,
1564 			      (unmap_skb_hdr && skb_headlen(sent_skb)));
1565 		unmap_skb_hdr = false;
1566 
1567 		num_wrbs++;
1568 		queue_tail_inc(txq);
1569 	} while (cur_index != last_index);
1570 
1571 	kfree_skb(sent_skb);
1572 	return num_wrbs;
1573 }
1574 
1575 /* Return the number of events in the event queue */
1576 static inline int events_get(struct be_eq_obj *eqo)
1577 {
1578 	struct be_eq_entry *eqe;
1579 	int num = 0;
1580 
1581 	do {
1582 		eqe = queue_tail_node(&eqo->q);
1583 		if (eqe->evt == 0)
1584 			break;
1585 
1586 		rmb();
1587 		eqe->evt = 0;
1588 		num++;
1589 		queue_tail_inc(&eqo->q);
1590 	} while (true);
1591 
1592 	return num;
1593 }
1594 
1595 static int event_handle(struct be_eq_obj *eqo)
1596 {
1597 	bool rearm = false;
1598 	int num = events_get(eqo);
1599 
1600 	/* Deal with any spurious interrupts that come without events */
1601 	if (!num)
1602 		rearm = true;
1603 
1604 	if (num || msix_enabled(eqo->adapter))
1605 		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1606 
1607 	if (num)
1608 		napi_schedule(&eqo->napi);
1609 
1610 	return num;
1611 }
1612 
1613 /* Leaves the EQ in disarmed state */
1614 static void be_eq_clean(struct be_eq_obj *eqo)
1615 {
1616 	int num = events_get(eqo);
1617 
1618 	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1619 }
1620 
1621 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1622 {
1623 	struct be_rx_page_info *page_info;
1624 	struct be_queue_info *rxq = &rxo->q;
1625 	struct be_queue_info *rx_cq = &rxo->cq;
1626 	struct be_rx_compl_info *rxcp;
1627 	u16 tail;
1628 
1629 	/* First cleanup pending rx completions */
1630 	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1631 		be_rx_compl_discard(rxo, rxcp);
1632 		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1633 	}
1634 
1635 	/* Then free posted rx buffers that were not used */
1636 	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1637 	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1638 		page_info = get_rx_page_info(rxo, tail);
1639 		put_page(page_info->page);
1640 		memset(page_info, 0, sizeof(*page_info));
1641 	}
1642 	BUG_ON(atomic_read(&rxq->used));
1643 	rxq->tail = rxq->head = 0;
1644 }
1645 
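/* Drain pending TX completions (waiting up to ~200ms) and then forcibly
 * reclaim any posted WRBs whose completions never arrived.
 */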
1646 static void be_tx_compl_clean(struct be_adapter *adapter)
1647 {
1648 	struct be_tx_obj *txo;
1649 	struct be_queue_info *txq;
1650 	struct be_eth_tx_compl *txcp;
1651 	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1652 	struct sk_buff *sent_skb;
1653 	bool dummy_wrb;
1654 	int i, pending_txqs;
1655 
1656 	/* Wait for a max of 200ms for all the tx-completions to arrive. */
1657 	do {
1658 		pending_txqs = adapter->num_tx_qs;
1659 
1660 		for_all_tx_queues(adapter, txo, i) {
1661 			txq = &txo->q;
1662 			while ((txcp = be_tx_compl_get(&txo->cq))) {
1663 				end_idx =
1664 					AMAP_GET_BITS(struct amap_eth_tx_compl,
1665 						      wrb_index, txcp);
1666 				num_wrbs += be_tx_compl_process(adapter, txo,
1667 								end_idx);
1668 				cmpl++;
1669 			}
1670 			if (cmpl) {
1671 				be_cq_notify(adapter, txo->cq.id, false, cmpl);
1672 				atomic_sub(num_wrbs, &txq->used);
1673 				cmpl = 0;
1674 				num_wrbs = 0;
1675 			}
1676 			if (atomic_read(&txq->used) == 0)
1677 				pending_txqs--;
1678 		}
1679 
1680 		if (pending_txqs == 0 || ++timeo > 200)
1681 			break;
1682 
1683 		mdelay(1);
1684 	} while (true);
1685 
1686 	for_all_tx_queues(adapter, txo, i) {
1687 		txq = &txo->q;
1688 		if (atomic_read(&txq->used))
1689 			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1690 				atomic_read(&txq->used));
1691 
1692 		/* free posted tx for which compls will never arrive */
1693 		while (atomic_read(&txq->used)) {
1694 			sent_skb = txo->sent_skb_list[txq->tail];
1695 			end_idx = txq->tail;
1696 			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1697 						   &dummy_wrb);
1698 			index_adv(&end_idx, num_wrbs - 1, txq->len);
1699 			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1700 			atomic_sub(num_wrbs, &txq->used);
1701 		}
1702 	}
1703 }
1704 
1705 static void be_evt_queues_destroy(struct be_adapter *adapter)
1706 {
1707 	struct be_eq_obj *eqo;
1708 	int i;
1709 
1710 	for_all_evt_queues(adapter, eqo, i) {
1711 		be_eq_clean(eqo);
1712 		if (eqo->q.created)
1713 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1714 		be_queue_free(adapter, &eqo->q);
1715 	}
1716 }
1717 
1718 static int be_evt_queues_create(struct be_adapter *adapter)
1719 {
1720 	struct be_queue_info *eq;
1721 	struct be_eq_obj *eqo;
1722 	int i, rc;
1723 
1724 	adapter->num_evt_qs = num_irqs(adapter);
1725 
1726 	for_all_evt_queues(adapter, eqo, i) {
1727 		eqo->adapter = adapter;
1728 		eqo->tx_budget = BE_TX_BUDGET;
1729 		eqo->idx = i;
1730 		eqo->max_eqd = BE_MAX_EQD;
1731 		eqo->enable_aic = true;
1732 
1733 		eq = &eqo->q;
1734 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1735 					sizeof(struct be_eq_entry));
1736 		if (rc)
1737 			return rc;
1738 
1739 		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1740 		if (rc)
1741 			return rc;
1742 	}
1743 	return 0;
1744 }
1745 
1746 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1747 {
1748 	struct be_queue_info *q;
1749 
1750 	q = &adapter->mcc_obj.q;
1751 	if (q->created)
1752 		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1753 	be_queue_free(adapter, q);
1754 
1755 	q = &adapter->mcc_obj.cq;
1756 	if (q->created)
1757 		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1758 	be_queue_free(adapter, q);
1759 }
1760 
1761 /* Must be called only after TX qs are created as MCC shares TX EQ */
1762 static int be_mcc_queues_create(struct be_adapter *adapter)
1763 {
1764 	struct be_queue_info *q, *cq;
1765 
1766 	cq = &adapter->mcc_obj.cq;
1767 	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1768 			sizeof(struct be_mcc_compl)))
1769 		goto err;
1770 
1771 	/* Use the default EQ for MCC completions */
1772 	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1773 		goto mcc_cq_free;
1774 
1775 	q = &adapter->mcc_obj.q;
1776 	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1777 		goto mcc_cq_destroy;
1778 
1779 	if (be_cmd_mccq_create(adapter, q, cq))
1780 		goto mcc_q_free;
1781 
1782 	return 0;
1783 
1784 mcc_q_free:
1785 	be_queue_free(adapter, q);
1786 mcc_cq_destroy:
1787 	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1788 mcc_cq_free:
1789 	be_queue_free(adapter, cq);
1790 err:
1791 	return -1;
1792 }
1793 
1794 static void be_tx_queues_destroy(struct be_adapter *adapter)
1795 {
1796 	struct be_queue_info *q;
1797 	struct be_tx_obj *txo;
1798 	u8 i;
1799 
1800 	for_all_tx_queues(adapter, txo, i) {
1801 		q = &txo->q;
1802 		if (q->created)
1803 			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1804 		be_queue_free(adapter, q);
1805 
1806 		q = &txo->cq;
1807 		if (q->created)
1808 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1809 		be_queue_free(adapter, q);
1810 	}
1811 }
1812 
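/* Only a single TXQ is used with SR-IOV, multi-channel, Lancer, BE2 and VFs;
 * otherwise all MAX_TX_QS queues are used
 */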
1813 static int be_num_txqs_want(struct be_adapter *adapter)
1814 {
1815 	if (sriov_want(adapter) || be_is_mc(adapter) ||
1816 	    lancer_chip(adapter) || !be_physfn(adapter) ||
1817 	    adapter->generation == BE_GEN2)
1818 		return 1;
1819 	else
1820 		return MAX_TX_QS;
1821 }
1822 
1823 static int be_tx_cqs_create(struct be_adapter *adapter)
1824 {
1825 	struct be_queue_info *cq, *eq;
1826 	int status;
1827 	struct be_tx_obj *txo;
1828 	u8 i;
1829 
1830 	adapter->num_tx_qs = be_num_txqs_want(adapter);
1831 	if (adapter->num_tx_qs != MAX_TX_QS) {
1832 		rtnl_lock();
1833 		netif_set_real_num_tx_queues(adapter->netdev,
1834 			adapter->num_tx_qs);
1835 		rtnl_unlock();
1836 	}
1837 
1838 	for_all_tx_queues(adapter, txo, i) {
1839 		cq = &txo->cq;
1840 		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1841 					sizeof(struct be_eth_tx_compl));
1842 		if (status)
1843 			return status;
1844 
1845 		/* If num_evt_qs is less than num_tx_qs, then more than
1846 		 * one txq shares an eq
1847 		 */
1848 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1849 		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1850 		if (status)
1851 			return status;
1852 	}
1853 	return 0;
1854 }
1855 
1856 static int be_tx_qs_create(struct be_adapter *adapter)
1857 {
1858 	struct be_tx_obj *txo;
1859 	int i, status;
1860 
1861 	for_all_tx_queues(adapter, txo, i) {
1862 		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1863 					sizeof(struct be_eth_wrb));
1864 		if (status)
1865 			return status;
1866 
1867 		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1868 		if (status)
1869 			return status;
1870 	}
1871 
1872 	return 0;
1873 }
1874 
1875 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1876 {
1877 	struct be_queue_info *q;
1878 	struct be_rx_obj *rxo;
1879 	int i;
1880 
1881 	for_all_rx_queues(adapter, rxo, i) {
1882 		q = &rxo->cq;
1883 		if (q->created)
1884 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1885 		be_queue_free(adapter, q);
1886 	}
1887 }
1888 
1889 static int be_rx_cqs_create(struct be_adapter *adapter)
1890 {
1891 	struct be_queue_info *eq, *cq;
1892 	struct be_rx_obj *rxo;
1893 	int rc, i;
1894 
1895 	/* We'll create as many RSS rings as there are irqs.
1896 	 * But when there's only one irq there's no use creating RSS rings
1897 	 */
1898 	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1899 				num_irqs(adapter) + 1 : 1;
1900 
1901 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1902 	for_all_rx_queues(adapter, rxo, i) {
1903 		rxo->adapter = adapter;
1904 		cq = &rxo->cq;
1905 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1906 				sizeof(struct be_eth_rx_compl));
1907 		if (rc)
1908 			return rc;
1909 
1910 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1911 		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1912 		if (rc)
1913 			return rc;
1914 	}
1915 
1916 	if (adapter->num_rx_qs != MAX_RX_QS)
1917 		dev_info(&adapter->pdev->dev,
1918 			"Created only %d receive queues", adapter->num_rx_qs);
1919 
1920 	return 0;
1921 }
1922 
1923 static irqreturn_t be_intx(int irq, void *dev)
1924 {
1925 	struct be_adapter *adapter = dev;
1926 	int num_evts;
1927 
1928 	/* With INTx only one EQ is used */
1929 	num_evts = event_handle(&adapter->eq_obj[0]);
1930 	if (num_evts)
1931 		return IRQ_HANDLED;
1932 	else
1933 		return IRQ_NONE;
1934 }
1935 
1936 static irqreturn_t be_msix(int irq, void *dev)
1937 {
1938 	struct be_eq_obj *eqo = dev;
1939 
1940 	event_handle(eqo);
1941 	return IRQ_HANDLED;
1942 }
1943 
1944 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1945 {
1946 	return rxcp->tcpf && !rxcp->err;
1947 }
1948 
1949 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1950 			int budget)
1951 {
1952 	struct be_adapter *adapter = rxo->adapter;
1953 	struct be_queue_info *rx_cq = &rxo->cq;
1954 	struct be_rx_compl_info *rxcp;
1955 	u32 work_done;
1956 
1957 	for (work_done = 0; work_done < budget; work_done++) {
1958 		rxcp = be_rx_compl_get(rxo);
1959 		if (!rxcp)
1960 			break;
1961 
1962 		/* Is it a flush compl that has no data */
1963 		if (unlikely(rxcp->num_rcvd == 0))
1964 			goto loop_continue;
1965 
1966 		/* Discard compl with partial DMA Lancer B0 */
1967 		if (unlikely(!rxcp->pkt_size)) {
1968 			be_rx_compl_discard(rxo, rxcp);
1969 			goto loop_continue;
1970 		}
1971 
1972 		/* On BE, drop pkts that arrive due to imperfect filtering in
1973 		 * promiscuous mode on some SKUs
1974 		 */
1975 		if (unlikely(rxcp->port != adapter->port_num &&
1976 				!lancer_chip(adapter))) {
1977 			be_rx_compl_discard(rxo, rxcp);
1978 			goto loop_continue;
1979 		}
1980 
1981 		if (do_gro(rxcp))
1982 			be_rx_compl_process_gro(rxo, napi, rxcp);
1983 		else
1984 			be_rx_compl_process(rxo, rxcp);
1985 loop_continue:
1986 		be_rx_stats_update(rxo, rxcp);
1987 	}
1988 
1989 	if (work_done) {
1990 		be_cq_notify(adapter, rx_cq->id, true, work_done);
1991 
1992 		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1993 			be_post_rx_frags(rxo, GFP_ATOMIC);
1994 	}
1995 
1996 	return work_done;
1997 }
1998 
1999 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2000 			  int budget, int idx)
2001 {
2002 	struct be_eth_tx_compl *txcp;
2003 	int num_wrbs = 0, work_done;
2004 
2005 	for (work_done = 0; work_done < budget; work_done++) {
2006 		txcp = be_tx_compl_get(&txo->cq);
2007 		if (!txcp)
2008 			break;
2009 		num_wrbs += be_tx_compl_process(adapter, txo,
2010 				AMAP_GET_BITS(struct amap_eth_tx_compl,
2011 					wrb_index, txcp));
2012 	}
2013 
2014 	if (work_done) {
2015 		be_cq_notify(adapter, txo->cq.id, true, work_done);
2016 		atomic_sub(num_wrbs, &txo->q.used);
2017 
2018 		/* As Tx wrbs have been freed up, wake up netdev queue
2019 		 * if it was stopped due to lack of tx wrbs.  */
2020 		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2021 			atomic_read(&txo->q.used) < txo->q.len / 2) {
2022 			netif_wake_subqueue(adapter->netdev, idx);
2023 		}
2024 
2025 		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2026 		tx_stats(txo)->tx_compl += work_done;
2027 		u64_stats_update_end(&tx_stats(txo)->sync_compl);
2028 	}
2029 	return (work_done < budget); /* Done */
2030 }
2031 
2032 int be_poll(struct napi_struct *napi, int budget)
2033 {
2034 	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2035 	struct be_adapter *adapter = eqo->adapter;
2036 	int max_work = 0, work, i;
2037 	bool tx_done;
2038 
2039 	/* Process all TXQs serviced by this EQ */
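	/* TXQ i is bound to EQ (i % num_evt_qs) in be_tx_cqs_create(), so
	 * stepping by num_evt_qs visits exactly the TXQs owned by this EQ
	 */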
2040 	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2041 		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2042 					eqo->tx_budget, i);
2043 		if (!tx_done)
2044 			max_work = budget;
2045 	}
2046 
2047 	/* This loop will iterate twice for EQ0 in which
2048 	 * completions of the last RXQ (default one) are also processed.
2049 	 * For other EQs the loop iterates only once
2050 	 */
2051 	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2052 		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2053 		max_work = max(work, max_work);
2054 	}
2055 
2056 	if (is_mcc_eqo(eqo))
2057 		be_process_mcc(adapter);
2058 
2059 	if (max_work < budget) {
2060 		napi_complete(napi);
2061 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2062 	} else {
2063 		/* As we'll continue in polling mode, count and clear events */
2064 		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2065 	}
2066 	return max_work;
2067 }
2068 
2069 void be_detect_dump_ue(struct be_adapter *adapter)
2070 {
2071 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2072 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2073 	u32 i;
2074 
2075 	if (adapter->eeh_err || adapter->ue_detected)
2076 		return;
2077 
2078 	if (lancer_chip(adapter)) {
2079 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2080 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2081 			sliport_err1 = ioread32(adapter->db +
2082 					SLIPORT_ERROR1_OFFSET);
2083 			sliport_err2 = ioread32(adapter->db +
2084 					SLIPORT_ERROR2_OFFSET);
2085 		}
2086 	} else {
2087 		pci_read_config_dword(adapter->pdev,
2088 				PCICFG_UE_STATUS_LOW, &ue_lo);
2089 		pci_read_config_dword(adapter->pdev,
2090 				PCICFG_UE_STATUS_HIGH, &ue_hi);
2091 		pci_read_config_dword(adapter->pdev,
2092 				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2093 		pci_read_config_dword(adapter->pdev,
2094 				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2095 
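		/* Bits set in the UE mask registers are to be ignored when
		 * checking for unrecoverable errors
		 */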
2096 		ue_lo = (ue_lo & (~ue_lo_mask));
2097 		ue_hi = (ue_hi & (~ue_hi_mask));
2098 	}
2099 
2100 	if (ue_lo || ue_hi ||
2101 		sliport_status & SLIPORT_STATUS_ERR_MASK) {
2102 		adapter->ue_detected = true;
2103 		adapter->eeh_err = true;
2104 		dev_err(&adapter->pdev->dev,
2105 			"Unrecoverable error in the card\n");
2106 	}
2107 
2108 	if (ue_lo) {
2109 		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2110 			if (ue_lo & 1)
2111 				dev_err(&adapter->pdev->dev,
2112 				"UE: %s bit set\n", ue_status_low_desc[i]);
2113 		}
2114 	}
2115 	if (ue_hi) {
2116 		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2117 			if (ue_hi & 1)
2118 				dev_err(&adapter->pdev->dev,
2119 				"UE: %s bit set\n", ue_status_hi_desc[i]);
2120 		}
2121 	}
2122 
2123 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2124 		dev_err(&adapter->pdev->dev,
2125 			"sliport status 0x%x\n", sliport_status);
2126 		dev_err(&adapter->pdev->dev,
2127 			"sliport error1 0x%x\n", sliport_err1);
2128 		dev_err(&adapter->pdev->dev,
2129 			"sliport error2 0x%x\n", sliport_err2);
2130 	}
2131 }
2132 
2133 static void be_msix_disable(struct be_adapter *adapter)
2134 {
2135 	if (msix_enabled(adapter)) {
2136 		pci_disable_msix(adapter->pdev);
2137 		adapter->num_msix_vec = 0;
2138 	}
2139 }
2140 
2141 static uint be_num_rss_want(struct be_adapter *adapter)
2142 {
2143 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2144 	     !sriov_want(adapter) && be_physfn(adapter) &&
2145 	     !be_is_mc(adapter))
2146 		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2147 	else
2148 		return 0;
2149 }
2150 
2151 static void be_msix_enable(struct be_adapter *adapter)
2152 {
2153 #define BE_MIN_MSIX_VECTORS		1
2154 	int i, status, num_vec;
2155 
2156 	/* If RSS queues are not used, need a vec for default RX Q */
2157 	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2158 	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2159 
2160 	for (i = 0; i < num_vec; i++)
2161 		adapter->msix_entries[i].entry = i;
2162 
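	/* A positive return from pci_enable_msix() is the number of vectors
	 * that could be allocated; retry below with that reduced count
	 */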
2163 	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2164 	if (status == 0) {
2165 		goto done;
2166 	} else if (status >= BE_MIN_MSIX_VECTORS) {
2167 		num_vec = status;
2168 		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2169 				num_vec) == 0)
2170 			goto done;
2171 	}
2172 	return;
2173 done:
2174 	adapter->num_msix_vec = num_vec;
2175 	return;
2176 }
2177 
2178 static inline int be_msix_vec_get(struct be_adapter *adapter,
2179 				struct be_eq_obj *eqo)
2180 {
2181 	return adapter->msix_entries[eqo->idx].vector;
2182 }
2183 
2184 static int be_msix_register(struct be_adapter *adapter)
2185 {
2186 	struct net_device *netdev = adapter->netdev;
2187 	struct be_eq_obj *eqo;
2188 	int status, i, vec;
2189 
2190 	for_all_evt_queues(adapter, eqo, i) {
2191 		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2192 		vec = be_msix_vec_get(adapter, eqo);
2193 		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2194 		if (status)
2195 			goto err_msix;
2196 	}
2197 
2198 	return 0;
2199 err_msix:
2200 	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2201 		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2202 	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2203 		status);
2204 	be_msix_disable(adapter);
2205 	return status;
2206 }
2207 
2208 static int be_irq_register(struct be_adapter *adapter)
2209 {
2210 	struct net_device *netdev = adapter->netdev;
2211 	int status;
2212 
2213 	if (msix_enabled(adapter)) {
2214 		status = be_msix_register(adapter);
2215 		if (status == 0)
2216 			goto done;
2217 		/* INTx is not supported for VF */
2218 		if (!be_physfn(adapter))
2219 			return status;
2220 	}
2221 
2222 	/* INTx */
2223 	netdev->irq = adapter->pdev->irq;
2224 	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2225 			adapter);
2226 	if (status) {
2227 		dev_err(&adapter->pdev->dev,
2228 			"INTx request IRQ failed - err %d\n", status);
2229 		return status;
2230 	}
2231 done:
2232 	adapter->isr_registered = true;
2233 	return 0;
2234 }
2235 
2236 static void be_irq_unregister(struct be_adapter *adapter)
2237 {
2238 	struct net_device *netdev = adapter->netdev;
2239 	struct be_eq_obj *eqo;
2240 	int i;
2241 
2242 	if (!adapter->isr_registered)
2243 		return;
2244 
2245 	/* INTx */
2246 	if (!msix_enabled(adapter)) {
2247 		free_irq(netdev->irq, adapter);
2248 		goto done;
2249 	}
2250 
2251 	/* MSIx */
2252 	for_all_evt_queues(adapter, eqo, i)
2253 		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2254 
2255 done:
2256 	adapter->isr_registered = false;
2257 }
2258 
2259 static void be_rx_qs_destroy(struct be_adapter *adapter)
2260 {
2261 	struct be_queue_info *q;
2262 	struct be_rx_obj *rxo;
2263 	int i;
2264 
2265 	for_all_rx_queues(adapter, rxo, i) {
2266 		q = &rxo->q;
2267 		if (q->created) {
2268 			be_cmd_rxq_destroy(adapter, q);
2269 			/* After the rxq is invalidated, wait for a grace time
2270 			 * of 1ms for all dma to end and the flush compl to
2271 			 * arrive
2272 			 */
2273 			mdelay(1);
2274 			be_rx_cq_clean(rxo);
2275 		}
2276 		be_queue_free(adapter, q);
2277 	}
2278 }
2279 
2280 static int be_close(struct net_device *netdev)
2281 {
2282 	struct be_adapter *adapter = netdev_priv(netdev);
2283 	struct be_eq_obj *eqo;
2284 	int i;
2285 
2286 	be_async_mcc_disable(adapter);
2287 
2288 	if (!lancer_chip(adapter))
2289 		be_intr_set(adapter, false);
2290 
2291 	for_all_evt_queues(adapter, eqo, i) {
2292 		napi_disable(&eqo->napi);
2293 		if (msix_enabled(adapter))
2294 			synchronize_irq(be_msix_vec_get(adapter, eqo));
2295 		else
2296 			synchronize_irq(netdev->irq);
2297 		be_eq_clean(eqo);
2298 	}
2299 
2300 	be_irq_unregister(adapter);
2301 
2302 	/* Wait for all pending tx completions to arrive so that
2303 	 * all tx skbs are freed.
2304 	 */
2305 	be_tx_compl_clean(adapter);
2306 
2307 	be_rx_qs_destroy(adapter);
2308 	return 0;
2309 }
2310 
2311 static int be_rx_qs_create(struct be_adapter *adapter)
2312 {
2313 	struct be_rx_obj *rxo;
2314 	int rc, i, j;
2315 	u8 rsstable[128];
2316 
2317 	for_all_rx_queues(adapter, rxo, i) {
2318 		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2319 				    sizeof(struct be_eth_rx_d));
2320 		if (rc)
2321 			return rc;
2322 	}
2323 
2324 	/* The FW would like the default RXQ to be created first */
2325 	rxo = default_rxo(adapter);
2326 	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2327 			       adapter->if_handle, false, &rxo->rss_id);
2328 	if (rc)
2329 		return rc;
2330 
2331 	for_all_rss_queues(adapter, rxo, i) {
2332 		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2333 				       rx_frag_size, adapter->if_handle,
2334 				       true, &rxo->rss_id);
2335 		if (rc)
2336 			return rc;
2337 	}
2338 
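	/* Fill the 128-entry RSS indirection table by cycling through the
	 * rss_ids of the RSS rings; the default RXQ is not part of RSS
	 */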
2339 	if (be_multi_rxq(adapter)) {
2340 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2341 			for_all_rss_queues(adapter, rxo, i) {
2342 				if ((j + i) >= 128)
2343 					break;
2344 				rsstable[j + i] = rxo->rss_id;
2345 			}
2346 		}
2347 		rc = be_cmd_rss_config(adapter, rsstable, 128);
2348 		if (rc)
2349 			return rc;
2350 	}
2351 
2352 	/* First time posting */
2353 	for_all_rx_queues(adapter, rxo, i)
2354 		be_post_rx_frags(rxo, GFP_KERNEL);
2355 	return 0;
2356 }
2357 
2358 static int be_open(struct net_device *netdev)
2359 {
2360 	struct be_adapter *adapter = netdev_priv(netdev);
2361 	struct be_eq_obj *eqo;
2362 	struct be_rx_obj *rxo;
2363 	struct be_tx_obj *txo;
2364 	u8 link_status;
2365 	int status, i;
2366 
2367 	status = be_rx_qs_create(adapter);
2368 	if (status)
2369 		goto err;
2370 
2371 	be_irq_register(adapter);
2372 
2373 	if (!lancer_chip(adapter))
2374 		be_intr_set(adapter, true);
2375 
2376 	for_all_rx_queues(adapter, rxo, i)
2377 		be_cq_notify(adapter, rxo->cq.id, true, 0);
2378 
2379 	for_all_tx_queues(adapter, txo, i)
2380 		be_cq_notify(adapter, txo->cq.id, true, 0);
2381 
2382 	be_async_mcc_enable(adapter);
2383 
2384 	for_all_evt_queues(adapter, eqo, i) {
2385 		napi_enable(&eqo->napi);
2386 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2387 	}
2388 
2389 	status = be_cmd_link_status_query(adapter, NULL, NULL,
2390 					  &link_status, 0);
2391 	if (!status)
2392 		be_link_status_update(adapter, link_status);
2393 
2394 	return 0;
2395 err:
2396 	be_close(adapter->netdev);
2397 	return -EIO;
2398 }
2399 
2400 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2401 {
2402 	struct be_dma_mem cmd;
2403 	int status = 0;
2404 	u8 mac[ETH_ALEN];
2405 
2406 	memset(mac, 0, ETH_ALEN);
2407 
2408 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2409 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2410 				    GFP_KERNEL);
2411 	if (cmd.va == NULL)
2412 		return -1;
2413 	memset(cmd.va, 0, cmd.size);
2414 
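	/* Enabling WoL programs the interface MAC for magic-packet wake;
	 * disabling programs a zeroed MAC to cancel it
	 */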
2415 	if (enable) {
2416 		status = pci_write_config_dword(adapter->pdev,
2417 			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2418 		if (status) {
2419 			dev_err(&adapter->pdev->dev,
2420 				"Could not enable Wake-on-lan\n");
2421 			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2422 					  cmd.dma);
2423 			return status;
2424 		}
2425 		status = be_cmd_enable_magic_wol(adapter,
2426 				adapter->netdev->dev_addr, &cmd);
2427 		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2428 		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2429 	} else {
2430 		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2431 		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2432 		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2433 	}
2434 
2435 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2436 	return status;
2437 }
2438 
2439 /*
2440  * Generate a seed MAC address from the PF MAC Address using jhash.
2441  * MAC addresses for VFs are assigned incrementally starting from the seed.
2442  * These addresses are programmed in the ASIC by the PF and the VF driver
2443  * queries for the MAC address during its probe.
2444  */
2445 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2446 {
2447 	u32 vf;
2448 	int status = 0;
2449 	u8 mac[ETH_ALEN];
2450 	struct be_vf_cfg *vf_cfg;
2451 
2452 	be_vf_eth_addr_generate(adapter, mac);
2453 
2454 	for_all_vfs(adapter, vf_cfg, vf) {
2455 		if (lancer_chip(adapter)) {
2456 			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2457 		} else {
2458 			status = be_cmd_pmac_add(adapter, mac,
2459 						 vf_cfg->if_handle,
2460 						 &vf_cfg->pmac_id, vf + 1);
2461 		}
2462 
2463 		if (status)
2464 			dev_err(&adapter->pdev->dev,
2465 			"MAC address assignment failed for VF %d\n", vf);
2466 		else
2467 			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2468 
2469 		mac[5] += 1;
2470 	}
2471 	return status;
2472 }
2473 
2474 static void be_vf_clear(struct be_adapter *adapter)
2475 {
2476 	struct be_vf_cfg *vf_cfg;
2477 	u32 vf;
2478 
2479 	if (be_find_vfs(adapter, ASSIGNED)) {
2480 		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2481 		goto done;
2482 	}
2483 
2484 	for_all_vfs(adapter, vf_cfg, vf) {
2485 		if (lancer_chip(adapter))
2486 			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2487 		else
2488 			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2489 					vf_cfg->pmac_id, vf + 1);
2490 
2491 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2492 	}
2493 	pci_disable_sriov(adapter->pdev);
2494 done:
2495 	kfree(adapter->vf_cfg);
2496 	adapter->num_vfs = 0;
2497 }
2498 
2499 static int be_clear(struct be_adapter *adapter)
2500 {
2501 	int i = 1;
2502 
2503 	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2504 		cancel_delayed_work_sync(&adapter->work);
2505 		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2506 	}
2507 
2508 	if (sriov_enabled(adapter))
2509 		be_vf_clear(adapter);
2510 
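	/* pmac_id[0] holds the primary MAC; additional uc-macs start at index 1 */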
2511 	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2512 		be_cmd_pmac_del(adapter, adapter->if_handle,
2513 			adapter->pmac_id[i], 0);
2514 
2515 	be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2516 
2517 	be_mcc_queues_destroy(adapter);
2518 	be_rx_cqs_destroy(adapter);
2519 	be_tx_queues_destroy(adapter);
2520 	be_evt_queues_destroy(adapter);
2521 
2522 	/* tell fw we're done with firing cmds */
2523 	be_cmd_fw_clean(adapter);
2524 
2525 	be_msix_disable(adapter);
2526 	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2527 	return 0;
2528 }
2529 
2530 static int be_vf_setup_init(struct be_adapter *adapter)
2531 {
2532 	struct be_vf_cfg *vf_cfg;
2533 	int vf;
2534 
2535 	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2536 				  GFP_KERNEL);
2537 	if (!adapter->vf_cfg)
2538 		return -ENOMEM;
2539 
2540 	for_all_vfs(adapter, vf_cfg, vf) {
2541 		vf_cfg->if_handle = -1;
2542 		vf_cfg->pmac_id = -1;
2543 	}
2544 	return 0;
2545 }
2546 
2547 static int be_vf_setup(struct be_adapter *adapter)
2548 {
2549 	struct be_vf_cfg *vf_cfg;
2550 	struct device *dev = &adapter->pdev->dev;
2551 	u32 cap_flags, en_flags, vf;
2552 	u16 def_vlan, lnk_speed;
2553 	int status, enabled_vfs;
2554 
2555 	enabled_vfs = be_find_vfs(adapter, ENABLED);
2556 	if (enabled_vfs) {
2557 		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2558 		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2559 		return 0;
2560 	}
2561 
2562 	if (num_vfs > adapter->dev_num_vfs) {
2563 		dev_warn(dev, "Device supports %d VFs and not %d\n",
2564 			 adapter->dev_num_vfs, num_vfs);
2565 		num_vfs = adapter->dev_num_vfs;
2566 	}
2567 
2568 	status = pci_enable_sriov(adapter->pdev, num_vfs);
2569 	if (!status) {
2570 		adapter->num_vfs = num_vfs;
2571 	} else {
2572 		/* Platform doesn't support SRIOV though device supports it */
2573 		dev_warn(dev, "SRIOV enable failed\n");
2574 		return 0;
2575 	}
2576 
2577 	status = be_vf_setup_init(adapter);
2578 	if (status)
2579 		goto err;
2580 
2581 	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2582 				BE_IF_FLAGS_MULTICAST;
2583 	for_all_vfs(adapter, vf_cfg, vf) {
2584 		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2585 					  &vf_cfg->if_handle, NULL, vf + 1);
2586 		if (status)
2587 			goto err;
2588 	}
2589 
2590 	if (!enabled_vfs) {
2591 		status = be_vf_eth_addr_config(adapter);
2592 		if (status)
2593 			goto err;
2594 	}
2595 
2596 	for_all_vfs(adapter, vf_cfg, vf) {
2597 		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2598 						  NULL, vf + 1);
2599 		if (status)
2600 			goto err;
2601 		vf_cfg->tx_rate = lnk_speed * 10;
2602 
2603 		status = be_cmd_get_hsw_config(adapter, &def_vlan,
2604 				vf + 1, vf_cfg->if_handle);
2605 		if (status)
2606 			goto err;
2607 		vf_cfg->def_vid = def_vlan;
2608 	}
2609 	return 0;
2610 err:
2611 	return status;
2612 }
2613 
2614 static void be_setup_init(struct be_adapter *adapter)
2615 {
2616 	adapter->vlan_prio_bmap = 0xff;
2617 	adapter->phy.link_speed = -1;
2618 	adapter->if_handle = -1;
2619 	adapter->be3_native = false;
2620 	adapter->promiscuous = false;
2621 	adapter->eq_next_idx = 0;
2622 	adapter->phy.forced_port_speed = -1;
2623 }
2624 
2625 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2626 {
2627 	u32 pmac_id;
2628 	int status;
2629 	bool pmac_id_active;
2630 
2631 	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2632 							&pmac_id, mac);
2633 	if (status != 0)
2634 		goto do_none;
2635 
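	/* If the MAC is already programmed on the interface (pmac_id_active),
	 * adopt its existing pmac_id; otherwise add it as a new pmac entry
	 */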
2636 	if (pmac_id_active) {
2637 		status = be_cmd_mac_addr_query(adapter, mac,
2638 				MAC_ADDRESS_TYPE_NETWORK,
2639 				false, adapter->if_handle, pmac_id);
2640 
2641 		if (!status)
2642 			adapter->pmac_id[0] = pmac_id;
2643 	} else {
2644 		status = be_cmd_pmac_add(adapter, mac,
2645 				adapter->if_handle, &adapter->pmac_id[0], 0);
2646 	}
2647 do_none:
2648 	return status;
2649 }
2650 
2651 /* Routine to query per function resource limits */
2652 static int be_get_config(struct be_adapter *adapter)
2653 {
2654 	int pos;
2655 	u16 dev_num_vfs;
2656 
2657 	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2658 	if (pos) {
2659 		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2660 				     &dev_num_vfs);
2661 		adapter->dev_num_vfs = dev_num_vfs;
2662 	}
2663 	return 0;
2664 }
2665 
2666 static int be_setup(struct be_adapter *adapter)
2667 {
2668 	struct net_device *netdev = adapter->netdev;
2669 	struct device *dev = &adapter->pdev->dev;
2670 	u32 cap_flags, en_flags;
2671 	u32 tx_fc, rx_fc;
2672 	int status;
2673 	u8 mac[ETH_ALEN];
2674 
2675 	be_setup_init(adapter);
2676 
2677 	be_get_config(adapter);
2678 
2679 	be_cmd_req_native_mode(adapter);
2680 
2681 	be_msix_enable(adapter);
2682 
2683 	status = be_evt_queues_create(adapter);
2684 	if (status)
2685 		goto err;
2686 
2687 	status = be_tx_cqs_create(adapter);
2688 	if (status)
2689 		goto err;
2690 
2691 	status = be_rx_cqs_create(adapter);
2692 	if (status)
2693 		goto err;
2694 
2695 	status = be_mcc_queues_create(adapter);
2696 	if (status)
2697 		goto err;
2698 
2699 	memset(mac, 0, ETH_ALEN);
2700 	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2701 			true /*permanent */, 0, 0);
2702 	if (status)
2703 		return status;
2704 	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2705 	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2706 
2707 	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2708 			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2709 	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2710 			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2711 
2712 	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2713 		cap_flags |= BE_IF_FLAGS_RSS;
2714 		en_flags |= BE_IF_FLAGS_RSS;
2715 	}
2716 	status = be_cmd_if_create(adapter, cap_flags, en_flags,
2717 			netdev->dev_addr, &adapter->if_handle,
2718 			&adapter->pmac_id[0], 0);
2719 	if (status != 0)
2720 		goto err;
2721 
2722 	/* The VF's permanent MAC queried from the card is incorrect.
2723 	 * For BEx: Query the MAC configured by the PF using if_handle.
2724 	 * For Lancer: Get and use mac_list to obtain the MAC address.
2725 	 */
2726 	if (!be_physfn(adapter)) {
2727 		if (lancer_chip(adapter))
2728 			status = be_add_mac_from_list(adapter, mac);
2729 		else
2730 			status = be_cmd_mac_addr_query(adapter, mac,
2731 					MAC_ADDRESS_TYPE_NETWORK, false,
2732 					adapter->if_handle, 0);
2733 		if (!status) {
2734 			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2735 			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2736 		}
2737 	}
2738 
2739 	status = be_tx_qs_create(adapter);
2740 	if (status)
2741 		goto err;
2742 
2743 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2744 
2745 	be_vid_config(adapter, false, 0);
2746 
2747 	be_set_rx_mode(adapter->netdev);
2748 
2749 	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2750 
2751 	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2752 		be_cmd_set_flow_control(adapter, adapter->tx_fc,
2753 					adapter->rx_fc);
2754 
2755 	pcie_set_readrq(adapter->pdev, 4096);
2756 
2757 	if (be_physfn(adapter) && num_vfs) {
2758 		if (adapter->dev_num_vfs)
2759 			be_vf_setup(adapter);
2760 		else
2761 			dev_warn(dev, "device doesn't support SRIOV\n");
2762 	}
2763 
2764 	be_cmd_get_phy_info(adapter);
2765 	if (be_pause_supported(adapter))
2766 		adapter->phy.fc_autoneg = 1;
2767 
2768 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2769 	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2770 
2771 	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2772 	return 0;
2773 err:
2774 	be_clear(adapter);
2775 	return status;
2776 }
2777 
2778 #ifdef CONFIG_NET_POLL_CONTROLLER
2779 static void be_netpoll(struct net_device *netdev)
2780 {
2781 	struct be_adapter *adapter = netdev_priv(netdev);
2782 	struct be_eq_obj *eqo;
2783 	int i;
2784 
2785 	for_all_evt_queues(adapter, eqo, i)
2786 		event_handle(eqo);
2789 }
2790 #endif
2791 
2792 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
2793 char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
2794 
2795 static bool be_flash_redboot(struct be_adapter *adapter,
2796 			const u8 *p, u32 img_start, int image_size,
2797 			int hdr_size)
2798 {
2799 	u32 crc_offset;
2800 	u8 flashed_crc[4];
2801 	int status;
2802 
2803 	crc_offset = hdr_size + img_start + image_size - 4;
2804 
2805 	p += crc_offset;
2806 
2807 	status = be_cmd_get_flash_crc(adapter, flashed_crc,
2808 			(image_size - 4));
2809 	if (status) {
2810 		dev_err(&adapter->pdev->dev,
2811 		"could not get crc from flash, not flashing redboot\n");
2812 		return false;
2813 	}
2814 
2815 	/* update redboot only if crc does not match */
2816 	if (!memcmp(flashed_crc, p, 4))
2817 		return false;
2818 	else
2819 		return true;
2820 }
2821 
2822 static bool phy_flashing_required(struct be_adapter *adapter)
2823 {
2824 	return (adapter->phy.phy_type == TN_8022 &&
2825 		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2826 }
2827 
2828 static bool is_comp_in_ufi(struct be_adapter *adapter,
2829 			   struct flash_section_info *fsec, int type)
2830 {
2831 	int i = 0, img_type = 0;
2832 	struct flash_section_info_g2 *fsec_g2 = NULL;
2833 
2834 	if (adapter->generation != BE_GEN3)
2835 		fsec_g2 = (struct flash_section_info_g2 *)fsec;
2836 
2837 	for (i = 0; i < MAX_FLASH_COMP; i++) {
2838 		if (fsec_g2)
2839 			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2840 		else
2841 			img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2842 
2843 		if (img_type == type)
2844 			return true;
2845 	}
2846 	return false;
2848 }
2849 
2850 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2851 					 int header_size,
2852 					 const struct firmware *fw)
2853 {
2854 	struct flash_section_info *fsec = NULL;
2855 	const u8 *p = fw->data;
2856 
2857 	p += header_size;
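	/* Scan the image in 32-byte steps looking for the flash section cookie */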
2858 	while (p < (fw->data + fw->size)) {
2859 		fsec = (struct flash_section_info *)p;
2860 		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2861 			return fsec;
2862 		p += 32;
2863 	}
2864 	return NULL;
2865 }
2866 
2867 static int be_flash_data(struct be_adapter *adapter,
2868 			 const struct firmware *fw,
2869 			 struct be_dma_mem *flash_cmd,
2870 			 int num_of_images)
2872 {
2873 	int status = 0, i, filehdr_size = 0;
2874 	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2875 	u32 total_bytes = 0, flash_op;
2876 	int num_bytes;
2877 	const u8 *p = fw->data;
2878 	struct be_cmd_write_flashrom *req = flash_cmd->va;
2879 	const struct flash_comp *pflashcomp;
2880 	int num_comp, hdr_size;
2881 	struct flash_section_info *fsec = NULL;
2882 
2883 	struct flash_comp gen3_flash_types[] = {
2884 		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2885 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2886 		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2887 			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2888 		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2889 			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2890 		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2891 			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2892 		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2893 			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2894 		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2895 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2896 		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2897 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2898 		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2899 			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2900 		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2901 			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2902 		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2903 			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2904 	};
2905 
2906 	struct flash_comp gen2_flash_types[] = {
2907 		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2908 			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2909 		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2910 			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2911 		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2912 			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2913 		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2914 			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2915 		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2916 			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2917 		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2918 			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2919 		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2920 			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2921 		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2922 			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2923 	};
2924 
2925 	if (adapter->generation == BE_GEN3) {
2926 		pflashcomp = gen3_flash_types;
2927 		filehdr_size = sizeof(struct flash_file_hdr_g3);
2928 		num_comp = ARRAY_SIZE(gen3_flash_types);
2929 	} else {
2930 		pflashcomp = gen2_flash_types;
2931 		filehdr_size = sizeof(struct flash_file_hdr_g2);
2932 		num_comp = ARRAY_SIZE(gen2_flash_types);
2933 	}
2934 	/* Get flash section info */
2935 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2936 	if (!fsec) {
2937 		dev_err(&adapter->pdev->dev,
2938 			"Invalid Cookie. UFI corrupted?\n");
2939 		return -1;
2940 	}
2941 	for (i = 0; i < num_comp; i++) {
2942 		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2943 			continue;
2944 
2945 		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2946 		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2947 			continue;
2948 
2949 		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
2950 			if (!phy_flashing_required(adapter))
2951 				continue;
2952 		}
2953 
2954 		hdr_size = filehdr_size +
2955 			   (num_of_images * sizeof(struct image_hdr));
2956 
2957 		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2958 		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2959 				       pflashcomp[i].size, hdr_size)))
2960 			continue;
2961 
2962 		/* Flash the component */
2963 		p = fw->data;
2964 		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
2965 		if (p + pflashcomp[i].size > fw->data + fw->size)
2966 			return -1;
2967 		total_bytes = pflashcomp[i].size;
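		/* Write the component in 32KB chunks; intermediate chunks use a
		 * SAVE op and the final chunk uses a FLASH op to commit
		 */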
2968 		while (total_bytes) {
2969 			if (total_bytes > 32*1024)
2970 				num_bytes = 32*1024;
2971 			else
2972 				num_bytes = total_bytes;
2973 			total_bytes -= num_bytes;
2974 			if (!total_bytes) {
2975 				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2976 					flash_op = FLASHROM_OPER_PHY_FLASH;
2977 				else
2978 					flash_op = FLASHROM_OPER_FLASH;
2979 			} else {
2980 				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2981 					flash_op = FLASHROM_OPER_PHY_SAVE;
2982 				else
2983 					flash_op = FLASHROM_OPER_SAVE;
2984 			}
2985 			memcpy(req->params.data_buf, p, num_bytes);
2986 			p += num_bytes;
2987 			status = be_cmd_write_flashrom(adapter, flash_cmd,
2988 				pflashcomp[i].optype, flash_op, num_bytes);
2989 			if (status) {
2990 				if ((status == ILLEGAL_IOCTL_REQ) &&
2991 					(pflashcomp[i].optype ==
2992 						OPTYPE_PHY_FW))
2993 					break;
2994 				dev_err(&adapter->pdev->dev,
2995 					"cmd to write to flash rom failed.\n");
2996 				return -1;
2997 			}
2998 		}
2999 	}
3000 	return 0;
3001 }
3002 
3003 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3004 {
3005 	if (fhdr == NULL)
3006 		return 0;
3007 	if (fhdr->build[0] == '3')
3008 		return BE_GEN3;
3009 	else if (fhdr->build[0] == '2')
3010 		return BE_GEN2;
3011 	else
3012 		return 0;
3013 }
3014 
3015 static int lancer_fw_download(struct be_adapter *adapter,
3016 				const struct firmware *fw)
3017 {
3018 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3019 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3020 	struct be_dma_mem flash_cmd;
3021 	const u8 *data_ptr = NULL;
3022 	u8 *dest_image_ptr = NULL;
3023 	size_t image_size = 0;
3024 	u32 chunk_size = 0;
3025 	u32 data_written = 0;
3026 	u32 offset = 0;
3027 	int status = 0;
3028 	u8 add_status = 0;
3029 
3030 	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3031 		dev_err(&adapter->pdev->dev,
3032 			"FW Image not properly aligned. "
3033 			"Length must be 4 byte aligned.\n");
3034 		status = -EINVAL;
3035 		goto lancer_fw_exit;
3036 	}
3037 
3038 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3039 				+ LANCER_FW_DOWNLOAD_CHUNK;
3040 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3041 						&flash_cmd.dma, GFP_KERNEL);
3042 	if (!flash_cmd.va) {
3043 		status = -ENOMEM;
3044 		dev_err(&adapter->pdev->dev,
3045 			"Memory allocation failure while flashing\n");
3046 		goto lancer_fw_exit;
3047 	}
3048 
3049 	dest_image_ptr = flash_cmd.va +
3050 				sizeof(struct lancer_cmd_req_write_object);
3051 	image_size = fw->size;
3052 	data_ptr = fw->data;
3053 
3054 	while (image_size) {
3055 		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3056 
3057 		/* Copy the image chunk content. */
3058 		memcpy(dest_image_ptr, data_ptr, chunk_size);
3059 
3060 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3061 				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3062 				&data_written, &add_status);
3063 
3064 		if (status)
3065 			break;
3066 
3067 		offset += data_written;
3068 		data_ptr += data_written;
3069 		image_size -= data_written;
3070 	}
3071 
3072 	if (!status) {
3073 		/* Commit the FW written */
3074 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3075 					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3076 					&data_written, &add_status);
3077 	}
3078 
3079 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3080 				flash_cmd.dma);
3081 	if (status) {
3082 		dev_err(&adapter->pdev->dev,
3083 			"Firmware load error. "
3084 			"Status code: 0x%x Additional Status: 0x%x\n",
3085 			status, add_status);
3086 		goto lancer_fw_exit;
3087 	}
3088 
3089 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3090 lancer_fw_exit:
3091 	return status;
3092 }
3093 
3094 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3095 {
3096 	struct flash_file_hdr_g2 *fhdr;
3097 	struct flash_file_hdr_g3 *fhdr3;
3098 	struct image_hdr *img_hdr_ptr = NULL;
3099 	struct be_dma_mem flash_cmd;
3100 	const u8 *p;
3101 	int status = 0, i = 0, num_imgs = 0;
3102 
3103 	p = fw->data;
3104 	fhdr = (struct flash_file_hdr_g2 *) p;
3105 
3106 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3107 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3108 					  &flash_cmd.dma, GFP_KERNEL);
3109 	if (!flash_cmd.va) {
3110 		status = -ENOMEM;
3111 		dev_err(&adapter->pdev->dev,
3112 			"Memory allocation failure while flashing\n");
3113 		goto be_fw_exit;
3114 	}
3115 
3116 	if ((adapter->generation == BE_GEN3) &&
3117 			(get_ufigen_type(fhdr) == BE_GEN3)) {
3118 		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3119 		num_imgs = le32_to_cpu(fhdr3->num_imgs);
3120 		for (i = 0; i < num_imgs; i++) {
3121 			img_hdr_ptr = (struct image_hdr *) (fw->data +
3122 					(sizeof(struct flash_file_hdr_g3) +
3123 					 i * sizeof(struct image_hdr)));
3124 			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3125 				status = be_flash_data(adapter, fw, &flash_cmd,
3126 							num_imgs);
3127 		}
3128 	} else if ((adapter->generation == BE_GEN2) &&
3129 			(get_ufigen_type(fhdr) == BE_GEN2)) {
3130 		status = be_flash_data(adapter, fw, &flash_cmd, 0);
3131 	} else {
3132 		dev_err(&adapter->pdev->dev,
3133 			"UFI and Interface are not compatible for flashing\n");
3134 		status = -1;
3135 	}
3136 
3137 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3138 			  flash_cmd.dma);
3139 	if (status) {
3140 		dev_err(&adapter->pdev->dev, "Firmware load error\n");
3141 		goto be_fw_exit;
3142 	}
3143 
3144 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3145 
3146 be_fw_exit:
3147 	return status;
3148 }
3149 
3150 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3151 {
3152 	const struct firmware *fw;
3153 	int status;
3154 
3155 	if (!netif_running(adapter->netdev)) {
3156 		dev_err(&adapter->pdev->dev,
3157 			"Firmware load not allowed (interface is down)\n");
3158 		return -1;
3159 	}
3160 
3161 	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3162 	if (status)
3163 		goto fw_exit;
3164 
3165 	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3166 
3167 	if (lancer_chip(adapter))
3168 		status = lancer_fw_download(adapter, fw);
3169 	else
3170 		status = be_fw_download(adapter, fw);
3171 
3172 fw_exit:
3173 	release_firmware(fw);
3174 	return status;
3175 }
3176 
3177 static const struct net_device_ops be_netdev_ops = {
3178 	.ndo_open		= be_open,
3179 	.ndo_stop		= be_close,
3180 	.ndo_start_xmit		= be_xmit,
3181 	.ndo_set_rx_mode	= be_set_rx_mode,
3182 	.ndo_set_mac_address	= be_mac_addr_set,
3183 	.ndo_change_mtu		= be_change_mtu,
3184 	.ndo_get_stats64	= be_get_stats64,
3185 	.ndo_validate_addr	= eth_validate_addr,
3186 	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
3187 	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
3188 	.ndo_set_vf_mac		= be_set_vf_mac,
3189 	.ndo_set_vf_vlan	= be_set_vf_vlan,
3190 	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
3191 	.ndo_get_vf_config	= be_get_vf_config,
3192 #ifdef CONFIG_NET_POLL_CONTROLLER
3193 	.ndo_poll_controller	= be_netpoll,
3194 #endif
3195 };
3196 
3197 static void be_netdev_init(struct net_device *netdev)
3198 {
3199 	struct be_adapter *adapter = netdev_priv(netdev);
3200 	struct be_eq_obj *eqo;
3201 	int i;
3202 
3203 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3204 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3205 		NETIF_F_HW_VLAN_TX;
3206 	if (be_multi_rxq(adapter))
3207 		netdev->hw_features |= NETIF_F_RXHASH;
3208 
3209 	netdev->features |= netdev->hw_features |
3210 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3211 
3212 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3213 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3214 
3215 	netdev->priv_flags |= IFF_UNICAST_FLT;
3216 
3217 	netdev->flags |= IFF_MULTICAST;
3218 
3219 	netif_set_gso_max_size(netdev, 65535);
3220 
3221 	netdev->netdev_ops = &be_netdev_ops;
3222 
3223 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3224 
3225 	for_all_evt_queues(adapter, eqo, i)
3226 		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3227 }
3228 
3229 static void be_unmap_pci_bars(struct be_adapter *adapter)
3230 {
3231 	if (adapter->csr)
3232 		iounmap(adapter->csr);
3233 	if (adapter->db)
3234 		iounmap(adapter->db);
3235 }
3236 
3237 static int be_map_pci_bars(struct be_adapter *adapter)
3238 {
3239 	u8 __iomem *addr;
3240 	int db_reg;
3241 
3242 	if (lancer_chip(adapter)) {
3243 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3244 			pci_resource_len(adapter->pdev, 0));
3245 		if (addr == NULL)
3246 			return -ENOMEM;
3247 		adapter->db = addr;
3248 		return 0;
3249 	}
3250 
3251 	if (be_physfn(adapter)) {
3252 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3253 				pci_resource_len(adapter->pdev, 2));
3254 		if (addr == NULL)
3255 			return -ENOMEM;
3256 		adapter->csr = addr;
3257 	}
3258 
3259 	if (adapter->generation == BE_GEN2) {
3260 		db_reg = 4;
3261 	} else {
3262 		if (be_physfn(adapter))
3263 			db_reg = 4;
3264 		else
3265 			db_reg = 0;
3266 	}
3267 	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3268 				pci_resource_len(adapter->pdev, db_reg));
3269 	if (addr == NULL)
3270 		goto pci_map_err;
3271 	adapter->db = addr;
3272 
3273 	return 0;
3274 pci_map_err:
3275 	be_unmap_pci_bars(adapter);
3276 	return -ENOMEM;
3277 }
3278 
3280 static void be_ctrl_cleanup(struct be_adapter *adapter)
3281 {
3282 	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3283 
3284 	be_unmap_pci_bars(adapter);
3285 
3286 	if (mem->va)
3287 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3288 				  mem->dma);
3289 
3290 	mem = &adapter->rx_filter;
3291 	if (mem->va)
3292 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3293 				  mem->dma);
3294 }
3295 
3296 static int be_ctrl_init(struct be_adapter *adapter)
3297 {
3298 	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3299 	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3300 	struct be_dma_mem *rx_filter = &adapter->rx_filter;
3301 	int status;
3302 
3303 	status = be_map_pci_bars(adapter);
3304 	if (status)
3305 		goto done;
3306 
3307 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3308 	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3309 						mbox_mem_alloc->size,
3310 						&mbox_mem_alloc->dma,
3311 						GFP_KERNEL);
3312 	if (!mbox_mem_alloc->va) {
3313 		status = -ENOMEM;
3314 		goto unmap_pci_bars;
3315 	}
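	/* The mailbox must be 16-byte aligned; the allocation above is padded
	 * by 16 bytes so the aligned pointer always fits within it
	 */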
3316 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3317 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3318 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3319 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3320 
3321 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3322 	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3323 					&rx_filter->dma, GFP_KERNEL);
3324 	if (rx_filter->va == NULL) {
3325 		status = -ENOMEM;
3326 		goto free_mbox;
3327 	}
3328 	memset(rx_filter->va, 0, rx_filter->size);
3329 
3330 	mutex_init(&adapter->mbox_lock);
3331 	spin_lock_init(&adapter->mcc_lock);
3332 	spin_lock_init(&adapter->mcc_cq_lock);
3333 
3334 	init_completion(&adapter->flash_compl);
3335 	pci_save_state(adapter->pdev);
3336 	return 0;
3337 
3338 free_mbox:
3339 	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3340 			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
3341 
3342 unmap_pci_bars:
3343 	be_unmap_pci_bars(adapter);
3344 
3345 done:
3346 	return status;
3347 }
3348 
3349 static void be_stats_cleanup(struct be_adapter *adapter)
3350 {
3351 	struct be_dma_mem *cmd = &adapter->stats_cmd;
3352 
3353 	if (cmd->va)
3354 		dma_free_coherent(&adapter->pdev->dev, cmd->size,
3355 				  cmd->va, cmd->dma);
3356 }
3357 
3358 static int be_stats_init(struct be_adapter *adapter)
3359 {
3360 	struct be_dma_mem *cmd = &adapter->stats_cmd;
3361 
3362 	if (adapter->generation == BE_GEN2) {
3363 		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3364 	} else {
3365 		if (lancer_chip(adapter))
3366 			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3367 		else
3368 			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3369 	}
3370 	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3371 				     GFP_KERNEL);
3372 	if (cmd->va == NULL)
3373 		return -1;
3374 	memset(cmd->va, 0, cmd->size);
3375 	return 0;
3376 }
3377 
3378 static void __devexit be_remove(struct pci_dev *pdev)
3379 {
3380 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3381 
3382 	if (!adapter)
3383 		return;
3384 
3385 	unregister_netdev(adapter->netdev);
3386 
3387 	be_clear(adapter);
3388 
3389 	be_stats_cleanup(adapter);
3390 
3391 	be_ctrl_cleanup(adapter);
3392 
3393 	pci_set_drvdata(pdev, NULL);
3394 	pci_release_regions(pdev);
3395 	pci_disable_device(pdev);
3396 
3397 	free_netdev(adapter->netdev);
3398 }
3399 
3400 bool be_is_wol_supported(struct be_adapter *adapter)
3401 {
3402 	return (adapter->wol_cap & BE_WOL_CAP) &&
3403 		!be_is_wol_excluded(adapter);
3404 }
3405 
3406 static int be_get_initial_config(struct be_adapter *adapter)
3407 {
3408 	int status;
3409 
3410 	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3411 			&adapter->function_mode, &adapter->function_caps);
3412 	if (status)
3413 		return status;
3414 
3415 	if (adapter->function_mode & FLEX10_MODE)
3416 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3417 	else
3418 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3419 
3420 	if (be_physfn(adapter))
3421 		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3422 	else
3423 		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3424 
3425 	/* primary mac needs 1 pmac entry */
3426 	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3427 				  sizeof(u32), GFP_KERNEL);
3428 	if (!adapter->pmac_id)
3429 		return -ENOMEM;
3430 
3431 	status = be_cmd_get_cntl_attributes(adapter);
3432 	if (status)
3433 		return status;
3434 
3435 	status = be_cmd_get_acpi_wol_cap(adapter);
3436 	if (status) {
3437 		/* in case of a failure to get wol capabilities,
3438 		 * check the exclusion list to determine WOL capability */
3439 		if (!be_is_wol_excluded(adapter))
3440 			adapter->wol_cap |= BE_WOL_CAP;
3441 	}
3442 
3443 	if (be_is_wol_supported(adapter))
3444 		adapter->wol = true;
3445 
3446 	return 0;
3447 }
3448 
3449 static int be_dev_type_check(struct be_adapter *adapter)
3450 {
3451 	struct pci_dev *pdev = adapter->pdev;
3452 	u32 sli_intf = 0, if_type;
3453 
3454 	switch (pdev->device) {
3455 	case BE_DEVICE_ID1:
3456 	case OC_DEVICE_ID1:
3457 		adapter->generation = BE_GEN2;
3458 		break;
3459 	case BE_DEVICE_ID2:
3460 	case OC_DEVICE_ID2:
3461 	case OC_DEVICE_ID5:
3462 		adapter->generation = BE_GEN3;
3463 		break;
3464 	case OC_DEVICE_ID3:
3465 	case OC_DEVICE_ID4:
3466 		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3467 		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3468 						SLI_INTF_IF_TYPE_SHIFT;
3469 
3470 		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3471 			if_type != 0x02) {
3472 			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3473 			return -EINVAL;
3474 		}
3475 		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3476 					 SLI_INTF_FAMILY_SHIFT);
3477 		adapter->generation = BE_GEN3;
3478 		break;
3479 	default:
3480 		adapter->generation = 0;
3481 	}
3482 
3483 	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3484 	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3485 	return 0;
3486 }
3487 
3488 static int lancer_wait_ready(struct be_adapter *adapter)
3489 {
3490 #define SLIPORT_READY_TIMEOUT 30
3491 	u32 sliport_status;
3492 	int status = 0, i;
3493 
3494 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3495 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3496 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3497 			break;
3498 
3499 		msleep(1000);
3500 	}
3501 
3502 	if (i == SLIPORT_READY_TIMEOUT)
3503 		status = -1;
3504 
3505 	return status;
3506 }
3507 
3508 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3509 {
3510 	int status;
3511 	u32 sliport_status, err, reset_needed;
3512 	status = lancer_wait_ready(adapter);
3513 	if (!status) {
3514 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3515 		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3516 		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3517 		if (err && reset_needed) {
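			/* Kick off adapter recovery via the SLIPORT control
			 * register and wait for it to report ready again
			 */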
3518 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
3519 					adapter->db + SLIPORT_CONTROL_OFFSET);
3520 
3521 			/* check adapter has corrected the error */
3522 			status = lancer_wait_ready(adapter);
3523 			sliport_status = ioread32(adapter->db +
3524 							SLIPORT_STATUS_OFFSET);
3525 			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3526 						SLIPORT_STATUS_RN_MASK);
3527 			if (status || sliport_status)
3528 				status = -1;
3529 		} else if (err || reset_needed) {
3530 			status = -1;
3531 		}
3532 	}
3533 	return status;
3534 }
3535 
3536 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3537 {
3538 	int status;
3539 	u32 sliport_status;
3540 
3541 	if (adapter->eeh_err || adapter->ue_detected)
3542 		return;
3543 
3544 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3545 
3546 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3547 		dev_err(&adapter->pdev->dev,
3548 				"Adapter in error state. Trying to recover.\n");
3550 
3551 		status = lancer_test_and_set_rdy_state(adapter);
3552 		if (status)
3553 			goto err;
3554 
3555 		netif_device_detach(adapter->netdev);
3556 
3557 		if (netif_running(adapter->netdev))
3558 			be_close(adapter->netdev);
3559 
3560 		be_clear(adapter);
3561 
3562 		adapter->fw_timeout = false;
3563 
3564 		status = be_setup(adapter);
3565 		if (status)
3566 			goto err;
3567 
3568 		if (netif_running(adapter->netdev)) {
3569 			status = be_open(adapter->netdev);
3570 			if (status)
3571 				goto err;
3572 		}
3573 
3574 		netif_device_attach(adapter->netdev);
3575 
3576 		dev_err(&adapter->pdev->dev,
3577 				"Adapter error recovery succeeded\n");
3578 	}
3579 	return;
3580 err:
3581 	dev_err(&adapter->pdev->dev,
3582 			"Adapter error recovery failed\n");
3583 }
3584 
3585 static void be_worker(struct work_struct *work)
3586 {
3587 	struct be_adapter *adapter =
3588 		container_of(work, struct be_adapter, work.work);
3589 	struct be_rx_obj *rxo;
3590 	struct be_eq_obj *eqo;
3591 	int i;
3592 
3593 	if (lancer_chip(adapter))
3594 		lancer_test_and_recover_fn_err(adapter);
3595 
3596 	be_detect_dump_ue(adapter);
3597 
3598 	/* when interrupts are not yet enabled, just reap any pending
3599 	 * mcc completions */
3600 	if (!netif_running(adapter->netdev)) {
3601 		be_process_mcc(adapter);
3602 		goto reschedule;
3603 	}
3604 
3605 	if (!adapter->stats_cmd_sent) {
3606 		if (lancer_chip(adapter))
3607 			lancer_cmd_get_pport_stats(adapter,
3608 						&adapter->stats_cmd);
3609 		else
3610 			be_cmd_get_stats(adapter, &adapter->stats_cmd);
3611 	}
3612 
3613 	for_all_rx_queues(adapter, rxo, i) {
3614 		if (rxo->rx_post_starved) {
3615 			rxo->rx_post_starved = false;
3616 			be_post_rx_frags(rxo, GFP_KERNEL);
3617 		}
3618 	}
3619 
3620 	for_all_evt_queues(adapter, eqo, i)
3621 		be_eqd_update(adapter, eqo);
3622 
3623 reschedule:
3624 	adapter->work_counter++;
3625 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3626 }
3627 
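/* The scratchpad CSR is set to 1 at the end of be_setup() and cleared in
 * be_clear(); a non-zero value at probe means the previous driver instance
 * did not tear down cleanly, so the function is reset
 */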
3628 static bool be_reset_required(struct be_adapter *adapter)
3629 {
3630 	u32 reg;
3631 
3632 	pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3633 	return reg;
3634 }
3635 
3636 static int __devinit be_probe(struct pci_dev *pdev,
3637 			const struct pci_device_id *pdev_id)
3638 {
3639 	int status = 0;
3640 	struct be_adapter *adapter;
3641 	struct net_device *netdev;
3642 
3643 	status = pci_enable_device(pdev);
3644 	if (status)
3645 		goto do_none;
3646 
3647 	status = pci_request_regions(pdev, DRV_NAME);
3648 	if (status)
3649 		goto disable_dev;
3650 	pci_set_master(pdev);
3651 
3652 	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3653 	if (netdev == NULL) {
3654 		status = -ENOMEM;
3655 		goto rel_reg;
3656 	}
3657 	adapter = netdev_priv(netdev);
3658 	adapter->pdev = pdev;
3659 	pci_set_drvdata(pdev, adapter);
3660 
3661 	status = be_dev_type_check(adapter);
3662 	if (status)
3663 		goto free_netdev;
3664 
3665 	adapter->netdev = netdev;
3666 	SET_NETDEV_DEV(netdev, &pdev->dev);
3667 
3668 	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3669 	if (!status) {
3670 		netdev->features |= NETIF_F_HIGHDMA;
3671 	} else {
3672 		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3673 		if (status) {
3674 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3675 			goto free_netdev;
3676 		}
3677 	}
3678 
3679 	status = be_ctrl_init(adapter);
3680 	if (status)
3681 		goto free_netdev;
3682 
3683 	if (lancer_chip(adapter)) {
3684 		status = lancer_wait_ready(adapter);
3685 		if (!status) {
3686 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
3687 					adapter->db + SLIPORT_CONTROL_OFFSET);
3688 			status = lancer_test_and_set_rdy_state(adapter);
3689 		}
3690 		if (status) {
3691 			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3692 			goto ctrl_clean;
3693 		}
3694 	}
3695 
3696 	/* sync up with fw's ready state */
3697 	if (be_physfn(adapter)) {
3698 		status = be_cmd_POST(adapter);
3699 		if (status)
3700 			goto ctrl_clean;
3701 	}
3702 
3703 	/* tell fw we're ready to fire cmds */
3704 	status = be_cmd_fw_init(adapter);
3705 	if (status)
3706 		goto ctrl_clean;
3707 
3708 	if (be_reset_required(adapter)) {
3709 		status = be_cmd_reset_function(adapter);
3710 		if (status)
3711 			goto ctrl_clean;
3712 	}
3713 
3714 	/* The INTR bit may be set in the card when probed by a kdump kernel
3715 	 * after a crash.
3716 	 */
3717 	if (!lancer_chip(adapter))
3718 		be_intr_set(adapter, false);
3719 
3720 	status = be_stats_init(adapter);
3721 	if (status)
3722 		goto ctrl_clean;
3723 
3724 	status = be_get_initial_config(adapter);
3725 	if (status)
3726 		goto stats_clean;
3727 
3728 	INIT_DELAYED_WORK(&adapter->work, be_worker);
3729 	adapter->rx_fc = adapter->tx_fc = true;
3730 
3731 	status = be_setup(adapter);
3732 	if (status)
3733 		goto msix_disable;
3734 
3735 	be_netdev_init(netdev);
3736 	status = register_netdev(netdev);
3737 	if (status != 0)
3738 		goto unsetup;
3739 
3740 	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3741 		adapter->port_num);
3742 
3743 	return 0;
3744 
3745 unsetup:
3746 	be_clear(adapter);
3747 msix_disable:
3748 	be_msix_disable(adapter);
3749 stats_clean:
3750 	be_stats_cleanup(adapter);
3751 ctrl_clean:
3752 	be_ctrl_cleanup(adapter);
3753 free_netdev:
3754 	free_netdev(netdev);
3755 	pci_set_drvdata(pdev, NULL);
3756 rel_reg:
3757 	pci_release_regions(pdev);
3758 disable_dev:
3759 	pci_disable_device(pdev);
3760 do_none:
3761 	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3762 	return status;
3763 }
3764 
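/* Legacy PM suspend: optionally arms wake-on-LAN, detaches and closes the
 * interface, tears down adapter resources and puts the device into the
 * requested low-power state.
 */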
3765 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3766 {
3767 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3768 	struct net_device *netdev = adapter->netdev;
3769 
3770 	if (adapter->wol)
3771 		be_setup_wol(adapter, true);
3772 
3773 	netif_device_detach(netdev);
3774 	if (netif_running(netdev)) {
3775 		rtnl_lock();
3776 		be_close(netdev);
3777 		rtnl_unlock();
3778 	}
3779 	be_clear(adapter);
3780 
3781 	pci_save_state(pdev);
3782 	pci_disable_device(pdev);
3783 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3784 	return 0;
3785 }
3786 
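/* Legacy PM resume: re-enables the device, re-initializes firmware and
 * adapter resources, reopens the interface and disarms wake-on-LAN.
 */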
3787 static int be_resume(struct pci_dev *pdev)
3788 {
3789 	int status = 0;
3790 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3791 	struct net_device *netdev = adapter->netdev;
3792 
3793 	netif_device_detach(netdev);
3794 
3795 	status = pci_enable_device(pdev);
3796 	if (status)
3797 		return status;
3798 
3799 	pci_set_power_state(pdev, PCI_D0);
3800 	pci_restore_state(pdev);
3801 
3802 	/* tell fw we're ready to fire cmds */
3803 	status = be_cmd_fw_init(adapter);
3804 	if (status)
3805 		return status;
3806 
3807 	be_setup(adapter);
3808 	if (netif_running(netdev)) {
3809 		rtnl_lock();
3810 		be_open(netdev);
3811 		rtnl_unlock();
3812 	}
3813 	netif_device_attach(netdev);
3814 
3815 	if (adapter->wol)
3816 		be_setup_wol(adapter, false);
3817 
3818 	return 0;
3819 }
3820 
3821 /*
3822  * A function-level reset (FLR) will stop BE from DMAing any data.
3823  */
3824 static void be_shutdown(struct pci_dev *pdev)
3825 {
3826 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3827 
3828 	if (!adapter)
3829 		return;
3830 
3831 	cancel_delayed_work_sync(&adapter->work);
3832 
3833 	netif_device_detach(adapter->netdev);
3834 
3835 	if (adapter->wol)
3836 		be_setup_wol(adapter, true);
3837 
3838 	be_cmd_reset_function(adapter);
3839 
3840 	pci_disable_device(pdev);
3841 }
3842 
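/* EEH/AER callback: a PCI channel error was detected. Quiesce the interface
 * and release adapter resources, then report whether a slot reset should be
 * attempted.
 */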
3843 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3844 				pci_channel_state_t state)
3845 {
3846 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3847 	struct net_device *netdev = adapter->netdev;
3848 
3849 	dev_err(&adapter->pdev->dev, "EEH error detected\n");
3850 
3851 	adapter->eeh_err = true;
3852 
3853 	netif_device_detach(netdev);
3854 
3855 	if (netif_running(netdev)) {
3856 		rtnl_lock();
3857 		be_close(netdev);
3858 		rtnl_unlock();
3859 	}
3860 	be_clear(adapter);
3861 
3862 	if (state == pci_channel_io_perm_failure)
3863 		return PCI_ERS_RESULT_DISCONNECT;
3864 
3865 	pci_disable_device(pdev);
3866 
3867 	/* The error could cause the FW to trigger a flash debug dump.
3868 	 * Resetting the card while flash dump is in progress
3869 	 * can cause it not to recover; wait for it to finish
3870 	 */
3871 	ssleep(30);
3872 	return PCI_ERS_RESULT_NEED_RESET;
3873 }
3874 
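/* EEH/AER slot-reset callback: re-enable the device, restore PCI state and
 * verify that firmware returns to a POST-complete state.
 */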
3875 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3876 {
3877 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3878 	int status;
3879 
3880 	dev_info(&adapter->pdev->dev, "EEH reset\n");
3881 	adapter->eeh_err = false;
3882 	adapter->ue_detected = false;
3883 	adapter->fw_timeout = false;
3884 
3885 	status = pci_enable_device(pdev);
3886 	if (status)
3887 		return PCI_ERS_RESULT_DISCONNECT;
3888 
3889 	pci_set_master(pdev);
3890 	pci_set_power_state(pdev, PCI_D0);
3891 	pci_restore_state(pdev);
3892 
3893 	/* Check if card is ok and fw is ready */
3894 	status = be_cmd_POST(adapter);
3895 	if (status)
3896 		return PCI_ERS_RESULT_DISCONNECT;
3897 
3898 	return PCI_ERS_RESULT_RECOVERED;
3899 }
3900 
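/* EEH/AER resume callback: re-initialize firmware and adapter resources and
 * bring the interface back up after a successful slot reset.
 */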
3901 static void be_eeh_resume(struct pci_dev *pdev)
3902 {
3903 	int status = 0;
3904 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3905 	struct net_device *netdev = adapter->netdev;
3906 
3907 	dev_info(&adapter->pdev->dev, "EEH resume\n");
3908 
3909 	pci_save_state(pdev);
3910 
3911 	/* tell fw we're ready to fire cmds */
3912 	status = be_cmd_fw_init(adapter);
3913 	if (status)
3914 		goto err;
3915 
3916 	status = be_setup(adapter);
3917 	if (status)
3918 		goto err;
3919 
3920 	if (netif_running(netdev)) {
3921 		status = be_open(netdev);
3922 		if (status)
3923 			goto err;
3924 	}
3925 	netif_device_attach(netdev);
3926 	return;
3927 err:
3928 	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3929 }
3930 
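/* PCI error (EEH/AER) recovery callbacks */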
3931 static struct pci_error_handlers be_eeh_handlers = {
3932 	.error_detected = be_eeh_err_detected,
3933 	.slot_reset = be_eeh_reset,
3934 	.resume = be_eeh_resume,
3935 };
3936 
3937 static struct pci_driver be_driver = {
3938 	.name = DRV_NAME,
3939 	.id_table = be_dev_ids,
3940 	.probe = be_probe,
3941 	.remove = be_remove,
3942 	.suspend = be_suspend,
3943 	.resume = be_resume,
3944 	.shutdown = be_shutdown,
3945 	.err_handler = &be_eeh_handlers
3946 };
3947 
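/* Module init: validate the rx_frag_size parameter (falling back to 2048 for
 * unsupported values) and register the PCI driver; e.g. (assuming the usual
 * module name) "modprobe be2net rx_frag_size=4096".
 */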
3948 static int __init be_init_module(void)
3949 {
3950 	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3951 	    rx_frag_size != 2048) {
3952 		printk(KERN_WARNING DRV_NAME
3953 			" : Module param rx_frag_size must be 2048/4096/8192."
3954 			" Using 2048\n");
3955 		rx_frag_size = 2048;
3956 	}
3957 
3958 	return pci_register_driver(&be_driver);
3959 }
3960 module_init(be_init_module);
3961 
3962 static void __exit be_exit_module(void)
3963 {
3964 	pci_unregister_driver(&be_driver);
3965 }
3966 module_exit(be_exit_module);
3967