1 /* bnx2x_cmn.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35 
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40 
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43 	int i;
44 
45 	/* Add NAPI objects */
46 	for_each_rx_queue_cnic(bp, i) {
47 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 			       bnx2x_poll, NAPI_POLL_WEIGHT);
49 	}
50 }
51 
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
53 {
54 	int i;
55 
56 	/* Add NAPI objects */
57 	for_each_eth_queue(bp, i) {
58 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 			       bnx2x_poll, NAPI_POLL_WEIGHT);
60 	}
61 }
62 
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 {
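	/* bnx2x_num_queues is normally set from the driver's num_queues
	 * module parameter; the GNU "?:" shorthand falls back to the
	 * kernel's default RSS queue count when the parameter is zero.
	 */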
65 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 
67 	/* Reduce memory usage in kdump environment by using only one queue */
68 	if (is_kdump_kernel())
69 		nq = 1;
70 
71 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72 	return nq;
73 }
74 
75 /**
76  * bnx2x_move_fp - move content of the fastpath structure.
77  *
78  * @bp:		driver handle
79  * @from:	source FP index
80  * @to:		destination FP index
81  *
82  * Makes sure the contents of bp->fp[to].napi are kept
83  * intact. This is done by first copying the napi struct from
84  * the target to the source, and then memcpy'ing the entire
85  * source onto the target. Update txdata pointers and related
86  * content.
87  */
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 {
90 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
92 	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96 	int old_max_eth_txqs, new_max_eth_txqs;
97 	int old_txdata_index = 0, new_txdata_index = 0;
98 	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 
100 	/* Copy the NAPI object as it has been already initialized */
101 	from_fp->napi = to_fp->napi;
102 
103 	/* Move bnx2x_fastpath contents */
104 	memcpy(to_fp, from_fp, sizeof(*to_fp));
105 	to_fp->index = to;
106 
107 	/* Retain the tpa_info of the original `to' version as we don't want
108 	 * 2 FPs to contain the same tpa_info pointer.
109 	 */
110 	to_fp->tpa_info = old_tpa_info;
111 
112 	/* move sp_objs contents as well, as their indices match fp ones */
113 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 
115 	/* move fp_stats contents as well, as their indices match fp ones */
116 	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 
118 	/* Update txdata pointers in fp and move txdata content accordingly:
119 	 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 	 * decremented by max_cos x delta.
121 	 */
122 
123 	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 				(bp)->max_cos;
126 	if (from == FCOE_IDX(bp)) {
127 		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129 	}
130 
131 	memcpy(&bp->bnx2x_txq[new_txdata_index],
132 	       &bp->bnx2x_txq[old_txdata_index],
133 	       sizeof(struct bnx2x_fp_txdata));
134 	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135 }
136 
137 /**
138  * bnx2x_fill_fw_str - Fill buffer with FW version string.
139  *
140  * @bp:        driver handle
141  * @buf:       character buffer to fill with the fw name
142  * @buf_len:   length of the above buffer
143  *
144  */
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146 {
147 	if (IS_PF(bp)) {
148 		u8 phy_fw_ver[PHY_FW_VER_LEN];
149 
150 		phy_fw_ver[0] = '\0';
151 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 					     phy_fw_ver, PHY_FW_VER_LEN);
153 		strlcpy(buf, bp->fw_ver, buf_len);
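		/* bp->common.bc_ver packs the bootcode version as three bytes:
		 * major, minor and patch, decoded below.
		 */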
154 		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 			 "bc %d.%d.%d%s%s",
156 			 (bp->common.bc_ver & 0xff0000) >> 16,
157 			 (bp->common.bc_ver & 0xff00) >> 8,
158 			 (bp->common.bc_ver & 0xff),
159 			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 	} else {
161 		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162 	}
163 }
164 
165 /**
166  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167  *
168  * @bp:	driver handle
169  * @delta:	number of eth queues which were not allocated
170  */
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 {
173 	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 
175 	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
176 	 * backward along the array could cause memory to be overwritten
177 	 */
178 	for (cos = 1; cos < bp->max_cos; cos++) {
179 		for (i = 0; i < old_eth_num - delta; i++) {
180 			struct bnx2x_fastpath *fp = &bp->fp[i];
181 			int new_idx = cos * (old_eth_num - delta) + i;
182 
183 			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 			       sizeof(struct bnx2x_fp_txdata));
185 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186 		}
187 	}
188 }
189 
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191 
192 /* free skb in the packet ring at pos idx
193  * return idx of last bd freed
194  */
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196 			     u16 idx, unsigned int *pkts_compl,
197 			     unsigned int *bytes_compl)
198 {
199 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200 	struct eth_tx_start_bd *tx_start_bd;
201 	struct eth_tx_bd *tx_data_bd;
202 	struct sk_buff *skb = tx_buf->skb;
203 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 	int nbd;
205 	u16 split_bd_len = 0;
206 
207 	/* prefetch skb end pointer to speed up dev_kfree_skb() */
208 	prefetch(&skb->end);
209 
210 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
211 	   txdata->txq_index, idx, tx_buf, skb);
212 
213 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 
215 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 		BNX2X_ERR("BAD nbd!\n");
219 		bnx2x_panic();
220 	}
221 #endif
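	/* new_cons is the tx_bd_cons value once every BD of this packet has
	 * been released; it is returned to the caller below.
	 */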
222 	new_cons = nbd + tx_buf->first_bd;
223 
224 	/* Get the next bd */
225 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 
227 	/* Skip a parse bd... */
228 	--nbd;
229 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 
231 	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 		/* Skip second parse bd... */
233 		--nbd;
234 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235 	}
236 
237 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 		--nbd;
242 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243 	}
244 
245 	/* unmap first bd */
246 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248 			 DMA_TO_DEVICE);
249 
250 	/* now free frags */
251 	while (nbd > 0) {
252 
253 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 		if (--nbd)
257 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258 	}
259 
260 	/* release skb */
261 	WARN_ON(!skb);
262 	if (likely(skb)) {
263 		(*pkts_compl)++;
264 		(*bytes_compl) += skb->len;
265 		dev_kfree_skb_any(skb);
266 	}
267 
268 	tx_buf->first_bd = 0;
269 	tx_buf->skb = NULL;
270 
271 	return new_cons;
272 }
273 
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 {
276 	struct netdev_queue *txq;
277 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278 	unsigned int pkts_compl = 0, bytes_compl = 0;
279 
280 #ifdef BNX2X_STOP_ON_ERROR
281 	if (unlikely(bp->panic))
282 		return -1;
283 #endif
284 
285 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 	sw_cons = txdata->tx_pkt_cons;
288 
289 	while (sw_cons != hw_cons) {
290 		u16 pkt_cons;
291 
292 		pkt_cons = TX_BD(sw_cons);
293 
294 		DP(NETIF_MSG_TX_DONE,
295 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
296 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297 
298 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
299 					    &pkts_compl, &bytes_compl);
300 
301 		sw_cons++;
302 	}
303 
304 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305 
306 	txdata->tx_pkt_cons = sw_cons;
307 	txdata->tx_bd_cons = bd_cons;
308 
309 	/* Need to make the tx_bd_cons update visible to start_xmit()
310 	 * before checking for netif_tx_queue_stopped().  Without the
311 	 * memory barrier, there is a small possibility that
312 	 * start_xmit() will miss it and cause the queue to be stopped
313 	 * forever.
314 	 * On the other hand we need an rmb() here to ensure the proper
315 	 * ordering of bit testing in the following
316 	 * netif_tx_queue_stopped(txq) call.
317 	 */
318 	smp_mb();
319 
320 	if (unlikely(netif_tx_queue_stopped(txq))) {
321 		/* Taking tx_lock() is needed to prevent re-enabling the queue
322 		 * while it's empty. This could have happened if rx_action() gets
323 		 * suspended in bnx2x_tx_int() after the condition before
324 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325 		 *
326 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
327 		 * sends some packets consuming the whole queue again->
328 		 * stops the queue
329 		 */
330 
331 		__netif_tx_lock(txq, smp_processor_id());
332 
333 		if ((netif_tx_queue_stopped(txq)) &&
334 		    (bp->state == BNX2X_STATE_OPEN) &&
335 		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336 			netif_tx_wake_queue(txq);
337 
338 		__netif_tx_unlock(txq);
339 	}
340 	return 0;
341 }
342 
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344 					     u16 idx)
345 {
346 	u16 last_max = fp->last_max_sge;
347 
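	/* SUB_S16() compares the indices as signed 16-bit values so the test
	 * stays correct when the SGE index wraps around.
	 */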
348 	if (SUB_S16(idx, last_max) > 0)
349 		fp->last_max_sge = idx;
350 }
351 
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353 					 u16 sge_len,
354 					 struct eth_end_agg_rx_cqe *cqe)
355 {
356 	struct bnx2x *bp = fp->bp;
357 	u16 last_max, last_elem, first_elem;
358 	u16 delta = 0;
359 	u16 i;
360 
361 	if (!sge_len)
362 		return;
363 
364 	/* First mark all used pages */
365 	for (i = 0; i < sge_len; i++)
366 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368 
369 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371 
372 	/* Here we assume that the last SGE index is the biggest */
373 	prefetch((void *)(fp->sge_mask));
374 	bnx2x_update_last_max_sge(fp,
375 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376 
377 	last_max = RX_SGE(fp->last_max_sge);
378 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380 
381 	/* If ring is not full */
382 	if (last_elem + 1 != first_elem)
383 		last_elem++;
384 
385 	/* Now update the prod */
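	/* Each sge_mask element covers 64 SGEs; advance the producer only over
	 * elements that are fully consumed (all bits cleared), re-arming each
	 * one, and stop at the first element that still has unconsumed SGEs.
	 */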
386 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387 		if (likely(fp->sge_mask[i]))
388 			break;
389 
390 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391 		delta += BIT_VEC64_ELEM_SZ;
392 	}
393 
394 	if (delta > 0) {
395 		fp->rx_sge_prod += delta;
396 		/* clear page-end entries */
397 		bnx2x_clear_sge_mask_next_elems(fp);
398 	}
399 
400 	DP(NETIF_MSG_RX_STATUS,
401 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
402 	   fp->last_max_sge, fp->rx_sge_prod);
403 }
404 
405 /* Get Toeplitz hash value in the skb using the value from the
406  * CQE (calculated by HW).
407  */
408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
409 			    const struct eth_fast_path_rx_cqe *cqe,
410 			    enum pkt_hash_types *rxhash_type)
411 {
412 	/* Get Toeplitz hash from CQE */
413 	if ((bp->dev->features & NETIF_F_RXHASH) &&
414 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415 		enum eth_rss_hash_type htype;
416 
417 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418 		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419 				(htype == TCP_IPV6_HASH_TYPE)) ?
420 			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421 
422 		return le32_to_cpu(cqe->rss_hash_result);
423 	}
424 	*rxhash_type = PKT_HASH_TYPE_NONE;
425 	return 0;
426 }
427 
428 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429 			    u16 cons, u16 prod,
430 			    struct eth_fast_path_rx_cqe *cqe)
431 {
432 	struct bnx2x *bp = fp->bp;
433 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436 	dma_addr_t mapping;
437 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439 
440 	/* print error if current state != stop */
441 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443 
444 	/* Try to map an empty data buffer from the aggregation info  */
445 	mapping = dma_map_single(&bp->pdev->dev,
446 				 first_buf->data + NET_SKB_PAD,
447 				 fp->rx_buf_size, DMA_FROM_DEVICE);
448 	/*
449 	 *  ...if it fails - move the skb from the consumer to the producer
450 	 *  and set the current aggregation state as ERROR to drop it
451 	 *  when TPA_STOP arrives.
452 	 */
453 
454 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455 		/* Move the BD from the consumer to the producer */
456 		bnx2x_reuse_rx_data(fp, cons, prod);
457 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
458 		return;
459 	}
460 
461 	/* move empty data from pool to prod */
462 	prod_rx_buf->data = first_buf->data;
463 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
464 	/* point prod_bd to new data */
465 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467 
468 	/* move partial skb from cons to pool (don't unmap yet) */
469 	*first_buf = *cons_rx_buf;
470 
471 	/* mark bin state as START */
472 	tpa_info->parsing_flags =
473 		le16_to_cpu(cqe->pars_flags.flags);
474 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475 	tpa_info->tpa_state = BNX2X_TPA_START;
476 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477 	tpa_info->placement_offset = cqe->placement_offset;
478 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479 	if (fp->mode == TPA_MODE_GRO) {
480 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
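		/* full_page is the largest multiple of the aggregated segment
		 * size (gro_size) that fits in SGE_PAGES bytes.
		 */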
481 		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482 		tpa_info->gro_size = gro_size;
483 	}
484 
485 #ifdef BNX2X_STOP_ON_ERROR
486 	fp->tpa_queue_used |= (1 << queue);
487 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
488 	   fp->tpa_queue_used);
489 #endif
490 }
491 
492 /* Timestamp option length allowed for TPA aggregation:
493  *
494  *		nop nop kind length echo val
495  */
496 #define TPA_TSTAMP_OPT_LEN	12
497 /**
498  * bnx2x_set_gro_params - compute GRO values
499  *
500  * @skb:		packet skb
501  * @parsing_flags:	parsing flags from the START CQE
502  * @len_on_bd:		total length of the first packet for the
503  *			aggregation.
504  * @pkt_len:		length of all segments
505  *
506  * Approximate value of the MSS for this aggregation is calculated
507  * using its first packet.
508  * Compute the number of aggregated segments, and the gso_type.
509  */
510 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511 				 u16 len_on_bd, unsigned int pkt_len,
512 				 u16 num_of_coalesced_segs)
513 {
514 	/* TPA aggregation won't have either IP options or TCP options
515 	 * other than timestamp or IPv6 extension headers.
516 	 */
517 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518 
519 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
520 	    PRS_FLAG_OVERETH_IPV6) {
521 		hdrs_len += sizeof(struct ipv6hdr);
522 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523 	} else {
524 		hdrs_len += sizeof(struct iphdr);
525 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526 	}
527 
528 	/* Check if there was a TCP timestamp; if there is, it will
529 	 * always be 12 bytes long: nop nop kind length echo val.
530 	 *
531 	 * Otherwise FW would close the aggregation.
532 	 */
533 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534 		hdrs_len += TPA_TSTAMP_OPT_LEN;
535 
536 	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537 
538 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539 	 * to skb_shinfo(skb)->gso_segs
540 	 */
541 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
542 }
543 
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545 			      u16 index, gfp_t gfp_mask)
546 {
547 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
550 	dma_addr_t mapping;
551 
552 	if (!pool->page) {
553 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
554 		if (unlikely(!pool->page))
555 			return -ENOMEM;
556 
557 		pool->offset = 0;
558 	}
559 
560 	mapping = dma_map_page(&bp->pdev->dev, pool->page,
561 			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
562 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
563 		BNX2X_ERR("Can't map sge\n");
564 		return -ENOMEM;
565 	}
566 
567 	sw_buf->page = pool->page;
568 	sw_buf->offset = pool->offset;
569 
570 	dma_unmap_addr_set(sw_buf, mapping, mapping);
571 
572 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
573 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
574 
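	/* The ring entry now holds a reference to pool->page.  If another SGE
	 * still fits, grab an extra reference so the pool can keep carving
	 * from this page; otherwise hand the pool's reference over to the
	 * ring and start with a fresh page on the next allocation.
	 */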
575 	pool->offset += SGE_PAGE_SIZE;
576 	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
577 		get_page(pool->page);
578 	else
579 		pool->page = NULL;
580 	return 0;
581 }
582 
583 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
584 			       struct bnx2x_agg_info *tpa_info,
585 			       u16 pages,
586 			       struct sk_buff *skb,
587 			       struct eth_end_agg_rx_cqe *cqe,
588 			       u16 cqe_idx)
589 {
590 	struct sw_rx_page *rx_pg, old_rx_pg;
591 	u32 i, frag_len, frag_size;
592 	int err, j, frag_id = 0;
593 	u16 len_on_bd = tpa_info->len_on_bd;
594 	u16 full_page = 0, gro_size = 0;
595 
596 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
597 
598 	if (fp->mode == TPA_MODE_GRO) {
599 		gro_size = tpa_info->gro_size;
600 		full_page = tpa_info->full_page;
601 	}
602 
603 	/* This is needed in order to enable forwarding support */
604 	if (frag_size)
605 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
606 				     le16_to_cpu(cqe->pkt_len),
607 				     le16_to_cpu(cqe->num_of_coalesced_segs));
608 
609 #ifdef BNX2X_STOP_ON_ERROR
610 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
611 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
612 			  pages, cqe_idx);
613 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
614 		bnx2x_panic();
615 		return -EINVAL;
616 	}
617 #endif
618 
619 	/* Run through the SGL and compose the fragmented skb */
620 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
621 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
622 
623 		/* FW gives the indices of the SGE as if the ring is an array
624 		   (meaning that "next" element will consume 2 indices) */
625 		if (fp->mode == TPA_MODE_GRO)
626 			frag_len = min_t(u32, frag_size, (u32)full_page);
627 		else /* LRO */
628 			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
629 
630 		rx_pg = &fp->rx_page_ring[sge_idx];
631 		old_rx_pg = *rx_pg;
632 
633 		/* If we fail to allocate a substitute page, we simply stop
634 		   where we are and drop the whole packet */
635 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
636 		if (unlikely(err)) {
637 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
638 			return err;
639 		}
640 
641 		dma_unmap_page(&bp->pdev->dev,
642 			       dma_unmap_addr(&old_rx_pg, mapping),
643 			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
644 		/* Add one frag and update the appropriate fields in the skb */
645 		if (fp->mode == TPA_MODE_LRO)
646 			skb_fill_page_desc(skb, j, old_rx_pg.page,
647 					   old_rx_pg.offset, frag_len);
648 		else { /* GRO */
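			/* Split the SGE into MSS-sized frags so the stack sees
			 * proper GRO segments; each extra frag sharing the page
			 * needs its own page reference.
			 */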
649 			int rem;
650 			int offset = 0;
651 			for (rem = frag_len; rem > 0; rem -= gro_size) {
652 				int len = rem > gro_size ? gro_size : rem;
653 				skb_fill_page_desc(skb, frag_id++,
654 						   old_rx_pg.page,
655 						   old_rx_pg.offset + offset,
656 						   len);
657 				if (offset)
658 					get_page(old_rx_pg.page);
659 				offset += len;
660 			}
661 		}
662 
663 		skb->data_len += frag_len;
664 		skb->truesize += SGE_PAGES;
665 		skb->len += frag_len;
666 
667 		frag_size -= frag_len;
668 	}
669 
670 	return 0;
671 }
672 
673 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
674 {
675 	if (fp->rx_frag_size)
676 		skb_free_frag(data);
677 	else
678 		kfree(data);
679 }
680 
681 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
682 {
683 	if (fp->rx_frag_size) {
684 		/* GFP_KERNEL allocations are used only during initialization */
685 		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
686 			return (void *)__get_free_page(gfp_mask);
687 
688 		return netdev_alloc_frag(fp->rx_frag_size);
689 	}
690 
691 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
692 }
693 
694 #ifdef CONFIG_INET
695 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
696 {
697 	const struct iphdr *iph = ip_hdr(skb);
698 	struct tcphdr *th;
699 
700 	skb_set_transport_header(skb, sizeof(struct iphdr));
701 	th = tcp_hdr(skb);
702 
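	/* Seed th->check with the complemented pseudo-header checksum that
	 * CHECKSUM_PARTIAL expects once tcp_gro_complete() finalizes the
	 * coalesced packet.
	 */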
703 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
704 				  iph->saddr, iph->daddr, 0);
705 }
706 
707 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
708 {
709 	struct ipv6hdr *iph = ipv6_hdr(skb);
710 	struct tcphdr *th;
711 
712 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
713 	th = tcp_hdr(skb);
714 
715 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
716 				  &iph->saddr, &iph->daddr, 0);
717 }
718 
719 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
720 			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
721 {
722 	skb_reset_network_header(skb);
723 	gro_func(bp, skb);
724 	tcp_gro_complete(skb);
725 }
726 #endif
727 
728 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
729 			       struct sk_buff *skb)
730 {
731 #ifdef CONFIG_INET
732 	if (skb_shinfo(skb)->gso_size) {
733 		switch (be16_to_cpu(skb->protocol)) {
734 		case ETH_P_IP:
735 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
736 			break;
737 		case ETH_P_IPV6:
738 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
739 			break;
740 		default:
741 			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
742 				  be16_to_cpu(skb->protocol));
743 		}
744 	}
745 #endif
746 	skb_record_rx_queue(skb, fp->rx_queue);
747 	napi_gro_receive(&fp->napi, skb);
748 }
749 
750 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
751 			   struct bnx2x_agg_info *tpa_info,
752 			   u16 pages,
753 			   struct eth_end_agg_rx_cqe *cqe,
754 			   u16 cqe_idx)
755 {
756 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
757 	u8 pad = tpa_info->placement_offset;
758 	u16 len = tpa_info->len_on_bd;
759 	struct sk_buff *skb = NULL;
760 	u8 *new_data, *data = rx_buf->data;
761 	u8 old_tpa_state = tpa_info->tpa_state;
762 
763 	tpa_info->tpa_state = BNX2X_TPA_STOP;
764 
765 	/* If there was an error during the handling of the TPA_START -
766 	 * drop this aggregation.
767 	 */
768 	if (old_tpa_state == BNX2X_TPA_ERROR)
769 		goto drop;
770 
771 	/* Try to allocate the new data */
772 	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
773 	/* Unmap skb in the pool anyway, as we are going to change
774 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
775 	   fails. */
776 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
777 			 fp->rx_buf_size, DMA_FROM_DEVICE);
778 	if (likely(new_data))
779 		skb = build_skb(data, fp->rx_frag_size);
780 
781 	if (likely(skb)) {
782 #ifdef BNX2X_STOP_ON_ERROR
783 		if (pad + len > fp->rx_buf_size) {
784 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
785 				  pad, len, fp->rx_buf_size);
786 			bnx2x_panic();
787 			return;
788 		}
789 #endif
790 
791 		skb_reserve(skb, pad + NET_SKB_PAD);
792 		skb_put(skb, len);
793 		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
794 
795 		skb->protocol = eth_type_trans(skb, bp->dev);
796 		skb->ip_summed = CHECKSUM_UNNECESSARY;
797 
798 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
799 					 skb, cqe, cqe_idx)) {
800 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
801 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
802 			bnx2x_gro_receive(bp, fp, skb);
803 		} else {
804 			DP(NETIF_MSG_RX_STATUS,
805 			   "Failed to allocate new pages - dropping packet!\n");
806 			dev_kfree_skb_any(skb);
807 		}
808 
809 		/* put new data in bin */
810 		rx_buf->data = new_data;
811 
812 		return;
813 	}
814 	if (new_data)
815 		bnx2x_frag_free(fp, new_data);
816 drop:
817 	/* drop the packet and keep the buffer in the bin */
818 	DP(NETIF_MSG_RX_STATUS,
819 	   "Failed to allocate or map a new skb - dropping packet!\n");
820 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
821 }
822 
823 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
824 			       u16 index, gfp_t gfp_mask)
825 {
826 	u8 *data;
827 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
828 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
829 	dma_addr_t mapping;
830 
831 	data = bnx2x_frag_alloc(fp, gfp_mask);
832 	if (unlikely(data == NULL))
833 		return -ENOMEM;
834 
835 	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
836 				 fp->rx_buf_size,
837 				 DMA_FROM_DEVICE);
838 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
839 		bnx2x_frag_free(fp, data);
840 		BNX2X_ERR("Can't map rx data\n");
841 		return -ENOMEM;
842 	}
843 
844 	rx_buf->data = data;
845 	dma_unmap_addr_set(rx_buf, mapping, mapping);
846 
847 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
848 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
849 
850 	return 0;
851 }
852 
853 static
854 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
855 				 struct bnx2x_fastpath *fp,
856 				 struct bnx2x_eth_q_stats *qstats)
857 {
858 	/* Do nothing if no L4 csum validation was done.
859 	 * We do not check whether IP csum was validated. For IPv4 we assume
860 	 * that if the card got as far as validating the L4 csum, it also
861 	 * validated the IP csum. IPv6 has no IP csum.
862 	 */
863 	if (cqe->fast_path_cqe.status_flags &
864 	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
865 		return;
866 
867 	/* If L4 validation was done, check if an error was found. */
868 
869 	if (cqe->fast_path_cqe.type_error_flags &
870 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
871 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
872 		qstats->hw_csum_err++;
873 	else
874 		skb->ip_summed = CHECKSUM_UNNECESSARY;
875 }
876 
877 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
878 {
879 	struct bnx2x *bp = fp->bp;
880 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
881 	u16 sw_comp_cons, sw_comp_prod;
882 	int rx_pkt = 0;
883 	union eth_rx_cqe *cqe;
884 	struct eth_fast_path_rx_cqe *cqe_fp;
885 
886 #ifdef BNX2X_STOP_ON_ERROR
887 	if (unlikely(bp->panic))
888 		return 0;
889 #endif
890 	if (budget <= 0)
891 		return rx_pkt;
892 
893 	bd_cons = fp->rx_bd_cons;
894 	bd_prod = fp->rx_bd_prod;
895 	bd_prod_fw = bd_prod;
896 	sw_comp_cons = fp->rx_comp_cons;
897 	sw_comp_prod = fp->rx_comp_prod;
898 
899 	comp_ring_cons = RCQ_BD(sw_comp_cons);
900 	cqe = &fp->rx_comp_ring[comp_ring_cons];
901 	cqe_fp = &cqe->fast_path_cqe;
902 
903 	DP(NETIF_MSG_RX_STATUS,
904 	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
905 
906 	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
907 		struct sw_rx_bd *rx_buf = NULL;
908 		struct sk_buff *skb;
909 		u8 cqe_fp_flags;
910 		enum eth_rx_cqe_type cqe_fp_type;
911 		u16 len, pad, queue;
912 		u8 *data;
913 		u32 rxhash;
914 		enum pkt_hash_types rxhash_type;
915 
916 #ifdef BNX2X_STOP_ON_ERROR
917 		if (unlikely(bp->panic))
918 			return 0;
919 #endif
920 
921 		bd_prod = RX_BD(bd_prod);
922 		bd_cons = RX_BD(bd_cons);
923 
924 		/* An rmb() is required to ensure that the CQE is not read
925 		 * before it is written by the adapter DMA.  PCI ordering
926 		 * rules will make sure the other fields are written before
927 		 * the marker at the end of struct eth_fast_path_rx_cqe
928 		 * but without rmb() a weakly ordered processor can process
929 		 * stale data.  Without the barrier TPA state-machine might
930 		 * enter an inconsistent state and the kernel stack might be
931 		 * provided with an incorrect packet description - this leads
932 		 * to various kernel crashes.
933 		 */
934 		rmb();
935 
936 		cqe_fp_flags = cqe_fp->type_error_flags;
937 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
938 
939 		DP(NETIF_MSG_RX_STATUS,
940 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
941 		   CQE_TYPE(cqe_fp_flags),
942 		   cqe_fp_flags, cqe_fp->status_flags,
943 		   le32_to_cpu(cqe_fp->rss_hash_result),
944 		   le16_to_cpu(cqe_fp->vlan_tag),
945 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
946 
947 		/* is this a slowpath msg? */
948 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
949 			bnx2x_sp_event(fp, cqe);
950 			goto next_cqe;
951 		}
952 
953 		rx_buf = &fp->rx_buf_ring[bd_cons];
954 		data = rx_buf->data;
955 
956 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
957 			struct bnx2x_agg_info *tpa_info;
958 			u16 frag_size, pages;
959 #ifdef BNX2X_STOP_ON_ERROR
960 			/* sanity check */
961 			if (fp->mode == TPA_MODE_DISABLED &&
962 			    (CQE_TYPE_START(cqe_fp_type) ||
963 			     CQE_TYPE_STOP(cqe_fp_type)))
964 				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
965 					  CQE_TYPE(cqe_fp_type));
966 #endif
967 
968 			if (CQE_TYPE_START(cqe_fp_type)) {
969 				u16 queue = cqe_fp->queue_index;
970 				DP(NETIF_MSG_RX_STATUS,
971 				   "calling tpa_start on queue %d\n",
972 				   queue);
973 
974 				bnx2x_tpa_start(fp, queue,
975 						bd_cons, bd_prod,
976 						cqe_fp);
977 
978 				goto next_rx;
979 			}
980 			queue = cqe->end_agg_cqe.queue_index;
981 			tpa_info = &fp->tpa_info[queue];
982 			DP(NETIF_MSG_RX_STATUS,
983 			   "calling tpa_stop on queue %d\n",
984 			   queue);
985 
986 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
987 				    tpa_info->len_on_bd;
988 
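			/* GRO packs full_page bytes (a multiple of the MSS) per
			 * SGE, so round up by full_page; LRO simply rounds up
			 * to whole SGE pages.
			 */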
989 			if (fp->mode == TPA_MODE_GRO)
990 				pages = (frag_size + tpa_info->full_page - 1) /
991 					 tpa_info->full_page;
992 			else
993 				pages = SGE_PAGE_ALIGN(frag_size) >>
994 					SGE_PAGE_SHIFT;
995 
996 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
997 				       &cqe->end_agg_cqe, comp_ring_cons);
998 #ifdef BNX2X_STOP_ON_ERROR
999 			if (bp->panic)
1000 				return 0;
1001 #endif
1002 
1003 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1004 			goto next_cqe;
1005 		}
1006 		/* non TPA */
1007 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1008 		pad = cqe_fp->placement_offset;
1009 		dma_sync_single_for_cpu(&bp->pdev->dev,
1010 					dma_unmap_addr(rx_buf, mapping),
1011 					pad + RX_COPY_THRESH,
1012 					DMA_FROM_DEVICE);
1013 		pad += NET_SKB_PAD;
1014 		prefetch(data + pad); /* speedup eth_type_trans() */
1015 		/* is this an error packet? */
1016 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1017 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1018 			   "ERROR  flags %x  rx packet %u\n",
1019 			   cqe_fp_flags, sw_comp_cons);
1020 			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1021 			goto reuse_rx;
1022 		}
1023 
1024 		/* Since we don't have a jumbo ring,
1025 		 * copy small packets if mtu > 1500
1026 		 */
1027 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1028 		    (len <= RX_COPY_THRESH)) {
1029 			skb = napi_alloc_skb(&fp->napi, len);
1030 			if (skb == NULL) {
1031 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1032 				   "ERROR  packet dropped because of alloc failure\n");
1033 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1034 				goto reuse_rx;
1035 			}
1036 			memcpy(skb->data, data + pad, len);
1037 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1038 		} else {
1039 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1040 						       GFP_ATOMIC) == 0)) {
1041 				dma_unmap_single(&bp->pdev->dev,
1042 						 dma_unmap_addr(rx_buf, mapping),
1043 						 fp->rx_buf_size,
1044 						 DMA_FROM_DEVICE);
1045 				skb = build_skb(data, fp->rx_frag_size);
1046 				if (unlikely(!skb)) {
1047 					bnx2x_frag_free(fp, data);
1048 					bnx2x_fp_qstats(bp, fp)->
1049 							rx_skb_alloc_failed++;
1050 					goto next_rx;
1051 				}
1052 				skb_reserve(skb, pad);
1053 			} else {
1054 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1055 				   "ERROR  packet dropped because of alloc failure\n");
1056 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1057 reuse_rx:
1058 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1059 				goto next_rx;
1060 			}
1061 		}
1062 
1063 		skb_put(skb, len);
1064 		skb->protocol = eth_type_trans(skb, bp->dev);
1065 
1066 		/* Set Toeplitz hash for a non-LRO skb */
1067 		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1068 		skb_set_hash(skb, rxhash, rxhash_type);
1069 
1070 		skb_checksum_none_assert(skb);
1071 
1072 		if (bp->dev->features & NETIF_F_RXCSUM)
1073 			bnx2x_csum_validate(skb, cqe, fp,
1074 					    bnx2x_fp_qstats(bp, fp));
1075 
1076 		skb_record_rx_queue(skb, fp->rx_queue);
1077 
1078 		/* Check if this packet was timestamped */
1079 		if (unlikely(cqe->fast_path_cqe.type_error_flags &
1080 			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1081 			bnx2x_set_rx_ts(bp, skb);
1082 
1083 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1084 		    PARSING_FLAGS_VLAN)
1085 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1086 					       le16_to_cpu(cqe_fp->vlan_tag));
1087 
1088 		napi_gro_receive(&fp->napi, skb);
1089 next_rx:
1090 		rx_buf->data = NULL;
1091 
1092 		bd_cons = NEXT_RX_IDX(bd_cons);
1093 		bd_prod = NEXT_RX_IDX(bd_prod);
1094 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1095 		rx_pkt++;
1096 next_cqe:
1097 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1098 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1099 
1100 		/* mark CQE as free */
1101 		BNX2X_SEED_CQE(cqe_fp);
1102 
1103 		if (rx_pkt == budget)
1104 			break;
1105 
1106 		comp_ring_cons = RCQ_BD(sw_comp_cons);
1107 		cqe = &fp->rx_comp_ring[comp_ring_cons];
1108 		cqe_fp = &cqe->fast_path_cqe;
1109 	} /* while */
1110 
1111 	fp->rx_bd_cons = bd_cons;
1112 	fp->rx_bd_prod = bd_prod_fw;
1113 	fp->rx_comp_cons = sw_comp_cons;
1114 	fp->rx_comp_prod = sw_comp_prod;
1115 
1116 	/* Update producers */
1117 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1118 			     fp->rx_sge_prod);
1119 
1120 	return rx_pkt;
1121 }
1122 
1123 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1124 {
1125 	struct bnx2x_fastpath *fp = fp_cookie;
1126 	struct bnx2x *bp = fp->bp;
1127 	u8 cos;
1128 
1129 	DP(NETIF_MSG_INTR,
1130 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1131 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
1132 
1133 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1134 
1135 #ifdef BNX2X_STOP_ON_ERROR
1136 	if (unlikely(bp->panic))
1137 		return IRQ_HANDLED;
1138 #endif
1139 
1140 	/* Handle Rx and Tx according to MSI-X vector */
1141 	for_each_cos_in_tx_queue(fp, cos)
1142 		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1143 
1144 	prefetch(&fp->sb_running_index[SM_RX_ID]);
1145 	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1146 
1147 	return IRQ_HANDLED;
1148 }
1149 
1150 /* HW Lock for shared dual port PHYs */
1151 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1152 {
1153 	mutex_lock(&bp->port.phy_mutex);
1154 
1155 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1156 }
1157 
1158 void bnx2x_release_phy_lock(struct bnx2x *bp)
1159 {
1160 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1161 
1162 	mutex_unlock(&bp->port.phy_mutex);
1163 }
1164 
1165 /* calculates MF speed according to current linespeed and MF configuration */
1166 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1167 {
1168 	u16 line_speed = bp->link_vars.line_speed;
1169 	if (IS_MF(bp)) {
1170 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
1171 						   bp->mf_config[BP_VN(bp)]);
1172 
1173 		/* Calculate the current MAX line speed limit for the MF
1174 		 * devices
1175 		 */
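		/* maxCfg is a percentage in percent-BW mode and is expressed in
		 * units of 100 Mbps in SD mode.
		 */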
1176 		if (IS_MF_PERCENT_BW(bp))
1177 			line_speed = (line_speed * maxCfg) / 100;
1178 		else { /* SD mode */
1179 			u16 vn_max_rate = maxCfg * 100;
1180 
1181 			if (vn_max_rate < line_speed)
1182 				line_speed = vn_max_rate;
1183 		}
1184 	}
1185 
1186 	return line_speed;
1187 }
1188 
1189 /**
1190  * bnx2x_fill_report_data - fill link report data to report
1191  *
1192  * @bp:		driver handle
1193  * @data:	link state to update
1194  *
1195  * It uses non-atomic bit operations because it is called under the mutex.
1196  */
1197 static void bnx2x_fill_report_data(struct bnx2x *bp,
1198 				   struct bnx2x_link_report_data *data)
1199 {
1200 	memset(data, 0, sizeof(*data));
1201 
1202 	if (IS_PF(bp)) {
1203 		/* Fill the report data: effective line speed */
1204 		data->line_speed = bnx2x_get_mf_speed(bp);
1205 
1206 		/* Link is down */
1207 		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1208 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209 				  &data->link_report_flags);
1210 
1211 		if (!BNX2X_NUM_ETH_QUEUES(bp))
1212 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213 				  &data->link_report_flags);
1214 
1215 		/* Full DUPLEX */
1216 		if (bp->link_vars.duplex == DUPLEX_FULL)
1217 			__set_bit(BNX2X_LINK_REPORT_FD,
1218 				  &data->link_report_flags);
1219 
1220 		/* Rx Flow Control is ON */
1221 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1222 			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1223 				  &data->link_report_flags);
1224 
1225 		/* Tx Flow Control is ON */
1226 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1227 			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1228 				  &data->link_report_flags);
1229 	} else { /* VF */
1230 		*data = bp->vf_link_vars;
1231 	}
1232 }
1233 
1234 /**
1235  * bnx2x_link_report - report link status to OS.
1236  *
1237  * @bp:		driver handle
1238  *
1239  * Calls the __bnx2x_link_report() under the same locking scheme
1240  * as a link/PHY state managing code to ensure a consistent link
1241  * reporting.
1242  */
1243 
1244 void bnx2x_link_report(struct bnx2x *bp)
1245 {
1246 	bnx2x_acquire_phy_lock(bp);
1247 	__bnx2x_link_report(bp);
1248 	bnx2x_release_phy_lock(bp);
1249 }
1250 
1251 /**
1252  * __bnx2x_link_report - report link status to OS.
1253  *
1254  * @bp:		driver handle
1255  *
1256  * Non-atomic implementation.
1257  * Should be called under the phy_lock.
1258  */
1259 void __bnx2x_link_report(struct bnx2x *bp)
1260 {
1261 	struct bnx2x_link_report_data cur_data;
1262 
1263 	/* reread mf_cfg */
1264 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
1265 		bnx2x_read_mf_cfg(bp);
1266 
1267 	/* Read the current link report info */
1268 	bnx2x_fill_report_data(bp, &cur_data);
1269 
1270 	/* Don't report link down or exactly the same link status twice */
1271 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1272 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1273 		      &bp->last_reported_link.link_report_flags) &&
1274 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1275 		      &cur_data.link_report_flags)))
1276 		return;
1277 
1278 	bp->link_cnt++;
1279 
1280 	/* We are going to report new link parameters now -
1281 	 * remember the current data for next time.
1282 	 */
1283 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1284 
1285 	/* propagate status to VFs */
1286 	if (IS_PF(bp))
1287 		bnx2x_iov_link_update(bp);
1288 
1289 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 		     &cur_data.link_report_flags)) {
1291 		netif_carrier_off(bp->dev);
1292 		netdev_err(bp->dev, "NIC Link is Down\n");
1293 		return;
1294 	} else {
1295 		const char *duplex;
1296 		const char *flow;
1297 
1298 		netif_carrier_on(bp->dev);
1299 
1300 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1301 				       &cur_data.link_report_flags))
1302 			duplex = "full";
1303 		else
1304 			duplex = "half";
1305 
1306 		/* Handle the FC at the end so that only the FC flags can
1307 		 * possibly be set. This way we can easily check whether any FC
1308 		 * is enabled.
1309 		 */
1310 		if (cur_data.link_report_flags) {
1311 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1312 				     &cur_data.link_report_flags)) {
1313 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1314 				     &cur_data.link_report_flags))
1315 					flow = "ON - receive & transmit";
1316 				else
1317 					flow = "ON - receive";
1318 			} else {
1319 				flow = "ON - transmit";
1320 			}
1321 		} else {
1322 			flow = "none";
1323 		}
1324 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1325 			    cur_data.line_speed, duplex, flow);
1326 	}
1327 }
1328 
1329 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1330 {
1331 	int i;
1332 
1333 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
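	/* The last two slots of every SGE page hold a 'next page' pointer;
	 * chain each page to the one after it and wrap the final page back
	 * to the first.
	 */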
1334 		struct eth_rx_sge *sge;
1335 
1336 		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1337 		sge->addr_hi =
1338 			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1339 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1340 
1341 		sge->addr_lo =
1342 			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1343 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1344 	}
1345 }
1346 
1347 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1348 				struct bnx2x_fastpath *fp, int last)
1349 {
1350 	int i;
1351 
1352 	for (i = 0; i < last; i++) {
1353 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1354 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1355 		u8 *data = first_buf->data;
1356 
1357 		if (data == NULL) {
1358 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1359 			continue;
1360 		}
1361 		if (tpa_info->tpa_state == BNX2X_TPA_START)
1362 			dma_unmap_single(&bp->pdev->dev,
1363 					 dma_unmap_addr(first_buf, mapping),
1364 					 fp->rx_buf_size, DMA_FROM_DEVICE);
1365 		bnx2x_frag_free(fp, data);
1366 		first_buf->data = NULL;
1367 	}
1368 }
1369 
1370 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1371 {
1372 	int j;
1373 
1374 	for_each_rx_queue_cnic(bp, j) {
1375 		struct bnx2x_fastpath *fp = &bp->fp[j];
1376 
1377 		fp->rx_bd_cons = 0;
1378 
1379 		/* Activate BD ring */
1380 		/* Warning!
1381 		 * this will generate an interrupt (to the TSTORM)
1382 		 * must only be done after chip is initialized
1383 		 */
1384 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1385 				     fp->rx_sge_prod);
1386 	}
1387 }
1388 
1389 void bnx2x_init_rx_rings(struct bnx2x *bp)
1390 {
1391 	int func = BP_FUNC(bp);
1392 	u16 ring_prod;
1393 	int i, j;
1394 
1395 	/* Allocate TPA resources */
1396 	for_each_eth_queue(bp, j) {
1397 		struct bnx2x_fastpath *fp = &bp->fp[j];
1398 
1399 		DP(NETIF_MSG_IFUP,
1400 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1401 
1402 		if (fp->mode != TPA_MODE_DISABLED) {
1403 			/* Fill the per-aggregation pool */
1404 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1405 				struct bnx2x_agg_info *tpa_info =
1406 					&fp->tpa_info[i];
1407 				struct sw_rx_bd *first_buf =
1408 					&tpa_info->first_buf;
1409 
1410 				first_buf->data =
1411 					bnx2x_frag_alloc(fp, GFP_KERNEL);
1412 				if (!first_buf->data) {
1413 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1414 						  j);
1415 					bnx2x_free_tpa_pool(bp, fp, i);
1416 					fp->mode = TPA_MODE_DISABLED;
1417 					break;
1418 				}
1419 				dma_unmap_addr_set(first_buf, mapping, 0);
1420 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1421 			}
1422 
1423 			/* "next page" elements initialization */
1424 			bnx2x_set_next_page_sgl(fp);
1425 
1426 			/* set SGEs bit mask */
1427 			bnx2x_init_sge_ring_bit_mask(fp);
1428 
1429 			/* Allocate SGEs and initialize the ring elements */
1430 			for (i = 0, ring_prod = 0;
1431 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1432 
1433 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1434 						       GFP_KERNEL) < 0) {
1435 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1436 						  i);
1437 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1438 						  j);
1439 					/* Cleanup already allocated elements */
1440 					bnx2x_free_rx_sge_range(bp, fp,
1441 								ring_prod);
1442 					bnx2x_free_tpa_pool(bp, fp,
1443 							    MAX_AGG_QS(bp));
1444 					fp->mode = TPA_MODE_DISABLED;
1445 					ring_prod = 0;
1446 					break;
1447 				}
1448 				ring_prod = NEXT_SGE_IDX(ring_prod);
1449 			}
1450 
1451 			fp->rx_sge_prod = ring_prod;
1452 		}
1453 	}
1454 
1455 	for_each_eth_queue(bp, j) {
1456 		struct bnx2x_fastpath *fp = &bp->fp[j];
1457 
1458 		fp->rx_bd_cons = 0;
1459 
1460 		/* Activate BD ring */
1461 		/* Warning!
1462 		 * this will generate an interrupt (to the TSTORM)
1463 		 * must only be done after chip is initialized
1464 		 */
1465 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1466 				     fp->rx_sge_prod);
1467 
1468 		if (j != 0)
1469 			continue;
1470 
1471 		if (CHIP_IS_E1(bp)) {
1472 			REG_WR(bp, BAR_USTRORM_INTMEM +
1473 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1474 			       U64_LO(fp->rx_comp_mapping));
1475 			REG_WR(bp, BAR_USTRORM_INTMEM +
1476 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1477 			       U64_HI(fp->rx_comp_mapping));
1478 		}
1479 	}
1480 }
1481 
1482 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1483 {
1484 	u8 cos;
1485 	struct bnx2x *bp = fp->bp;
1486 
1487 	for_each_cos_in_tx_queue(fp, cos) {
1488 		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1489 		unsigned pkts_compl = 0, bytes_compl = 0;
1490 
1491 		u16 sw_prod = txdata->tx_pkt_prod;
1492 		u16 sw_cons = txdata->tx_pkt_cons;
1493 
1494 		while (sw_cons != sw_prod) {
1495 			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1496 					  &pkts_compl, &bytes_compl);
1497 			sw_cons++;
1498 		}
1499 
1500 		netdev_tx_reset_queue(
1501 			netdev_get_tx_queue(bp->dev,
1502 					    txdata->txq_index));
1503 	}
1504 }
1505 
1506 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1507 {
1508 	int i;
1509 
1510 	for_each_tx_queue_cnic(bp, i) {
1511 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1512 	}
1513 }
1514 
1515 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1516 {
1517 	int i;
1518 
1519 	for_each_eth_queue(bp, i) {
1520 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1521 	}
1522 }
1523 
1524 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1525 {
1526 	struct bnx2x *bp = fp->bp;
1527 	int i;
1528 
1529 	/* ring wasn't allocated */
1530 	if (fp->rx_buf_ring == NULL)
1531 		return;
1532 
1533 	for (i = 0; i < NUM_RX_BD; i++) {
1534 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1535 		u8 *data = rx_buf->data;
1536 
1537 		if (data == NULL)
1538 			continue;
1539 		dma_unmap_single(&bp->pdev->dev,
1540 				 dma_unmap_addr(rx_buf, mapping),
1541 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1542 
1543 		rx_buf->data = NULL;
1544 		bnx2x_frag_free(fp, data);
1545 	}
1546 }
1547 
1548 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1549 {
1550 	int j;
1551 
1552 	for_each_rx_queue_cnic(bp, j) {
1553 		bnx2x_free_rx_bds(&bp->fp[j]);
1554 	}
1555 }
1556 
1557 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1558 {
1559 	int j;
1560 
1561 	for_each_eth_queue(bp, j) {
1562 		struct bnx2x_fastpath *fp = &bp->fp[j];
1563 
1564 		bnx2x_free_rx_bds(fp);
1565 
1566 		if (fp->mode != TPA_MODE_DISABLED)
1567 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1568 	}
1569 }
1570 
1571 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1572 {
1573 	bnx2x_free_tx_skbs_cnic(bp);
1574 	bnx2x_free_rx_skbs_cnic(bp);
1575 }
1576 
1577 void bnx2x_free_skbs(struct bnx2x *bp)
1578 {
1579 	bnx2x_free_tx_skbs(bp);
1580 	bnx2x_free_rx_skbs(bp);
1581 }
1582 
1583 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1584 {
1585 	/* load old values */
1586 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1587 
1588 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1589 		/* leave all but MAX value */
1590 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1591 
1592 		/* set new MAX value */
1593 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1594 				& FUNC_MF_CFG_MAX_BW_MASK;
1595 
1596 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1597 	}
1598 }
1599 
1600 /**
1601  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1602  *
1603  * @bp:		driver handle
1604  * @nvecs:	number of vectors to be released
1605  */
1606 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1607 {
1608 	int i, offset = 0;
1609 
1610 	if (nvecs == offset)
1611 		return;
1612 
1613 	/* VFs don't have a default SB */
1614 	if (IS_PF(bp)) {
1615 		free_irq(bp->msix_table[offset].vector, bp->dev);
1616 		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1617 		   bp->msix_table[offset].vector);
1618 		offset++;
1619 	}
1620 
1621 	if (CNIC_SUPPORT(bp)) {
1622 		if (nvecs == offset)
1623 			return;
1624 		offset++;
1625 	}
1626 
1627 	for_each_eth_queue(bp, i) {
1628 		if (nvecs == offset)
1629 			return;
1630 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1631 		   i, bp->msix_table[offset].vector);
1632 
1633 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1634 	}
1635 }
1636 
1637 void bnx2x_free_irq(struct bnx2x *bp)
1638 {
1639 	if (bp->flags & USING_MSIX_FLAG &&
1640 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1641 		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1642 
1643 		/* vfs don't have a default status block */
1644 		if (IS_PF(bp))
1645 			nvecs++;
1646 
1647 		bnx2x_free_msix_irqs(bp, nvecs);
1648 	} else {
1649 		free_irq(bp->dev->irq, bp->dev);
1650 	}
1651 }
1652 
1653 int bnx2x_enable_msix(struct bnx2x *bp)
1654 {
1655 	int msix_vec = 0, i, rc;
1656 
1657 	/* VFs don't have a default status block */
1658 	if (IS_PF(bp)) {
1659 		bp->msix_table[msix_vec].entry = msix_vec;
1660 		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1661 			       bp->msix_table[0].entry);
1662 		msix_vec++;
1663 	}
1664 
1665 	/* Cnic requires an msix vector for itself */
1666 	if (CNIC_SUPPORT(bp)) {
1667 		bp->msix_table[msix_vec].entry = msix_vec;
1668 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1669 			       msix_vec, bp->msix_table[msix_vec].entry);
1670 		msix_vec++;
1671 	}
1672 
1673 	/* We need separate vectors for ETH queues only (not FCoE) */
1674 	for_each_eth_queue(bp, i) {
1675 		bp->msix_table[msix_vec].entry = msix_vec;
1676 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1677 			       msix_vec, msix_vec, i);
1678 		msix_vec++;
1679 	}
1680 
1681 	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1682 	   msix_vec);
1683 
1684 	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1685 				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1686 	/*
1687 	 * reconfigure number of tx/rx queues according to available
1688 	 * MSI-X vectors
1689 	 */
1690 	if (rc == -ENOSPC) {
1691 		/* Get by with single vector */
1692 		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1693 		if (rc < 0) {
1694 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1695 				       rc);
1696 			goto no_msix;
1697 		}
1698 
1699 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
1700 		bp->flags |= USING_SINGLE_MSIX_FLAG;
1701 
1702 		BNX2X_DEV_INFO("set number of queues to 1\n");
1703 		bp->num_ethernet_queues = 1;
1704 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1705 	} else if (rc < 0) {
1706 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1707 		goto no_msix;
1708 	} else if (rc < msix_vec) {
1709 		/* how many fewer vectors will we have? */
1710 		int diff = msix_vec - rc;
1711 
1712 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1713 
1714 		/*
1715 		 * decrease number of queues by number of unallocated entries
1716 		 */
1717 		bp->num_ethernet_queues -= diff;
1718 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1719 
1720 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1721 			       bp->num_queues);
1722 	}
1723 
1724 	bp->flags |= USING_MSIX_FLAG;
1725 
1726 	return 0;
1727 
1728 no_msix:
1729 	/* fall back to INTx if not enough memory */
1730 	if (rc == -ENOMEM)
1731 		bp->flags |= DISABLE_MSI_FLAG;
1732 
1733 	return rc;
1734 }
1735 
1736 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1737 {
1738 	int i, rc, offset = 0;
1739 
1740 	/* no default status block for vf */
1741 	if (IS_PF(bp)) {
1742 		rc = request_irq(bp->msix_table[offset++].vector,
1743 				 bnx2x_msix_sp_int, 0,
1744 				 bp->dev->name, bp->dev);
1745 		if (rc) {
1746 			BNX2X_ERR("request sp irq failed\n");
1747 			return -EBUSY;
1748 		}
1749 	}
1750 
1751 	if (CNIC_SUPPORT(bp))
1752 		offset++;
1753 
1754 	for_each_eth_queue(bp, i) {
1755 		struct bnx2x_fastpath *fp = &bp->fp[i];
1756 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1757 			 bp->dev->name, i);
1758 
1759 		rc = request_irq(bp->msix_table[offset].vector,
1760 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1761 		if (rc) {
1762 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1763 			      bp->msix_table[offset].vector, rc);
1764 			bnx2x_free_msix_irqs(bp, offset);
1765 			return -EBUSY;
1766 		}
1767 
1768 		offset++;
1769 	}
1770 
1771 	i = BNX2X_NUM_ETH_QUEUES(bp);
1772 	if (IS_PF(bp)) {
1773 		offset = 1 + CNIC_SUPPORT(bp);
1774 		netdev_info(bp->dev,
1775 			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1776 			    bp->msix_table[0].vector,
1777 			    0, bp->msix_table[offset].vector,
1778 			    i - 1, bp->msix_table[offset + i - 1].vector);
1779 	} else {
1780 		offset = CNIC_SUPPORT(bp);
1781 		netdev_info(bp->dev,
1782 			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1783 			    0, bp->msix_table[offset].vector,
1784 			    i - 1, bp->msix_table[offset + i - 1].vector);
1785 	}
1786 	return 0;
1787 }
1788 
1789 int bnx2x_enable_msi(struct bnx2x *bp)
1790 {
1791 	int rc;
1792 
1793 	rc = pci_enable_msi(bp->pdev);
1794 	if (rc) {
1795 		BNX2X_DEV_INFO("MSI is not attainable\n");
1796 		return -1;
1797 	}
1798 	bp->flags |= USING_MSI_FLAG;
1799 
1800 	return 0;
1801 }
1802 
1803 static int bnx2x_req_irq(struct bnx2x *bp)
1804 {
1805 	unsigned long flags;
1806 	unsigned int irq;
1807 
1808 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1809 		flags = 0;
1810 	else
1811 		flags = IRQF_SHARED;
1812 
1813 	if (bp->flags & USING_MSIX_FLAG)
1814 		irq = bp->msix_table[0].vector;
1815 	else
1816 		irq = bp->pdev->irq;
1817 
1818 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1819 }
1820 
1821 static int bnx2x_setup_irqs(struct bnx2x *bp)
1822 {
1823 	int rc = 0;
1824 	if (bp->flags & USING_MSIX_FLAG &&
1825 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1826 		rc = bnx2x_req_msix_irqs(bp);
1827 		if (rc)
1828 			return rc;
1829 	} else {
1830 		rc = bnx2x_req_irq(bp);
1831 		if (rc) {
1832 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1833 			return rc;
1834 		}
1835 		if (bp->flags & USING_MSI_FLAG) {
1836 			bp->dev->irq = bp->pdev->irq;
1837 			netdev_info(bp->dev, "using MSI IRQ %d\n",
1838 				    bp->dev->irq);
1839 		}
1840 		if (bp->flags & USING_MSIX_FLAG) {
1841 			bp->dev->irq = bp->msix_table[0].vector;
1842 			netdev_info(bp->dev, "using MSIX IRQ %d\n",
1843 				    bp->dev->irq);
1844 		}
1845 	}
1846 
1847 	return 0;
1848 }
1849 
1850 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1851 {
1852 	int i;
1853 
1854 	for_each_rx_queue_cnic(bp, i) {
1855 		napi_enable(&bnx2x_fp(bp, i, napi));
1856 	}
1857 }
1858 
1859 static void bnx2x_napi_enable(struct bnx2x *bp)
1860 {
1861 	int i;
1862 
1863 	for_each_eth_queue(bp, i) {
1864 		napi_enable(&bnx2x_fp(bp, i, napi));
1865 	}
1866 }
1867 
1868 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1869 {
1870 	int i;
1871 
1872 	for_each_rx_queue_cnic(bp, i) {
1873 		napi_disable(&bnx2x_fp(bp, i, napi));
1874 	}
1875 }
1876 
1877 static void bnx2x_napi_disable(struct bnx2x *bp)
1878 {
1879 	int i;
1880 
1881 	for_each_eth_queue(bp, i) {
1882 		napi_disable(&bnx2x_fp(bp, i, napi));
1883 	}
1884 }
1885 
1886 void bnx2x_netif_start(struct bnx2x *bp)
1887 {
1888 	if (netif_running(bp->dev)) {
1889 		bnx2x_napi_enable(bp);
1890 		if (CNIC_LOADED(bp))
1891 			bnx2x_napi_enable_cnic(bp);
1892 		bnx2x_int_enable(bp);
1893 		if (bp->state == BNX2X_STATE_OPEN)
1894 			netif_tx_wake_all_queues(bp->dev);
1895 	}
1896 }
1897 
1898 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1899 {
1900 	bnx2x_int_disable_sync(bp, disable_hw);
1901 	bnx2x_napi_disable(bp);
1902 	if (CNIC_LOADED(bp))
1903 		bnx2x_napi_disable_cnic(bp);
1904 }
1905 
1906 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1907 		       void *accel_priv, select_queue_fallback_t fallback)
1908 {
1909 	struct bnx2x *bp = netdev_priv(dev);
1910 
1911 	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1912 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1913 		u16 ether_type = ntohs(hdr->h_proto);
1914 
1915 		/* Skip VLAN tag if present */
1916 		if (ether_type == ETH_P_8021Q) {
1917 			struct vlan_ethhdr *vhdr =
1918 				(struct vlan_ethhdr *)skb->data;
1919 
1920 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1921 		}
1922 
1923 		/* If ethertype is FCoE or FIP - use FCoE ring */
1924 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1925 			return bnx2x_fcoe_tx(bp, txq_index);
1926 	}
1927 
1928 	/* select a non-FCoE queue */
1929 	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1930 }
1931 
1932 void bnx2x_set_num_queues(struct bnx2x *bp)
1933 {
1934 	/* RSS queues */
1935 	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1936 
1937 	/* override in STORAGE SD modes */
1938 	if (IS_MF_STORAGE_ONLY(bp))
1939 		bp->num_ethernet_queues = 1;
1940 
1941 	/* Add special queues */
1942 	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1943 	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1944 
1945 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1946 }
1947 
1948 /**
1949  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1950  *
1951  * @bp:		Driver handle
1952  *
1953  * We currently support at most 16 Tx queues for each CoS, thus we will
1954  * allocate a multiple of 16 ETH L2 rings according to the value of
1955  * bp->max_cos.
1956  *
1957  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1958  * index after all ETH L2 indices.
1959  *
1960  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1961  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1962  * 16..31, ...) with indices that are not coupled to any real Tx queue.
1963  *
1964  * The proper configuration of skb->queue_mapping is handled by
1965  * bnx2x_select_queue() and __skb_tx_hash().
1966  *
1967  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1968  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1969  */
1970 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1971 {
1972 	int rc, tx, rx;
1973 
1974 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1975 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1976 
1977 	/* account for fcoe queue */
1978 	if (include_cnic && !NO_FCOE(bp)) {
1979 		rx++;
1980 		tx++;
1981 	}
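	/* For example, with 4 ETH queues, max_cos of 3 and an FCoE L2 queue,
	 * this requests 4 * 3 + 1 = 13 real Tx queues and 4 + 1 = 5 real Rx
	 * queues. (Illustrative numbers; actual values depend on the chip
	 * and configuration.)
	 */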
1982 
1983 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
1984 	if (rc) {
1985 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1986 		return rc;
1987 	}
1988 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
1989 	if (rc) {
1990 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1991 		return rc;
1992 	}
1993 
1994 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1995 			  tx, rx);
1996 
1997 	return rc;
1998 }
1999 
2000 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2001 {
2002 	int i;
2003 
2004 	for_each_queue(bp, i) {
2005 		struct bnx2x_fastpath *fp = &bp->fp[i];
2006 		u32 mtu;
2007 
2008 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
2009 		if (IS_FCOE_IDX(i))
2010 			/*
2011 			 * Although no IP frames are expected to arrive on
2012 			 * this ring, we still want to add an
2013 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2014 			 * overrun attack.
2015 			 */
2016 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2017 		else
2018 			mtu = bp->dev->mtu;
2019 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2020 				  IP_HEADER_ALIGNMENT_PADDING +
2021 				  ETH_OVERHEAD +
2022 				  mtu +
2023 				  BNX2X_FW_RX_ALIGN_END;
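		/* The buffer must cover the FW alignment prologue, the IP
		 * alignment padding, the Ethernet overhead, the MTU itself
		 * and the FW alignment epilogue; large (jumbo) MTUs therefore
		 * typically push rx_buf_size past PAGE_SIZE and clear
		 * rx_frag_size below.
		 */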
2024 		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2025 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2026 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2027 		else
2028 			fp->rx_frag_size = 0;
2029 	}
2030 }
2031 
2032 static int bnx2x_init_rss(struct bnx2x *bp)
2033 {
2034 	int i;
2035 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2036 
2037 	/* Prepare the initial contents for the indirection table if RSS is
2038 	 * enabled
2039 	 */
2040 	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2041 		bp->rss_conf_obj.ind_table[i] =
2042 			bp->fp->cl_id +
2043 			ethtool_rxfh_indir_default(i, num_eth_queues);
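	/* ethtool_rxfh_indir_default() spreads entries evenly across the Rx
	 * rings, so with e.g. 4 ETH queues the table cycles through
	 * cl_id + 0 .. cl_id + 3. (Example queue count for illustration.)
	 */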
2044 
2045 	/*
2046 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2047 	 * per-port, so if explicit configuration is needed , do it only
2048 	 * for a PMF.
2049 	 *
2050 	 * For 57712 and newer on the other hand it's a per-function
2051 	 * configuration.
2052 	 */
2053 	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2054 }
2055 
2056 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2057 	      bool config_hash, bool enable)
2058 {
2059 	struct bnx2x_config_rss_params params = {NULL};
2060 
2061 	/* Although RSS is meaningless when there is a single HW queue we
2062 	 * still need it enabled in order to have HW Rx hash generated.
2063 	 *
2064 	 * if (!is_eth_multi(bp))
2065 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2066 	 */
2067 
2068 	params.rss_obj = rss_obj;
2069 
2070 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2071 
2072 	if (enable) {
2073 		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2074 
2075 		/* RSS configuration */
2076 		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2077 		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2078 		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2079 		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2080 		if (rss_obj->udp_rss_v4)
2081 			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2082 		if (rss_obj->udp_rss_v6)
2083 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2084 
2085 		if (!CHIP_IS_E1x(bp)) {
2086 			/* valid only for TUNN_MODE_VXLAN tunnel mode */
2087 			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2088 			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2089 
2090 			/* valid only for TUNN_MODE_GRE tunnel mode */
2091 			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2092 		}
2093 	} else {
2094 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2095 	}
2096 
2097 	/* Hash bits */
2098 	params.rss_result_mask = MULTI_MASK;
2099 
2100 	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2101 
2102 	if (config_hash) {
2103 		/* RSS keys */
2104 		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2105 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2106 	}
2107 
2108 	if (IS_PF(bp))
2109 		return bnx2x_config_rss(bp, &params);
2110 	else
2111 		return bnx2x_vfpf_config_rss(bp, &params);
2112 }
2113 
2114 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2115 {
2116 	struct bnx2x_func_state_params func_params = {NULL};
2117 
2118 	/* Prepare parameters for function state transitions */
2119 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2120 
2121 	func_params.f_obj = &bp->func_obj;
2122 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
2123 
2124 	func_params.params.hw_init.load_phase = load_code;
2125 
2126 	return bnx2x_func_state_change(bp, &func_params);
2127 }
2128 
2129 /*
2130  * Cleans the objects that have internal lists without sending
2131  * ramrods. Should be run when interrupts are disabled.
2132  */
2133 void bnx2x_squeeze_objects(struct bnx2x *bp)
2134 {
2135 	int rc;
2136 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2137 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2138 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2139 
2140 	/***************** Cleanup MACs' object first *************************/
2141 
2142 	/* Wait for completion of the requested commands */
2143 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2144 	/* Perform a dry cleanup */
2145 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2146 
2147 	/* Clean ETH primary MAC */
2148 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2149 	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2150 				 &ramrod_flags);
2151 	if (rc != 0)
2152 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2153 
2154 	/* Cleanup UC list */
2155 	vlan_mac_flags = 0;
2156 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2157 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2158 				 &ramrod_flags);
2159 	if (rc != 0)
2160 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2161 
2162 	/***************** Now clean mcast object *****************************/
2163 	rparam.mcast_obj = &bp->mcast_obj;
2164 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2165 
2166 	/* Add a DEL command... - Since we're doing a driver cleanup only,
2167 	 * we take a lock surrounding both the initial send and the CONTs,
2168 	 * as we don't want a true completion to disrupt us in the middle.
2169 	 */
2170 	netif_addr_lock_bh(bp->dev);
2171 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2172 	if (rc < 0)
2173 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2174 			  rc);
2175 
2176 	/* ...and wait until all pending commands are cleared */
2177 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2178 	while (rc != 0) {
2179 		if (rc < 0) {
2180 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2181 				  rc);
2182 			netif_addr_unlock_bh(bp->dev);
2183 			return;
2184 		}
2185 
2186 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2187 	}
2188 	netif_addr_unlock_bh(bp->dev);
2189 }
2190 
2191 #ifndef BNX2X_STOP_ON_ERROR
2192 #define LOAD_ERROR_EXIT(bp, label) \
2193 	do { \
2194 		(bp)->state = BNX2X_STATE_ERROR; \
2195 		goto label; \
2196 	} while (0)
2197 
2198 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2199 	do { \
2200 		bp->cnic_loaded = false; \
2201 		goto label; \
2202 	} while (0)
2203 #else /*BNX2X_STOP_ON_ERROR*/
2204 #define LOAD_ERROR_EXIT(bp, label) \
2205 	do { \
2206 		(bp)->state = BNX2X_STATE_ERROR; \
2207 		(bp)->panic = 1; \
2208 		return -EBUSY; \
2209 	} while (0)
2210 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2211 	do { \
2212 		bp->cnic_loaded = false; \
2213 		(bp)->panic = 1; \
2214 		return -EBUSY; \
2215 	} while (0)
2216 #endif /*BNX2X_STOP_ON_ERROR*/
2217 
2218 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2219 {
2220 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2221 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2222 	return;
2223 }
2224 
2225 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2226 {
2227 	int num_groups, vf_headroom = 0;
2228 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2229 
2230 	/* number of queues for statistics is number of eth queues + FCoE */
2231 	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2232 
2233 	/* Total number of FW statistics requests =
2234 	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2235 	 * and fcoe l2 queue) stats + num of queues (which includes another 1
2236 	 * for fcoe l2 queue if applicable)
2237 	 */
2238 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2239 
2240 	/* vf stats appear in the request list, but their data is allocated by
2241 	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2242 	 * it is used to determine where to place the vf stats queries in the
2243 	 * request struct
2244 	 */
2245 	if (IS_SRIOV(bp))
2246 		vf_headroom = bnx2x_vf_headroom(bp);
2247 
2248 	/* Request is built from stats_query_header and an array of
2249 	 * stats_query_cmd_group each of which contains
2250 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2251 	 * configured in the stats_query_header.
2252 	 */
2253 	num_groups =
2254 		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2255 		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2256 		 1 : 0));
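	/* This is simply a ceiling division: for example, if there were 18
	 * requests (including VF headroom) and STATS_QUERY_CMD_COUNT were 16,
	 * two command groups would be needed. (Numbers are illustrative.)
	 */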
2257 
2258 	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2259 	   bp->fw_stats_num, vf_headroom, num_groups);
2260 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2261 		num_groups * sizeof(struct stats_query_cmd_group);
2262 
2263 	/* Data for statistics requests + stats_counter
2264 	 * stats_counter holds per-STORM counters that are incremented
2265 	 * when STORM has finished with the current request.
2266 	 * Memory for FCoE offloaded statistics is counted anyway,
2267 	 * even if it will not be sent.
2268 	 * VF stats are not accounted for here as the data of VF stats is stored
2269 	 * in memory allocated by the VF, not here.
2270 	 */
2271 	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2272 		sizeof(struct per_pf_stats) +
2273 		sizeof(struct fcoe_statistics_params) +
2274 		sizeof(struct per_queue_stats) * num_queue_stats +
2275 		sizeof(struct stats_counter);
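	/* The single DMA buffer allocated below is laid out as the request
	 * area (stats_query_header plus the command groups) followed by the
	 * data area (per-port, per-PF, FCoE and per-queue stats plus the
	 * stats_counter), matching the shortcut pointers set up further down.
	 */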
2276 
2277 	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2278 				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2279 	if (!bp->fw_stats)
2280 		goto alloc_mem_err;
2281 
2282 	/* Set shortcuts */
2283 	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2284 	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2285 	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2286 		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2287 	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2288 		bp->fw_stats_req_sz;
2289 
2290 	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2291 	   U64_HI(bp->fw_stats_req_mapping),
2292 	   U64_LO(bp->fw_stats_req_mapping));
2293 	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2294 	   U64_HI(bp->fw_stats_data_mapping),
2295 	   U64_LO(bp->fw_stats_data_mapping));
2296 	return 0;
2297 
2298 alloc_mem_err:
2299 	bnx2x_free_fw_stats_mem(bp);
2300 	BNX2X_ERR("Can't allocate FW stats memory\n");
2301 	return -ENOMEM;
2302 }
2303 
2304 /* send load request to mcp and analyze response */
2305 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2306 {
2307 	u32 param;
2308 
2309 	/* init fw_seq */
2310 	bp->fw_seq =
2311 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2312 		 DRV_MSG_SEQ_NUMBER_MASK);
2313 	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2314 
2315 	/* Get current FW pulse sequence */
2316 	bp->fw_drv_pulse_wr_seq =
2317 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2318 		 DRV_PULSE_SEQ_MASK);
2319 	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2320 
2321 	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2322 
2323 	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2324 		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2325 
2326 	/* load request */
2327 	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2328 
2329 	/* if mcp fails to respond we must abort */
2330 	if (!(*load_code)) {
2331 		BNX2X_ERR("MCP response failure, aborting\n");
2332 		return -EBUSY;
2333 	}
2334 
2335 	/* If mcp refused (e.g. other port is in diagnostic mode) we
2336 	 * must abort
2337 	 */
2338 	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2339 		BNX2X_ERR("MCP refused load request, aborting\n");
2340 		return -EBUSY;
2341 	}
2342 	return 0;
2343 }
2344 
2345 /* check whether another PF has already loaded FW to chip. In
2346  * virtualized environments a pf from another VM may have already
2347  * initialized the device, including loading the FW.
2348  */
2349 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2350 {
2351 	/* is another pf loaded on this engine? */
2352 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2353 	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2354 		/* build my FW version dword */
2355 		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2356 			(BCM_5710_FW_MINOR_VERSION << 8) +
2357 			(BCM_5710_FW_REVISION_VERSION << 16) +
2358 			(BCM_5710_FW_ENGINEERING_VERSION << 24);
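		/* The version is packed with the major number in the low
		 * byte, then minor, revision and engineering version: e.g. a
		 * hypothetical FW 7.13.1.0 would encode as 0x00010d07.
		 */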
2359 
2360 		/* read loaded FW from chip */
2361 		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2362 
2363 		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2364 		   loaded_fw, my_fw);
2365 
2366 		/* abort nic load if version mismatch */
2367 		if (my_fw != loaded_fw) {
2368 			if (print_err)
2369 				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2370 					  loaded_fw, my_fw);
2371 			else
2372 				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2373 					       loaded_fw, my_fw);
2374 			return -EBUSY;
2375 		}
2376 	}
2377 	return 0;
2378 }
2379 
2380 /* returns the "mcp load_code" according to global load_count array */
2381 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2382 {
2383 	int path = BP_PATH(bp);
2384 
2385 	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2386 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2387 	   bnx2x_load_count[path][2]);
2388 	bnx2x_load_count[path][0]++;
2389 	bnx2x_load_count[path][1 + port]++;
2390 	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2391 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2392 	   bnx2x_load_count[path][2]);
2393 	if (bnx2x_load_count[path][0] == 1)
2394 		return FW_MSG_CODE_DRV_LOAD_COMMON;
2395 	else if (bnx2x_load_count[path][1 + port] == 1)
2396 		return FW_MSG_CODE_DRV_LOAD_PORT;
2397 	else
2398 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2399 }
2400 
2401 /* mark PMF if applicable */
2402 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2403 {
2404 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2405 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2406 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2407 		bp->port.pmf = 1;
2408 		/* We need the barrier to ensure the ordering between the
2409 		 * writing to bp->port.pmf here and reading it from the
2410 		 * bnx2x_periodic_task().
2411 		 */
2412 		smp_mb();
2413 	} else {
2414 		bp->port.pmf = 0;
2415 	}
2416 
2417 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2418 }
2419 
2420 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2421 {
2422 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2423 	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2424 	    (bp->common.shmem2_base)) {
2425 		if (SHMEM2_HAS(bp, dcc_support))
2426 			SHMEM2_WR(bp, dcc_support,
2427 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2428 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2429 		if (SHMEM2_HAS(bp, afex_driver_support))
2430 			SHMEM2_WR(bp, afex_driver_support,
2431 				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2432 	}
2433 
2434 	/* Set AFEX default VLAN tag to an invalid value */
2435 	bp->afex_def_vlan_tag = -1;
2436 }
2437 
2438 /**
2439  * bnx2x_bz_fp - zero content of the fastpath structure.
2440  *
2441  * @bp:		driver handle
2442  * @index:	fastpath index to be zeroed
2443  *
2444  * Makes sure the contents of bp->fp[index].napi are kept
2445  * intact.
2446  */
2447 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2448 {
2449 	struct bnx2x_fastpath *fp = &bp->fp[index];
2450 	int cos;
2451 	struct napi_struct orig_napi = fp->napi;
2452 	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2453 
2454 	/* bzero bnx2x_fastpath contents */
2455 	if (fp->tpa_info)
2456 		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2457 		       sizeof(struct bnx2x_agg_info));
2458 	memset(fp, 0, sizeof(*fp));
2459 
2460 	/* Restore the NAPI object as it has been already initialized */
2461 	fp->napi = orig_napi;
2462 	fp->tpa_info = orig_tpa_info;
2463 	fp->bp = bp;
2464 	fp->index = index;
2465 	if (IS_ETH_FP(fp))
2466 		fp->max_cos = bp->max_cos;
2467 	else
2468 		/* Special queues support only one CoS */
2469 		fp->max_cos = 1;
2470 
2471 	/* Init txdata pointers */
2472 	if (IS_FCOE_FP(fp))
2473 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2474 	if (IS_ETH_FP(fp))
2475 		for_each_cos_in_tx_queue(fp, cos)
2476 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2477 				BNX2X_NUM_ETH_QUEUES(bp) + index];
2478 
2479 	/* Set the TPA flag for each queue. The TPA flag determines the queue's
2480 	 * minimal size, so it must be set prior to queue memory allocation.
2481 	 */
2482 	if (bp->dev->features & NETIF_F_LRO)
2483 		fp->mode = TPA_MODE_LRO;
2484 	else if (bp->dev->features & NETIF_F_GRO &&
2485 		 bnx2x_mtu_allows_gro(bp->dev->mtu))
2486 		fp->mode = TPA_MODE_GRO;
2487 	else
2488 		fp->mode = TPA_MODE_DISABLED;
2489 
2490 	/* We don't want TPA if it's disabled in bp
2491 	 * or if this is an FCoE L2 ring.
2492 	 */
2493 	if (bp->disable_tpa || IS_FCOE_FP(fp))
2494 		fp->mode = TPA_MODE_DISABLED;
2495 }
2496 
2497 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2498 {
2499 	u32 cur;
2500 
2501 	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2502 		return;
2503 
2504 	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2505 	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2506 	   cur, state);
2507 
2508 	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2509 }
2510 
2511 int bnx2x_load_cnic(struct bnx2x *bp)
2512 {
2513 	int i, rc, port = BP_PORT(bp);
2514 
2515 	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2516 
2517 	mutex_init(&bp->cnic_mutex);
2518 
2519 	if (IS_PF(bp)) {
2520 		rc = bnx2x_alloc_mem_cnic(bp);
2521 		if (rc) {
2522 			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2523 			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2524 		}
2525 	}
2526 
2527 	rc = bnx2x_alloc_fp_mem_cnic(bp);
2528 	if (rc) {
2529 		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2530 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2531 	}
2532 
2533 	/* Update the number of queues with the cnic queues */
2534 	rc = bnx2x_set_real_num_queues(bp, 1);
2535 	if (rc) {
2536 		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2537 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2538 	}
2539 
2540 	/* Add all CNIC NAPI objects */
2541 	bnx2x_add_all_napi_cnic(bp);
2542 	DP(NETIF_MSG_IFUP, "cnic napi added\n");
2543 	bnx2x_napi_enable_cnic(bp);
2544 
2545 	rc = bnx2x_init_hw_func_cnic(bp);
2546 	if (rc)
2547 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2548 
2549 	bnx2x_nic_init_cnic(bp);
2550 
2551 	if (IS_PF(bp)) {
2552 		/* Enable Timer scan */
2553 		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2554 
2555 		/* setup cnic queues */
2556 		for_each_cnic_queue(bp, i) {
2557 			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2558 			if (rc) {
2559 				BNX2X_ERR("Queue setup failed\n");
2560 				LOAD_ERROR_EXIT(bp, load_error_cnic2);
2561 			}
2562 		}
2563 	}
2564 
2565 	/* Initialize Rx filter. */
2566 	bnx2x_set_rx_mode_inner(bp);
2567 
2568 	/* re-read iscsi info */
2569 	bnx2x_get_iscsi_info(bp);
2570 	bnx2x_setup_cnic_irq_info(bp);
2571 	bnx2x_setup_cnic_info(bp);
2572 	bp->cnic_loaded = true;
2573 	if (bp->state == BNX2X_STATE_OPEN)
2574 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2575 
2576 	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2577 
2578 	return 0;
2579 
2580 #ifndef BNX2X_STOP_ON_ERROR
2581 load_error_cnic2:
2582 	/* Disable Timer scan */
2583 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2584 
2585 load_error_cnic1:
2586 	bnx2x_napi_disable_cnic(bp);
2587 	/* Update the number of queues without the cnic queues */
2588 	if (bnx2x_set_real_num_queues(bp, 0))
2589 		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2590 load_error_cnic0:
2591 	BNX2X_ERR("CNIC-related load failed\n");
2592 	bnx2x_free_fp_mem_cnic(bp);
2593 	bnx2x_free_mem_cnic(bp);
2594 	return rc;
2595 #endif /* ! BNX2X_STOP_ON_ERROR */
2596 }
2597 
2598 /* must be called with rtnl_lock */
2599 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2600 {
2601 	int port = BP_PORT(bp);
2602 	int i, rc = 0, load_code = 0;
2603 
2604 	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2605 	DP(NETIF_MSG_IFUP,
2606 	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2607 
2608 #ifdef BNX2X_STOP_ON_ERROR
2609 	if (unlikely(bp->panic)) {
2610 		BNX2X_ERR("Can't load NIC when there is panic\n");
2611 		return -EPERM;
2612 	}
2613 #endif
2614 
2615 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2616 
2617 	/* zero the structure w/o any lock, before SP handler is initialized */
2618 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2619 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2620 		&bp->last_reported_link.link_report_flags);
2621 
2622 	if (IS_PF(bp))
2623 		/* must be called before memory allocation and HW init */
2624 		bnx2x_ilt_set_info(bp);
2625 
2626 	/*
2627 	 * Zero fastpath structures while preserving invariants (the napi struct,
2628 	 * which is allocated only once, the fp index, max_cos and the bp pointer).
2629 	 * Also set fp->mode and txdata_ptr.
2630 	 */
2631 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2632 	for_each_queue(bp, i)
2633 		bnx2x_bz_fp(bp, i);
2634 	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2635 				  bp->num_cnic_queues) *
2636 				  sizeof(struct bnx2x_fp_txdata));
2637 
2638 	bp->fcoe_init = false;
2639 
2640 	/* Set the receive queues buffer size */
2641 	bnx2x_set_rx_buf_size(bp);
2642 
2643 	if (IS_PF(bp)) {
2644 		rc = bnx2x_alloc_mem(bp);
2645 		if (rc) {
2646 			BNX2X_ERR("Unable to allocate bp memory\n");
2647 			return rc;
2648 		}
2649 	}
2650 
2651 	/* Needs to be done after alloc mem, since it self-adjusts to the amount
2652 	 * of memory available for RSS queues.
2653 	 */
2654 	rc = bnx2x_alloc_fp_mem(bp);
2655 	if (rc) {
2656 		BNX2X_ERR("Unable to allocate memory for fps\n");
2657 		LOAD_ERROR_EXIT(bp, load_error0);
2658 	}
2659 
2660 	/* Allocate memory for FW statistics */
2661 	if (bnx2x_alloc_fw_stats_mem(bp))
2662 		LOAD_ERROR_EXIT(bp, load_error0);
2663 
2664 	/* request pf to initialize status blocks */
2665 	if (IS_VF(bp)) {
2666 		rc = bnx2x_vfpf_init(bp);
2667 		if (rc)
2668 			LOAD_ERROR_EXIT(bp, load_error0);
2669 	}
2670 
2671 	/* Since bnx2x_alloc_mem() may possibly update
2672 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
2673 	 * come after it. At this stage cnic queues are not counted.
2674 	 */
2675 	rc = bnx2x_set_real_num_queues(bp, 0);
2676 	if (rc) {
2677 		BNX2X_ERR("Unable to set real_num_queues\n");
2678 		LOAD_ERROR_EXIT(bp, load_error0);
2679 	}
2680 
2681 	/* Configure multi-CoS mappings in the kernel.
2682 	 * This configuration may be overridden by a multi-class queue
2683 	 * discipline or by a DCBX negotiation result.
2684 	 */
2685 	bnx2x_setup_tc(bp->dev, bp->max_cos);
2686 
2687 	/* Add all NAPI objects */
2688 	bnx2x_add_all_napi(bp);
2689 	DP(NETIF_MSG_IFUP, "napi added\n");
2690 	bnx2x_napi_enable(bp);
2691 
2692 	if (IS_PF(bp)) {
2693 		/* set pf load just before approaching the MCP */
2694 		bnx2x_set_pf_load(bp);
2695 
2696 		/* if mcp exists send load request and analyze response */
2697 		if (!BP_NOMCP(bp)) {
2698 			/* attempt to load pf */
2699 			rc = bnx2x_nic_load_request(bp, &load_code);
2700 			if (rc)
2701 				LOAD_ERROR_EXIT(bp, load_error1);
2702 
2703 			/* what did mcp say? */
2704 			rc = bnx2x_compare_fw_ver(bp, load_code, true);
2705 			if (rc) {
2706 				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2707 				LOAD_ERROR_EXIT(bp, load_error2);
2708 			}
2709 		} else {
2710 			load_code = bnx2x_nic_load_no_mcp(bp, port);
2711 		}
2712 
2713 		/* mark pmf if applicable */
2714 		bnx2x_nic_load_pmf(bp, load_code);
2715 
2716 		/* Init Function state controlling object */
2717 		bnx2x__init_func_obj(bp);
2718 
2719 		/* Initialize HW */
2720 		rc = bnx2x_init_hw(bp, load_code);
2721 		if (rc) {
2722 			BNX2X_ERR("HW init failed, aborting\n");
2723 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2724 			LOAD_ERROR_EXIT(bp, load_error2);
2725 		}
2726 	}
2727 
2728 	bnx2x_pre_irq_nic_init(bp);
2729 
2730 	/* Connect to IRQs */
2731 	rc = bnx2x_setup_irqs(bp);
2732 	if (rc) {
2733 		BNX2X_ERR("setup irqs failed\n");
2734 		if (IS_PF(bp))
2735 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2736 		LOAD_ERROR_EXIT(bp, load_error2);
2737 	}
2738 
2739 	/* Init per-function objects */
2740 	if (IS_PF(bp)) {
2741 		/* Setup NIC internals and enable interrupts */
2742 		bnx2x_post_irq_nic_init(bp, load_code);
2743 
2744 		bnx2x_init_bp_objs(bp);
2745 		bnx2x_iov_nic_init(bp);
2746 
2747 		/* Set AFEX default VLAN tag to an invalid value */
2748 		bp->afex_def_vlan_tag = -1;
2749 		bnx2x_nic_load_afex_dcc(bp, load_code);
2750 		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2751 		rc = bnx2x_func_start(bp);
2752 		if (rc) {
2753 			BNX2X_ERR("Function start failed!\n");
2754 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2755 
2756 			LOAD_ERROR_EXIT(bp, load_error3);
2757 		}
2758 
2759 		/* Send LOAD_DONE command to MCP */
2760 		if (!BP_NOMCP(bp)) {
2761 			load_code = bnx2x_fw_command(bp,
2762 						     DRV_MSG_CODE_LOAD_DONE, 0);
2763 			if (!load_code) {
2764 				BNX2X_ERR("MCP response failure, aborting\n");
2765 				rc = -EBUSY;
2766 				LOAD_ERROR_EXIT(bp, load_error3);
2767 			}
2768 		}
2769 
2770 		/* initialize FW coalescing state machines in RAM */
2771 		bnx2x_update_coalesce(bp);
2772 	}
2773 
2774 	/* setup the leading queue */
2775 	rc = bnx2x_setup_leading(bp);
2776 	if (rc) {
2777 		BNX2X_ERR("Setup leading failed!\n");
2778 		LOAD_ERROR_EXIT(bp, load_error3);
2779 	}
2780 
2781 	/* set up the rest of the queues */
2782 	for_each_nondefault_eth_queue(bp, i) {
2783 		if (IS_PF(bp))
2784 			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2785 		else /* VF */
2786 			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2787 		if (rc) {
2788 			BNX2X_ERR("Queue %d setup failed\n", i);
2789 			LOAD_ERROR_EXIT(bp, load_error3);
2790 		}
2791 	}
2792 
2793 	/* setup rss */
2794 	rc = bnx2x_init_rss(bp);
2795 	if (rc) {
2796 		BNX2X_ERR("PF RSS init failed\n");
2797 		LOAD_ERROR_EXIT(bp, load_error3);
2798 	}
2799 
2800 	/* Now that clients are configured we are ready to work */
2801 	bp->state = BNX2X_STATE_OPEN;
2802 
2803 	/* Configure a ucast MAC */
2804 	if (IS_PF(bp))
2805 		rc = bnx2x_set_eth_mac(bp, true);
2806 	else /* vf */
2807 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2808 					   true);
2809 	if (rc) {
2810 		BNX2X_ERR("Setting Ethernet MAC failed\n");
2811 		LOAD_ERROR_EXIT(bp, load_error3);
2812 	}
2813 
2814 	if (IS_PF(bp) && bp->pending_max) {
2815 		bnx2x_update_max_mf_config(bp, bp->pending_max);
2816 		bp->pending_max = 0;
2817 	}
2818 
2819 	if (bp->port.pmf) {
2820 		rc = bnx2x_initial_phy_init(bp, load_mode);
2821 		if (rc)
2822 			LOAD_ERROR_EXIT(bp, load_error3);
2823 	}
2824 	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2825 
2826 	/* Start fast path */
2827 
2828 	/* Re-configure vlan filters */
2829 	rc = bnx2x_vlan_reconfigure_vid(bp);
2830 	if (rc)
2831 		LOAD_ERROR_EXIT(bp, load_error3);
2832 
2833 	/* Initialize Rx filter. */
2834 	bnx2x_set_rx_mode_inner(bp);
2835 
2836 	if (bp->flags & PTP_SUPPORTED) {
2837 		bnx2x_init_ptp(bp);
2838 		bnx2x_configure_ptp_filters(bp);
2839 	}
2840 	/* Start Tx */
2841 	switch (load_mode) {
2842 	case LOAD_NORMAL:
2843 		/* Tx queues should only be re-enabled */
2844 		netif_tx_wake_all_queues(bp->dev);
2845 		break;
2846 
2847 	case LOAD_OPEN:
2848 		netif_tx_start_all_queues(bp->dev);
2849 		smp_mb__after_atomic();
2850 		break;
2851 
2852 	case LOAD_DIAG:
2853 	case LOAD_LOOPBACK_EXT:
2854 		bp->state = BNX2X_STATE_DIAG;
2855 		break;
2856 
2857 	default:
2858 		break;
2859 	}
2860 
2861 	if (bp->port.pmf)
2862 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2863 	else
2864 		bnx2x__link_status_update(bp);
2865 
2866 	/* start the timer */
2867 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2868 
2869 	if (CNIC_ENABLED(bp))
2870 		bnx2x_load_cnic(bp);
2871 
2872 	if (IS_PF(bp))
2873 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2874 
2875 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2876 		/* mark driver is loaded in shmem2 */
2877 		u32 val;
2878 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2879 		val &= ~DRV_FLAGS_MTU_MASK;
2880 		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2881 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2882 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2883 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2884 	}
2885 
2886 	/* Wait for all pending SP commands to complete */
2887 	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2888 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2889 		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2890 		return -EBUSY;
2891 	}
2892 
2893 	/* Update driver data for On-Chip MFW dump. */
2894 	if (IS_PF(bp))
2895 		bnx2x_update_mfw_dump(bp);
2896 
2897 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2898 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2899 		bnx2x_dcbx_init(bp, false);
2900 
2901 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2902 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2903 
2904 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2905 
2906 	return 0;
2907 
2908 #ifndef BNX2X_STOP_ON_ERROR
2909 load_error3:
2910 	if (IS_PF(bp)) {
2911 		bnx2x_int_disable_sync(bp, 1);
2912 
2913 		/* Clean queueable objects */
2914 		bnx2x_squeeze_objects(bp);
2915 	}
2916 
2917 	/* Free SKBs, SGEs, TPA pool and driver internals */
2918 	bnx2x_free_skbs(bp);
2919 	for_each_rx_queue(bp, i)
2920 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2921 
2922 	/* Release IRQs */
2923 	bnx2x_free_irq(bp);
2924 load_error2:
2925 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
2926 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2927 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2928 	}
2929 
2930 	bp->port.pmf = 0;
2931 load_error1:
2932 	bnx2x_napi_disable(bp);
2933 	bnx2x_del_all_napi(bp);
2934 
2935 	/* clear pf_load status, as it was already set */
2936 	if (IS_PF(bp))
2937 		bnx2x_clear_pf_load(bp);
2938 load_error0:
2939 	bnx2x_free_fw_stats_mem(bp);
2940 	bnx2x_free_fp_mem(bp);
2941 	bnx2x_free_mem(bp);
2942 
2943 	return rc;
2944 #endif /* ! BNX2X_STOP_ON_ERROR */
2945 }
2946 
2947 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2948 {
2949 	u8 rc = 0, cos, i;
2950 
2951 	/* Wait until tx fastpath tasks complete */
2952 	for_each_tx_queue(bp, i) {
2953 		struct bnx2x_fastpath *fp = &bp->fp[i];
2954 
2955 		for_each_cos_in_tx_queue(fp, cos)
2956 			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2957 		if (rc)
2958 			return rc;
2959 	}
2960 	return 0;
2961 }
2962 
2963 /* must be called with rtnl_lock */
2964 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2965 {
2966 	int i;
2967 	bool global = false;
2968 
2969 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2970 
2971 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2972 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2973 
2974 	/* mark driver is unloaded in shmem2 */
2975 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2976 		u32 val;
2977 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2978 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2979 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2980 	}
2981 
2982 	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2983 	    (bp->state == BNX2X_STATE_CLOSED ||
2984 	     bp->state == BNX2X_STATE_ERROR)) {
2985 		/* We can get here if the driver has been unloaded
2986 		 * during parity error recovery and is either waiting for a
2987 		 * leader to complete or for other functions to unload and
2988 		 * then ifdown has been issued. In this case we want to
2989 		 * unload and let other functions complete a recovery
2990 		 * process.
2991 		 */
2992 		bp->recovery_state = BNX2X_RECOVERY_DONE;
2993 		bp->is_leader = 0;
2994 		bnx2x_release_leader_lock(bp);
2995 		smp_mb();
2996 
2997 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2998 		BNX2X_ERR("Can't unload in closed or error state\n");
2999 		return -EINVAL;
3000 	}
3001 
3002 	/* Nothing to do during unload if previous bnx2x_nic_load()
3003 	 * has not completed successfully - all resources are released.
3004 	 *
3005 	 * We can get here only after an unsuccessful ndo_* callback, during which
3006 	 * the dev->IFF_UP flag is still on.
3007 	 */
3008 	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3009 		return 0;
3010 
3011 	/* It's important to set the bp->state to the value different from
3012 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3013 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3014 	 */
3015 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3016 	smp_mb();
3017 
3018 	/* indicate to VFs that the PF is going down */
3019 	bnx2x_iov_channel_down(bp);
3020 
3021 	if (CNIC_LOADED(bp))
3022 		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3023 
3024 	/* Stop Tx */
3025 	bnx2x_tx_disable(bp);
3026 	netdev_reset_tc(bp->dev);
3027 
3028 	bp->rx_mode = BNX2X_RX_MODE_NONE;
3029 
3030 	del_timer_sync(&bp->timer);
3031 
3032 	if (IS_PF(bp)) {
3033 		/* Set ALWAYS_ALIVE bit in shmem */
3034 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3035 		bnx2x_drv_pulse(bp);
3036 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3037 		bnx2x_save_statistics(bp);
3038 	}
3039 
3040 	/* Wait till consumers catch up with producers in all queues.
3041 	 * If we're recovering, the FW can't write to the host, so there is no
3042 	 * reason to wait for the queues to complete all Tx.
3043 	 */
3044 	if (unload_mode != UNLOAD_RECOVERY)
3045 		bnx2x_drain_tx_queues(bp);
3046 
3047 	/* If VF, indicate to the PF that this function is going down (the PF
3048 	 * will delete SP elements and clear initializations).
3049 	 */
3050 	if (IS_VF(bp))
3051 		bnx2x_vfpf_close_vf(bp);
3052 	else if (unload_mode != UNLOAD_RECOVERY)
3053 		/* if this is a normal/close unload need to clean up chip*/
3054 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3055 	else {
3056 		/* Send the UNLOAD_REQUEST to the MCP */
3057 		bnx2x_send_unload_req(bp, unload_mode);
3058 
3059 		/* Prevent transactions to the host from the functions on the
3060 		 * engine that doesn't reset global blocks in case of a global
3061 		 * attention, once global blocks are reset and gates are opened
3062 		 * (the engine whose leader will perform the recovery
3063 		 * last).
3064 		 */
3065 		if (!CHIP_IS_E1x(bp))
3066 			bnx2x_pf_disable(bp);
3067 
3068 		/* Disable HW interrupts, NAPI */
3069 		bnx2x_netif_stop(bp, 1);
3070 		/* Delete all NAPI objects */
3071 		bnx2x_del_all_napi(bp);
3072 		if (CNIC_LOADED(bp))
3073 			bnx2x_del_all_napi_cnic(bp);
3074 		/* Release IRQs */
3075 		bnx2x_free_irq(bp);
3076 
3077 		/* Report UNLOAD_DONE to MCP */
3078 		bnx2x_send_unload_done(bp, false);
3079 	}
3080 
3081 	/*
3082 	 * At this stage no more interrupts will arrive so we may safely clean
3083 	 * the queueable objects here in case they failed to get cleaned so far.
3084 	 */
3085 	if (IS_PF(bp))
3086 		bnx2x_squeeze_objects(bp);
3087 
3088 	/* There should be no more pending SP commands at this stage */
3089 	bp->sp_state = 0;
3090 
3091 	bp->port.pmf = 0;
3092 
3093 	/* clear pending work in rtnl task */
3094 	bp->sp_rtnl_state = 0;
3095 	smp_mb();
3096 
3097 	/* Free SKBs, SGEs, TPA pool and driver internals */
3098 	bnx2x_free_skbs(bp);
3099 	if (CNIC_LOADED(bp))
3100 		bnx2x_free_skbs_cnic(bp);
3101 	for_each_rx_queue(bp, i)
3102 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3103 
3104 	bnx2x_free_fp_mem(bp);
3105 	if (CNIC_LOADED(bp))
3106 		bnx2x_free_fp_mem_cnic(bp);
3107 
3108 	if (IS_PF(bp)) {
3109 		if (CNIC_LOADED(bp))
3110 			bnx2x_free_mem_cnic(bp);
3111 	}
3112 	bnx2x_free_mem(bp);
3113 
3114 	bp->state = BNX2X_STATE_CLOSED;
3115 	bp->cnic_loaded = false;
3116 
3117 	/* Clear driver version indication in shmem */
3118 	if (IS_PF(bp))
3119 		bnx2x_update_mng_version(bp);
3120 
3121 	/* Check if there are pending parity attentions. If there are - set
3122 	 * RECOVERY_IN_PROGRESS.
3123 	 */
3124 	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3125 		bnx2x_set_reset_in_progress(bp);
3126 
3127 		/* Set RESET_IS_GLOBAL if needed */
3128 		if (global)
3129 			bnx2x_set_reset_global(bp);
3130 	}
3131 
3132 	/* The last driver must disable a "close the gate" if there is no
3133 	 * parity attention or "process kill" pending.
3134 	 */
3135 	if (IS_PF(bp) &&
3136 	    !bnx2x_clear_pf_load(bp) &&
3137 	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
3138 		bnx2x_disable_close_the_gate(bp);
3139 
3140 	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3141 
3142 	return 0;
3143 }
3144 
3145 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3146 {
3147 	u16 pmcsr;
3148 
3149 	/* If there is no power capability, silently succeed */
3150 	if (!bp->pdev->pm_cap) {
3151 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
3152 		return 0;
3153 	}
3154 
3155 	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3156 
3157 	switch (state) {
3158 	case PCI_D0:
3159 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3160 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3161 				       PCI_PM_CTRL_PME_STATUS));
3162 
3163 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3164 			/* delay required during transition out of D3hot */
3165 			msleep(20);
3166 		break;
3167 
3168 	case PCI_D3hot:
3169 		/* If there are other clients above, don't
3170 		 * shut down the power */
3171 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
3172 			return 0;
3173 		/* Don't shut down the power for emulation and FPGA */
3174 		if (CHIP_REV_IS_SLOW(bp))
3175 			return 0;
3176 
3177 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3178 		pmcsr |= 3;
3179 
3180 		if (bp->wol)
3181 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3182 
3183 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3184 				      pmcsr);
3185 
3186 		/* No more memory access after this point until
3187 		 * device is brought back to D0.
3188 		 */
3189 		break;
3190 
3191 	default:
3192 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3193 		return -EINVAL;
3194 	}
3195 	return 0;
3196 }
3197 
3198 /*
3199  * net_device service functions
3200  */
3201 static int bnx2x_poll(struct napi_struct *napi, int budget)
3202 {
3203 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3204 						 napi);
3205 	struct bnx2x *bp = fp->bp;
3206 	int rx_work_done;
3207 	u8 cos;
3208 
3209 #ifdef BNX2X_STOP_ON_ERROR
3210 	if (unlikely(bp->panic)) {
3211 		napi_complete(napi);
3212 		return 0;
3213 	}
3214 #endif
3215 	for_each_cos_in_tx_queue(fp, cos)
3216 		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3217 			bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3218 
3219 	rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3220 
3221 	if (rx_work_done < budget) {
3222 		/* No need to update SB for FCoE L2 ring as long as
3223 		 * it's connected to the default SB and the SB
3224 		 * has been updated when NAPI was scheduled.
3225 		 */
3226 		if (IS_FCOE_FP(fp)) {
3227 			napi_complete_done(napi, rx_work_done);
3228 		} else {
3229 			bnx2x_update_fpsb_idx(fp);
3230 			/* bnx2x_has_rx_work() reads the status block,
3231 			 * thus we need to ensure that status block indices
3232 			 * have been actually read (bnx2x_update_fpsb_idx)
3233 			 * prior to this check (bnx2x_has_rx_work) so that
3234 			 * we won't write the "newer" value of the status block
3235 			 * to IGU (if there was a DMA right after
3236 			 * bnx2x_has_rx_work and if there is no rmb, the memory
3237 			 * reading (bnx2x_update_fpsb_idx) may be postponed
3238 			 * to right before bnx2x_ack_sb). In this case there
3239 			 * will never be another interrupt until there is
3240 			 * another update of the status block, while there
3241 			 * is still unhandled work.
3242 			 */
3243 			rmb();
3244 
3245 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3246 				if (napi_complete_done(napi, rx_work_done)) {
3247 					/* Re-enable interrupts */
3248 					DP(NETIF_MSG_RX_STATUS,
3249 					   "Update index to %d\n", fp->fp_hc_idx);
3250 					bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3251 						     le16_to_cpu(fp->fp_hc_idx),
3252 						     IGU_INT_ENABLE, 1);
3253 				}
3254 			} else {
3255 				rx_work_done = budget;
3256 			}
3257 		}
3258 	}
3259 
3260 	return rx_work_done;
3261 }
3262 
3263 /* We split the first BD into header and data BDs
3264  * to ease the pain of our fellow microcode engineers;
3265  * we use one mapping for both BDs.
3266  */
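/* The header BD keeps the first hlen bytes of the original mapping; a new
 * data BD is pointed hlen bytes into the same DMA mapping and carries the
 * remaining (old_len - hlen) bytes, so no additional mapping is created.
 */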
3267 static u16 bnx2x_tx_split(struct bnx2x *bp,
3268 			  struct bnx2x_fp_txdata *txdata,
3269 			  struct sw_tx_bd *tx_buf,
3270 			  struct eth_tx_start_bd **tx_bd, u16 hlen,
3271 			  u16 bd_prod)
3272 {
3273 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3274 	struct eth_tx_bd *d_tx_bd;
3275 	dma_addr_t mapping;
3276 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
3277 
3278 	/* first fix first BD */
3279 	h_tx_bd->nbytes = cpu_to_le16(hlen);
3280 
3281 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
3282 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3283 
3284 	/* now get a new data BD
3285 	 * (after the pbd) and fill it */
3286 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3287 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3288 
3289 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3290 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3291 
3292 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3293 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3294 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3295 
3296 	/* this marks the BD as one that has no individual mapping */
3297 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3298 
3299 	DP(NETIF_MSG_TX_QUEUED,
3300 	   "TSO split data size is %d (%x:%x)\n",
3301 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3302 
3303 	/* update tx_bd */
3304 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3305 
3306 	return bd_prod;
3307 }
3308 
3309 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3310 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
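/* The hardware computes the checksum starting at a fixed offset; if the real
 * transport header is 'fix' bytes away from that point, the partial checksum
 * of the skipped (or extra) bytes is folded out of (or into) the value so the
 * result matches a checksum taken from the transport header itself.
 */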
3311 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3312 {
3313 	__sum16 tsum = (__force __sum16) csum;
3314 
3315 	if (fix > 0)
3316 		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3317 				  csum_partial(t_header - fix, fix, 0)));
3318 
3319 	else if (fix < 0)
3320 		tsum = ~csum_fold(csum_add((__force __wsum) csum,
3321 				  csum_partial(t_header, -fix, 0)));
3322 
3323 	return bswab16(tsum);
3324 }
3325 
3326 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3327 {
3328 	u32 rc;
3329 	__u8 prot = 0;
3330 	__be16 protocol;
3331 
3332 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3333 		return XMIT_PLAIN;
3334 
3335 	protocol = vlan_get_protocol(skb);
3336 	if (protocol == htons(ETH_P_IPV6)) {
3337 		rc = XMIT_CSUM_V6;
3338 		prot = ipv6_hdr(skb)->nexthdr;
3339 	} else {
3340 		rc = XMIT_CSUM_V4;
3341 		prot = ip_hdr(skb)->protocol;
3342 	}
3343 
3344 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3345 		if (inner_ip_hdr(skb)->version == 6) {
3346 			rc |= XMIT_CSUM_ENC_V6;
3347 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3348 				rc |= XMIT_CSUM_TCP;
3349 		} else {
3350 			rc |= XMIT_CSUM_ENC_V4;
3351 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3352 				rc |= XMIT_CSUM_TCP;
3353 		}
3354 	}
3355 	if (prot == IPPROTO_TCP)
3356 		rc |= XMIT_CSUM_TCP;
3357 
3358 	if (skb_is_gso(skb)) {
3359 		if (skb_is_gso_v6(skb)) {
3360 			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3361 			if (rc & XMIT_CSUM_ENC)
3362 				rc |= XMIT_GSO_ENC_V6;
3363 		} else {
3364 			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3365 			if (rc & XMIT_CSUM_ENC)
3366 				rc |= XMIT_GSO_ENC_V4;
3367 		}
3368 	}
3369 
3370 	return rc;
3371 }
3372 
3373 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3374 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3375 
3376 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3377 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3378 
3379 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3380 /* check if a packet requires linearization (packet is too fragmented);
3381    no need to check fragmentation if page size > 8K (there will be no
3382    violation of FW restrictions) */
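/* The check below slides a window of (MAX_FETCH_BD - num_tso_win_sub) BDs
 * over the linear part plus the frags; if any window sums to less than one
 * MSS, the FW could be asked to fetch too many BDs for a single LSO frame,
 * so the packet is copied (linearized) instead. E.g. an MSS of 1400 spread
 * over many 100-byte frags would trip this. (Illustrative numbers only.)
 */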
3383 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3384 			     u32 xmit_type)
3385 {
3386 	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3387 	int to_copy = 0, hlen = 0;
3388 
3389 	if (xmit_type & XMIT_GSO_ENC)
3390 		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3391 
3392 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3393 		if (xmit_type & XMIT_GSO) {
3394 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3395 			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3396 			/* Number of windows to check */
3397 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3398 			int wnd_idx = 0;
3399 			int frag_idx = 0;
3400 			u32 wnd_sum = 0;
3401 
3402 			/* Headers length */
3403 			if (xmit_type & XMIT_GSO_ENC)
3404 				hlen = (int)(skb_inner_transport_header(skb) -
3405 					     skb->data) +
3406 					     inner_tcp_hdrlen(skb);
3407 			else
3408 				hlen = (int)(skb_transport_header(skb) -
3409 					     skb->data) + tcp_hdrlen(skb);
3410 
3411 			/* Amount of data (w/o headers) on linear part of SKB*/
3412 			first_bd_sz = skb_headlen(skb) - hlen;
3413 
3414 			wnd_sum  = first_bd_sz;
3415 
3416 			/* Calculate the first sum - it's special */
3417 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3418 				wnd_sum +=
3419 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3420 
3421 			/* If there was data in the linear part of the skb - check it */
3422 			if (first_bd_sz > 0) {
3423 				if (unlikely(wnd_sum < lso_mss)) {
3424 					to_copy = 1;
3425 					goto exit_lbl;
3426 				}
3427 
3428 				wnd_sum -= first_bd_sz;
3429 			}
3430 
3431 			/* Others are easier: run through the frag list and
3432 			   check all windows */
3433 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3434 				wnd_sum +=
3435 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3436 
3437 				if (unlikely(wnd_sum < lso_mss)) {
3438 					to_copy = 1;
3439 					break;
3440 				}
3441 				wnd_sum -=
3442 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3443 			}
3444 		} else {
3445 			/* in the non-LSO case, a too-fragmented packet should
3446 			   always be linearized */
3447 			to_copy = 1;
3448 		}
3449 	}
3450 
3451 exit_lbl:
3452 	if (unlikely(to_copy))
3453 		DP(NETIF_MSG_TX_QUEUED,
3454 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3455 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3456 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3457 
3458 	return to_copy;
3459 }
3460 #endif
3461 
3462 /**
3463  * bnx2x_set_pbd_gso - update PBD in GSO case.
3464  *
3465  * @skb:	packet skb
3466  * @pbd:	parse BD
3467  * @xmit_type:	xmit flags
3468  */
3469 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3470 			      struct eth_tx_parse_bd_e1x *pbd,
3471 			      u32 xmit_type)
3472 {
3473 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3474 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3475 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3476 
3477 	if (xmit_type & XMIT_GSO_V4) {
3478 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
3479 		pbd->tcp_pseudo_csum =
3480 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3481 						   ip_hdr(skb)->daddr,
3482 						   0, IPPROTO_TCP, 0));
3483 	} else {
3484 		pbd->tcp_pseudo_csum =
3485 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3486 						 &ipv6_hdr(skb)->daddr,
3487 						 0, IPPROTO_TCP, 0));
3488 	}
3489 
3490 	pbd->global_data |=
3491 		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3492 }
3493 
3494 /**
3495  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3496  *
3497  * @bp:			driver handle
3498  * @skb:		packet skb
3499  * @parsing_data:	data to be updated
3500  * @xmit_type:		xmit flags
3501  *
3502  * 57712/578xx related, when skb has encapsulation
3503  */
3504 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3505 				 u32 *parsing_data, u32 xmit_type)
3506 {
3507 	*parsing_data |=
3508 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3509 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3510 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3511 
3512 	if (xmit_type & XMIT_CSUM_TCP) {
3513 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3514 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3515 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3516 
3517 		return skb_inner_transport_header(skb) +
3518 			inner_tcp_hdrlen(skb) - skb->data;
3519 	}
3520 
3521 	/* We support checksum offload for TCP and UDP only.
3522 	 * No need to pass the UDP header length - it's a constant.
3523 	 */
3524 	return skb_inner_transport_header(skb) +
3525 		sizeof(struct udphdr) - skb->data;
3526 }
3527 
3528 /**
3529  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3530  *
3531  * @bp:			driver handle
3532  * @skb:		packet skb
3533  * @parsing_data:	data to be updated
3534  * @xmit_type:		xmit flags
3535  *
3536  * 57712/578xx related
3537  */
3538 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3539 				u32 *parsing_data, u32 xmit_type)
3540 {
3541 	*parsing_data |=
3542 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3543 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3544 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3545 
3546 	if (xmit_type & XMIT_CSUM_TCP) {
3547 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3548 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3549 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3550 
3551 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3552 	}
3553 	/* We support checksum offload for TCP and UDP only.
3554 	 * No need to pass the UDP header length - it's a constant.
3555 	 */
3556 	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3557 }
3558 
3559 /* set FW indication according to inner or outer protocols if tunneled */
3560 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3561 			       struct eth_tx_start_bd *tx_start_bd,
3562 			       u32 xmit_type)
3563 {
3564 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3565 
3566 	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3567 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3568 
3569 	if (!(xmit_type & XMIT_CSUM_TCP))
3570 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3571 }
3572 
3573 /**
3574  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3575  *
3576  * @bp:		driver handle
3577  * @skb:	packet skb
3578  * @pbd:	parse BD to be updated
3579  * @xmit_type:	xmit flags
3580  */
3581 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3582 			     struct eth_tx_parse_bd_e1x *pbd,
3583 			     u32 xmit_type)
3584 {
3585 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
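	/* The parse BD expects lengths in 16-bit words, hence the >> 1 here
	 * and the conversion back to bytes (hlen * 2) before returning.
	 */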
3586 
3587 	/* for now NS flag is not used in Linux */
3588 	pbd->global_data =
3589 		cpu_to_le16(hlen |
3590 			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3591 			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3592 
3593 	pbd->ip_hlen_w = (skb_transport_header(skb) -
3594 			skb_network_header(skb)) >> 1;
3595 
3596 	hlen += pbd->ip_hlen_w;
3597 
3598 	/* We support checksum offload for TCP and UDP only */
3599 	if (xmit_type & XMIT_CSUM_TCP)
3600 		hlen += tcp_hdrlen(skb) / 2;
3601 	else
3602 		hlen += sizeof(struct udphdr) / 2;
3603 
3604 	pbd->total_hlen_w = cpu_to_le16(hlen);
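	/* hlen was accumulated in 16-bit words; convert back to bytes for the
	 * value returned to the caller.
	 */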
3605 	hlen = hlen * 2;
3606 
3607 	if (xmit_type & XMIT_CSUM_TCP) {
3608 		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3609 
3610 	} else {
3611 		s8 fix = SKB_CS_OFF(skb); /* signed! */
3612 
3613 		DP(NETIF_MSG_TX_QUEUED,
3614 		   "hlen %d  fix %d  csum before fix %x\n",
3615 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3616 
3617 		/* HW bug: fixup the CSUM */
3618 		pbd->tcp_pseudo_csum =
3619 			bnx2x_csum_fix(skb_transport_header(skb),
3620 				       SKB_CS(skb), fix);
3621 
3622 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3623 		   pbd->tcp_pseudo_csum);
3624 	}
3625 
3626 	return hlen;
3627 }
3628 
3629 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3630 				      struct eth_tx_parse_bd_e2 *pbd_e2,
3631 				      struct eth_tx_parse_2nd_bd *pbd2,
3632 				      u16 *global_data,
3633 				      u32 xmit_type)
3634 {
3635 	u16 hlen_w = 0;
3636 	u8 outerip_off, outerip_len = 0;
3637 
3638 	/* from outer IP to transport */
3639 	hlen_w = (skb_inner_transport_header(skb) -
3640 		  skb_network_header(skb)) >> 1;
3641 
3642 	/* transport len */
3643 	hlen_w += inner_tcp_hdrlen(skb) >> 1;
3644 
3645 	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3646 
3647 	/* outer IP header info */
3648 	if (xmit_type & XMIT_CSUM_V4) {
3649 		struct iphdr *iph = ip_hdr(skb);
3650 		u32 csum = (__force u32)(~iph->check) -
3651 			   (__force u32)iph->tot_len -
3652 			   (__force u32)iph->frag_off;
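		/* Subtracting tot_len and frag_off from ~check removes their
		 * contribution from the one's-complement sum, yielding the IP
		 * checksum without the length/flags/fragment fields (as the
		 * fw_ip_csum_wo_len_flags_frag field name suggests).
		 */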
3653 
3654 		outerip_len = iph->ihl << 1;
3655 
3656 		pbd2->fw_ip_csum_wo_len_flags_frag =
3657 			bswab16(csum_fold((__force __wsum)csum));
3658 	} else {
3659 		pbd2->fw_ip_hdr_to_payload_w =
3660 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3661 		pbd_e2->data.tunnel_data.flags |=
3662 			ETH_TUNNEL_DATA_IPV6_OUTER;
3663 	}
3664 
3665 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3666 
3667 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3668 
3669 	/* inner IP header info */
3670 	if (xmit_type & XMIT_CSUM_ENC_V4) {
3671 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3672 
3673 		pbd_e2->data.tunnel_data.pseudo_csum =
3674 			bswab16(~csum_tcpudp_magic(
3675 					inner_ip_hdr(skb)->saddr,
3676 					inner_ip_hdr(skb)->daddr,
3677 					0, IPPROTO_TCP, 0));
3678 	} else {
3679 		pbd_e2->data.tunnel_data.pseudo_csum =
3680 			bswab16(~csum_ipv6_magic(
3681 					&inner_ipv6_hdr(skb)->saddr,
3682 					&inner_ipv6_hdr(skb)->daddr,
3683 					0, IPPROTO_TCP, 0));
3684 	}
3685 
3686 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3687 
3688 	*global_data |=
3689 		outerip_off |
3690 		(outerip_len <<
3691 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3692 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3693 			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3694 
3695 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3696 		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3697 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3698 	}
3699 }
3700 
3701 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3702 					 u32 xmit_type)
3703 {
3704 	struct ipv6hdr *ipv6;
3705 
3706 	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3707 		return;
3708 
3709 	if (xmit_type & XMIT_GSO_ENC_V6)
3710 		ipv6 = inner_ipv6_hdr(skb);
3711 	else /* XMIT_GSO_V6 */
3712 		ipv6 = ipv6_hdr(skb);
3713 
3714 	if (ipv6->nexthdr == NEXTHDR_IPV6)
3715 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3716 }
3717 
3718 /* called with netif_tx_lock
3719  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3720  * netif_wake_queue()
3721  */
3722 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3723 {
3724 	struct bnx2x *bp = netdev_priv(dev);
3725 
3726 	struct netdev_queue *txq;
3727 	struct bnx2x_fp_txdata *txdata;
3728 	struct sw_tx_bd *tx_buf;
3729 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
3730 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3731 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3732 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3733 	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3734 	u32 pbd_e2_parsing_data = 0;
3735 	u16 pkt_prod, bd_prod;
3736 	int nbd, txq_index;
3737 	dma_addr_t mapping;
3738 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
3739 	int i;
3740 	u8 hlen = 0;
3741 	__le16 pkt_size = 0;
3742 	struct ethhdr *eth;
3743 	u8 mac_type = UNICAST_ADDRESS;
3744 
3745 #ifdef BNX2X_STOP_ON_ERROR
3746 	if (unlikely(bp->panic))
3747 		return NETDEV_TX_BUSY;
3748 #endif
3749 
3750 	txq_index = skb_get_queue_mapping(skb);
3751 	txq = netdev_get_tx_queue(dev, txq_index);
3752 
3753 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3754 
3755 	txdata = &bp->bnx2x_txq[txq_index];
3756 
3757 	/* enable this debug print to view the transmission queue being used
3758 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3759 	   txq_index, fp_index, txdata_index); */
3760 
3761 	/* enable this debug print to view the transmission details
3762 	DP(NETIF_MSG_TX_QUEUED,
3763 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3764 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
3765 
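	/* Worst-case BD need for this packet: one BD per fragment, the fixed
	 * number of BDs per packet (start/parsing), plus the next-page BDs a
	 * maximally-sized packet may consume.
	 */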
3766 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
3767 			skb_shinfo(skb)->nr_frags +
3768 			BDS_PER_TX_PKT +
3769 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3770 		/* Handle special storage cases separately */
3771 		if (txdata->tx_ring_size == 0) {
3772 			struct bnx2x_eth_q_stats *q_stats =
3773 				bnx2x_fp_qstats(bp, txdata->parent_fp);
3774 			q_stats->driver_filtered_tx_pkt++;
3775 			dev_kfree_skb(skb);
3776 			return NETDEV_TX_OK;
3777 		}
3778 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3779 		netif_tx_stop_queue(txq);
3780 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3781 
3782 		return NETDEV_TX_BUSY;
3783 	}
3784 
3785 	DP(NETIF_MSG_TX_QUEUED,
3786 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3787 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3788 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3789 	   skb->len);
3790 
3791 	eth = (struct ethhdr *)skb->data;
3792 
3793 	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
3794 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3795 		if (is_broadcast_ether_addr(eth->h_dest))
3796 			mac_type = BROADCAST_ADDRESS;
3797 		else
3798 			mac_type = MULTICAST_ADDRESS;
3799 	}
3800 
3801 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3802 	/* First, check if we need to linearize the skb (due to FW
3803 	   restrictions). No need to check fragmentation if page size > 8K
3804 	   (there will be no violation of FW restrictions) */
3805 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3806 		/* Statistics of linearization */
3807 		bp->lin_cnt++;
3808 		if (skb_linearize(skb) != 0) {
3809 			DP(NETIF_MSG_TX_QUEUED,
3810 			   "SKB linearization failed - silently dropping this SKB\n");
3811 			dev_kfree_skb_any(skb);
3812 			return NETDEV_TX_OK;
3813 		}
3814 	}
3815 #endif
3816 	/* Map skb linear data for DMA */
3817 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
3818 				 skb_headlen(skb), DMA_TO_DEVICE);
3819 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3820 		DP(NETIF_MSG_TX_QUEUED,
3821 		   "SKB mapping failed - silently dropping this SKB\n");
3822 		dev_kfree_skb_any(skb);
3823 		return NETDEV_TX_OK;
3824 	}
3825 	/*
3826 	 * Please read carefully. First we use one BD which we mark as start,
3827 	 * then we have a parsing info BD (used for TSO or checksum),
3828 	 * and only then we have the rest of the TSO BDs.
3829 	 * (don't forget to mark the last one as last,
3830 	 * and to unmap only AFTER you write to the BD ...)
3831 	 * And above all, all PBD sizes are in words - NOT DWORDS!
3832 	 */
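
	/* For example: a non-tunneled TCP packet with a 14-byte Ethernet
	 * header, a 20-byte IPv4 header and a 20-byte TCP header has a total
	 * header length of 54 bytes = 27 words.
	 */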
3833 
3834 	/* get current pkt produced now - advance it just before sending packet
3835 	 * since mapping of pages may fail and cause packet to be dropped
3836 	 */
3837 	pkt_prod = txdata->tx_pkt_prod;
3838 	bd_prod = TX_BD(txdata->tx_bd_prod);
3839 
3840 	/* get a tx_buf and first BD
3841 	 * tx_start_bd may be changed during SPLIT,
3842 	 * but first_bd will always stay first
3843 	 */
3844 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3845 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3846 	first_bd = tx_start_bd;
3847 
3848 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3849 
3850 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3851 		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3852 			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3853 		} else if (bp->ptp_tx_skb) {
3854 			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3855 		} else {
3856 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3857 			/* schedule check for Tx timestamp */
3858 			bp->ptp_tx_skb = skb_get(skb);
3859 			bp->ptp_tx_start = jiffies;
3860 			schedule_work(&bp->ptp_task);
3861 		}
3862 	}
3863 
3864 	/* header nbd: indirectly zero other flags! */
3865 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3866 
3867 	/* remember the first BD of the packet */
3868 	tx_buf->first_bd = txdata->tx_bd_prod;
3869 	tx_buf->skb = skb;
3870 	tx_buf->flags = 0;
3871 
3872 	DP(NETIF_MSG_TX_QUEUED,
3873 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3874 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3875 
3876 	if (skb_vlan_tag_present(skb)) {
3877 		tx_start_bd->vlan_or_ethertype =
3878 		    cpu_to_le16(skb_vlan_tag_get(skb));
3879 		tx_start_bd->bd_flags.as_bitfield |=
3880 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3881 	} else {
3882 		/* when transmitting in a VF, the start BD must hold the
3883 		 * ethertype for the FW to enforce it
3884 		 */
3885 #ifndef BNX2X_STOP_ON_ERROR
3886 		if (IS_VF(bp))
3887 #endif
3888 			tx_start_bd->vlan_or_ethertype =
3889 				cpu_to_le16(ntohs(eth->h_proto));
3890 #ifndef BNX2X_STOP_ON_ERROR
3891 		else
3892 			/* used by FW for packet accounting */
3893 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3894 #endif
3895 	}
3896 
3897 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3898 
3899 	/* turn on parsing and get a BD */
3900 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3901 
3902 	if (xmit_type & XMIT_CSUM)
3903 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3904 
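	/* Chips newer than 57711 (57712/578xx) use the E2 parsing BD format;
	 * the older E1x chips use the E1x parsing BD in the else branch below.
	 */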
3905 	if (!CHIP_IS_E1x(bp)) {
3906 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3907 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3908 
3909 		if (xmit_type & XMIT_CSUM_ENC) {
3910 			u16 global_data = 0;
3911 
3912 			/* Set PBD in enc checksum offload case */
3913 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3914 						      &pbd_e2_parsing_data,
3915 						      xmit_type);
3916 
3917 			/* turn on 2nd parsing and get a BD */
3918 			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3919 
3920 			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3921 
3922 			memset(pbd2, 0, sizeof(*pbd2));
3923 
3924 			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3925 				(skb_inner_network_header(skb) -
3926 				 skb->data) >> 1;
3927 
3928 			if (xmit_type & XMIT_GSO_ENC)
3929 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3930 							  &global_data,
3931 							  xmit_type);
3932 
3933 			pbd2->global_data = cpu_to_le16(global_data);
3934 
3935 			/* add additional parsing BD indication to start BD */
3936 			SET_FLAG(tx_start_bd->general_data,
3937 				 ETH_TX_START_BD_PARSE_NBDS, 1);
3938 			/* set encapsulation flag in start BD */
3939 			SET_FLAG(tx_start_bd->general_data,
3940 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3941 
3942 			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3943 
3944 			nbd++;
3945 		} else if (xmit_type & XMIT_CSUM) {
3946 			/* Set PBD in checksum offload case w/o encapsulation */
3947 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3948 						     &pbd_e2_parsing_data,
3949 						     xmit_type);
3950 		}
3951 
3952 		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3953 		/* Add the macs to the parsing BD if this is a vf or if
3954 		 * Tx Switching is enabled.
3955 		 */
3956 		if (IS_VF(bp)) {
3957 			/* override GRE parameters in BD */
3958 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3959 					      &pbd_e2->data.mac_addr.src_mid,
3960 					      &pbd_e2->data.mac_addr.src_lo,
3961 					      eth->h_source);
3962 
3963 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3964 					      &pbd_e2->data.mac_addr.dst_mid,
3965 					      &pbd_e2->data.mac_addr.dst_lo,
3966 					      eth->h_dest);
3967 		} else {
3968 			if (bp->flags & TX_SWITCHING)
3969 				bnx2x_set_fw_mac_addr(
3970 						&pbd_e2->data.mac_addr.dst_hi,
3971 						&pbd_e2->data.mac_addr.dst_mid,
3972 						&pbd_e2->data.mac_addr.dst_lo,
3973 						eth->h_dest);
3974 #ifdef BNX2X_STOP_ON_ERROR
3975 			/* Enforce security is always set in Stop on Error -
3976 			 * source mac should be present in the parsing BD
3977 			 */
3978 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3979 					      &pbd_e2->data.mac_addr.src_mid,
3980 					      &pbd_e2->data.mac_addr.src_lo,
3981 					      eth->h_source);
3982 #endif
3983 		}
3984 
3985 		SET_FLAG(pbd_e2_parsing_data,
3986 			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3987 	} else {
3988 		u16 global_data = 0;
3989 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3990 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3991 		/* Set PBD in checksum offload case */
3992 		if (xmit_type & XMIT_CSUM)
3993 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3994 
3995 		SET_FLAG(global_data,
3996 			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3997 		pbd_e1x->global_data |= cpu_to_le16(global_data);
3998 	}
3999 
4000 	/* Setup the data pointer of the first BD of the packet */
4001 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4002 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4003 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4004 	pkt_size = tx_start_bd->nbytes;
4005 
4006 	DP(NETIF_MSG_TX_QUEUED,
4007 	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4008 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4009 	   le16_to_cpu(tx_start_bd->nbytes),
4010 	   tx_start_bd->bd_flags.as_bitfield,
4011 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4012 
4013 	if (xmit_type & XMIT_GSO) {
4014 
4015 		DP(NETIF_MSG_TX_QUEUED,
4016 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4017 		   skb->len, hlen, skb_headlen(skb),
4018 		   skb_shinfo(skb)->gso_size);
4019 
4020 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4021 
4022 		if (unlikely(skb_headlen(skb) > hlen)) {
4023 			nbd++;
4024 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4025 						 &tx_start_bd, hlen,
4026 						 bd_prod);
4027 		}
4028 		if (!CHIP_IS_E1x(bp))
4029 			pbd_e2_parsing_data |=
4030 				(skb_shinfo(skb)->gso_size <<
4031 				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4032 				 ETH_TX_PARSE_BD_E2_LSO_MSS;
4033 		else
4034 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4035 	}
4036 
4037 	/* Set the PBD's parsing_data field if not zero
4038 	 * (for the chips newer than 57711).
4039 	 */
4040 	if (pbd_e2_parsing_data)
4041 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4042 
4043 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4044 
4045 	/* Handle fragmented skb */
4046 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4047 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4048 
4049 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4050 					   skb_frag_size(frag), DMA_TO_DEVICE);
4051 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4052 			unsigned int pkts_compl = 0, bytes_compl = 0;
4053 
4054 			DP(NETIF_MSG_TX_QUEUED,
4055 			   "Unable to map page - dropping packet...\n");
4056 
4057 			/* we need to unmap all buffers already mapped
4058 			 * for this SKB;
4059 			 * first_bd->nbd needs to be properly updated
4060 			 * before the call to bnx2x_free_tx_pkt
4061 			 */
4062 			first_bd->nbd = cpu_to_le16(nbd);
4063 			bnx2x_free_tx_pkt(bp, txdata,
4064 					  TX_BD(txdata->tx_pkt_prod),
4065 					  &pkts_compl, &bytes_compl);
4066 			return NETDEV_TX_OK;
4067 		}
4068 
4069 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4070 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4071 		if (total_pkt_bd == NULL)
4072 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4073 
4074 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4075 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4076 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4077 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
4078 		nbd++;
4079 
4080 		DP(NETIF_MSG_TX_QUEUED,
4081 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4082 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4083 		   le16_to_cpu(tx_data_bd->nbytes));
4084 	}
4085 
4086 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4087 
4088 	/* update with actual num BDs */
4089 	first_bd->nbd = cpu_to_le16(nbd);
4090 
4091 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4092 
4093 	/* now send a tx doorbell, counting the next BD
4094 	 * if the packet contains or ends with it
4095 	 */
4096 	if (TX_BD_POFF(bd_prod) < nbd)
4097 		nbd++;
4098 
4099 	/* total_pkt_bytes should be set on the first data BD if
4100 	 * it's not an LSO packet and there is more than one
4101 	 * data BD. In this case pkt_size is limited by the MTU value.
4102 	 * However we prefer to set it for an LSO packet as well (while we
4103 	 * don't have to) in order to save some CPU cycles in the non-LSO
4104 	 * case, which we care about much more.
4105 	 */
4106 	if (total_pkt_bd != NULL)
4107 		total_pkt_bd->total_pkt_bytes = pkt_size;
4108 
4109 	if (pbd_e1x)
4110 		DP(NETIF_MSG_TX_QUEUED,
4111 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4112 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4113 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4114 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4115 		    le16_to_cpu(pbd_e1x->total_hlen_w));
4116 	if (pbd_e2)
4117 		DP(NETIF_MSG_TX_QUEUED,
4118 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4119 		   pbd_e2,
4120 		   pbd_e2->data.mac_addr.dst_hi,
4121 		   pbd_e2->data.mac_addr.dst_mid,
4122 		   pbd_e2->data.mac_addr.dst_lo,
4123 		   pbd_e2->data.mac_addr.src_hi,
4124 		   pbd_e2->data.mac_addr.src_mid,
4125 		   pbd_e2->data.mac_addr.src_lo,
4126 		   pbd_e2->parsing_data);
4127 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4128 
4129 	netdev_tx_sent_queue(txq, skb->len);
4130 
4131 	skb_tx_timestamp(skb);
4132 
4133 	txdata->tx_pkt_prod++;
4134 	/*
4135 	 * Make sure that the BD data is updated before updating the producer
4136 	 * since FW might read the BD right after the producer is updated.
4137 	 * This is only applicable for weak-ordered memory model archs such
4138 	 * as IA-64. The following barrier is also mandatory since the FW
4139 	 * assumes packets must have BDs.
4140 	 */
4141 	wmb();
4142 
4143 	txdata->tx_db.data.prod += nbd;
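	/* barrier() is a compiler barrier: keep the doorbell data update above
	 * from being reordered with the doorbell MMIO write below.
	 */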
4144 	barrier();
4145 
4146 	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4147 
4148 	mmiowb();
4149 
4150 	txdata->tx_bd_prod += nbd;
4151 
4152 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4153 		netif_tx_stop_queue(txq);
4154 
4155 		/* the paired memory barrier is in bnx2x_tx_int(); we have to
4156 		 * keep the ordering of the set_bit() in netif_tx_stop_queue()
4157 		 * and the read of fp->bd_tx_cons */
4158 		smp_mb();
4159 
4160 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4161 		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4162 			netif_tx_wake_queue(txq);
4163 	}
4164 	txdata->tx_pkt++;
4165 
4166 	return NETDEV_TX_OK;
4167 }
4168 
4169 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4170 {
4171 	int mfw_vn = BP_FW_MB_IDX(bp);
4172 	u32 tmp;
4173 
4174 	/* If the shmem shouldn't affect configuration, use an identity mapping */
4175 	if (!IS_MF_BD(bp)) {
4176 		int i;
4177 
4178 		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4179 			c2s_map[i] = i;
4180 		*c2s_default = 0;
4181 
4182 		return;
4183 	}
4184 
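	/* Each byte of the two shmem words holds the outer priority for one
	 * inner priority: the lower word covers priorities 0-3, the upper
	 * word priorities 4-7.
	 */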
4185 	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4186 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4187 	c2s_map[0] = tmp & 0xff;
4188 	c2s_map[1] = (tmp >> 8) & 0xff;
4189 	c2s_map[2] = (tmp >> 16) & 0xff;
4190 	c2s_map[3] = (tmp >> 24) & 0xff;
4191 
4192 	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4193 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4194 	c2s_map[4] = tmp & 0xff;
4195 	c2s_map[5] = (tmp >> 8) & 0xff;
4196 	c2s_map[6] = (tmp >> 16) & 0xff;
4197 	c2s_map[7] = (tmp >> 24) & 0xff;
4198 
4199 	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4200 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4201 	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4202 }
4203 
4204 /**
4205  * bnx2x_setup_tc - routine to configure net_device for multi tc
4206  *
4207  * @dev: net device to configure
4208  * @num_tc: number of traffic classes to enable
4209  *
4210  * callback connected to the ndo_setup_tc function pointer
4211  */
4212 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4213 {
4214 	struct bnx2x *bp = netdev_priv(dev);
4215 	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4216 	int cos, prio, count, offset;
4217 
4218 	/* setup tc must be called under rtnl lock */
4219 	ASSERT_RTNL();
4220 
4221 	/* no traffic classes requested - reset the tc configuration */
4222 	if (!num_tc) {
4223 		netdev_reset_tc(dev);
4224 		return 0;
4225 	}
4226 
4227 	/* requested to support too many traffic classes */
4228 	if (num_tc > bp->max_cos) {
4229 		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4230 			  num_tc, bp->max_cos);
4231 		return -EINVAL;
4232 	}
4233 
4234 	/* declare amount of supported traffic classes */
4235 	if (netdev_set_num_tc(dev, num_tc)) {
4236 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4237 		return -EINVAL;
4238 	}
4239 
4240 	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4241 
4242 	/* configure priority to traffic class mapping */
4243 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4244 		int outer_prio = c2s_map[prio];
4245 
4246 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4247 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4248 		   "mapping priority %d to tc %d\n",
4249 		   outer_prio, bp->prio_to_cos[outer_prio]);
4250 	}
4251 
4252 	/* Use this configuration to differentiate tc0 from other COSes.
4253 	   This can be used for ETS or PFC, and saves the effort of setting
4254 	   up a multi-class queueing discipline or negotiating DCBX with a switch
4255 	netdev_set_prio_tc_map(dev, 0, 0);
4256 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4257 	for (prio = 1; prio < 16; prio++) {
4258 		netdev_set_prio_tc_map(dev, prio, 1);
4259 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4260 	} */
4261 
4262 	/* configure traffic class to transmission queue mapping */
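	/* For example (assuming the non-CNIC queue count equals the ETH queue
	 * count), with 4 ETH queues and max_cos == 3: tc0 maps to queues 0-3,
	 * tc1 to queues 4-7 and tc2 to queues 8-11.
	 */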
4263 	for (cos = 0; cos < bp->max_cos; cos++) {
4264 		count = BNX2X_NUM_ETH_QUEUES(bp);
4265 		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4266 		netdev_set_tc_queue(dev, cos, count, offset);
4267 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4268 		   "mapping tc %d to offset %d count %d\n",
4269 		   cos, offset, count);
4270 	}
4271 
4272 	return 0;
4273 }
4274 
4275 int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
4276 		     struct tc_to_netdev *tc)
4277 {
4278 	if (tc->type != TC_SETUP_MQPRIO)
4279 		return -EINVAL;
4280 	return bnx2x_setup_tc(dev, tc->tc);
4281 }
4282 
4283 /* called with rtnl_lock */
4284 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4285 {
4286 	struct sockaddr *addr = p;
4287 	struct bnx2x *bp = netdev_priv(dev);
4288 	int rc = 0;
4289 
4290 	if (!is_valid_ether_addr(addr->sa_data)) {
4291 		BNX2X_ERR("Requested MAC address is not valid\n");
4292 		return -EINVAL;
4293 	}
4294 
4295 	if (IS_MF_STORAGE_ONLY(bp)) {
4296 		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4297 		return -EINVAL;
4298 	}
4299 
4300 	if (netif_running(dev))  {
4301 		rc = bnx2x_set_eth_mac(bp, false);
4302 		if (rc)
4303 			return rc;
4304 	}
4305 
4306 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4307 
4308 	if (netif_running(dev))
4309 		rc = bnx2x_set_eth_mac(bp, true);
4310 
4311 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4312 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4313 
4314 	return rc;
4315 }
4316 
4317 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4318 {
4319 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4320 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4321 	u8 cos;
4322 
4323 	/* Common */
4324 
4325 	if (IS_FCOE_IDX(fp_index)) {
4326 		memset(sb, 0, sizeof(union host_hc_status_block));
4327 		fp->status_blk_mapping = 0;
4328 	} else {
4329 		/* status blocks */
4330 		if (!CHIP_IS_E1x(bp))
4331 			BNX2X_PCI_FREE(sb->e2_sb,
4332 				       bnx2x_fp(bp, fp_index,
4333 						status_blk_mapping),
4334 				       sizeof(struct host_hc_status_block_e2));
4335 		else
4336 			BNX2X_PCI_FREE(sb->e1x_sb,
4337 				       bnx2x_fp(bp, fp_index,
4338 						status_blk_mapping),
4339 				       sizeof(struct host_hc_status_block_e1x));
4340 	}
4341 
4342 	/* Rx */
4343 	if (!skip_rx_queue(bp, fp_index)) {
4344 		bnx2x_free_rx_bds(fp);
4345 
4346 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4347 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4348 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4349 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
4350 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
4351 
4352 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4353 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
4354 			       sizeof(struct eth_fast_path_rx_cqe) *
4355 			       NUM_RCQ_BD);
4356 
4357 		/* SGE ring */
4358 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4359 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4360 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
4361 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4362 	}
4363 
4364 	/* Tx */
4365 	if (!skip_tx_queue(bp, fp_index)) {
4366 		/* fastpath tx rings: tx_buf tx_desc */
4367 		for_each_cos_in_tx_queue(fp, cos) {
4368 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4369 
4370 			DP(NETIF_MSG_IFDOWN,
4371 			   "freeing tx memory of fp %d cos %d cid %d\n",
4372 			   fp_index, cos, txdata->cid);
4373 
4374 			BNX2X_FREE(txdata->tx_buf_ring);
4375 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
4376 				txdata->tx_desc_mapping,
4377 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4378 		}
4379 	}
4380 	/* end of fastpath */
4381 }
4382 
4383 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4384 {
4385 	int i;
4386 	for_each_cnic_queue(bp, i)
4387 		bnx2x_free_fp_mem_at(bp, i);
4388 }
4389 
4390 void bnx2x_free_fp_mem(struct bnx2x *bp)
4391 {
4392 	int i;
4393 	for_each_eth_queue(bp, i)
4394 		bnx2x_free_fp_mem_at(bp, i);
4395 }
4396 
4397 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4398 {
4399 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4400 	if (!CHIP_IS_E1x(bp)) {
4401 		bnx2x_fp(bp, index, sb_index_values) =
4402 			(__le16 *)status_blk.e2_sb->sb.index_values;
4403 		bnx2x_fp(bp, index, sb_running_index) =
4404 			(__le16 *)status_blk.e2_sb->sb.running_index;
4405 	} else {
4406 		bnx2x_fp(bp, index, sb_index_values) =
4407 			(__le16 *)status_blk.e1x_sb->sb.index_values;
4408 		bnx2x_fp(bp, index, sb_running_index) =
4409 			(__le16 *)status_blk.e1x_sb->sb.running_index;
4410 	}
4411 }
4412 
4413 /* Returns the number of actually allocated BDs */
4414 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4415 			      int rx_ring_size)
4416 {
4417 	struct bnx2x *bp = fp->bp;
4418 	u16 ring_prod, cqe_ring_prod;
4419 	int i, failure_cnt = 0;
4420 
4421 	fp->rx_comp_cons = 0;
4422 	cqe_ring_prod = ring_prod = 0;
4423 
4424 	/* This routine is called only during full init, so
4425 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4426 	 */
4427 	for (i = 0; i < rx_ring_size; i++) {
4428 		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4429 			failure_cnt++;
4430 			continue;
4431 		}
4432 		ring_prod = NEXT_RX_IDX(ring_prod);
4433 		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4434 		WARN_ON(ring_prod <= (i - failure_cnt));
4435 	}
4436 
4437 	if (failure_cnt)
4438 		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4439 			  i - failure_cnt, fp->index);
4440 
4441 	fp->rx_bd_prod = ring_prod;
4442 	/* Limit the CQE producer by the CQE ring size */
4443 	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4444 			       cqe_ring_prod);
4445 
4446 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4447 
4448 	return i - failure_cnt;
4449 }
4450 
4451 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4452 {
4453 	int i;
4454 
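	/* The last CQE of each RCQ page is reused as a next-page pointer:
	 * page i - 1 points to page i, and the last page wraps back to page 0
	 * via the modulo below.
	 */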
4455 	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4456 		struct eth_rx_cqe_next_page *nextpg;
4457 
4458 		nextpg = (struct eth_rx_cqe_next_page *)
4459 			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4460 		nextpg->addr_hi =
4461 			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4462 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4463 		nextpg->addr_lo =
4464 			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4465 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4466 	}
4467 }
4468 
4469 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4470 {
4471 	union host_hc_status_block *sb;
4472 	struct bnx2x_fastpath *fp = &bp->fp[index];
4473 	int ring_size = 0;
4474 	u8 cos;
4475 	int rx_ring_size = 0;
4476 
4477 	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4478 		rx_ring_size = MIN_RX_SIZE_NONTPA;
4479 		bp->rx_ring_size = rx_ring_size;
4480 	} else if (!bp->rx_ring_size) {
4481 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4482 
4483 		if (CHIP_IS_E3(bp)) {
4484 			u32 cfg = SHMEM_RD(bp,
4485 					   dev_info.port_hw_config[BP_PORT(bp)].
4486 					   default_cfg);
4487 
4488 			/* Decrease ring size for 1G functions */
4489 			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4490 			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
4491 				rx_ring_size /= 10;
4492 		}
4493 
4494 		/* allocate at least number of buffers required by FW */
4495 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4496 				     MIN_RX_SIZE_TPA, rx_ring_size);
4497 
4498 		bp->rx_ring_size = rx_ring_size;
4499 	} else /* if rx_ring_size specified - use it */
4500 		rx_ring_size = bp->rx_ring_size;
4501 
4502 	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4503 
4504 	/* Common */
4505 	sb = &bnx2x_fp(bp, index, status_blk);
4506 
4507 	if (!IS_FCOE_IDX(index)) {
4508 		/* status blocks */
4509 		if (!CHIP_IS_E1x(bp)) {
4510 			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4511 						    sizeof(struct host_hc_status_block_e2));
4512 			if (!sb->e2_sb)
4513 				goto alloc_mem_err;
4514 		} else {
4515 			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4516 						     sizeof(struct host_hc_status_block_e1x));
4517 			if (!sb->e1x_sb)
4518 				goto alloc_mem_err;
4519 		}
4520 	}
4521 
4522 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4523 	 * set shortcuts for it.
4524 	 */
4525 	if (!IS_FCOE_IDX(index))
4526 		set_sb_shortcuts(bp, index);
4527 
4528 	/* Tx */
4529 	if (!skip_tx_queue(bp, index)) {
4530 		/* fastpath tx rings: tx_buf tx_desc */
4531 		for_each_cos_in_tx_queue(fp, cos) {
4532 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4533 
4534 			DP(NETIF_MSG_IFUP,
4535 			   "allocating tx memory of fp %d cos %d\n",
4536 			   index, cos);
4537 
4538 			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4539 						      sizeof(struct sw_tx_bd),
4540 						      GFP_KERNEL);
4541 			if (!txdata->tx_buf_ring)
4542 				goto alloc_mem_err;
4543 			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4544 							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4545 			if (!txdata->tx_desc_ring)
4546 				goto alloc_mem_err;
4547 		}
4548 	}
4549 
4550 	/* Rx */
4551 	if (!skip_rx_queue(bp, index)) {
4552 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4553 		bnx2x_fp(bp, index, rx_buf_ring) =
4554 			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4555 		if (!bnx2x_fp(bp, index, rx_buf_ring))
4556 			goto alloc_mem_err;
4557 		bnx2x_fp(bp, index, rx_desc_ring) =
4558 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4559 					sizeof(struct eth_rx_bd) * NUM_RX_BD);
4560 		if (!bnx2x_fp(bp, index, rx_desc_ring))
4561 			goto alloc_mem_err;
4562 
4563 		/* Seed all CQEs by 1s */
4564 		bnx2x_fp(bp, index, rx_comp_ring) =
4565 			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4566 					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4567 		if (!bnx2x_fp(bp, index, rx_comp_ring))
4568 			goto alloc_mem_err;
4569 
4570 		/* SGE ring */
4571 		bnx2x_fp(bp, index, rx_page_ring) =
4572 			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4573 				GFP_KERNEL);
4574 		if (!bnx2x_fp(bp, index, rx_page_ring))
4575 			goto alloc_mem_err;
4576 		bnx2x_fp(bp, index, rx_sge_ring) =
4577 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4578 					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4579 		if (!bnx2x_fp(bp, index, rx_sge_ring))
4580 			goto alloc_mem_err;
4581 		/* RX BD ring */
4582 		bnx2x_set_next_page_rx_bd(fp);
4583 
4584 		/* CQ ring */
4585 		bnx2x_set_next_page_rx_cq(fp);
4586 
4587 		/* BDs */
4588 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4589 		if (ring_size < rx_ring_size)
4590 			goto alloc_mem_err;
4591 	}
4592 
4593 	return 0;
4594 
4595 /* handles low memory cases */
4596 alloc_mem_err:
4597 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4598 						index, ring_size);
4599 	/* FW will drop all packets if the queue is not big enough;
4600 	 * in these cases we disable the queue.
4601 	 * Min size is different for OOO, TPA and non-TPA queues.
4602 	 */
4603 	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4604 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4605 		/* release memory allocated for this queue */
4606 		bnx2x_free_fp_mem_at(bp, index);
4607 		return -ENOMEM;
4608 	}
4609 	return 0;
4610 }
4611 
4612 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4613 {
4614 	if (!NO_FCOE(bp))
4615 		/* FCoE */
4616 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4617 			/* we will fail the load process instead of marking
4618 			 * NO_FCOE_FLAG
4619 			 */
4620 			return -ENOMEM;
4621 
4622 	return 0;
4623 }
4624 
4625 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4626 {
4627 	int i;
4628 
4629 	/* 1. Allocate FP for leading - fatal if error
4630 	 * 2. Allocate RSS - fix number of queues if error
4631 	 */
4632 
4633 	/* leading */
4634 	if (bnx2x_alloc_fp_mem_at(bp, 0))
4635 		return -ENOMEM;
4636 
4637 	/* RSS */
4638 	for_each_nondefault_eth_queue(bp, i)
4639 		if (bnx2x_alloc_fp_mem_at(bp, i))
4640 			break;
4641 
4642 	/* handle memory failures */
4643 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4644 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4645 
4646 		WARN_ON(delta < 0);
4647 		bnx2x_shrink_eth_fp(bp, delta);
4648 		if (CNIC_SUPPORT(bp))
4649 			/* move non-eth FPs next to the last eth FP;
4650 			 * must be done in that order:
4651 			 * FCOE_IDX < FWD_IDX < OOO_IDX
4652 			 */
4653 
4654 			/* move the FCoE fp even if NO_FCOE_FLAG is on */
4655 			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4656 		bp->num_ethernet_queues -= delta;
4657 		bp->num_queues = bp->num_ethernet_queues +
4658 				 bp->num_cnic_queues;
4659 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4660 			  bp->num_queues + delta, bp->num_queues);
4661 	}
4662 
4663 	return 0;
4664 }
4665 
4666 void bnx2x_free_mem_bp(struct bnx2x *bp)
4667 {
4668 	int i;
4669 
4670 	for (i = 0; i < bp->fp_array_size; i++)
4671 		kfree(bp->fp[i].tpa_info);
4672 	kfree(bp->fp);
4673 	kfree(bp->sp_objs);
4674 	kfree(bp->fp_stats);
4675 	kfree(bp->bnx2x_txq);
4676 	kfree(bp->msix_table);
4677 	kfree(bp->ilt);
4678 }
4679 
4680 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4681 {
4682 	struct bnx2x_fastpath *fp;
4683 	struct msix_entry *tbl;
4684 	struct bnx2x_ilt *ilt;
4685 	int msix_table_size = 0;
4686 	int fp_array_size, txq_array_size;
4687 	int i;
4688 
4689 	/*
4690 	 * The biggest MSI-X table we might need is the maximum number of fast
4691 	 * path IGU SBs plus the default SB (for PF only).
4692 	 */
4693 	msix_table_size = bp->igu_sb_cnt;
4694 	if (IS_PF(bp))
4695 		msix_table_size++;
4696 	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4697 
4698 	/* fp array: RSS plus CNIC related L2 queues */
4699 	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4700 	bp->fp_array_size = fp_array_size;
4701 	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4702 
4703 	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4704 	if (!fp)
4705 		goto alloc_err;
4706 	for (i = 0; i < bp->fp_array_size; i++) {
4707 		fp[i].tpa_info =
4708 			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4709 				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4710 		if (!(fp[i].tpa_info))
4711 			goto alloc_err;
4712 	}
4713 
4714 	bp->fp = fp;
4715 
4716 	/* allocate sp objs */
4717 	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4718 			      GFP_KERNEL);
4719 	if (!bp->sp_objs)
4720 		goto alloc_err;
4721 
4722 	/* allocate fp_stats */
4723 	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4724 			       GFP_KERNEL);
4725 	if (!bp->fp_stats)
4726 		goto alloc_err;
4727 
4728 	/* Allocate memory for the transmission queues array */
4729 	txq_array_size =
4730 		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4731 	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4732 
4733 	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4734 				GFP_KERNEL);
4735 	if (!bp->bnx2x_txq)
4736 		goto alloc_err;
4737 
4738 	/* msix table */
4739 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4740 	if (!tbl)
4741 		goto alloc_err;
4742 	bp->msix_table = tbl;
4743 
4744 	/* ilt */
4745 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4746 	if (!ilt)
4747 		goto alloc_err;
4748 	bp->ilt = ilt;
4749 
4750 	return 0;
4751 alloc_err:
4752 	bnx2x_free_mem_bp(bp);
4753 	return -ENOMEM;
4754 }
4755 
4756 int bnx2x_reload_if_running(struct net_device *dev)
4757 {
4758 	struct bnx2x *bp = netdev_priv(dev);
4759 
4760 	if (unlikely(!netif_running(dev)))
4761 		return 0;
4762 
4763 	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4764 	return bnx2x_nic_load(bp, LOAD_NORMAL);
4765 }
4766 
4767 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4768 {
4769 	u32 sel_phy_idx = 0;
4770 	if (bp->link_params.num_phys <= 1)
4771 		return INT_PHY;
4772 
4773 	if (bp->link_vars.link_up) {
4774 		sel_phy_idx = EXT_PHY1;
4775 		/* In case link is SERDES, check if the EXT_PHY2 is the one */
4776 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4777 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4778 			sel_phy_idx = EXT_PHY2;
4779 	} else {
4780 
4781 		switch (bnx2x_phy_selection(&bp->link_params)) {
4782 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4783 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4784 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4785 		       sel_phy_idx = EXT_PHY1;
4786 		       break;
4787 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4788 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4789 		       sel_phy_idx = EXT_PHY2;
4790 		       break;
4791 		}
4792 	}
4793 
4794 	return sel_phy_idx;
4795 }
4796 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4797 {
4798 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4799 	/*
4800 	 * The selected activated PHY index is always the one after swapping
4801 	 * (in case PHY swapping is enabled). So when swapping is enabled, we
4802 	 * need to reverse the configuration.
4803 	 */
4804 
4805 	if (bp->link_params.multi_phy_config &
4806 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4807 		if (sel_phy_idx == EXT_PHY1)
4808 			sel_phy_idx = EXT_PHY2;
4809 		else if (sel_phy_idx == EXT_PHY2)
4810 			sel_phy_idx = EXT_PHY1;
4811 	}
4812 	return LINK_CONFIG_IDX(sel_phy_idx);
4813 }
4814 
4815 #ifdef NETDEV_FCOE_WWNN
4816 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4817 {
4818 	struct bnx2x *bp = netdev_priv(dev);
4819 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4820 
4821 	switch (type) {
4822 	case NETDEV_FCOE_WWNN:
4823 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4824 				cp->fcoe_wwn_node_name_lo);
4825 		break;
4826 	case NETDEV_FCOE_WWPN:
4827 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4828 				cp->fcoe_wwn_port_name_lo);
4829 		break;
4830 	default:
4831 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4832 		return -EINVAL;
4833 	}
4834 
4835 	return 0;
4836 }
4837 #endif
4838 
4839 /* called with rtnl_lock */
4840 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4841 {
4842 	struct bnx2x *bp = netdev_priv(dev);
4843 
4844 	if (pci_num_vf(bp->pdev)) {
4845 		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4846 		return -EPERM;
4847 	}
4848 
4849 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4850 		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4851 		return -EAGAIN;
4852 	}
4853 
4854 	/* This does not race with packet allocation
4855 	 * because the actual alloc size is
4856 	 * only updated as part of load
4857 	 */
4858 	dev->mtu = new_mtu;
4859 
4860 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4861 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4862 
4863 	return bnx2x_reload_if_running(dev);
4864 }
4865 
4866 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4867 				     netdev_features_t features)
4868 {
4869 	struct bnx2x *bp = netdev_priv(dev);
4870 
4871 	if (pci_num_vf(bp->pdev)) {
4872 		netdev_features_t changed = dev->features ^ features;
4873 
4874 		/* Revert the requested changes in features if they
4875 		 * would require internal reload of PF in bnx2x_set_features().
4876 		 */
4877 		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4878 			features &= ~NETIF_F_RXCSUM;
4879 			features |= dev->features & NETIF_F_RXCSUM;
4880 		}
4881 
4882 		if (changed & NETIF_F_LOOPBACK) {
4883 			features &= ~NETIF_F_LOOPBACK;
4884 			features |= dev->features & NETIF_F_LOOPBACK;
4885 		}
4886 	}
4887 
4888 	/* TPA requires Rx CSUM offloading */
4889 	if (!(features & NETIF_F_RXCSUM)) {
4890 		features &= ~NETIF_F_LRO;
4891 		features &= ~NETIF_F_GRO;
4892 	}
4893 
4894 	return features;
4895 }
4896 
4897 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4898 {
4899 	struct bnx2x *bp = netdev_priv(dev);
4900 	netdev_features_t changes = features ^ dev->features;
4901 	bool bnx2x_reload = false;
4902 	int rc;
4903 
4904 	/* VFs or non SRIOV PFs should be able to change loopback feature */
4905 	if (!pci_num_vf(bp->pdev)) {
4906 		if (features & NETIF_F_LOOPBACK) {
4907 			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4908 				bp->link_params.loopback_mode = LOOPBACK_BMAC;
4909 				bnx2x_reload = true;
4910 			}
4911 		} else {
4912 			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4913 				bp->link_params.loopback_mode = LOOPBACK_NONE;
4914 				bnx2x_reload = true;
4915 			}
4916 		}
4917 	}
4918 
4919 	/* if GRO is changed while LRO is enabled, don't force a reload */
4920 	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4921 		changes &= ~NETIF_F_GRO;
4922 
4923 	/* if GRO is changed while HW TPA is off, don't force a reload */
4924 	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4925 		changes &= ~NETIF_F_GRO;
4926 
4927 	if (changes)
4928 		bnx2x_reload = true;
4929 
4930 	if (bnx2x_reload) {
4931 		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4932 			dev->features = features;
4933 			rc = bnx2x_reload_if_running(dev);
4934 			return rc ? rc : 1;
4935 		}
4936 		/* else: bnx2x_nic_load() will be called at end of recovery */
4937 	}
4938 
4939 	return 0;
4940 }
4941 
4942 void bnx2x_tx_timeout(struct net_device *dev)
4943 {
4944 	struct bnx2x *bp = netdev_priv(dev);
4945 
4946 #ifdef BNX2X_STOP_ON_ERROR
4947 	if (!bp->panic)
4948 		bnx2x_panic();
4949 #endif
4950 
4951 	/* This allows the netif to be shutdown gracefully before resetting */
4952 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4953 }
4954 
4955 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4956 {
4957 	struct net_device *dev = pci_get_drvdata(pdev);
4958 	struct bnx2x *bp;
4959 
4960 	if (!dev) {
4961 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4962 		return -ENODEV;
4963 	}
4964 	bp = netdev_priv(dev);
4965 
4966 	rtnl_lock();
4967 
4968 	pci_save_state(pdev);
4969 
4970 	if (!netif_running(dev)) {
4971 		rtnl_unlock();
4972 		return 0;
4973 	}
4974 
4975 	netif_device_detach(dev);
4976 
4977 	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4978 
4979 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4980 
4981 	rtnl_unlock();
4982 
4983 	return 0;
4984 }
4985 
4986 int bnx2x_resume(struct pci_dev *pdev)
4987 {
4988 	struct net_device *dev = pci_get_drvdata(pdev);
4989 	struct bnx2x *bp;
4990 	int rc;
4991 
4992 	if (!dev) {
4993 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4994 		return -ENODEV;
4995 	}
4996 	bp = netdev_priv(dev);
4997 
4998 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4999 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
5000 		return -EAGAIN;
5001 	}
5002 
5003 	rtnl_lock();
5004 
5005 	pci_restore_state(pdev);
5006 
5007 	if (!netif_running(dev)) {
5008 		rtnl_unlock();
5009 		return 0;
5010 	}
5011 
5012 	bnx2x_set_power_state(bp, PCI_D0);
5013 	netif_device_attach(dev);
5014 
5015 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
5016 
5017 	rtnl_unlock();
5018 
5019 	return rc;
5020 }
5021 
5022 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5023 			      u32 cid)
5024 {
5025 	if (!cxt) {
5026 		BNX2X_ERR("bad context pointer %p\n", cxt);
5027 		return;
5028 	}
5029 
5030 	/* ustorm cxt validation */
5031 	cxt->ustorm_ag_context.cdu_usage =
5032 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5033 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5034 	/* xcontext validation */
5035 	cxt->xstorm_ag_context.cdu_reserved =
5036 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5037 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5038 }
5039 
5040 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5041 				    u8 fw_sb_id, u8 sb_index,
5042 				    u8 ticks)
5043 {
5044 	u32 addr = BAR_CSTRORM_INTMEM +
5045 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5046 	REG_WR8(bp, addr, ticks);
5047 	DP(NETIF_MSG_IFUP,
5048 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
5049 	   port, fw_sb_id, sb_index, ticks);
5050 }
5051 
5052 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5053 				    u16 fw_sb_id, u8 sb_index,
5054 				    u8 disable)
5055 {
5056 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5057 	u32 addr = BAR_CSTRORM_INTMEM +
5058 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5059 	u8 flags = REG_RD8(bp, addr);
5060 	/* clear and set */
5061 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
5062 	flags |= enable_flag;
5063 	REG_WR8(bp, addr, flags);
5064 	DP(NETIF_MSG_IFUP,
5065 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
5066 	   port, fw_sb_id, sb_index, disable);
5067 }
5068 
5069 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5070 				    u8 sb_index, u8 disable, u16 usec)
5071 {
5072 	int port = BP_PORT(bp);
5073 	u8 ticks = usec / BNX2X_BTR;
5074 
5075 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5076 
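	/* a zero coalescing timeout also disables this status block index */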
5077 	disable = disable ? 1 : (usec ? 0 : 1);
5078 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5079 }
5080 
5081 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5082 			    u32 verbose)
5083 {
5084 	smp_mb__before_atomic();
5085 	set_bit(flag, &bp->sp_rtnl_state);
5086 	smp_mb__after_atomic();
5087 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5088 	   flag);
5089 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
5090 }
5091